Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile5
-rw-r--r--drivers/acpi/ac.c2
-rw-r--r--drivers/acpi/battery.c2
-rw-r--r--drivers/acpi/button.c2
-rw-r--r--drivers/acpi/proc.c4
-rw-r--r--drivers/acpi/sbs.c29
-rw-r--r--drivers/ata/pata_arasan_cf.c37
-rw-r--r--drivers/ata/sata_svw.c29
-rw-r--r--drivers/block/DAC960.c8
-rw-r--r--drivers/block/amiflop.c3
-rw-r--r--drivers/block/aoe/aoeblk.c6
-rw-r--r--drivers/block/aoe/aoecmd.c6
-rw-r--r--drivers/block/ataflop.c5
-rw-r--r--drivers/block/brd.c3
-rw-r--r--drivers/block/cciss.c29
-rw-r--r--drivers/block/cciss_scsi.c96
-rw-r--r--drivers/block/cpqarray.c8
-rw-r--r--drivers/block/drbd/drbd_actlog.c246
-rw-r--r--drivers/block/drbd/drbd_bitmap.c13
-rw-r--r--drivers/block/drbd/drbd_int.h179
-rw-r--r--drivers/block/drbd/drbd_main.c256
-rw-r--r--drivers/block/drbd/drbd_nl.c200
-rw-r--r--drivers/block/drbd/drbd_proc.c10
-rw-r--r--drivers/block/drbd/drbd_receiver.c16
-rw-r--r--drivers/block/drbd/drbd_req.c192
-rw-r--r--drivers/block/drbd/drbd_req.h8
-rw-r--r--drivers/block/drbd/drbd_state.c28
-rw-r--r--drivers/block/drbd/drbd_strings.c1
-rw-r--r--drivers/block/drbd/drbd_worker.c24
-rw-r--r--drivers/block/floppy.c5
-rw-r--r--drivers/block/loop.c8
-rw-r--r--drivers/block/mg_disk.c2
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c79
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h11
-rw-r--r--drivers/block/paride/pcd.c3
-rw-r--r--drivers/block/paride/pd.c4
-rw-r--r--drivers/block/paride/pf.c9
-rw-r--r--drivers/block/pktcdvd.c108
-rw-r--r--drivers/block/ps3vram.c2
-rw-r--r--drivers/block/rbd.c2864
-rw-r--r--drivers/block/swim.c4
-rw-r--r--drivers/block/swim3.c5
-rw-r--r--drivers/block/virtio_blk.c148
-rw-r--r--drivers/block/xen-blkfront.c3
-rw-r--r--drivers/block/xsysace.c3
-rw-r--r--drivers/block/z2ram.c6
-rw-r--r--drivers/bus/Kconfig7
-rw-r--r--drivers/bus/Makefile1
-rw-r--r--drivers/bus/mvebu-mbus.c870
-rw-r--r--drivers/cdrom/gdrom.c3
-rw-r--r--drivers/char/ds1620.c34
-rw-r--r--drivers/char/efirtc.c83
-rw-r--r--drivers/char/genrtc.c48
-rw-r--r--drivers/char/hw_random/Kconfig12
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/bcm2835-rng.c113
-rw-r--r--drivers/char/hw_random/exynos-rng.c3
-rw-r--r--drivers/char/hw_random/mxc-rnga.c21
-rw-r--r--drivers/char/hw_random/timeriomem-rng.c190
-rw-r--r--drivers/char/hw_random/virtio-rng.c2
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c8
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c6
-rw-r--r--drivers/char/mem.c36
-rw-r--r--drivers/char/virtio_console.c14
-rw-r--r--drivers/clk/Makefile1
-rw-r--r--drivers/clk/mmp/clk-mmp2.c2
-rw-r--r--drivers/clk/mmp/clk-pxa168.c2
-rw-r--r--drivers/clk/mmp/clk-pxa910.c2
-rw-r--r--drivers/clk/mxs/clk-imx23.c42
-rw-r--r--drivers/clk/mxs/clk-imx28.c42
-rw-r--r--drivers/clk/samsung/Makefile8
-rw-r--r--drivers/clk/samsung/clk-exynos4.c1082
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c522
-rw-r--r--drivers/clk/samsung/clk-exynos5440.c138
-rw-r--r--drivers/clk/samsung/clk-pll.c419
-rw-r--r--drivers/clk/samsung/clk-pll.h41
-rw-r--r--drivers/clk/samsung/clk.c320
-rw-r--r--drivers/clk/samsung/clk.h287
-rw-r--r--drivers/clk/socfpga/clk.c163
-rw-r--r--drivers/clk/spear/spear1310_clock.c64
-rw-r--r--drivers/clk/spear/spear1340_clock.c63
-rw-r--r--drivers/clk/spear/spear3xx_clock.c60
-rw-r--r--drivers/clk/spear/spear6xx_clock.c31
-rw-r--r--drivers/clk/tegra/Makefile1
-rw-r--r--drivers/clk/tegra/clk-periph-gate.c11
-rw-r--r--drivers/clk/tegra/clk-periph.c14
-rw-r--r--drivers/clk/tegra/clk-pll.c1144
-rw-r--r--drivers/clk/tegra/clk-tegra114.c2085
-rw-r--r--drivers/clk/tegra/clk-tegra20.c220
-rw-r--r--drivers/clk/tegra/clk-tegra30.c279
-rw-r--r--drivers/clk/tegra/clk.c14
-rw-r--r--drivers/clk/tegra/clk.h98
-rw-r--r--drivers/clk/ux500/clk-prcc.c1
-rw-r--r--drivers/clk/ux500/u8500_clk.c145
-rw-r--r--drivers/clocksource/Kconfig20
-rw-r--r--drivers/clocksource/Makefile8
-rw-r--r--drivers/clocksource/arm_arch_timer.c33
-rw-r--r--drivers/clocksource/bcm2835_timer.c12
-rw-r--r--drivers/clocksource/cadence_ttc_timer.c436
-rw-r--r--drivers/clocksource/clksrc-dbx500-prcmu.c3
-rw-r--r--drivers/clocksource/clksrc-of.c5
-rw-r--r--drivers/clocksource/em_sti.c13
-rw-r--r--drivers/clocksource/exynos_mct.c558
-rw-r--r--drivers/clocksource/mxs_timer.c304
-rw-r--r--drivers/clocksource/nomadik-mtu.c4
-rw-r--r--drivers/clocksource/samsung_pwm_timer.c494
-rw-r--r--drivers/clocksource/sh_cmt.c189
-rw-r--r--drivers/clocksource/sh_mtu2.c2
-rw-r--r--drivers/clocksource/sh_tmu.c2
-rw-r--r--drivers/clocksource/sun4i_timer.c (renamed from drivers/clocksource/sunxi_timer.c)94
-rw-r--r--drivers/clocksource/tegra20_timer.c75
-rw-r--r--drivers/clocksource/timer-marco.c299
-rw-r--r--drivers/clocksource/timer-prima2.c215
-rw-r--r--drivers/clocksource/vt8500_timer.c16
-rw-r--r--drivers/crypto/Kconfig18
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/atmel-aes.c471
-rw-r--r--drivers/crypto/atmel-sha-regs.h7
-rw-r--r--drivers/crypto/atmel-sha.c586
-rw-r--r--drivers/crypto/atmel-tdes-regs.h2
-rw-r--r--drivers/crypto/atmel-tdes.c394
-rw-r--r--drivers/crypto/bfin_crc.c6
-rw-r--r--drivers/crypto/caam/Kconfig2
-rw-r--r--drivers/crypto/caam/caamalg.c6
-rw-r--r--drivers/crypto/caam/caamhash.c4
-rw-r--r--drivers/crypto/caam/ctrl.c3
-rw-r--r--drivers/crypto/caam/error.c10
-rw-r--r--drivers/crypto/caam/intern.h1
-rw-r--r--drivers/crypto/caam/jr.c4
-rw-r--r--drivers/crypto/caam/key_gen.c2
-rw-r--r--drivers/crypto/caam/key_gen.h2
-rw-r--r--drivers/crypto/caam/regs.h4
-rw-r--r--drivers/crypto/omap-aes.c15
-rw-r--r--drivers/crypto/omap-sham.c15
-rw-r--r--drivers/crypto/picoxcell_crypto.c4
-rw-r--r--drivers/crypto/sahara.c1070
-rw-r--r--drivers/crypto/ux500/cryp/cryp.c2
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c1
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c7
-rw-r--r--drivers/dma/mxs-dma.c109
-rw-r--r--drivers/firmware/Kconfig37
-rw-r--r--drivers/firmware/Makefile2
-rw-r--r--drivers/firmware/efi/Kconfig39
-rw-r--r--drivers/firmware/efi/Makefile6
-rw-r--r--drivers/firmware/efi/efi-pstore.c252
-rw-r--r--drivers/firmware/efi/efi.c134
-rw-r--r--drivers/firmware/efi/efivars.c617
-rw-r--r--drivers/firmware/efi/vars.c1041
-rw-r--r--drivers/firmware/efivars.c2117
-rw-r--r--drivers/firmware/google/gsmi.c31
-rw-r--r--drivers/gpio/Kconfig23
-rw-r--r--drivers/gpio/Makefile3
-rw-r--r--drivers/gpio/gpio-74x164.c8
-rw-r--r--drivers/gpio/gpio-adp5520.c4
-rw-r--r--drivers/gpio/gpio-em.c94
-rw-r--r--drivers/gpio/gpio-generic.c62
-rw-r--r--drivers/gpio/gpio-grgpio.c505
-rw-r--r--drivers/gpio/gpio-ich.c17
-rw-r--r--drivers/gpio/gpio-lpc32xx.c2
-rw-r--r--drivers/gpio/gpio-lynxpoint.c1
-rw-r--r--drivers/gpio/gpio-max7300.c4
-rw-r--r--drivers/gpio/gpio-max7301.c7
-rw-r--r--drivers/gpio/gpio-max732x.c5
-rw-r--r--drivers/gpio/gpio-mc33880.c19
-rw-r--r--drivers/gpio/gpio-mcp23s08.c137
-rw-r--r--drivers/gpio/gpio-msm-v1.c220
-rw-r--r--drivers/gpio/gpio-msm-v2.c3
-rw-r--r--drivers/gpio/gpio-mvebu.c85
-rw-r--r--drivers/gpio/gpio-mxc.c2
-rw-r--r--drivers/gpio/gpio-omap.c119
-rw-r--r--drivers/gpio/gpio-pca953x.c3
-rw-r--r--drivers/gpio/gpio-pcf857x.c8
-rw-r--r--drivers/gpio/gpio-pl061.c2
-rw-r--r--drivers/gpio/gpio-pxa.c158
-rw-r--r--drivers/gpio/gpio-rcar.c396
-rw-r--r--drivers/gpio/gpio-samsung.c10
-rw-r--r--drivers/gpio/gpio-sch.c111
-rw-r--r--drivers/gpio/gpio-stp-xway.c2
-rw-r--r--drivers/gpio/gpio-tc3589x.c8
-rw-r--r--drivers/gpio/gpio-tegra.c35
-rw-r--r--drivers/gpio/gpio-timberdale.c3
-rw-r--r--drivers/gpio/gpio-tps65910.c2
-rw-r--r--drivers/gpio/gpio-ucb1400.c19
-rw-r--r--drivers/gpio/gpio-viperboard.c4
-rw-r--r--drivers/gpio/gpio-vt8500.c355
-rw-r--r--drivers/gpio/gpiolib-acpi.c217
-rw-r--r--drivers/gpio/gpiolib-of.c2
-rw-r--r--drivers/gpu/Makefile1
-rw-r--r--drivers/gpu/drm/Kconfig4
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h2
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c43
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c38
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c2
-rw-r--r--drivers/gpu/drm/drm_cache.c7
-rw-r--r--drivers/gpu/drm/drm_crtc.c411
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c3
-rw-r--r--drivers/gpu/drm/drm_drv.c9
-rw-r--r--drivers/gpu/drm/drm_edid.c279
-rw-r--r--drivers/gpu/drm/drm_edid_load.c21
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c23
-rw-r--r--drivers/gpu/drm/drm_gem.c4
-rw-r--r--drivers/gpu/drm/drm_modes.c22
-rw-r--r--drivers/gpu/drm/drm_pci.c10
-rw-r--r--drivers/gpu/drm/drm_prime.c99
-rw-r--r--drivers/gpu/drm/drm_proc.c43
-rw-r--r--drivers/gpu/drm/drm_stub.c2
-rw-r--r--drivers/gpu/drm/drm_vm.c1
-rw-r--r--drivers/gpu/drm/exynos/Kconfig6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c273
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c39
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.c54
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.h3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c27
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c712
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c22
-rw-r--r--drivers/gpu/drm/exynos/regs-fimc.h7
-rw-r--r--drivers/gpu/drm/gma500/Kconfig13
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c1
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c1
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c6
-rw-r--r--drivers/gpu/drm/gma500/gtt.c52
-rw-r--r--drivers/gpu/drm/gma500/gtt.h2
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.c3
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.h6
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c7
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c3
-rw-r--r--drivers/gpu/drm/gma500/power.c17
-rw-r--r--drivers/gpu/drm/gma500/power.h3
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c3
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h1
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c154
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.h3
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h8
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_reg.h1
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c33
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.h6
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c433
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c90
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c206
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h94
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c135
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c7
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c18
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c36
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c262
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c65
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c33
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c659
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h293
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c15
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c27
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c9
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h4
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c22
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c70
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1535
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c526
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h155
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c13
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c8
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c166
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c4
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c49
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c49
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c245
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c131
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c243
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c16
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h8
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c55
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c31
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c7
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c4
-rw-r--r--drivers/gpu/drm/nouveau/Makefile26
-rw-r--r--drivers/gpu/drm/nouveau/core/core/client.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/core/engine.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/core/event.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/core/object.c19
-rw-r--r--drivers/gpu/drm/nouveau/core/core/parent.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/base.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/base.c)186
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv04.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/nv04.c)2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv10.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/nv10.c)2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv20.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/nv20.c)2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv30.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/nv30.c)2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv40.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/nv40.c)2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv50.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/nv50.c)20
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nvc0.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c)30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nve0.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/nve0.c)36
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dport.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c89
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/base.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c18
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c22
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c51
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c13
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv20.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv25.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv30.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv34.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv35.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv40.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c31
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nve0.c230
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nvc0.c29
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/class.h12
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/device.h6
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/parent.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/device.h (renamed from drivers/gpu/drm/nouveau/core/include/subdev/device.h)1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/fifo.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/graph.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h7
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/mc.h30
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/therm.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/os.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c22
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c56
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c72
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c20
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c129
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/base.c60
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c175
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c221
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/temp.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c13
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c58
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/Makefile10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/arb.c (renamed from drivers/gpu/drm/nouveau/nouveau_calc.c)2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c (renamed from drivers/gpu/drm/nouveau/nv04_crtc.c)5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/cursor.c (renamed from drivers/gpu/drm/nouveau/nv04_cursor.c)3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c (renamed from drivers/gpu/drm/nouveau/nv04_dac.c)2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c (renamed from drivers/gpu/drm/nouveau/nv04_dfp.c)2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c (renamed from drivers/gpu/drm/nouveau/nv04_display.c)2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h (renamed from drivers/gpu/drm/nouveau/nv04_display.h)0
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.c (renamed from drivers/gpu/drm/nouveau/nouveau_hw.c)2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.h (renamed from drivers/gpu/drm/nouveau/nouveau_hw.h)3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/nvreg.h (renamed from drivers/gpu/drm/nouveau/nvreg.h)0
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c (renamed from drivers/gpu/drm/nouveau/nv17_tv_modes.c)4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c (renamed from drivers/gpu/drm/nouveau/nv04_tv.c)2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c (renamed from drivers/gpu/drm/nouveau/nv17_tv.c)4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.h (renamed from drivers/gpu/drm/nouveau/nv17_tv.h)0
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c26
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c76
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.h11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c25
-rw-r--r--drivers/gpu/drm/nouveau/nv04_pm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv40_pm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c1
-rw-r--r--drivers/gpu/drm/nouveau/nv50_pm.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c27
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c21
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c165
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h38
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c24
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c17
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c6
-rw-r--r--drivers/gpu/drm/qxl/Kconfig10
-rw-r--r--drivers/gpu/drm/qxl/Makefile9
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c685
-rw-r--r--drivers/gpu/drm/qxl/qxl_debugfs.c141
-rw-r--r--drivers/gpu/drm/qxl/qxl_dev.h879
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c982
-rw-r--r--drivers/gpu/drm/qxl/qxl_draw.c390
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c145
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h566
-rw-r--r--drivers/gpu/drm/qxl/qxl_dumb.c93
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c567
-rw-r--r--drivers/gpu/drm/qxl/qxl_fence.c97
-rw-r--r--drivers/gpu/drm/qxl/qxl_gem.c149
-rw-r--r--drivers/gpu/drm/qxl/qxl_image.c176
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c411
-rw-r--r--drivers/gpu/drm/qxl/qxl_irq.c97
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c302
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c365
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.h112
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c304
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c581
-rw-r--r--drivers/gpu/drm/radeon/Makefile2
-rw-r--r--drivers/gpu/drm/radeon/atom.c6
-rw-r--r--drivers/gpu/drm/radeon/atombios.h2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c17
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c1187
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c169
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h48
-rw-r--r--drivers/gpu/drm/radeon/ni.c414
-rw-r--r--drivers/gpu/drm/radeon/nid.h21
-rw-r--r--drivers/gpu/drm/radeon/r100.c77
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/r600.c404
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c64
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c150
-rw-r--r--drivers/gpu/drm/radeon/r600d.h72
-rw-r--r--drivers/gpu/drm/radeon/radeon.h94
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c100
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h28
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c132
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c83
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c56
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c50
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c187
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h23
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c72
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c831
-rw-r--r--drivers/gpu/drm/radeon/rs600.c52
-rw-r--r--drivers/gpu/drm/radeon/rs690.c23
-rw-r--r--drivers/gpu/drm/radeon/rs690d.h3
-rw-r--r--drivers/gpu/drm/radeon/rv515.c56
-rw-r--r--drivers/gpu/drm/radeon/rv770.c909
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h43
-rw-r--r--drivers/gpu/drm/radeon/si.c979
-rw-r--r--drivers/gpu/drm/radeon/sid.h40
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c2
-rw-r--r--drivers/gpu/drm/tegra/Makefile7
-rw-r--r--drivers/gpu/drm/tegra/drm.c217
-rw-r--r--drivers/gpu/drm/tegra/fb.c52
-rw-r--r--drivers/gpu/drm/tegra/host1x.c327
-rw-r--r--drivers/gpu/drm/tilcdc/Makefile5
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c9
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c3
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_slave.c1
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c13
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c4
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c2
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c4
-rw-r--r--drivers/gpu/host1x/Kconfig24
-rw-r--r--drivers/gpu/host1x/Makefile20
-rw-r--r--drivers/gpu/host1x/cdma.c491
-rw-r--r--drivers/gpu/host1x/cdma.h100
-rw-r--r--drivers/gpu/host1x/channel.c126
-rw-r--r--drivers/gpu/host1x/channel.h52
-rw-r--r--drivers/gpu/host1x/debug.c210
-rw-r--r--drivers/gpu/host1x/debug.h51
-rw-r--r--drivers/gpu/host1x/dev.c246
-rw-r--r--drivers/gpu/host1x/dev.h308
-rw-r--r--drivers/gpu/host1x/drm/Kconfig (renamed from drivers/gpu/drm/tegra/Kconfig)20
-rw-r--r--drivers/gpu/host1x/drm/dc.c (renamed from drivers/gpu/drm/tegra/dc.c)31
-rw-r--r--drivers/gpu/host1x/drm/dc.h (renamed from drivers/gpu/drm/tegra/dc.h)0
-rw-r--r--drivers/gpu/host1x/drm/drm.c640
-rw-r--r--drivers/gpu/host1x/drm/drm.h (renamed from drivers/gpu/drm/tegra/drm.h)68
-rw-r--r--drivers/gpu/host1x/drm/fb.c374
-rw-r--r--drivers/gpu/host1x/drm/gem.c270
-rw-r--r--drivers/gpu/host1x/drm/gem.h59
-rw-r--r--drivers/gpu/host1x/drm/gr2d.c339
-rw-r--r--drivers/gpu/host1x/drm/hdmi.c (renamed from drivers/gpu/drm/tegra/hdmi.c)5
-rw-r--r--drivers/gpu/host1x/drm/hdmi.h (renamed from drivers/gpu/drm/tegra/hdmi.h)0
-rw-r--r--drivers/gpu/host1x/drm/output.c (renamed from drivers/gpu/drm/tegra/output.c)0
-rw-r--r--drivers/gpu/host1x/drm/rgb.c (renamed from drivers/gpu/drm/tegra/rgb.c)0
-rw-r--r--drivers/gpu/host1x/host1x.h30
-rw-r--r--drivers/gpu/host1x/host1x_bo.h87
-rw-r--r--drivers/gpu/host1x/host1x_client.h35
-rw-r--r--drivers/gpu/host1x/hw/Makefile6
-rw-r--r--drivers/gpu/host1x/hw/cdma_hw.c326
-rw-r--r--drivers/gpu/host1x/hw/channel_hw.c168
-rw-r--r--drivers/gpu/host1x/hw/debug_hw.c322
-rw-r--r--drivers/gpu/host1x/hw/host1x01.c42
-rw-r--r--drivers/gpu/host1x/hw/host1x01.h25
-rw-r--r--drivers/gpu/host1x/hw/host1x01_hardware.h143
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x01_channel.h120
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x01_sync.h243
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x01_uclass.h174
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c143
-rw-r--r--drivers/gpu/host1x/hw/syncpt_hw.c114
-rw-r--r--drivers/gpu/host1x/intr.c354
-rw-r--r--drivers/gpu/host1x/intr.h102
-rw-r--r--drivers/gpu/host1x/job.c603
-rw-r--r--drivers/gpu/host1x/job.h162
-rw-r--r--drivers/gpu/host1x/syncpt.c387
-rw-r--r--drivers/gpu/host1x/syncpt.h165
-rw-r--r--drivers/hwmon/Kconfig2
-rw-r--r--drivers/hwmon/abx500.c2
-rw-r--r--drivers/hwmon/lm75.c92
-rw-r--r--drivers/hwspinlock/Kconfig2
-rw-r--r--drivers/i2c/busses/i2c-amd756-s4882.c6
-rw-r--r--drivers/i2c/busses/i2c-at91.c58
-rw-r--r--drivers/i2c/busses/i2c-cbus-gpio.c4
-rw-r--r--drivers/i2c/busses/i2c-davinci.c77
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c39
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c73
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c83
-rw-r--r--drivers/i2c/busses/i2c-gpio.c75
-rw-r--r--drivers/i2c/busses/i2c-intel-mid.c3
-rw-r--r--drivers/i2c/busses/i2c-ismt.c2
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c5
-rw-r--r--drivers/i2c/busses/i2c-mxs.c144
-rw-r--r--drivers/i2c/busses/i2c-nforce2-s4985.c6
-rw-r--r--drivers/i2c/busses/i2c-octeon.c10
-rw-r--r--drivers/i2c/busses/i2c-powermac.c10
-rw-r--r--drivers/i2c/busses/i2c-puv3.c10
-rw-r--r--drivers/i2c/busses/i2c-pxa.c20
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c48
-rw-r--r--drivers/i2c/busses/i2c-tegra.c26
-rw-r--r--drivers/i2c/busses/i2c-viperboard.c5
-rw-r--r--drivers/i2c/busses/i2c-xiic.c6
-rw-r--r--drivers/i2c/i2c-core.c246
-rw-r--r--drivers/i2c/i2c-mux.c9
-rw-r--r--drivers/i2c/muxes/Kconfig12
-rw-r--r--drivers/i2c/muxes/Makefile2
-rw-r--r--drivers/i2c/muxes/i2c-arb-gpio-challenge.c251
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c17
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c6
-rw-r--r--drivers/ide/ide-cd.c6
-rw-r--r--drivers/ide/ide-disk_proc.c8
-rw-r--r--drivers/ide/ide-floppy_proc.c2
-rw-r--r--drivers/ide/ide-gd.c4
-rw-r--r--drivers/ide/ide-proc.c22
-rw-r--r--drivers/ide/ide-tape.c6
-rw-r--r--drivers/infiniband/core/iwcm.c2
-rw-r--r--drivers/infiniband/core/verbs.c3
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_resource.c4
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/id_table.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c22
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c1
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.c19
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c21
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c2
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c2
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c24
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h24
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c3
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c36
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c2
-rw-r--r--drivers/input/keyboard/Kconfig12
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c334
-rw-r--r--drivers/input/keyboard/lpc32xx-keys.c11
-rw-r--r--drivers/input/keyboard/omap4-keypad.c16
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c7
-rw-r--r--drivers/input/matrix-keymap.c20
-rw-r--r--drivers/input/misc/hp_sdc_rtc.c58
-rw-r--r--drivers/iommu/amd_iommu.c145
-rw-r--r--drivers/iommu/amd_iommu_init.c154
-rw-r--r--drivers/iommu/amd_iommu_types.h2
-rw-r--r--drivers/iommu/dmar.c23
-rw-r--r--drivers/iommu/exynos-iommu.c2
-rw-r--r--drivers/iommu/intel-iommu.c24
-rw-r--r--drivers/iommu/intel_irq_remapping.c10
-rw-r--r--drivers/iommu/iommu.c37
-rw-r--r--drivers/iommu/irq_remapping.c6
-rw-r--r--drivers/iommu/irq_remapping.h2
-rw-r--r--drivers/iommu/msm_iommu.c2
-rw-r--r--drivers/iommu/omap-iommu.c2
-rw-r--r--drivers/iommu/pci.h29
-rw-r--r--drivers/iommu/shmobile-iommu.c2
-rw-r--r--drivers/iommu/tegra-gart.c5
-rw-r--r--drivers/iommu/tegra-smmu.c5
-rw-r--r--drivers/irqchip/Kconfig8
-rw-r--r--drivers/irqchip/Makefile9
-rw-r--r--drivers/irqchip/exynos-combiner.c158
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c288
-rw-r--r--drivers/irqchip/irq-gic.c36
-rw-r--r--drivers/irqchip/irq-mxs.c121
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c547
-rw-r--r--drivers/irqchip/irq-renesas-irqc.c307
-rw-r--r--drivers/irqchip/irq-s3c24xx.c1356
-rw-r--r--drivers/irqchip/irq-sirfsoc.c126
-rw-r--r--drivers/irqchip/irq-sun4i.c149
-rw-r--r--drivers/irqchip/irq-sunxi.c151
-rw-r--r--drivers/irqchip/irq-vic.c3
-rw-r--r--drivers/irqchip/irq-vt8500.c259
-rw-r--r--drivers/isdn/gigaset/capi.c2
-rw-r--r--drivers/isdn/hardware/avm/b1.c2
-rw-r--r--drivers/isdn/hardware/avm/b1dma.c2
-rw-r--r--drivers/isdn/hardware/avm/c4.c2
-rw-r--r--drivers/isdn/hardware/eicon/divasproc.c12
-rw-r--r--drivers/isdn/hysdn/hycapi.c2
-rw-r--r--drivers/isdn/hysdn/hysdn_procconf.c32
-rw-r--r--drivers/isdn/hysdn/hysdn_proclog.c71
-rw-r--r--drivers/isdn/mISDN/timerdev.c76
-rw-r--r--drivers/leds/Kconfig115
-rw-r--r--drivers/leds/Makefile11
-rw-r--r--drivers/leds/leds-asic3.c7
-rw-r--r--drivers/leds/leds-atmel-pwm.c4
-rw-r--r--drivers/leds/leds-bd2802.c14
-rw-r--r--drivers/leds/leds-lm355x.c2
-rw-r--r--drivers/leds/leds-lm3642.c2
-rw-r--r--drivers/leds/leds-lp5521.c22
-rw-r--r--drivers/leds/leds-lp5562.c599
-rw-r--r--drivers/leds/leds-lp55xx-common.c42
-rw-r--r--drivers/leds/leds-lp55xx-common.h4
-rw-r--r--drivers/leds/leds-lt3593.c5
-rw-r--r--drivers/leds/leds-ns2.c44
-rw-r--r--drivers/leds/leds-pwm.c50
-rw-r--r--drivers/leds/leds-renesas-tpu.c3
-rw-r--r--drivers/leds/leds-tca6507.c4
-rw-r--r--drivers/leds/leds-wm8350.c5
-rw-r--r--drivers/leds/trigger/Kconfig111
-rw-r--r--drivers/leds/trigger/Makefile10
-rw-r--r--drivers/leds/trigger/ledtrig-backlight.c (renamed from drivers/leds/ledtrig-backlight.c)2
-rw-r--r--drivers/leds/trigger/ledtrig-camera.c57
-rw-r--r--drivers/leds/trigger/ledtrig-cpu.c (renamed from drivers/leds/ledtrig-cpu.c)2
-rw-r--r--drivers/leds/trigger/ledtrig-default-on.c (renamed from drivers/leds/ledtrig-default-on.c)2
-rw-r--r--drivers/leds/trigger/ledtrig-gpio.c (renamed from drivers/leds/ledtrig-gpio.c)2
-rw-r--r--drivers/leds/trigger/ledtrig-heartbeat.c (renamed from drivers/leds/ledtrig-heartbeat.c)2
-rw-r--r--drivers/leds/trigger/ledtrig-ide-disk.c (renamed from drivers/leds/ledtrig-ide-disk.c)0
-rw-r--r--drivers/leds/trigger/ledtrig-oneshot.c (renamed from drivers/leds/ledtrig-oneshot.c)2
-rw-r--r--drivers/leds/trigger/ledtrig-timer.c (renamed from drivers/leds/ledtrig-timer.c)1
-rw-r--r--drivers/leds/trigger/ledtrig-transient.c (renamed from drivers/leds/ledtrig-transient.c)2
-rw-r--r--drivers/lguest/Kconfig5
-rw-r--r--drivers/lguest/core.c67
-rw-r--r--drivers/lguest/lg.h6
-rw-r--r--drivers/lguest/lguest_user.c6
-rw-r--r--drivers/lguest/page_tables.c567
-rw-r--r--drivers/lguest/x86/core.c7
-rw-r--r--drivers/macintosh/smu.c6
-rw-r--r--drivers/macintosh/via-pmu.c7
-rw-r--r--drivers/md/Kconfig2
-rw-r--r--drivers/md/Makefile1
-rw-r--r--drivers/md/bcache/Kconfig42
-rw-r--r--drivers/md/bcache/Makefile7
-rw-r--r--drivers/md/bcache/alloc.c599
-rw-r--r--drivers/md/bcache/bcache.h1259
-rw-r--r--drivers/md/bcache/bset.c1192
-rw-r--r--drivers/md/bcache/bset.h379
-rw-r--r--drivers/md/bcache/btree.c2503
-rw-r--r--drivers/md/bcache/btree.h405
-rw-r--r--drivers/md/bcache/closure.c345
-rw-r--r--drivers/md/bcache/closure.h672
-rw-r--r--drivers/md/bcache/debug.c565
-rw-r--r--drivers/md/bcache/debug.h54
-rw-r--r--drivers/md/bcache/io.c397
-rw-r--r--drivers/md/bcache/journal.c787
-rw-r--r--drivers/md/bcache/journal.h215
-rw-r--r--drivers/md/bcache/movinggc.c254
-rw-r--r--drivers/md/bcache/request.c1411
-rw-r--r--drivers/md/bcache/request.h62
-rw-r--r--drivers/md/bcache/stats.c246
-rw-r--r--drivers/md/bcache/stats.h58
-rw-r--r--drivers/md/bcache/super.c1987
-rw-r--r--drivers/md/bcache/sysfs.c817
-rw-r--r--drivers/md/bcache/sysfs.h110
-rw-r--r--drivers/md/bcache/trace.c26
-rw-r--r--drivers/md/bcache/util.c377
-rw-r--r--drivers/md/bcache/util.h589
-rw-r--r--drivers/md/bcache/writeback.c414
-rw-r--r--drivers/md/dm-crypt.c3
-rw-r--r--drivers/md/dm-raid1.c2
-rw-r--r--drivers/md/dm-stripe.c2
-rw-r--r--drivers/md/dm-verity.c4
-rw-r--r--drivers/md/dm.c4
-rw-r--r--drivers/md/faulty.c6
-rw-r--r--drivers/md/linear.c3
-rw-r--r--drivers/md/md.c21
-rw-r--r--drivers/md/raid0.c9
-rw-r--r--drivers/md/raid1.c133
-rw-r--r--drivers/md/raid10.c78
-rw-r--r--drivers/md/raid5.c49
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c5
-rw-r--r--drivers/media/dvb-core/dvb_net.c5
-rw-r--r--drivers/media/pci/bt8xx/bttv-i2c.c6
-rw-r--r--drivers/media/pci/cx25821/cx25821-audio-upstream.c179
-rw-r--r--drivers/media/pci/mantis/mantis_i2c.c4
-rw-r--r--drivers/media/pci/ttpci/av7110_ir.c2
-rw-r--r--drivers/media/pci/zoran/zoran_procfs.c4
-rw-r--r--drivers/media/rc/ir-lirc-codec.c2
-rw-r--r--drivers/media/rc/lirc_dev.c2
-rw-r--r--drivers/memstick/core/mspro_block.c8
-rw-r--r--drivers/message/fusion/mptbase.c4
-rw-r--r--drivers/message/fusion/mptctl.c8
-rw-r--r--drivers/message/fusion/mptfc.c2
-rw-r--r--drivers/message/fusion/mptsas.c8
-rw-r--r--drivers/message/fusion/mptscsih.c98
-rw-r--r--drivers/message/fusion/mptscsih.h2
-rw-r--r--drivers/message/fusion/mptspi.c2
-rw-r--r--drivers/message/i2o/i2o_block.c8
-rw-r--r--drivers/message/i2o/i2o_proc.c97
-rw-r--r--drivers/mfd/88pm860x-core.c12
-rw-r--r--drivers/mfd/Kconfig1444
-rw-r--r--drivers/mfd/Makefile9
-rw-r--r--drivers/mfd/aat2870-core.c20
-rw-r--r--drivers/mfd/ab3100-otp.c14
-rw-r--r--drivers/mfd/ab8500-core.c28
-rw-r--r--drivers/mfd/ab8500-gpadc.c2
-rw-r--r--drivers/mfd/ab8500-sysctrl.c2
-rw-r--r--drivers/mfd/adp5520.c20
-rw-r--r--drivers/mfd/arizona-core.c267
-rw-r--r--drivers/mfd/arizona-irq.c106
-rw-r--r--drivers/mfd/arizona-spi.c2
-rw-r--r--drivers/mfd/as3711.c27
-rw-r--r--drivers/mfd/cros_ec.c196
-rw-r--r--drivers/mfd/cros_ec_i2c.c201
-rw-r--r--drivers/mfd/cros_ec_spi.c375
-rw-r--r--drivers/mfd/da903x.c19
-rw-r--r--drivers/mfd/da9052-spi.c4
-rw-r--r--drivers/mfd/da9055-core.c2
-rw-r--r--drivers/mfd/davinci_voicecodec.c12
-rw-r--r--drivers/mfd/db8500-prcmu.c388
-rw-r--r--drivers/mfd/dbx500-prcmu-regs.h204
-rw-r--r--drivers/mfd/ezx-pcap.c21
-rw-r--r--drivers/mfd/htc-pasic3.c13
-rw-r--r--drivers/mfd/intel_msic.c10
-rw-r--r--drivers/mfd/lm3533-core.c8
-rw-r--r--drivers/mfd/max77686.c2
-rw-r--r--drivers/mfd/mc13xxx-spi.c6
-rw-r--r--drivers/mfd/omap-usb-host.c209
-rw-r--r--drivers/mfd/omap-usb-tll.c219
-rw-r--r--drivers/mfd/omap-usb.h5
-rw-r--r--drivers/mfd/palmas.c7
-rw-r--r--drivers/mfd/retu-mfd.c85
-rw-r--r--drivers/mfd/rts5249.c241
-rw-r--r--drivers/mfd/rtsx_pcr.c11
-rw-r--r--drivers/mfd/rtsx_pcr.h1
-rw-r--r--drivers/mfd/si476x-cmd.c1553
-rw-r--r--drivers/mfd/si476x-i2c.c886
-rw-r--r--drivers/mfd/si476x-prop.c241
-rw-r--r--drivers/mfd/sta2x11-mfd.c11
-rw-r--r--drivers/mfd/stmpe-i2c.c1
-rw-r--r--drivers/mfd/stmpe-spi.c2
-rw-r--r--drivers/mfd/stmpe.c105
-rw-r--r--drivers/mfd/stmpe.h49
-rw-r--r--drivers/mfd/syscon.c80
-rw-r--r--drivers/mfd/tc3589x.c21
-rw-r--r--drivers/mfd/tps65090.c11
-rw-r--r--drivers/mfd/twl4030-madc.c14
-rw-r--r--drivers/mfd/twl6040.c31
-rw-r--r--drivers/mfd/ucb1400_core.c5
-rw-r--r--drivers/mfd/vexpress-config.c35
-rw-r--r--drivers/mfd/vexpress-sysreg.c4
-rw-r--r--drivers/mfd/wm5102-tables.c66
-rw-r--r--drivers/mfd/wm831x-spi.c6
-rw-r--r--drivers/mfd/wm8994-core.c88
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c1
-rw-r--r--drivers/misc/sgi-gru/gruprocfs.c2
-rw-r--r--drivers/mmc/card/block.c13
-rw-r--r--drivers/mmc/card/queue.c3
-rw-r--r--drivers/mmc/card/queue.h2
-rw-r--r--drivers/mmc/core/core.c18
-rw-r--r--drivers/mmc/core/mmc.c5
-rw-r--r--drivers/mmc/core/sdio.c4
-rw-r--r--drivers/mmc/core/sdio_bus.c23
-rw-r--r--drivers/mmc/host/Kconfig31
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/android-goldfish.c2
-rw-r--r--drivers/mmc/host/atmel-mci.c16
-rw-r--r--drivers/mmc/host/davinci_mmc.c111
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c54
-rw-r--r--drivers/mmc/host/dw_mmc.c153
-rw-r--r--drivers/mmc/host/dw_mmc.h3
-rw-r--r--drivers/mmc/host/mmci.c9
-rw-r--r--drivers/mmc/host/msm_sdcc.c1
-rw-r--r--drivers/mmc/host/mvsdio.c54
-rw-r--r--drivers/mmc/host/mxcmmc.c296
-rw-r--r--drivers/mmc/host/mxs-mmc.c63
-rw-r--r--drivers/mmc/host/omap_hsmmc.c14
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c10
-rw-r--r--drivers/mmc/host/s3cmci.c83
-rw-r--r--drivers/mmc/host/sdhci-acpi.c2
-rw-r--r--drivers/mmc/host/sdhci-bcm2835.c4
-rw-r--r--drivers/mmc/host/sdhci-cns3xxx.c5
-rw-r--r--drivers/mmc/host/sdhci-dove.c4
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c4
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c4
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c4
-rw-r--r--drivers/mmc/host/sdhci-pci.c4
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c7
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h6
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c2
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c49
-rw-r--r--drivers/mmc/host/sdhci-s3c-regs.h87
-rw-r--r--drivers/mmc/host/sdhci-s3c.c93
-rw-r--r--drivers/mmc/host/sdhci-sirf.c193
-rw-r--r--drivers/mmc/host/sdhci-spear.c4
-rw-r--r--drivers/mmc/host/sdhci-tegra.c124
-rw-r--r--drivers/mmc/host/sdhci.c39
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c4
-rw-r--r--drivers/mtd/chips/gen_probe.c8
-rw-r--r--drivers/mtd/mtd_blkdevs.c9
-rw-r--r--drivers/mtd/mtdblock.c4
-rw-r--r--drivers/mtd/mtdcore.c1
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c51
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h3
-rw-r--r--drivers/mtd/onenand/samsung.c4
-rw-r--r--drivers/mtd/onenand/samsung.h61
-rw-r--r--drivers/mtd/sm_ftl.c3
-rw-r--r--drivers/net/bonding/bond_procfs.c4
-rw-r--r--drivers/net/caif/Kconfig14
-rw-r--r--drivers/net/caif/Makefile3
-rw-r--r--drivers/net/caif/caif_virtio.c790
-rw-r--r--drivers/net/ethernet/3com/3c59x.c2
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c35
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c54
-rw-r--r--drivers/net/ethernet/freescale/fec.h1
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c18
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c23
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c120
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h79
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/srq.c15
-rw-r--r--drivers/net/ethernet/sfc/falcon.c6
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c29
-rw-r--r--drivers/net/ethernet/ti/cpsw.c2
-rw-r--r--drivers/net/hamradio/baycom_epp.c2
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/hamradio/yam.c2
-rw-r--r--drivers/net/irda/vlsi_ir.c4
-rw-r--r--drivers/net/team/team_mode_random.c2
-rw-r--r--drivers/net/usb/Kconfig11
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/asix_common.c3
-rw-r--r--drivers/net/usb/cdc_ether.c10
-rw-r--r--drivers/net/usb/pegasus.c3
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/r8152.c1767
-rw-r--r--drivers/net/virtio_net.c77
-rw-r--r--drivers/net/wireless/airo.c116
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c4
-rw-r--r--drivers/net/wireless/atmel.c69
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c375
-rw-r--r--drivers/net/wireless/hostap/hostap_download.c68
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c38
-rw-r--r--drivers/net/wireless/hostap/hostap_proc.c609
-rw-r--r--drivers/net/wireless/hostap/hostap_wlan.h3
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c4
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/xen-netback/netback.c69
-rw-r--r--drivers/nubus/nubus.c55
-rw-r--r--drivers/nubus/proc.c85
-rw-r--r--drivers/of/base.c122
-rw-r--r--drivers/parisc/led.c4
-rw-r--r--drivers/parisc/sba_iommu.c19
-rw-r--r--drivers/pci/probe.c2
-rw-r--r--drivers/pci/proc.c34
-rw-r--r--drivers/pinctrl/Kconfig13
-rw-r--r--drivers/pinctrl/Makefile2
-rw-r--r--drivers/pinctrl/core.c2
-rw-r--r--drivers/pinctrl/pinctrl-at91.c3
-rw-r--r--drivers/pinctrl/pinctrl-bcm2835.c19
-rw-r--r--drivers/pinctrl/pinctrl-exynos.c111
-rw-r--r--drivers/pinctrl/pinctrl-imx.c126
-rw-r--r--drivers/pinctrl/pinctrl-imx.h29
-rw-r--r--drivers/pinctrl/pinctrl-imx35.c2088
-rw-r--r--drivers/pinctrl/pinctrl-imx51.c1532
-rw-r--r--drivers/pinctrl/pinctrl-imx53.c1597
-rw-r--r--drivers/pinctrl/pinctrl-imx6dl.c497
-rw-r--r--drivers/pinctrl/pinctrl-imx6q.c2301
-rw-r--r--drivers/pinctrl/pinctrl-imx6sl.c403
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c2
-rw-r--r--drivers/pinctrl/pinctrl-samsung.c2
-rw-r--r--drivers/pinctrl/pinctrl-samsung.h1
-rw-r--r--drivers/pinctrl/pinctrl-sirf.c14
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig6
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile1
-rw-r--r--drivers/pinctrl/sh-pfc/core.c302
-rw-r--r--drivers/pinctrl/sh-pfc/core.h55
-rw-r--r--drivers/pinctrl/sh-pfc/gpio.c372
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a73a4.c2587
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7740.c694
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7779.c1695
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7203.c488
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7264.c460
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7269.c624
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7372.c323
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh73a0.c2664
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7720.c333
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7722.c480
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7723.c642
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7724.c638
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7734.c55
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7757.c620
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7785.c354
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7786.c296
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-shx3.c148
-rw-r--r--drivers/pinctrl/sh-pfc/pinctrl.c491
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h135
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c8
-rw-r--r--drivers/pinctrl/vt8500/Kconfig52
-rw-r--r--drivers/pinctrl/vt8500/Makefile8
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-vt8500.c501
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wm8505.c532
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wm8650.c370
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wm8750.c409
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wm8850.c388
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c632
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.h79
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c4
-rw-r--r--drivers/platform/x86/toshiba_acpi.c18
-rw-r--r--drivers/pnp/isapnp/proc.c6
-rw-r--r--drivers/pnp/pnpbios/core.c9
-rw-r--r--drivers/pnp/pnpbios/proc.c4
-rw-r--r--drivers/power/rx51_battery.c1
-rw-r--r--drivers/pps/clients/pps_parport.c1
-rw-r--r--drivers/pwm/Kconfig8
-rw-r--r--drivers/pwm/pwm-ab8500.c10
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c1
-rw-r--r--drivers/pwm/pwm-imx.c6
-rw-r--r--drivers/pwm/pwm-lpc32xx.c27
-rw-r--r--drivers/pwm/pwm-mxs.c4
-rw-r--r--drivers/pwm/pwm-puv3.c1
-rw-r--r--drivers/pwm/pwm-pxa.c23
-rw-r--r--drivers/pwm/pwm-samsung.c20
-rw-r--r--drivers/pwm/pwm-spear.c11
-rw-r--r--drivers/pwm/pwm-tegra.c2
-rw-r--r--drivers/pwm/pwm-tiecap.c6
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c6
-rw-r--r--drivers/pwm/pwm-tipwmss.c2
-rw-r--r--drivers/pwm/pwm-tipwmss.h2
-rw-r--r--drivers/pwm/pwm-twl-led.c4
-rw-r--r--drivers/pwm/pwm-twl.c6
-rw-r--r--drivers/remoteproc/Kconfig27
-rw-r--r--drivers/remoteproc/Makefile1
-rw-r--r--drivers/remoteproc/da8xx_remoteproc.c324
-rw-r--r--drivers/remoteproc/remoteproc_core.c211
-rw-r--r--drivers/remoteproc/remoteproc_elf_loader.c100
-rw-r--r--drivers/remoteproc/remoteproc_internal.h15
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c79
-rw-r--r--drivers/remoteproc/ste_modem_rproc.c45
-rw-r--r--drivers/reset/Kconfig13
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/core.c297
-rw-r--r--drivers/rpmsg/Kconfig1
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c57
-rw-r--r--drivers/rtc/rtc-proc.c2
-rw-r--r--drivers/rtc/rtc-rs5c372.c5
-rw-r--r--drivers/rtc/rtc-s3c.c3
-rw-r--r--drivers/rtc/rtc-s3c.h70
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c6
-rw-r--r--drivers/s390/block/dasd.c18
-rw-r--r--drivers/s390/block/dcssblk.c15
-rw-r--r--drivers/s390/block/scm_blk.c3
-rw-r--r--drivers/s390/char/sclp_cmd.c20
-rw-r--r--drivers/s390/char/zcore.c34
-rw-r--r--drivers/s390/cio/blacklist.c28
-rw-r--r--drivers/s390/cio/qdio_debug.c2
-rw-r--r--drivers/s390/crypto/ap_bus.c17
-rw-r--r--drivers/s390/kvm/kvm_virtio.c11
-rw-r--r--drivers/s390/kvm/virtio_ccw.c20
-rw-r--r--drivers/scsi/BusLogic.c83
-rw-r--r--drivers/scsi/BusLogic.h1
-rw-r--r--drivers/scsi/NCR5380.c61
-rw-r--r--drivers/scsi/NCR5380.h6
-rw-r--r--drivers/scsi/a2091.c3
-rw-r--r--drivers/scsi/a3000.c3
-rw-r--r--drivers/scsi/advansys.c1164
-rw-r--r--drivers/scsi/aha152x.c61
-rw-r--r--drivers/scsi/aha1740.c29
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c9
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.h12
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_proc.c163
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c9
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.h12
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_proc.c153
-rw-r--r--drivers/scsi/aic7xxx_old.c2
-rw-r--r--drivers/scsi/aic7xxx_old/aic7xxx_proc.c221
-rw-r--r--drivers/scsi/arm/acornscsi.c58
-rw-r--r--drivers/scsi/arm/arxescsi.c40
-rw-r--r--drivers/scsi/arm/cumana_1.c1
-rw-r--r--drivers/scsi/arm/cumana_2.c43
-rw-r--r--drivers/scsi/arm/eesox.c42
-rw-r--r--drivers/scsi/arm/fas216.c31
-rw-r--r--drivers/scsi/arm/fas216.h6
-rw-r--r--drivers/scsi/arm/oak.c6
-rw-r--r--drivers/scsi/arm/powertec.c29
-rw-r--r--drivers/scsi/atari_NCR5380.c145
-rw-r--r--drivers/scsi/atari_scsi.c2
-rw-r--r--drivers/scsi/atari_scsi.h2
-rw-r--r--drivers/scsi/atp870u.c40
-rw-r--r--drivers/scsi/dc395x.c20
-rw-r--r--drivers/scsi/dpt_i2o.c102
-rw-r--r--drivers/scsi/dtc.c3
-rw-r--r--drivers/scsi/dtc.h3
-rw-r--r--drivers/scsi/eata_pio.c56
-rw-r--r--drivers/scsi/g_NCR5380.c51
-rw-r--r--drivers/scsi/gdth.c3
-rw-r--r--drivers/scsi/gdth.h3
-rw-r--r--drivers/scsi/gdth_proc.c211
-rw-r--r--drivers/scsi/gdth_proc.h5
-rw-r--r--drivers/scsi/gvp11.c3
-rw-r--r--drivers/scsi/imm.c40
-rw-r--r--drivers/scsi/in2000.c178
-rw-r--r--drivers/scsi/ips.c238
-rw-r--r--drivers/scsi/ips.h9
-rw-r--r--drivers/scsi/libsas/sas_expander.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c33
-rw-r--r--drivers/scsi/mac_scsi.c3
-rw-r--r--drivers/scsi/mac_scsi.h3
-rw-r--r--drivers/scsi/megaraid.c1040
-rw-r--r--drivers/scsi/megaraid.h17
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c14
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c10
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c14
-rw-r--r--drivers/scsi/mvme147.c3
-rw-r--r--drivers/scsi/nsp32.c39
-rw-r--r--drivers/scsi/pas16.c3
-rw-r--r--drivers/scsi/pas16.h3
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c36
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.h9
-rw-r--r--drivers/scsi/pmcraid.c14
-rw-r--r--drivers/scsi/ppa.c36
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c2
-rw-r--r--drivers/scsi/scsi_debug.c53
-rw-r--r--drivers/scsi/scsi_proc.c74
-rw-r--r--drivers/scsi/sd.c3
-rw-r--r--drivers/scsi/sg.c1
-rw-r--r--drivers/scsi/sr.c3
-rw-r--r--drivers/scsi/sun3_NCR5380.c183
-rw-r--r--drivers/scsi/sun3_scsi.c1
-rw-r--r--drivers/scsi/sun3_scsi.h2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c115
-rw-r--r--drivers/scsi/t128.c3
-rw-r--r--drivers/scsi/t128.h3
-rw-r--r--drivers/scsi/virtio_scsi.c487
-rw-r--r--drivers/scsi/wd33c93.c194
-rw-r--r--drivers/scsi/wd33c93.h3
-rw-r--r--drivers/scsi/wd7000.c31
-rw-r--r--drivers/spi/spi-davinci.c4
-rw-r--r--drivers/spi/spi-mxs.c60
-rw-r--r--drivers/spi/spi-pl022.c43
-rw-r--r--drivers/staging/android/logger.c1
-rw-r--r--drivers/staging/comedi/proc.c55
-rw-r--r--drivers/staging/csr/csr_wifi_hip_udi.c353
-rw-r--r--drivers/staging/csr/csr_wifi_hip_unifi_udi.h17
-rw-r--r--drivers/staging/csr/drv.c4
-rw-r--r--drivers/staging/csr/io.c151
-rw-r--r--drivers/staging/cxt1e1/Makefile3
-rw-r--r--drivers/staging/cxt1e1/sbeproc.c460
-rw-r--r--drivers/staging/cxt1e1/sbeproc.h14
-rw-r--r--drivers/staging/dgrp/dgrp_common.c31
-rw-r--r--drivers/staging/dgrp/dgrp_common.h65
-rw-r--r--drivers/staging/dgrp/dgrp_dpa_ops.c29
-rw-r--r--drivers/staging/dgrp/dgrp_mon_ops.c26
-rw-r--r--drivers/staging/dgrp/dgrp_net_ops.c30
-rw-r--r--drivers/staging/dgrp/dgrp_ports_ops.c18
-rw-r--r--drivers/staging/dgrp/dgrp_specproc.c399
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c130
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_debug.c2
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_proc.c129
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c3
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-common.c14
-rw-r--r--drivers/staging/keucr/scsiglue.c45
-rw-r--r--drivers/staging/media/go7007/go7007-driver.c7
-rw-r--r--drivers/staging/rtl8187se/r8180.h1
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c146
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/Makefile1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c5
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h6
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_debug.c1029
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_ccmp.c21
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c44
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_wep.c6
-rw-r--r--drivers/staging/rtl8192e/rtllib_module.c54
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_module.c53
-rw-r--r--drivers/staging/rtl8192u/ieee80211/proc.c8
-rw-r--r--drivers/staging/rtl8192u/r8192U.h1
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c211
-rw-r--r--drivers/staging/rts5139/rts51x_scsi.c30
-rw-r--r--drivers/staging/rts5139/rts51x_scsi.h2
-rw-r--r--drivers/staging/silicom/Makefile3
-rw-r--r--drivers/staging/silicom/bp_proc.c1327
-rw-r--r--drivers/staging/silicom/bpctl_mod.c (renamed from drivers/staging/silicom/bp_mod.c)1925
-rw-r--r--drivers/staging/ste_rmi4/Makefile1
-rw-r--r--drivers/staging/ste_rmi4/board-mop500-u8500uib-rmi4.c31
-rw-r--r--drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c31
-rw-r--r--drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h1
-rw-r--r--drivers/staging/vt6655/device_main.c74
-rw-r--r--drivers/staging/vt6656/main_usb.c63
-rw-r--r--drivers/staging/wlags49_h2/wl_main.c331
-rw-r--r--drivers/target/loopback/tcm_loop.c9
-rw-r--r--drivers/thermal/exynos_thermal.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c64
-rw-r--r--drivers/tty/serial/mxs-auart.c52
-rw-r--r--drivers/tty/serial/serial_core.c2
-rw-r--r--drivers/tty/serial/sunsu.c1
-rw-r--r--drivers/usb/gadget/at91_udc.c2
-rw-r--r--drivers/usb/gadget/f_fs.c2
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c124
-rw-r--r--drivers/usb/gadget/goku_udc.c89
-rw-r--r--drivers/usb/gadget/inode.c44
-rw-r--r--drivers/usb/gadget/lpc32xx_udc.c2
-rw-r--r--drivers/usb/gadget/rndis.c4
-rw-r--r--drivers/usb/host/isp1362-hcd.c7
-rw-r--r--drivers/usb/host/ohci-exynos.c2
-rw-r--r--drivers/usb/host/sl811-hcd.c2
-rw-r--r--drivers/usb/misc/yurex.c2
-rw-r--r--drivers/usb/storage/scsiglue.c42
-rw-r--r--drivers/vfio/pci/vfio_pci.c44
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c172
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c67
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h1
-rw-r--r--drivers/vfio/vfio.c117
-rw-r--r--drivers/vhost/Kconfig18
-rw-r--r--drivers/vhost/Kconfig.tcm6
-rw-r--r--drivers/vhost/Makefile5
-rw-r--r--drivers/vhost/net.c307
-rw-r--r--drivers/vhost/scsi.c (renamed from drivers/vhost/tcm_vhost.c)263
-rw-r--r--drivers/vhost/tcm_vhost.h128
-rw-r--r--drivers/vhost/test.c13
-rw-r--r--drivers/vhost/vhost.c157
-rw-r--r--drivers/vhost/vhost.h42
-rw-r--r--drivers/vhost/vringh.c1007
-rw-r--r--drivers/video/Kconfig4
-rw-r--r--drivers/video/atmel_lcdfb.c120
-rw-r--r--drivers/video/backlight/pwm_bl.c7
-rw-r--r--drivers/video/bfin_adv7393fb.c43
-rw-r--r--drivers/video/cirrusfb.c62
-rw-r--r--drivers/video/console/fbcon.c2
-rw-r--r--drivers/video/fbmem.c7
-rw-r--r--drivers/video/hdmi.c21
-rw-r--r--drivers/video/mxsfb.c260
-rw-r--r--drivers/video/omap2/Makefile2
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c59
-rw-r--r--drivers/video/omap2/displays/panel-generic-dpi.c39
-rw-r--r--drivers/video/omap2/displays/panel-lgphilips-lb035q02.c58
-rw-r--r--drivers/video/omap2/displays/panel-n8x0.c95
-rw-r--r--drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c135
-rw-r--r--drivers/video/omap2/displays/panel-picodlp.c49
-rw-r--r--drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c126
-rw-r--r--drivers/video/omap2/displays/panel-taal.c300
-rw-r--r--drivers/video/omap2/displays/panel-tfp410.c4
-rw-r--r--drivers/video/omap2/displays/panel-tpo-td043mtea1.c45
-rw-r--r--drivers/video/omap2/dss/apply.c15
-rw-r--r--drivers/video/omap2/dss/core.c5
-rw-r--r--drivers/video/omap2/dss/dispc.c176
-rw-r--r--drivers/video/omap2/dss/dispc.h1
-rw-r--r--drivers/video/omap2/dss/dpi.c351
-rw-r--r--drivers/video/omap2/dss/dsi.c1256
-rw-r--r--drivers/video/omap2/dss/dss.c181
-rw-r--r--drivers/video/omap2/dss/dss.h59
-rw-r--r--drivers/video/omap2/dss/dss_features.c8
-rw-r--r--drivers/video/omap2/dss/hdmi.c68
-rw-r--r--drivers/video/omap2/dss/output.c1
-rw-r--r--drivers/video/omap2/dss/rfbi.c34
-rw-r--r--drivers/video/omap2/dss/sdi.c103
-rw-r--r--drivers/video/omap2/dss/venc.c56
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c32
-rw-r--r--drivers/video/pxa3xx-gcu.c36
-rw-r--r--drivers/virtio/virtio_balloon.c6
-rw-r--r--drivers/virtio/virtio_ring.c297
-rw-r--r--drivers/zorro/proc.c6
1199 files changed, 104707 insertions, 39776 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 8d96238..9953a42 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -164,4 +164,6 @@ source "drivers/irqchip/Kconfig"
source "drivers/ipack/Kconfig"
+source "drivers/reset/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 33360de..130abc1 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -37,6 +37,9 @@ obj-$(CONFIG_XEN) += xen/
# regulators early, since some subsystems rely on them to initialize
obj-$(CONFIG_REGULATOR) += regulator/
+# reset controllers early, since gpu drivers might rely on them to initialize
+obj-$(CONFIG_RESET_CONTROLLER) += reset/
+
# tty/ comes before char/ so that the VT console is the boot-time
# default.
obj-y += tty/
@@ -124,7 +127,7 @@ obj-$(CONFIG_PPC_PS3) += ps3/
obj-$(CONFIG_OF) += of/
obj-$(CONFIG_SSB) += ssb/
obj-$(CONFIG_BCMA) += bcma/
-obj-$(CONFIG_VHOST_NET) += vhost/
+obj-$(CONFIG_VHOST_RING) += vhost/
obj-$(CONFIG_VLYNQ) += vlynq/
obj-$(CONFIG_STAGING) += staging/
obj-y += platform/
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 6d5bf64..00d2efd 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -194,7 +194,7 @@ static int acpi_ac_seq_show(struct seq_file *seq, void *offset)
static int acpi_ac_open_fs(struct inode *inode, struct file *file)
{
- return single_open(file, acpi_ac_seq_show, PDE(inode)->data);
+ return single_open(file, acpi_ac_seq_show, PDE_DATA(inode));
}
static int acpi_ac_add_fs(struct acpi_device *device)
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 0cc384b..e710045 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -929,7 +929,7 @@ static int acpi_battery_read_##_name(struct seq_file *seq, void *offset) \
} \
static int acpi_battery_##_name##_open_fs(struct inode *inode, struct file *file) \
{ \
- return single_open(file, acpi_battery_read_##_name, PDE(inode)->data); \
+ return single_open(file, acpi_battery_read_##_name, PDE_DATA(inode)); \
}
DECLARE_FILE_FUNCTIONS(info);
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 92a659a..d2e617b 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -129,7 +129,7 @@ static int acpi_button_state_seq_show(struct seq_file *seq, void *offset)
static int acpi_button_state_open_fs(struct inode *inode, struct file *file)
{
- return single_open(file, acpi_button_state_seq_show, PDE(inode)->data);
+ return single_open(file, acpi_button_state_seq_show, PDE_DATA(inode));
}
static const struct file_operations acpi_button_state_fops = {
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 52ce767..aa1227a 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -120,7 +120,7 @@ static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
static int acpi_system_alarm_open_fs(struct inode *inode, struct file *file)
{
- return single_open(file, acpi_system_alarm_seq_show, PDE(inode)->data);
+ return single_open(file, acpi_system_alarm_seq_show, PDE_DATA(inode));
}
static int get_date_field(char **p, u32 * value)
@@ -397,7 +397,7 @@ static int
acpi_system_wakeup_device_open_fs(struct inode *inode, struct file *file)
{
return single_open(file, acpi_system_wakeup_device_seq_show,
- PDE(inode)->data);
+ PDE_DATA(inode));
}
static const struct file_operations acpi_system_wakeup_device_fops = {
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index e523245..b6241ee 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -521,19 +521,6 @@ acpi_sbs_add_fs(struct proc_dir_entry **dir,
return 0;
}
-static void
-acpi_sbs_remove_fs(struct proc_dir_entry **dir,
- struct proc_dir_entry *parent_dir)
-{
- if (*dir) {
- remove_proc_entry(ACPI_SBS_FILE_INFO, *dir);
- remove_proc_entry(ACPI_SBS_FILE_STATE, *dir);
- remove_proc_entry(ACPI_SBS_FILE_ALARM, *dir);
- remove_proc_entry((*dir)->name, parent_dir);
- *dir = NULL;
- }
-}
-
/* Smart Battery Interface */
static struct proc_dir_entry *acpi_battery_dir = NULL;
@@ -584,7 +571,7 @@ static int acpi_battery_read_info(struct seq_file *seq, void *offset)
static int acpi_battery_info_open_fs(struct inode *inode, struct file *file)
{
- return single_open(file, acpi_battery_read_info, PDE(inode)->data);
+ return single_open(file, acpi_battery_read_info, PDE_DATA(inode));
}
static int acpi_battery_read_state(struct seq_file *seq, void *offset)
@@ -623,7 +610,7 @@ static int acpi_battery_read_state(struct seq_file *seq, void *offset)
static int acpi_battery_state_open_fs(struct inode *inode, struct file *file)
{
- return single_open(file, acpi_battery_read_state, PDE(inode)->data);
+ return single_open(file, acpi_battery_read_state, PDE_DATA(inode));
}
static int acpi_battery_read_alarm(struct seq_file *seq, void *offset)
@@ -688,7 +675,7 @@ acpi_battery_write_alarm(struct file *file, const char __user * buffer,
static int acpi_battery_alarm_open_fs(struct inode *inode, struct file *file)
{
- return single_open(file, acpi_battery_read_alarm, PDE(inode)->data);
+ return single_open(file, acpi_battery_read_alarm, PDE_DATA(inode));
}
static const struct file_operations acpi_battery_info_fops = {
@@ -736,7 +723,7 @@ static int acpi_ac_read_state(struct seq_file *seq, void *offset)
static int acpi_ac_state_open_fs(struct inode *inode, struct file *file)
{
- return single_open(file, acpi_ac_read_state, PDE(inode)->data);
+ return single_open(file, acpi_ac_read_state, PDE_DATA(inode));
}
static const struct file_operations acpi_ac_state_fops = {
@@ -836,8 +823,8 @@ static void acpi_battery_remove(struct acpi_sbs *sbs, int id)
power_supply_unregister(&battery->bat);
}
#ifdef CONFIG_ACPI_PROCFS_POWER
- if (battery->proc_entry)
- acpi_sbs_remove_fs(&battery->proc_entry, acpi_battery_dir);
+ proc_remove(battery->proc_entry);
+ battery->proc_entry = NULL;
#endif
}
@@ -873,8 +860,8 @@ static void acpi_charger_remove(struct acpi_sbs *sbs)
if (sbs->charger.dev)
power_supply_unregister(&sbs->charger);
#ifdef CONFIG_ACPI_PROCFS_POWER
- if (sbs->charger_entry)
- acpi_sbs_remove_fs(&sbs->charger_entry, acpi_ac_dir);
+ proc_remove(sbs->charger_entry);
+ sbs->charger_entry = NULL;
#endif
}
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 405022d..7638121 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -209,8 +209,6 @@ struct arasan_cf_dev {
struct dma_chan *dma_chan;
/* Mask for DMA transfers */
dma_cap_mask_t mask;
- /* dma channel private data */
- void *dma_priv;
/* DMA transfer work */
struct work_struct work;
/* DMA delayed finish work */
@@ -308,6 +306,7 @@ static void cf_card_detect(struct arasan_cf_dev *acdev, bool hotplugged)
static int cf_init(struct arasan_cf_dev *acdev)
{
struct arasan_cf_pdata *pdata = dev_get_platdata(acdev->host->dev);
+ unsigned int if_clk;
unsigned long flags;
int ret = 0;
@@ -325,8 +324,12 @@ static int cf_init(struct arasan_cf_dev *acdev)
spin_lock_irqsave(&acdev->host->lock, flags);
/* configure CF interface clock */
- writel((pdata->cf_if_clk <= CF_IF_CLK_200M) ? pdata->cf_if_clk :
- CF_IF_CLK_166M, acdev->vbase + CLK_CFG);
+ /* TODO: read from device tree */
+ if_clk = CF_IF_CLK_166M;
+ if (pdata && pdata->cf_if_clk <= CF_IF_CLK_200M)
+ if_clk = pdata->cf_if_clk;
+
+ writel(if_clk, acdev->vbase + CLK_CFG);
writel(TRUE_IDE_MODE | CFHOST_ENB, acdev->vbase + OP_MODE);
cf_interrupt_enable(acdev, CARD_DETECT_IRQ, 1);
@@ -357,12 +360,6 @@ static void dma_callback(void *dev)
complete(&acdev->dma_completion);
}
-static bool filter(struct dma_chan *chan, void *slave)
-{
- chan->private = slave;
- return true;
-}
-
static inline void dma_complete(struct arasan_cf_dev *acdev)
{
struct ata_queued_cmd *qc = acdev->qc;
@@ -530,8 +527,7 @@ static void data_xfer(struct work_struct *work)
/* request dma channels */
/* dma_request_channel may sleep, so calling from process context */
- acdev->dma_chan = dma_request_channel(acdev->mask, filter,
- acdev->dma_priv);
+ acdev->dma_chan = dma_request_slave_channel(acdev->host->dev, "data");
if (!acdev->dma_chan) {
dev_err(acdev->host->dev, "Unable to get dma_chan\n");
goto chan_request_fail;
@@ -798,6 +794,7 @@ static int arasan_cf_probe(struct platform_device *pdev)
struct ata_host *host;
struct ata_port *ap;
struct resource *res;
+ u32 quirk;
irq_handler_t irq_handler = NULL;
int ret = 0;
@@ -817,12 +814,17 @@ static int arasan_cf_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ if (pdata)
+ quirk = pdata->quirk;
+ else
+ quirk = CF_BROKEN_UDMA; /* as it is on spear1340 */
+
/* if irq is 0, support only PIO */
acdev->irq = platform_get_irq(pdev, 0);
if (acdev->irq)
irq_handler = arasan_cf_interrupt;
else
- pdata->quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
+ quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
acdev->pbase = res->start;
acdev->vbase = devm_ioremap_nocache(&pdev->dev, res->start,
@@ -859,17 +861,16 @@ static int arasan_cf_probe(struct platform_device *pdev)
INIT_WORK(&acdev->work, data_xfer);
INIT_DELAYED_WORK(&acdev->dwork, delayed_finish);
dma_cap_set(DMA_MEMCPY, acdev->mask);
- acdev->dma_priv = pdata->dma_priv;
/* Handle platform specific quirks */
- if (pdata->quirk) {
- if (pdata->quirk & CF_BROKEN_PIO) {
+ if (quirk) {
+ if (quirk & CF_BROKEN_PIO) {
ap->ops->set_piomode = NULL;
ap->pio_mask = 0;
}
- if (pdata->quirk & CF_BROKEN_MWDMA)
+ if (quirk & CF_BROKEN_MWDMA)
ap->mwdma_mask = 0;
- if (pdata->quirk & CF_BROKEN_UDMA)
+ if (quirk & CF_BROKEN_UDMA)
ap->udma_mask = 0;
}
ap->flags |= ATA_FLAG_PIO_POLLING | ATA_FLAG_NO_ATAPI;
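The DMA changes above replace the capability-mask/filter-callback lookup with dma_request_slave_channel(), which resolves a named channel from the device's DMA resources (the "data" name matches the hunk above). A minimal before/after sketch, with error handling omitted:

    #include <linux/dmaengine.h>

    /* old style: a filter callback used to smuggle private data to the channel */
    static bool my_filter(struct dma_chan *chan, void *slave)
    {
            chan->private = slave;
            return true;
    }

    static struct dma_chan *request_old(dma_cap_mask_t mask, void *priv)
    {
            return dma_request_channel(mask, my_filter, priv);
    }

    /* new style: let the dmaengine core look the channel up by name */
    static struct dma_chan *request_new(struct device *dev)
    {
            return dma_request_slave_channel(dev, "data");
    }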
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 08608de..dc4f701 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -322,23 +322,11 @@ static u8 k2_stat_check_status(struct ata_port *ap)
}
#ifdef CONFIG_PPC_OF
-/*
- * k2_sata_proc_info
- * inout : decides on the direction of the dataflow and the meaning of the
- * variables
- * buffer: If inout==FALSE data is being written to it else read from it
- * *start: If inout==FALSE start of the valid data in the buffer
- * offset: If inout==FALSE offset from the beginning of the imaginary file
- * from which we start writing into the buffer
- * length: If inout==FALSE max number of bytes to be written into the buffer
- * else number of bytes in the buffer
- */
-static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start,
- off_t offset, int count, int inout)
+static int k2_sata_show_info(struct seq_file *m, struct Scsi_Host *shost)
{
struct ata_port *ap;
struct device_node *np;
- int len, index;
+ int index;
/* Find the ata_port */
ap = ata_shost_to_port(shost);
@@ -356,15 +344,12 @@ static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start,
const u32 *reg = of_get_property(np, "reg", NULL);
if (!reg)
continue;
- if (index == *reg)
+ if (index == *reg) {
+ seq_printf(m, "devspec: %s\n", np->full_name);
break;
+ }
}
- if (np == NULL)
- return 0;
-
- len = sprintf(page, "devspec: %s\n", np->full_name);
-
- return len;
+ return 0;
}
#endif /* CONFIG_PPC_OF */
@@ -372,7 +357,7 @@ static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start,
static struct scsi_host_template k2_sata_sht = {
ATA_BMDMA_SHT(DRV_NAME),
#ifdef CONFIG_PPC_OF
- .proc_info = k2_sata_proc_info,
+ .show_info = k2_sata_show_info,
#endif
};
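sata_svw (and cciss_scsi further down) follow the tree-wide switch from the multiplexed .proc_info callback to the split .show_info/.write_info pair, where the read side becomes plain seq_file output. A hedged sketch of the new shape (my_show, my_write and the printed text are illustrative only):

    #include <linux/seq_file.h>
    #include <scsi/scsi_host.h>

    static int my_show(struct seq_file *m, struct Scsi_Host *shost)
    {
            seq_printf(m, "host%u: example state\n", shost->host_no);
            return 0;
    }

    static int my_write(struct Scsi_Host *shost, char *buffer, int length)
    {
            /* parse the user-supplied command in buffer[0..length) */
            return length;
    }

    static struct scsi_host_template my_sht = {
            .name       = "example",
            .proc_name  = "example",
            .show_info  = my_show,
            .write_info = my_write,
    };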
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 5b5ee79..eb39501 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -6473,7 +6473,7 @@ static int dac960_initial_status_proc_show(struct seq_file *m, void *v)
static int dac960_initial_status_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, dac960_initial_status_proc_show, PDE(inode)->data);
+ return single_open(file, dac960_initial_status_proc_show, PDE_DATA(inode));
}
static const struct file_operations dac960_initial_status_proc_fops = {
@@ -6519,7 +6519,7 @@ static int dac960_current_status_proc_show(struct seq_file *m, void *v)
static int dac960_current_status_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, dac960_current_status_proc_show, PDE(inode)->data);
+ return single_open(file, dac960_current_status_proc_show, PDE_DATA(inode));
}
static const struct file_operations dac960_current_status_proc_fops = {
@@ -6540,14 +6540,14 @@ static int dac960_user_command_proc_show(struct seq_file *m, void *v)
static int dac960_user_command_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, dac960_user_command_proc_show, PDE(inode)->data);
+ return single_open(file, dac960_user_command_proc_show, PDE_DATA(inode));
}
static ssize_t dac960_user_command_proc_write(struct file *file,
const char __user *Buffer,
size_t Count, loff_t *pos)
{
- DAC960_Controller_T *Controller = (DAC960_Controller_T *) PDE(file_inode(file))->data;
+ DAC960_Controller_T *Controller = PDE_DATA(file_inode(file));
unsigned char CommandBuffer[80];
int Length;
if (Count > sizeof(CommandBuffer)-1) return -EINVAL;
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 386146d..4ff85b8 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1634,7 +1634,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
return 0;
}
-static int floppy_release(struct gendisk *disk, fmode_t mode)
+static void floppy_release(struct gendisk *disk, fmode_t mode)
{
struct amiga_floppy_struct *p = disk->private_data;
int drive = p - unit;
@@ -1654,7 +1654,6 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
floppy_off (drive | 0x40000000);
#endif
mutex_unlock(&amiflop_mutex);
- return 0;
}
/*
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index a129f8c..916d9ed 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -169,7 +169,7 @@ aoeblk_open(struct block_device *bdev, fmode_t mode)
return -ENODEV;
}
-static int
+static void
aoeblk_release(struct gendisk *disk, fmode_t mode)
{
struct aoedev *d = disk->private_data;
@@ -180,11 +180,9 @@ aoeblk_release(struct gendisk *disk, fmode_t mode)
if (--d->nopen == 0) {
spin_unlock_irqrestore(&d->lock, flags);
aoecmd_cfg(d->aoemajor, d->aoeminor);
- return 0;
+ return;
}
spin_unlock_irqrestore(&d->lock, flags);
-
- return 0;
}
static void
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 92b6d7c..fc803ec 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -920,16 +920,14 @@ bio_pagedec(struct bio *bio)
static void
bufinit(struct buf *buf, struct request *rq, struct bio *bio)
{
- struct bio_vec *bv;
-
memset(buf, 0, sizeof(*buf));
buf->rq = rq;
buf->bio = bio;
buf->resid = bio->bi_size;
buf->sector = bio->bi_sector;
bio_pageinc(bio);
- buf->bv = bv = &bio->bi_io_vec[bio->bi_idx];
- buf->bv_resid = bv->bv_len;
+ buf->bv = bio_iovec(bio);
+ buf->bv_resid = buf->bv->bv_len;
WARN_ON(buf->bv_resid == 0);
}
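bufinit() now goes through the bio_iovec() accessor rather than indexing bi_io_vec by hand, and the brd.c hunk below does the same with bio_end_sector(). In this kernel generation those helpers are thin wrappers along these lines (simplified sketch, not the exact macros):

    /* current segment of a bio */
    #define my_bio_iovec(bio)      (&(bio)->bi_io_vec[(bio)->bi_idx])

    /* number of 512-byte sectors covered, and the first sector past the bio */
    #define my_bio_sectors(bio)    ((bio)->bi_size >> 9)
    #define my_bio_end_sector(bio) ((bio)->bi_sector + my_bio_sectors(bio))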
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index ede16c6..0e30c6e 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -367,7 +367,7 @@ static void fd_probe( int drive );
static int fd_test_drive_present( int drive );
static void config_types( void );
static int floppy_open(struct block_device *bdev, fmode_t mode);
-static int floppy_release(struct gendisk *disk, fmode_t mode);
+static void floppy_release(struct gendisk *disk, fmode_t mode);
/************************* End of Prototypes **************************/
@@ -1886,7 +1886,7 @@ static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
return ret;
}
-static int floppy_release(struct gendisk *disk, fmode_t mode)
+static void floppy_release(struct gendisk *disk, fmode_t mode)
{
struct atari_floppy_struct *p = disk->private_data;
mutex_lock(&ataflop_mutex);
@@ -1897,7 +1897,6 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
p->ref = 0;
}
mutex_unlock(&ataflop_mutex);
- return 0;
}
static const struct block_device_operations floppy_fops = {
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 531ceb3..f1a29f8 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -334,8 +334,7 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
int err = -EIO;
sector = bio->bi_sector;
- if (sector + (bio->bi_size >> SECTOR_SHIFT) >
- get_capacity(bdev->bd_disk))
+ if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
goto out;
if (unlikely(bio->bi_rw & REQ_DISCARD)) {
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 1c1b8e5..6374dc1 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -75,6 +75,12 @@ module_param(cciss_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(cciss_simple_mode,
"Use 'simple mode' rather than 'performant mode'");
+static int cciss_allow_hpsa;
+module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(cciss_allow_hpsa,
+ "Prevent cciss driver from accessing hardware known to be "
+ " supported by the hpsa driver");
+
static DEFINE_MUTEX(cciss_mutex);
static struct proc_dir_entry *proc_cciss;
@@ -161,7 +167,7 @@ static irqreturn_t do_cciss_intx(int irq, void *dev_id);
static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
static int cciss_open(struct block_device *bdev, fmode_t mode);
static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode);
-static int cciss_release(struct gendisk *disk, fmode_t mode);
+static void cciss_release(struct gendisk *disk, fmode_t mode);
static int do_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
@@ -493,7 +499,7 @@ static int cciss_seq_open(struct inode *inode, struct file *file)
struct seq_file *seq = file->private_data;
if (!ret)
- seq->private = PDE(inode)->data;
+ seq->private = PDE_DATA(inode);
return ret;
}
@@ -1123,7 +1129,7 @@ static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode)
/*
* Close. Sync first.
*/
-static int cciss_release(struct gendisk *disk, fmode_t mode)
+static void cciss_release(struct gendisk *disk, fmode_t mode)
{
ctlr_info_t *h;
drive_info_struct *drv;
@@ -1135,7 +1141,6 @@ static int cciss_release(struct gendisk *disk, fmode_t mode)
drv->usage_count--;
h->usage_count--;
mutex_unlock(&cciss_mutex);
- return 0;
}
static int do_ioctl(struct block_device *bdev, fmode_t mode,
@@ -4116,9 +4121,13 @@ static int cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
subsystem_vendor_id;
- for (i = 0; i < ARRAY_SIZE(products); i++)
+ for (i = 0; i < ARRAY_SIZE(products); i++) {
+ /* Stand aside for hpsa driver on request */
+ if (cciss_allow_hpsa)
+ return -ENODEV;
if (*board_id == products[i].board_id)
return i;
+ }
dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n",
*board_id);
return -ENODEV;
@@ -4960,6 +4969,16 @@ static int cciss_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ctlr_info_t *h;
unsigned long flags;
+ /*
+ * By default the cciss driver is used for all older HP Smart Array
+ * controllers. There are module parameters that allow a user to
+ * override this behavior and instead use the hpsa SCSI driver. If
+ * this is the case cciss may be loaded first from the kdump initrd
+ * image and cause a kernel panic. So if reset_devices is true and
+ * cciss_allow_hpsa is set just bail.
+ */
+ if ((reset_devices) && (cciss_allow_hpsa == 1))
+ return -ENODEV;
rc = cciss_init_reset_devices(pdev);
if (rc) {
if (rc != -ENOTSUPP)
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index da33111..ecd845c 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -54,13 +54,11 @@ static CommandList_struct *cmd_special_alloc(ctlr_info_t *h);
static void cmd_free(ctlr_info_t *h, CommandList_struct *c);
static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c);
-static int cciss_scsi_proc_info(
- struct Scsi_Host *sh,
+static int cciss_scsi_write_info(struct Scsi_Host *sh,
char *buffer, /* data buffer */
- char **start, /* where data in buffer starts */
- off_t offset, /* offset from start of imaginary file */
- int length, /* length of data in buffer */
- int func); /* 0 == read, 1 == write */
+ int length); /* length of data in buffer */
+static int cciss_scsi_show_info(struct seq_file *m,
+ struct Scsi_Host *sh);
static int cciss_scsi_queue_command (struct Scsi_Host *h,
struct scsi_cmnd *cmd);
@@ -82,7 +80,8 @@ static struct scsi_host_template cciss_driver_template = {
.module = THIS_MODULE,
.name = "cciss",
.proc_name = "cciss",
- .proc_info = cciss_scsi_proc_info,
+ .write_info = cciss_scsi_write_info,
+ .show_info = cciss_scsi_show_info,
.queuecommand = cciss_scsi_queue_command,
.this_id = 7,
.cmd_per_lun = 1,
@@ -1302,59 +1301,54 @@ cciss_scsi_user_command(ctlr_info_t *h, int hostno, char *buffer, int length)
return length;
}
-
static int
-cciss_scsi_proc_info(struct Scsi_Host *sh,
+cciss_scsi_write_info(struct Scsi_Host *sh,
char *buffer, /* data buffer */
- char **start, /* where data in buffer starts */
- off_t offset, /* offset from start of imaginary file */
- int length, /* length of data in buffer */
- int func) /* 0 == read, 1 == write */
+ int length) /* length of data in buffer */
{
+ ctlr_info_t *h = (ctlr_info_t *) sh->hostdata[0];
+ if (h == NULL) /* This really shouldn't ever happen. */
+ return -EINVAL;
- int buflen, datalen;
- ctlr_info_t *h;
+ return cciss_scsi_user_command(h, sh->host_no,
+ buffer, length);
+}
+
+static int
+cciss_scsi_show_info(struct seq_file *m, struct Scsi_Host *sh)
+{
+
+ ctlr_info_t *h = (ctlr_info_t *) sh->hostdata[0];
int i;
- h = (ctlr_info_t *) sh->hostdata[0];
if (h == NULL) /* This really shouldn't ever happen. */
return -EINVAL;
- if (func == 0) { /* User is reading from /proc/scsi/ciss*?/?* */
- buflen = sprintf(buffer, "cciss%d: SCSI host: %d\n",
- h->ctlr, sh->host_no);
-
- /* this information is needed by apps to know which cciss
- device corresponds to which scsi host number without
- having to open a scsi target device node. The device
- information is not a duplicate of /proc/scsi/scsi because
- the two may be out of sync due to scsi hotplug, rather
- this info is for an app to be able to use to know how to
- get them back in sync. */
-
- for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
- struct cciss_scsi_dev_t *sd =
- &ccissscsi[h->ctlr].dev[i];
- buflen += sprintf(&buffer[buflen], "c%db%dt%dl%d %02d "
- "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- sh->host_no, sd->bus, sd->target, sd->lun,
- sd->devtype,
- sd->scsi3addr[0], sd->scsi3addr[1],
- sd->scsi3addr[2], sd->scsi3addr[3],
- sd->scsi3addr[4], sd->scsi3addr[5],
- sd->scsi3addr[6], sd->scsi3addr[7]);
- }
- datalen = buflen - offset;
- if (datalen < 0) { /* they're reading past EOF. */
- datalen = 0;
- *start = buffer+buflen;
- } else
- *start = buffer + offset;
- return(datalen);
- } else /* User is writing to /proc/scsi/cciss*?/?* ... */
- return cciss_scsi_user_command(h, sh->host_no,
- buffer, length);
-}
+ seq_printf(m, "cciss%d: SCSI host: %d\n",
+ h->ctlr, sh->host_no);
+
+ /* this information is needed by apps to know which cciss
+ device corresponds to which scsi host number without
+ having to open a scsi target device node. The device
+ information is not a duplicate of /proc/scsi/scsi because
+ the two may be out of sync due to scsi hotplug, rather
+ this info is for an app to be able to use to know how to
+ get them back in sync. */
+
+ for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
+ struct cciss_scsi_dev_t *sd =
+ &ccissscsi[h->ctlr].dev[i];
+ seq_printf(m, "c%db%dt%dl%d %02d "
+ "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ sh->host_no, sd->bus, sd->target, sd->lun,
+ sd->devtype,
+ sd->scsi3addr[0], sd->scsi3addr[1],
+ sd->scsi3addr[2], sd->scsi3addr[3],
+ sd->scsi3addr[4], sd->scsi3addr[5],
+ sd->scsi3addr[6], sd->scsi3addr[7]);
+ }
+ return 0;
+}
/* cciss_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
dma mapping and fills in the scatter gather entries of the
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 3f08713..639d26b 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -160,7 +160,7 @@ static int sendcmd(
unsigned int log_unit );
static int ida_unlocked_open(struct block_device *bdev, fmode_t mode);
-static int ida_release(struct gendisk *disk, fmode_t mode);
+static void ida_release(struct gendisk *disk, fmode_t mode);
static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg);
static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
@@ -296,7 +296,7 @@ static int ida_proc_show(struct seq_file *m, void *v)
static int ida_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, ida_proc_show, PDE(inode)->data);
+ return single_open(file, ida_proc_show, PDE_DATA(inode));
}
static const struct file_operations ida_proc_fops = {
@@ -856,7 +856,7 @@ static int ida_unlocked_open(struct block_device *bdev, fmode_t mode)
/*
* Close. Sync first.
*/
-static int ida_release(struct gendisk *disk, fmode_t mode)
+static void ida_release(struct gendisk *disk, fmode_t mode)
{
ctlr_info_t *host;
@@ -864,8 +864,6 @@ static int ida_release(struct gendisk *disk, fmode_t mode)
host = get_host(disk);
host->usage_count--;
mutex_unlock(&cpqarray_mutex);
-
- return 0;
}
/*
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 92510f8..6608076 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -104,7 +104,6 @@ struct update_al_work {
int err;
};
-static int al_write_transaction(struct drbd_conf *mdev);
void *drbd_md_get_buffer(struct drbd_conf *mdev)
{
@@ -168,7 +167,11 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
bio->bi_end_io = drbd_md_io_complete;
bio->bi_rw = rw;
- if (!get_ldev_if_state(mdev, D_ATTACHING)) { /* Corresponding put_ldev in drbd_md_io_complete() */
+ if (!(rw & WRITE) && mdev->state.disk == D_DISKLESS && mdev->ldev == NULL)
+ /* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
+ ;
+ else if (!get_ldev_if_state(mdev, D_ATTACHING)) {
+ /* Corresponding put_ldev in drbd_md_io_complete() */
dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
err = -ENODEV;
goto out;
@@ -199,9 +202,10 @@ int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
BUG_ON(!bdev->md_bdev);
- dev_dbg(DEV, "meta_data io: %s [%d]:%s(,%llus,%s)\n",
+ dev_dbg(DEV, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n",
current->comm, current->pid, __func__,
- (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
+ (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ",
+ (void*)_RET_IP_ );
if (sector < drbd_md_first_sector(bdev) ||
sector + 7 > drbd_md_last_sector(bdev))
@@ -209,7 +213,8 @@ int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
current->comm, current->pid, __func__,
(unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
- err = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, MD_BLOCK_SIZE);
+ /* we do all our meta data IO in aligned 4k blocks. */
+ err = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, 4096);
if (err) {
dev_err(DEV, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
(unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err);
@@ -217,44 +222,99 @@ int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
return err;
}
-static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
+static struct bm_extent *find_active_resync_extent(struct drbd_conf *mdev, unsigned int enr)
{
- struct lc_element *al_ext;
struct lc_element *tmp;
- int wake;
-
- spin_lock_irq(&mdev->al_lock);
tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
if (unlikely(tmp != NULL)) {
struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
- if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
- wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags);
- spin_unlock_irq(&mdev->al_lock);
- if (wake)
- wake_up(&mdev->al_wait);
- return NULL;
- }
+ if (test_bit(BME_NO_WRITES, &bm_ext->flags))
+ return bm_ext;
+ }
+ return NULL;
+}
+
+static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr, bool nonblock)
+{
+ struct lc_element *al_ext;
+ struct bm_extent *bm_ext;
+ int wake;
+
+ spin_lock_irq(&mdev->al_lock);
+ bm_ext = find_active_resync_extent(mdev, enr);
+ if (bm_ext) {
+ wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags);
+ spin_unlock_irq(&mdev->al_lock);
+ if (wake)
+ wake_up(&mdev->al_wait);
+ return NULL;
}
- al_ext = lc_get(mdev->act_log, enr);
+ if (nonblock)
+ al_ext = lc_try_get(mdev->act_log, enr);
+ else
+ al_ext = lc_get(mdev->act_log, enr);
spin_unlock_irq(&mdev->al_lock);
return al_ext;
}
-void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i)
+bool drbd_al_begin_io_fastpath(struct drbd_conf *mdev, struct drbd_interval *i)
{
/* for bios crossing activity log extent boundaries,
* we may need to activate two extents in one go */
unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
- unsigned enr;
- bool locked = false;
+ D_ASSERT((unsigned)(last - first) <= 1);
+ D_ASSERT(atomic_read(&mdev->local_cnt) > 0);
+
+ /* FIXME figure out a fast path for bios crossing AL extent boundaries */
+ if (first != last)
+ return false;
+
+ return _al_get(mdev, first, true);
+}
+
+bool drbd_al_begin_io_prepare(struct drbd_conf *mdev, struct drbd_interval *i)
+{
+ /* for bios crossing activity log extent boundaries,
+ * we may need to activate two extents in one go */
+ unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
+ unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
+ unsigned enr;
+ bool need_transaction = false;
D_ASSERT(first <= last);
D_ASSERT(atomic_read(&mdev->local_cnt) > 0);
- for (enr = first; enr <= last; enr++)
- wait_event(mdev->al_wait, _al_get(mdev, enr) != NULL);
+ for (enr = first; enr <= last; enr++) {
+ struct lc_element *al_ext;
+ wait_event(mdev->al_wait,
+ (al_ext = _al_get(mdev, enr, false)) != NULL);
+ if (al_ext->lc_number != enr)
+ need_transaction = true;
+ }
+ return need_transaction;
+}
+
+static int al_write_transaction(struct drbd_conf *mdev, bool delegate);
+
+/* When called through generic_make_request(), we must delegate
+ * activity log I/O to the worker thread: a further request
+ * submitted via generic_make_request() within the same task
+ * would be queued on current->bio_list, and would only start
+ * after this function returns (see generic_make_request()).
+ *
+ * However, if we *are* the worker, we must not delegate to ourselves.
+ */
+
+/*
+ * @delegate: delegate activity log I/O to the worker thread
+ */
+void drbd_al_begin_io_commit(struct drbd_conf *mdev, bool delegate)
+{
+ bool locked = false;
+
+ BUG_ON(delegate && current == mdev->tconn->worker.task);
/* Serialize multiple transactions.
* This uses test_and_set_bit, memory barrier is implicit.
@@ -264,13 +324,6 @@ void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i)
(locked = lc_try_lock_for_transaction(mdev->act_log)));
if (locked) {
- /* drbd_al_write_transaction(mdev,al_ext,enr);
- * recurses into generic_make_request(), which
- * disallows recursion, bios being serialized on the
- * current->bio_tail list now.
- * we have to delegate updates to the activity log
- * to the worker thread. */
-
/* Double check: it may have been committed by someone else,
* while we have been waiting for the lock. */
if (mdev->act_log->pending_changes) {
@@ -280,11 +333,8 @@ void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i)
write_al_updates = rcu_dereference(mdev->ldev->disk_conf)->al_updates;
rcu_read_unlock();
- if (write_al_updates) {
- al_write_transaction(mdev);
- mdev->al_writ_cnt++;
- }
-
+ if (write_al_updates)
+ al_write_transaction(mdev, delegate);
spin_lock_irq(&mdev->al_lock);
/* FIXME
if (err)
@@ -298,6 +348,66 @@ void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i)
}
}
+/*
+ * @delegate: delegate activity log I/O to the worker thread
+ */
+void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i, bool delegate)
+{
+ BUG_ON(delegate && current == mdev->tconn->worker.task);
+
+ if (drbd_al_begin_io_prepare(mdev, i))
+ drbd_al_begin_io_commit(mdev, delegate);
+}
+
+int drbd_al_begin_io_nonblock(struct drbd_conf *mdev, struct drbd_interval *i)
+{
+ struct lru_cache *al = mdev->act_log;
+ /* for bios crossing activity log extent boundaries,
+ * we may need to activate two extents in one go */
+ unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
+ unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
+ unsigned nr_al_extents;
+ unsigned available_update_slots;
+ unsigned enr;
+
+ D_ASSERT(first <= last);
+
+ nr_al_extents = 1 + last - first; /* worst case: all touched extents are cold. */
+ available_update_slots = min(al->nr_elements - al->used,
+ al->max_pending_changes - al->pending_changes);
+
+ /* We want all necessary updates for a given request within the same transaction
+ * We could first check how many updates are *actually* needed,
+ * and use that instead of the worst-case nr_al_extents */
+ if (available_update_slots < nr_al_extents)
+ return -EWOULDBLOCK;
+
+ /* Is resync active in this area? */
+ for (enr = first; enr <= last; enr++) {
+ struct lc_element *tmp;
+ tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
+ if (unlikely(tmp != NULL)) {
+ struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
+ if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
+ if (!test_and_set_bit(BME_PRIORITY, &bm_ext->flags))
+ return -EBUSY;
+ return -EWOULDBLOCK;
+ }
+ }
+ }
+
+ /* Checkout the refcounts.
+ * Given that we checked for available elements and update slots above,
+ * this has to be successful. */
+ for (enr = first; enr <= last; enr++) {
+ struct lc_element *al_ext;
+ al_ext = lc_get_cumulative(mdev->act_log, enr);
+ if (!al_ext)
+ dev_info(DEV, "LOGIC BUG for enr=%u\n", enr);
+ }
+ return 0;
+}
+
void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i)
{
/* for bios crossing activity log extent boundaries,
@@ -350,6 +460,24 @@ static unsigned int rs_extent_to_bm_page(unsigned int rs_enr)
(BM_EXT_SHIFT - BM_BLOCK_SHIFT));
}
+static sector_t al_tr_number_to_on_disk_sector(struct drbd_conf *mdev)
+{
+ const unsigned int stripes = mdev->ldev->md.al_stripes;
+ const unsigned int stripe_size_4kB = mdev->ldev->md.al_stripe_size_4k;
+
+ /* transaction number, modulo on-disk ring buffer wrap around */
+ unsigned int t = mdev->al_tr_number % (mdev->ldev->md.al_size_4k);
+
+ /* ... to aligned 4k on disk block */
+ t = ((t % stripes) * stripe_size_4kB) + t/stripes;
+
+ /* ... to 512 byte sector in activity log */
+ t *= 8;
+
+ /* ... plus offset to the on disk position */
+ return mdev->ldev->md.md_offset + mdev->ldev->md.al_offset + t;
+}
+
static int
_al_write_transaction(struct drbd_conf *mdev)
{
@@ -432,23 +560,27 @@ _al_write_transaction(struct drbd_conf *mdev)
if (mdev->al_tr_cycle >= mdev->act_log->nr_elements)
mdev->al_tr_cycle = 0;
- sector = mdev->ldev->md.md_offset
- + mdev->ldev->md.al_offset
- + mdev->al_tr_pos * (MD_BLOCK_SIZE>>9);
+ sector = al_tr_number_to_on_disk_sector(mdev);
crc = crc32c(0, buffer, 4096);
buffer->crc32c = cpu_to_be32(crc);
if (drbd_bm_write_hinted(mdev))
err = -EIO;
- /* drbd_chk_io_error done already */
- else if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
- err = -EIO;
- drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
- } else {
- /* advance ringbuffer position and transaction counter */
- mdev->al_tr_pos = (mdev->al_tr_pos + 1) % (MD_AL_SECTORS*512/MD_BLOCK_SIZE);
- mdev->al_tr_number++;
+ else {
+ bool write_al_updates;
+ rcu_read_lock();
+ write_al_updates = rcu_dereference(mdev->ldev->disk_conf)->al_updates;
+ rcu_read_unlock();
+ if (write_al_updates) {
+ if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
+ err = -EIO;
+ drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
+ } else {
+ mdev->al_tr_number++;
+ mdev->al_writ_cnt++;
+ }
+ }
}
drbd_md_put_buffer(mdev);
@@ -474,20 +606,18 @@ static int w_al_write_transaction(struct drbd_work *w, int unused)
/* Calls from worker context (see w_restart_disk_io()) need to write the
transaction directly. Others came through generic_make_request(),
those need to delegate it to the worker. */
-static int al_write_transaction(struct drbd_conf *mdev)
+static int al_write_transaction(struct drbd_conf *mdev, bool delegate)
{
- struct update_al_work al_work;
-
- if (current == mdev->tconn->worker.task)
+ if (delegate) {
+ struct update_al_work al_work;
+ init_completion(&al_work.event);
+ al_work.w.cb = w_al_write_transaction;
+ al_work.w.mdev = mdev;
+ drbd_queue_work_front(&mdev->tconn->sender_work, &al_work.w);
+ wait_for_completion(&al_work.event);
+ return al_work.err;
+ } else
return _al_write_transaction(mdev);
-
- init_completion(&al_work.event);
- al_work.w.cb = w_al_write_transaction;
- al_work.w.mdev = mdev;
- drbd_queue_work_front(&mdev->tconn->sender_work, &al_work.w);
- wait_for_completion(&al_work.event);
-
- return al_work.err;
}
static int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext)
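The new al_tr_number_to_on_disk_sector() above interleaves consecutive 4k activity-log transactions across the configured stripes before turning the block index into a sector. A standalone sketch of just that arithmetic, with made-up parameters (4 stripes of 2 blocks each; the real values come from the on-disk meta data):

    #include <stdio.h>

    /* map transaction number -> 512-byte sector, mirroring the kernel arithmetic */
    static unsigned long long tr_to_sector(unsigned int tr_number,
                                           unsigned int stripes,
                                           unsigned int stripe_size_4k,
                                           unsigned long long al_start_sector)
    {
            unsigned int al_size_4k = stripes * stripe_size_4k;
            unsigned int t = tr_number % al_size_4k;          /* ring buffer wrap */

            t = (t % stripes) * stripe_size_4k + t / stripes; /* stripe interleave */
            return al_start_sector + 8ULL * t;                /* 4k block -> sectors */
    }

    int main(void)
    {
            unsigned int i;

            /* transactions 0..3 hit the first block of each stripe, 4..7 the second */
            for (i = 0; i < 8; i++)
                    printf("transaction %u -> sector %llu\n",
                           i, tr_to_sector(i, 4, 2, 0));
            return 0;
    }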
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 8dc2950..64fbb83 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -612,6 +612,17 @@ static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
}
}
+/* For the layout, see comment above drbd_md_set_sector_offsets(). */
+static u64 drbd_md_on_disk_bits(struct drbd_backing_dev *ldev)
+{
+ u64 bitmap_sectors;
+ if (ldev->md.al_offset == 8)
+ bitmap_sectors = ldev->md.md_size_sect - ldev->md.bm_offset;
+ else
+ bitmap_sectors = ldev->md.al_offset - ldev->md.bm_offset;
+ return bitmap_sectors << (9 + 3);
+}
+
/*
* make sure the bitmap has enough room for the attached storage,
* if necessary, resize.
@@ -668,7 +679,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
words = ALIGN(bits, 64) >> LN2_BPL;
if (get_ldev(mdev)) {
- u64 bits_on_disk = ((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12;
+ u64 bits_on_disk = drbd_md_on_disk_bits(mdev->ldev);
put_ldev(mdev);
if (bits > bits_on_disk) {
dev_info(DEV, "bits = %lu\n", bits);
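drbd_md_on_disk_bits() above derives the available bitmap capacity from the sector span between the offsets instead of assuming the old fixed layout; the shift by (9 + 3) is just sectors -> bytes -> bits. A tiny worked example (numbers purely illustrative):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long bitmap_sectors = 32768;            /* 16 MB of bitmap */
            unsigned long long bits = bitmap_sectors << (9 + 3);  /* x512 bytes, x8 bits */

            /* with one bit per 4k block (BM_BLOCK_SIZE), that bitmap covers: */
            printf("%llu bits -> %llu GiB of data\n",
                   bits, bits * 4096 / (1024ULL * 1024 * 1024));
            return 0;
    }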
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 6b51afa..f943aac 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -753,13 +753,16 @@ struct drbd_md {
u32 flags;
u32 md_size_sect;
- s32 al_offset; /* signed relative sector offset to al area */
+ s32 al_offset; /* signed relative sector offset to activity log */
s32 bm_offset; /* signed relative sector offset to bitmap */
- /* u32 al_nr_extents; important for restoring the AL
- * is stored into ldev->dc.al_extents, which in turn
- * gets applied to act_log->nr_elements
- */
+ /* cached value of bdev->disk_conf->meta_dev_idx (see below) */
+ s32 meta_dev_idx;
+
+ /* see al_tr_number_to_on_disk_sector() */
+ u32 al_stripes;
+ u32 al_stripe_size_4k;
+ u32 al_size_4k; /* cached product of the above */
};
struct drbd_backing_dev {
@@ -891,6 +894,14 @@ struct drbd_tconn { /* is a resource from the config file */
} send;
};
+struct submit_worker {
+ struct workqueue_struct *wq;
+ struct work_struct worker;
+
+ spinlock_t lock;
+ struct list_head writes;
+};
+
struct drbd_conf {
struct drbd_tconn *tconn;
int vnr; /* volume number within the connection */
@@ -1009,7 +1020,6 @@ struct drbd_conf {
struct lru_cache *act_log; /* activity log */
unsigned int al_tr_number;
int al_tr_cycle;
- int al_tr_pos; /* position of the next transaction in the journal */
wait_queue_head_t seq_wait;
atomic_t packet_seq;
unsigned int peer_seq;
@@ -1032,6 +1042,10 @@ struct drbd_conf {
atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
unsigned int peer_max_bio_size;
unsigned int local_max_bio_size;
+
+ /* any requests that would block in drbd_make_request()
+ * are deferred to this single-threaded work queue */
+ struct submit_worker submit;
};
static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
@@ -1148,25 +1162,44 @@ extern int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
char *why, enum bm_flag flags);
extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
-extern void drbd_go_diskless(struct drbd_conf *mdev);
extern void drbd_ldev_destroy(struct drbd_conf *mdev);
/* Meta data layout
- We reserve a 128MB Block (4k aligned)
- * either at the end of the backing device
- * or on a separate meta data device. */
+ *
+ * We currently have two possible layouts.
+ * Offsets in (512 byte) sectors.
+ * external:
+ * |----------- md_size_sect ------------------|
+ * [ 4k superblock ][ activity log ][ Bitmap ]
+ * | al_offset == 8 |
+ * | bm_offset = al_offset + X |
+ * ==> bitmap sectors = md_size_sect - bm_offset
+ *
+ * Variants:
+ * old, indexed fixed size meta data:
+ *
+ * internal:
+ * |----------- md_size_sect ------------------|
+ * [data.....][ Bitmap ][ activity log ][ 4k superblock ][padding*]
+ * | al_offset < 0 |
+ * | bm_offset = al_offset - Y |
+ * ==> bitmap sectors = Y = al_offset - bm_offset
+ *
+ * [padding*] are zero or up to 7 unused 512 Byte sectors to the
+ * end of the device, so that the [4k superblock] will be 4k aligned.
+ *
+ * The activity log consists of 4k transaction blocks,
+ * which are written in a ring-buffer, or striped ring-buffer like fashion,
+ * which are writtensize used to be fixed 32kB,
+ * but is about to become configurable.
+ */
-/* The following numbers are sectors */
-/* Allows up to about 3.8TB, so if you want more,
+/* Our old fixed size meta data layout
+ * allows up to about 3.8TB, so if you want more,
* you need to use the "flexible" meta data format. */
-#define MD_RESERVED_SECT (128LU << 11) /* 128 MB, unit sectors */
-#define MD_AL_OFFSET 8 /* 8 Sectors after start of meta area */
-#define MD_AL_SECTORS 64 /* = 32 kB on disk activity log ring buffer */
-#define MD_BM_OFFSET (MD_AL_OFFSET + MD_AL_SECTORS)
-
-/* we do all meta data IO in 4k blocks */
-#define MD_BLOCK_SHIFT 12
-#define MD_BLOCK_SIZE (1<<MD_BLOCK_SHIFT)
+#define MD_128MB_SECT (128LLU << 11) /* 128 MB, unit sectors */
+#define MD_4kB_SECT 8
+#define MD_32kB_SECT 64
/* One activity log extent represents 4M of storage */
#define AL_EXTENT_SHIFT 22
@@ -1256,7 +1289,6 @@ struct bm_extent {
/* in one sector of the bitmap, we have this many activity_log extents. */
#define AL_EXT_PER_BM_SECT (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
-#define BM_WORDS_PER_AL_EXT (1 << (AL_EXTENT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
#define BM_BLOCKS_PER_BM_EXT_B (BM_EXT_SHIFT - BM_BLOCK_SHIFT)
#define BM_BLOCKS_PER_BM_EXT_MASK ((1<<BM_BLOCKS_PER_BM_EXT_B) - 1)
@@ -1276,16 +1308,18 @@ struct bm_extent {
*/
#define DRBD_MAX_SECTORS_32 (0xffffffffLU)
-#define DRBD_MAX_SECTORS_BM \
- ((MD_RESERVED_SECT - MD_BM_OFFSET) * (1LL<<(BM_EXT_SHIFT-9)))
-#if DRBD_MAX_SECTORS_BM < DRBD_MAX_SECTORS_32
-#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_BM
-#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_BM
-#elif !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
+/* we have a certain meta data variant that has a fixed on-disk size of 128
+ * MiB, of which 4k are our "superblock", and 32k are the fixed size activity
+ * log, leaving this many sectors for the bitmap.
+ */
+
+#define DRBD_MAX_SECTORS_FIXED_BM \
+ ((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
+#if !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_32
#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
#else
-#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_BM
+#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_FIXED_BM
/* 16 TB in units of sectors */
#if BITS_PER_LONG == 32
/* adjust by one page worth of bitmap,
@@ -1418,6 +1452,7 @@ extern void conn_free_crypto(struct drbd_tconn *tconn);
extern int proc_details;
/* drbd_req */
+extern void do_submit(struct work_struct *ws);
extern void __drbd_make_request(struct drbd_conf *, struct bio *, unsigned long);
extern void drbd_make_request(struct request_queue *q, struct bio *bio);
extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
@@ -1576,7 +1611,10 @@ extern const char *drbd_conn_str(enum drbd_conns s);
extern const char *drbd_role_str(enum drbd_role s);
/* drbd_actlog.c */
-extern void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i);
+extern int drbd_al_begin_io_nonblock(struct drbd_conf *mdev, struct drbd_interval *i);
+extern void drbd_al_begin_io_commit(struct drbd_conf *mdev, bool delegate);
+extern bool drbd_al_begin_io_fastpath(struct drbd_conf *mdev, struct drbd_interval *i);
+extern void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i, bool delegate);
extern void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i);
extern void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector);
extern int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
@@ -1755,9 +1793,9 @@ static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
* BTW, for internal meta data, this happens to be the maximum capacity
* we could agree upon with our peer node.
*/
-static inline sector_t _drbd_md_first_sector(int meta_dev_idx, struct drbd_backing_dev *bdev)
+static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
{
- switch (meta_dev_idx) {
+ switch (bdev->md.meta_dev_idx) {
case DRBD_MD_INDEX_INTERNAL:
case DRBD_MD_INDEX_FLEX_INT:
return bdev->md.md_offset + bdev->md.bm_offset;
@@ -1767,36 +1805,19 @@ static inline sector_t _drbd_md_first_sector(int meta_dev_idx, struct drbd_backi
}
}
-static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
-{
- int meta_dev_idx;
-
- rcu_read_lock();
- meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
- rcu_read_unlock();
-
- return _drbd_md_first_sector(meta_dev_idx, bdev);
-}
-
/**
* drbd_md_last_sector() - Return the last sector number of the meta data area
* @bdev: Meta data block device.
*/
static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
{
- int meta_dev_idx;
-
- rcu_read_lock();
- meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
- rcu_read_unlock();
-
- switch (meta_dev_idx) {
+ switch (bdev->md.meta_dev_idx) {
case DRBD_MD_INDEX_INTERNAL:
case DRBD_MD_INDEX_FLEX_INT:
- return bdev->md.md_offset + MD_AL_OFFSET - 1;
+ return bdev->md.md_offset + MD_4kB_SECT -1;
case DRBD_MD_INDEX_FLEX_EXT:
default:
- return bdev->md.md_offset + bdev->md.md_size_sect;
+ return bdev->md.md_offset + bdev->md.md_size_sect -1;
}
}
@@ -1818,18 +1839,13 @@ static inline sector_t drbd_get_capacity(struct block_device *bdev)
static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
{
sector_t s;
- int meta_dev_idx;
- rcu_read_lock();
- meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
- rcu_read_unlock();
-
- switch (meta_dev_idx) {
+ switch (bdev->md.meta_dev_idx) {
case DRBD_MD_INDEX_INTERNAL:
case DRBD_MD_INDEX_FLEX_INT:
s = drbd_get_capacity(bdev->backing_bdev)
? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
- _drbd_md_first_sector(meta_dev_idx, bdev))
+ drbd_md_first_sector(bdev))
: 0;
break;
case DRBD_MD_INDEX_FLEX_EXT:
@@ -1848,39 +1864,24 @@ static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
}
/**
- * drbd_md_ss__() - Return the sector number of our meta data super block
- * @mdev: DRBD device.
+ * drbd_md_ss() - Return the sector number of our meta data super block
* @bdev: Meta data block device.
*/
-static inline sector_t drbd_md_ss__(struct drbd_conf *mdev,
- struct drbd_backing_dev *bdev)
+static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
{
- int meta_dev_idx;
+ const int meta_dev_idx = bdev->md.meta_dev_idx;
- rcu_read_lock();
- meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
- rcu_read_unlock();
-
- switch (meta_dev_idx) {
- default: /* external, some index */
- return MD_RESERVED_SECT * meta_dev_idx;
- case DRBD_MD_INDEX_INTERNAL:
- /* with drbd08, internal meta data is always "flexible" */
- case DRBD_MD_INDEX_FLEX_INT:
- /* sizeof(struct md_on_disk_07) == 4k
- * position: last 4k aligned block of 4k size */
- if (!bdev->backing_bdev) {
- if (__ratelimit(&drbd_ratelimit_state)) {
- dev_err(DEV, "bdev->backing_bdev==NULL\n");
- dump_stack();
- }
- return 0;
- }
- return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL)
- - MD_AL_OFFSET;
- case DRBD_MD_INDEX_FLEX_EXT:
+ if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT)
return 0;
- }
+
+ /* Since drbd08, internal meta data is always "flexible".
+ * position: last 4k aligned block of 4k size */
+ if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
+ meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)
+ return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;
+
+ /* external, some index; this is the old fixed size layout */
+ return MD_128MB_SECT * bdev->md.meta_dev_idx;
}
static inline void
@@ -2053,9 +2054,11 @@ static inline void put_ldev(struct drbd_conf *mdev)
if (mdev->state.disk == D_DISKLESS)
/* even internal references gone, safe to destroy */
drbd_ldev_destroy(mdev);
- if (mdev->state.disk == D_FAILED)
+ if (mdev->state.disk == D_FAILED) {
/* all application IO references gone. */
- drbd_go_diskless(mdev);
+ if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->go_diskless);
+ }
wake_up(&mdev->misc_wait);
}
}
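With meta_dev_idx now cached in struct drbd_md, drbd_md_ss() reduces to three cases; for internal meta data the superblock sits in the last 4k-aligned 4k block of the backing device, i.e. (capacity & ~7) - 8 in sectors. A standalone sketch of that placement, using an illustrative capacity:

    #include <stdio.h>

    /* last 4k-aligned 4k block of a device, expressed in 512-byte sectors */
    static unsigned long long internal_md_superblock(unsigned long long capacity_sectors)
    {
            return (capacity_sectors & ~7ULL) - 8;
    }

    int main(void)
    {
            /* e.g. a backing device whose size is not a multiple of 4k */
            unsigned long long capacity = 20971523;   /* sectors, ~10 GiB */
            unsigned long long sb = internal_md_superblock(capacity);

            printf("superblock at sector %llu (byte offset %llu)\n", sb, sb * 512);
            return 0;
    }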
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index e98da67..a5dca6a 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -45,7 +45,7 @@
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
-
+#include <linux/workqueue.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
@@ -63,7 +63,7 @@ int drbd_asender(struct drbd_thread *);
int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
-static int drbd_release(struct gendisk *gd, fmode_t mode);
+static void drbd_release(struct gendisk *gd, fmode_t mode);
static int w_md_sync(struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_work *w, int unused);
@@ -1849,13 +1849,12 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
return rv;
}
-static int drbd_release(struct gendisk *gd, fmode_t mode)
+static void drbd_release(struct gendisk *gd, fmode_t mode)
{
struct drbd_conf *mdev = gd->private_data;
mutex_lock(&drbd_main_mutex);
mdev->open_cnt--;
mutex_unlock(&drbd_main_mutex);
- return 0;
}
static void drbd_set_defaults(struct drbd_conf *mdev)
@@ -2300,6 +2299,7 @@ static void drbd_cleanup(void)
idr_for_each_entry(&minors, mdev, i) {
idr_remove(&minors, mdev_to_minor(mdev));
idr_remove(&mdev->tconn->volumes, mdev->vnr);
+ destroy_workqueue(mdev->submit.wq);
del_gendisk(mdev->vdisk);
/* synchronize_rcu(); No other threads running at this point */
kref_put(&mdev->kref, &drbd_minor_destroy);
@@ -2589,6 +2589,21 @@ void conn_destroy(struct kref *kref)
kfree(tconn);
}
+int init_submitter(struct drbd_conf *mdev)
+{
+ /* opencoded create_singlethread_workqueue(),
+ * to be able to say "drbd%d", ..., minor */
+ mdev->submit.wq = alloc_workqueue("drbd%u_submit",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 1, mdev->minor);
+ if (!mdev->submit.wq)
+ return -ENOMEM;
+
+ INIT_WORK(&mdev->submit.worker, do_submit);
+ spin_lock_init(&mdev->submit.lock);
+ INIT_LIST_HEAD(&mdev->submit.writes);
+ return 0;
+}
+
enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
{
struct drbd_conf *mdev;
@@ -2678,6 +2693,12 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
goto out_idr_remove_minor;
}
+ if (init_submitter(mdev)) {
+ err = ERR_NOMEM;
+ drbd_msg_put_info("unable to create submit workqueue");
+ goto out_idr_remove_vol;
+ }
+
add_disk(disk);
kref_init(&mdev->kref); /* one ref for both idrs and the add_disk */
@@ -2688,6 +2709,8 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
return NO_ERROR;
+out_idr_remove_vol:
+ idr_remove(&tconn->volumes, vnr_got);
out_idr_remove_minor:
idr_remove(&minors, minor_got);
synchronize_rcu();
@@ -2795,6 +2818,7 @@ void drbd_free_bc(struct drbd_backing_dev *ldev)
blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+ kfree(ldev->disk_conf);
kfree(ldev);
}
@@ -2834,8 +2858,9 @@ void conn_md_sync(struct drbd_tconn *tconn)
rcu_read_unlock();
}
+/* aligned 4kByte */
struct meta_data_on_disk {
- u64 la_size; /* last agreed size. */
+ u64 la_size_sect; /* last agreed size. */
u64 uuid[UI_SIZE]; /* UUIDs. */
u64 device_uuid;
u64 reserved_u64_1;
@@ -2843,13 +2868,17 @@ struct meta_data_on_disk {
u32 magic;
u32 md_size_sect;
u32 al_offset; /* offset to this block */
- u32 al_nr_extents; /* important for restoring the AL */
+ u32 al_nr_extents; /* important for restoring the AL (userspace) */
/* `-- act_log->nr_elements <-- ldev->dc.al_extents */
u32 bm_offset; /* offset to the bitmap, from here */
u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
u32 la_peer_max_bio_size; /* last peer max_bio_size */
- u32 reserved_u32[3];
+ /* see al_tr_number_to_on_disk_sector() */
+ u32 al_stripes;
+ u32 al_stripe_size_4k;
+
+ u8 reserved_u8[4096 - (7*8 + 10*4)];
} __packed;
/**
@@ -2862,6 +2891,10 @@ void drbd_md_sync(struct drbd_conf *mdev)
sector_t sector;
int i;
+ /* Don't accidentally change the DRBD meta data layout. */
+ BUILD_BUG_ON(UI_SIZE != 4);
+ BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
+
del_timer(&mdev->md_sync_timer);
/* timer may be rearmed by drbd_md_mark_dirty() now. */
if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
@@ -2876,9 +2909,9 @@ void drbd_md_sync(struct drbd_conf *mdev)
if (!buffer)
goto out;
- memset(buffer, 0, 512);
+ memset(buffer, 0, sizeof(*buffer));
- buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
+ buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
for (i = UI_CURRENT; i < UI_SIZE; i++)
buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
@@ -2893,7 +2926,10 @@ void drbd_md_sync(struct drbd_conf *mdev)
buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);
- D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
+ buffer->al_stripes = cpu_to_be32(mdev->ldev->md.al_stripes);
+ buffer->al_stripe_size_4k = cpu_to_be32(mdev->ldev->md.al_stripe_size_4k);
+
+ D_ASSERT(drbd_md_ss(mdev->ldev) == mdev->ldev->md.md_offset);
sector = mdev->ldev->md.md_offset;
if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
@@ -2911,13 +2947,141 @@ out:
put_ldev(mdev);
}
+static int check_activity_log_stripe_size(struct drbd_conf *mdev,
+ struct meta_data_on_disk *on_disk,
+ struct drbd_md *in_core)
+{
+ u32 al_stripes = be32_to_cpu(on_disk->al_stripes);
+ u32 al_stripe_size_4k = be32_to_cpu(on_disk->al_stripe_size_4k);
+ u64 al_size_4k;
+
+ /* both not set: default to old fixed size activity log */
+ if (al_stripes == 0 && al_stripe_size_4k == 0) {
+ al_stripes = 1;
+ al_stripe_size_4k = MD_32kB_SECT/8;
+ }
+
+ /* some paranoia plausibility checks */
+
+ /* we need both values to be set */
+ if (al_stripes == 0 || al_stripe_size_4k == 0)
+ goto err;
+
+ al_size_4k = (u64)al_stripes * al_stripe_size_4k;
+
+ /* Upper limit of activity log area, to avoid potential overflow
+ * problems in al_tr_number_to_on_disk_sector(). As right now, more
+ * than 72 * 4k blocks total only increases the amount of history,
+ * limiting this arbitrarily to 16 GB is not a real limitation ;-) */
+ if (al_size_4k > (16 * 1024 * 1024/4))
+ goto err;
+
+ /* Lower limit: we need at least 8 transaction slots (32kB)
+ * to not break existing setups */
+ if (al_size_4k < MD_32kB_SECT/8)
+ goto err;
+
+ in_core->al_stripe_size_4k = al_stripe_size_4k;
+ in_core->al_stripes = al_stripes;
+ in_core->al_size_4k = al_size_4k;
+
+ return 0;
+err:
+ dev_err(DEV, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
+ al_stripes, al_stripe_size_4k);
+ return -EINVAL;
+}
+
+static int check_offsets_and_sizes(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
+{
+ sector_t capacity = drbd_get_capacity(bdev->md_bdev);
+ struct drbd_md *in_core = &bdev->md;
+ s32 on_disk_al_sect;
+ s32 on_disk_bm_sect;
+
+ /* The on-disk size of the activity log, calculated from offsets, and
+ * the size of the activity log calculated from the stripe settings,
+ * should match.
+ * Though we could relax this a bit: it is ok, if the striped activity log
+ * fits in the available on-disk activity log size.
+ * Right now, that would break how resize is implemented.
+ * TODO: make drbd_determine_dev_size() (and the drbdmeta tool) aware
+ * of possible unused padding space in the on disk layout. */
+ if (in_core->al_offset < 0) {
+ if (in_core->bm_offset > in_core->al_offset)
+ goto err;
+ on_disk_al_sect = -in_core->al_offset;
+ on_disk_bm_sect = in_core->al_offset - in_core->bm_offset;
+ } else {
+ if (in_core->al_offset != MD_4kB_SECT)
+ goto err;
+ if (in_core->bm_offset < in_core->al_offset + in_core->al_size_4k * MD_4kB_SECT)
+ goto err;
+
+ on_disk_al_sect = in_core->bm_offset - MD_4kB_SECT;
+ on_disk_bm_sect = in_core->md_size_sect - in_core->bm_offset;
+ }
+
+ /* old fixed size meta data is exactly that: fixed. */
+ if (in_core->meta_dev_idx >= 0) {
+ if (in_core->md_size_sect != MD_128MB_SECT
+ || in_core->al_offset != MD_4kB_SECT
+ || in_core->bm_offset != MD_4kB_SECT + MD_32kB_SECT
+ || in_core->al_stripes != 1
+ || in_core->al_stripe_size_4k != MD_32kB_SECT/8)
+ goto err;
+ }
+
+ if (capacity < in_core->md_size_sect)
+ goto err;
+ if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev))
+ goto err;
+
+ /* should be aligned, and at least 32k */
+ if ((on_disk_al_sect & 7) || (on_disk_al_sect < MD_32kB_SECT))
+ goto err;
+
+ /* should fit (for now: exactly) into the available on-disk space;
+ * overflow prevention is in check_activity_log_stripe_size() above. */
+ if (on_disk_al_sect != in_core->al_size_4k * MD_4kB_SECT)
+ goto err;
+
+ /* again, should be aligned */
+ if (in_core->bm_offset & 7)
+ goto err;
+
+ /* FIXME check for device grow with flex external meta data? */
+
+ /* can the available bitmap space cover the last agreed device size? */
+ if (on_disk_bm_sect < (in_core->la_size_sect+7)/MD_4kB_SECT/8/512)
+ goto err;
+
+ return 0;
+
+err:
+ dev_err(DEV, "meta data offsets don't make sense: idx=%d "
+ "al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
+ "md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
+ in_core->meta_dev_idx,
+ in_core->al_stripes, in_core->al_stripe_size_4k,
+ in_core->al_offset, in_core->bm_offset, in_core->md_size_sect,
+ (unsigned long long)in_core->la_size_sect,
+ (unsigned long long)capacity);
+
+ return -EINVAL;
+}
+
+
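The final check above, on_disk_bm_sect < (la_size_sect+7)/MD_4kB_SECT/8/512, asks whether the bitmap area still covers the last agreed device size at one bit per 4 kB. A rough standalone sketch of that conversion, assuming MD_4kB_SECT is 8:

#include <stdio.h>

/* Bitmap space needed for a given device size; MD_4kB_SECT assumed to be 8. */
int main(void)
{
	unsigned long long la_size_sect = 1ULL << 31;        /* example: 1 TiB in 512-byte sectors */
	unsigned long long bits    = (la_size_sect + 7) / 8; /* one bit per 4 kB data block */
	unsigned long long bytes   = bits / 8;
	unsigned long long bm_sect = bytes / 512;            /* bitmap size in 512-byte sectors */

	printf("1 TiB device: %llu bits -> %llu bitmap sectors (%llu MiB)\n",
	       bits, bm_sect, bm_sect * 512 / (1024 * 1024));
	return 0;
}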
/**
* drbd_md_read() - Reads in the meta data super block
* @mdev: DRBD device.
* @bdev: Device from which the meta data should be read in.
*
- * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
+ * Return NO_ERROR on success, and an enum drbd_ret_code in case
* something goes wrong.
+ *
+ * Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
+ * even before @bdev is assigned to @mdev->ldev.
*/
int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
@@ -2925,12 +3089,17 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
u32 magic, flags;
int i, rv = NO_ERROR;
- if (!get_ldev_if_state(mdev, D_ATTACHING))
- return ERR_IO_MD_DISK;
+ if (mdev->state.disk != D_DISKLESS)
+ return ERR_DISK_CONFIGURED;
buffer = drbd_md_get_buffer(mdev);
if (!buffer)
- goto out;
+ return ERR_NOMEM;
+
+ /* First, figure out where our meta data superblock is located,
+ * and read it. */
+ bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
+ bdev->md.md_offset = drbd_md_ss(bdev);
if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
/* NOTE: can't do normal error processing here as this is
@@ -2949,45 +3118,51 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
rv = ERR_MD_UNCLEAN;
goto err;
}
+
+ rv = ERR_MD_INVALID;
if (magic != DRBD_MD_MAGIC_08) {
if (magic == DRBD_MD_MAGIC_07)
dev_err(DEV, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
else
dev_err(DEV, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
- rv = ERR_MD_INVALID;
goto err;
}
- if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
- dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
- be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
- rv = ERR_MD_INVALID;
+
+ if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
+ dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
+ be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
goto err;
}
+
+
+ /* convert to native (in-core) endianness */
+ bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect);
+ for (i = UI_CURRENT; i < UI_SIZE; i++)
+ bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
+ bdev->md.flags = be32_to_cpu(buffer->flags);
+ bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
+
+ bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect);
+ bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
+ bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);
+
+ if (check_activity_log_stripe_size(mdev, buffer, &bdev->md))
+ goto err;
+ if (check_offsets_and_sizes(mdev, bdev))
+ goto err;
+
if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
- rv = ERR_MD_INVALID;
goto err;
}
if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
- rv = ERR_MD_INVALID;
goto err;
}
- if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
- dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
- be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
- rv = ERR_MD_INVALID;
- goto err;
- }
-
- bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
- for (i = UI_CURRENT; i < UI_SIZE; i++)
- bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
- bdev->md.flags = be32_to_cpu(buffer->flags);
- bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
+ rv = NO_ERROR;
spin_lock_irq(&mdev->tconn->req_lock);
if (mdev->state.conn < C_CONNECTED) {
@@ -3000,8 +3175,6 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
err:
drbd_md_put_buffer(mdev);
- out:
- put_ldev(mdev);
return rv;
}
@@ -3239,8 +3412,12 @@ static int w_go_diskless(struct drbd_work *w, int unused)
* end up here after a failed attach, before ldev was even assigned.
*/
if (mdev->bitmap && mdev->ldev) {
+ /* An interrupted resync or similar is allowed to recount bits
+ * while we detach.
+ * Any modifications would not be expected anymore, though.
+ */
if (drbd_bitmap_io_from_worker(mdev, drbd_bm_write,
- "detach", BM_LOCKED_MASK)) {
+ "detach", BM_LOCKED_TEST_ALLOWED)) {
if (test_bit(WAS_READ_ERROR, &mdev->flags)) {
drbd_md_set_flag(mdev, MDF_FULL_SYNC);
drbd_md_sync(mdev);
@@ -3252,13 +3429,6 @@ static int w_go_diskless(struct drbd_work *w, int unused)
return 0;
}
-void drbd_go_diskless(struct drbd_conf *mdev)
-{
- D_ASSERT(mdev->state.disk == D_FAILED);
- if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
- drbd_queue_work(&mdev->tconn->sender_work, &mdev->go_diskless);
-}
-
/**
* drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
* @mdev: DRBD device.
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 2af26fc..9e3f441 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -696,37 +696,52 @@ out:
return 0;
}
-/* initializes the md.*_offset members, so we are able to find
- * the on disk meta data */
+/* Initializes the md.*_offset members, so we are able to find
+ * the on disk meta data.
+ *
+ * We currently have two possible layouts:
+ * external:
+ * |----------- md_size_sect ------------------|
+ * [ 4k superblock ][ activity log ][ Bitmap ]
+ * | al_offset == 8 |
+ * | bm_offset = al_offset + X |
+ * ==> bitmap sectors = md_size_sect - bm_offset
+ *
+ * internal:
+ * |----------- md_size_sect ------------------|
+ * [data.....][ Bitmap ][ activity log ][ 4k superblock ]
+ * | al_offset < 0 |
+ * | bm_offset = al_offset - Y |
+ * ==> bitmap sectors = Y = al_offset - bm_offset
+ *
+ * Activity log size used to be fixed 32kB,
+ * but is about to become configurable.
+ */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
struct drbd_backing_dev *bdev)
{
sector_t md_size_sect = 0;
- int meta_dev_idx;
+ unsigned int al_size_sect = bdev->md.al_size_4k * 8;
- rcu_read_lock();
- meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
+ bdev->md.md_offset = drbd_md_ss(bdev);
- switch (meta_dev_idx) {
+ switch (bdev->md.meta_dev_idx) {
default:
/* v07 style fixed size indexed meta data */
- bdev->md.md_size_sect = MD_RESERVED_SECT;
- bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
- bdev->md.al_offset = MD_AL_OFFSET;
- bdev->md.bm_offset = MD_BM_OFFSET;
+ bdev->md.md_size_sect = MD_128MB_SECT;
+ bdev->md.al_offset = MD_4kB_SECT;
+ bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
break;
case DRBD_MD_INDEX_FLEX_EXT:
/* just occupy the full device; unit: sectors */
bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
- bdev->md.md_offset = 0;
- bdev->md.al_offset = MD_AL_OFFSET;
- bdev->md.bm_offset = MD_BM_OFFSET;
+ bdev->md.al_offset = MD_4kB_SECT;
+ bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
break;
case DRBD_MD_INDEX_INTERNAL:
case DRBD_MD_INDEX_FLEX_INT:
- bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
/* al size is still fixed */
- bdev->md.al_offset = -MD_AL_SECTORS;
+ bdev->md.al_offset = -al_size_sect;
/* we need (slightly less than) ~ this much bitmap sectors: */
md_size_sect = drbd_get_capacity(bdev->backing_bdev);
md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
@@ -735,14 +750,13 @@ static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
/* plus the "drbd meta data super block",
* and the activity log; */
- md_size_sect += MD_BM_OFFSET;
+ md_size_sect += MD_4kB_SECT + al_size_sect;
bdev->md.md_size_sect = md_size_sect;
/* bitmap offset is adjusted by 'super' block size */
- bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
+ bdev->md.bm_offset = -md_size_sect + MD_4kB_SECT;
break;
}
- rcu_read_unlock();
}
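To make the external layout in the comment above concrete: with the old fixed 32 kB activity log (al_size_4k == 8, hence al_size_sect == 64), and with MD_4kB_SECT and MD_128MB_SECT assumed to be 8 and 262144, the offsets come out as in this small sketch:

#include <stdio.h>

/* External (indexed) meta data layout with the old fixed 32 kB activity log.
 * MD_4kB_SECT assumed to be 8, MD_128MB_SECT assumed to be 262144. */
int main(void)
{
	unsigned int al_size_4k   = 8;                        /* 8 x 4 kB = 32 kB */
	unsigned int al_size_sect = al_size_4k * 8;           /* 64 sectors */
	unsigned int al_offset    = 8;                        /* 4 kB superblock comes first */
	unsigned int bm_offset    = al_offset + al_size_sect; /* 72 */
	unsigned int md_size_sect = 262144;                   /* 128 MB in 512-byte sectors */

	printf("al_offset=%u bm_offset=%u bitmap sectors=%u\n",
	       al_offset, bm_offset, md_size_sect - bm_offset);
	return 0;
}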
/* input size is expected to be in KB */
@@ -805,7 +819,7 @@ void drbd_resume_io(struct drbd_conf *mdev)
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
sector_t prev_first_sect, prev_size; /* previous meta location */
- sector_t la_size, u_size;
+ sector_t la_size_sect, u_size;
sector_t size;
char ppb[10];
@@ -828,7 +842,7 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
prev_first_sect = drbd_md_first_sector(mdev->ldev);
prev_size = mdev->ldev->md.md_size_sect;
- la_size = mdev->ldev->md.la_size_sect;
+ la_size_sect = mdev->ldev->md.la_size_sect;
/* TODO: should only be some assert here, not (re)init... */
drbd_md_set_sector_offsets(mdev, mdev->ldev);
@@ -864,7 +878,7 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
if (rv == dev_size_error)
goto out;
- la_size_changed = (la_size != mdev->ldev->md.la_size_sect);
+ la_size_changed = (la_size_sect != mdev->ldev->md.la_size_sect);
md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
|| prev_size != mdev->ldev->md.md_size_sect;
@@ -886,9 +900,9 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
drbd_md_mark_dirty(mdev);
}
- if (size > la_size)
+ if (size > la_size_sect)
rv = grew;
- if (size < la_size)
+ if (size < la_size_sect)
rv = shrunk;
out:
lc_unlock(mdev->act_log);
@@ -903,7 +917,7 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
sector_t u_size, int assume_peer_has_space)
{
sector_t p_size = mdev->p_size; /* partner's disk size. */
- sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
+ sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
sector_t m_size; /* my size */
sector_t size = 0;
@@ -917,8 +931,8 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
if (p_size && m_size) {
size = min_t(sector_t, p_size, m_size);
} else {
- if (la_size) {
- size = la_size;
+ if (la_size_sect) {
+ size = la_size_sect;
if (m_size && m_size < size)
size = m_size;
if (p_size && p_size < size)
@@ -1127,15 +1141,32 @@ static bool should_set_defaults(struct genl_info *info)
return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}
-static void enforce_disk_conf_limits(struct disk_conf *dc)
+static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
{
- if (dc->al_extents < DRBD_AL_EXTENTS_MIN)
- dc->al_extents = DRBD_AL_EXTENTS_MIN;
- if (dc->al_extents > DRBD_AL_EXTENTS_MAX)
- dc->al_extents = DRBD_AL_EXTENTS_MAX;
+ /* This is limited by 16 bit "slot" numbers,
+ * and by available on-disk context storage.
+ *
+ * Also (u16)~0 is special (denotes a "free" extent).
+ *
+ * One transaction occupies one 4kB on-disk block,
+ * we have n such blocks in the on disk ring buffer,
+ * the "current" transaction may still fail, leaving only (n-1) usable,
+ * and each transaction carries 919 slot numbers of context information.
+ *
+ * 72 transaction blocks amount to more than 2**16 context slots,
+ * so cap there first.
+ */
+ const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
+ const unsigned int sufficient_on_disk =
+ (max_al_nr + AL_CONTEXT_PER_TRANSACTION -1)
+ /AL_CONTEXT_PER_TRANSACTION;
+
+ unsigned int al_size_4k = bdev->md.al_size_4k;
+
+ if (al_size_4k > sufficient_on_disk)
+ return max_al_nr;
- if (dc->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
- dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
+ return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
}
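A standalone sketch of the capping logic in drbd_al_extents_max(), assuming DRBD_AL_EXTENTS_MAX is 65534 (16-bit slot numbers with (u16)~0 reserved) and AL_CONTEXT_PER_TRANSACTION is 919, as described in the comment:

#include <stdio.h>

/* Why 72 on-disk transaction blocks already cover all 16-bit slot numbers.
 * DRBD_AL_EXTENTS_MAX assumed to be 65534; AL_CONTEXT_PER_TRANSACTION is 919. */
int main(void)
{
	const unsigned int max_al_nr = 65534;  /* assumed */
	const unsigned int ctx_per_tr = 919;
	unsigned int sufficient = (max_al_nr + ctx_per_tr - 1) / ctx_per_tr;

	printf("sufficient_on_disk = %u; %u * %u = %u > 65536\n",
	       sufficient, sufficient, ctx_per_tr, sufficient * ctx_per_tr);

	/* Smaller activity logs keep one block in reserve for the in-flight transaction: */
	unsigned int al_size_4k = 16;
	printf("al_size_4k = %u -> at most %u extents\n",
	       al_size_4k, (al_size_4k - 1) * ctx_per_tr);
	return 0;
}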
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
@@ -1182,7 +1213,13 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
if (!expect(new_disk_conf->resync_rate >= 1))
new_disk_conf->resync_rate = 1;
- enforce_disk_conf_limits(new_disk_conf);
+ if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
+ new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
+ if (new_disk_conf->al_extents > drbd_al_extents_max(mdev->ldev))
+ new_disk_conf->al_extents = drbd_al_extents_max(mdev->ldev);
+
+ if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
+ new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
if (fifo_size != mdev->rs_plan_s->size) {
@@ -1330,7 +1367,8 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
goto fail;
}
- enforce_disk_conf_limits(new_disk_conf);
+ if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
+ new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
if (!new_plan) {
@@ -1343,6 +1381,12 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
goto fail;
}
+ write_lock_irq(&global_state_lock);
+ retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
+ write_unlock_irq(&global_state_lock);
+ if (retcode != NO_ERROR)
+ goto fail;
+
rcu_read_lock();
nc = rcu_dereference(mdev->tconn->net_conf);
if (nc) {
@@ -1399,8 +1443,16 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
goto fail;
}
- /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
- drbd_md_set_sector_offsets(mdev, nbc);
+ /* Read our meta data super block early.
+ * This also sets other on-disk offsets. */
+ retcode = drbd_md_read(mdev, nbc);
+ if (retcode != NO_ERROR)
+ goto fail;
+
+ if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
+ new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
+ if (new_disk_conf->al_extents > drbd_al_extents_max(nbc))
+ new_disk_conf->al_extents = drbd_al_extents_max(nbc);
if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
@@ -1416,7 +1468,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
min_md_device_sectors = (2<<10);
} else {
max_possible_sectors = DRBD_MAX_SECTORS;
- min_md_device_sectors = MD_RESERVED_SECT * (new_disk_conf->meta_dev_idx + 1);
+ min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
}
if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
@@ -1467,8 +1519,6 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
if (!get_ldev_if_state(mdev, D_ATTACHING))
goto force_diskless;
- drbd_md_set_sector_offsets(mdev, nbc);
-
if (!mdev->bitmap) {
if (drbd_bm_init(mdev)) {
retcode = ERR_NOMEM;
@@ -1476,10 +1526,6 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
}
}
- retcode = drbd_md_read(mdev, nbc);
- if (retcode != NO_ERROR)
- goto force_diskless_dec;
-
if (mdev->state.conn < C_CONNECTED &&
mdev->state.role == R_PRIMARY &&
(mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
@@ -2158,8 +2204,11 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for
return SS_SUCCESS;
case SS_PRIMARY_NOP:
/* Our state checking code wants to see the peer outdated. */
- rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
- pdsk, D_OUTDATED), CS_VERBOSE);
+ rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
+
+ if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
+ rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_VERBOSE);
+
break;
case SS_CW_FAILED_BY_PEER:
/* The peer probably wants to see us outdated. */
@@ -2406,22 +2455,19 @@ int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
drbd_flush_workqueue(mdev);
- retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
-
- if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
- retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
-
- while (retcode == SS_NEED_CONNECTION) {
- spin_lock_irq(&mdev->tconn->req_lock);
- if (mdev->state.conn < C_CONNECTED)
- retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->tconn->req_lock);
-
- if (retcode != SS_NEED_CONNECTION)
- break;
-
+ /* If we happen to be C_STANDALONE R_SECONDARY, just change to
+ * D_INCONSISTENT, and set all bits in the bitmap. Otherwise,
+ * try to start a resync handshake as sync target for full sync.
+ */
+ if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_SECONDARY) {
+ retcode = drbd_request_state(mdev, NS(disk, D_INCONSISTENT));
+ if (retcode >= SS_SUCCESS) {
+ if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
+ "set_n_write from invalidate", BM_LOCKED_MASK))
+ retcode = ERR_IO_MD_DISK;
+ }
+ } else
retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
- }
drbd_resume_io(mdev);
out:
@@ -2475,21 +2521,22 @@ int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
drbd_flush_workqueue(mdev);
- retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
- if (retcode < SS_SUCCESS) {
- if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
- /* The peer will get a resync upon connect anyways.
- * Just make that into a full resync. */
- retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
- if (retcode >= SS_SUCCESS) {
- if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
- "set_n_write from invalidate_peer",
- BM_LOCKED_SET_ALLOWED))
- retcode = ERR_IO_MD_DISK;
- }
- } else
- retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
- }
+ /* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
+ * in the bitmap. Otherwise, try to start a resync handshake
+ * as sync source for full sync.
+ */
+ if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_PRIMARY) {
+ /* The peer will get a resync upon connect anyway. Just make that
+ into a full resync. */
+ retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
+ if (retcode >= SS_SUCCESS) {
+ if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
+ "set_n_write from invalidate_peer",
+ BM_LOCKED_SET_ALLOWED))
+ retcode = ERR_IO_MD_DISK;
+ }
+ } else
+ retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
drbd_resume_io(mdev);
out:
@@ -3162,6 +3209,7 @@ static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
CS_VERBOSE + CS_WAIT_COMPLETE);
idr_remove(&mdev->tconn->volumes, mdev->vnr);
idr_remove(&minors, mdev_to_minor(mdev));
+ destroy_workqueue(mdev->submit.wq);
del_gendisk(mdev->vdisk);
synchronize_rcu();
kref_put(&mdev->kref, &drbd_minor_destroy);
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 56672a6..bf31d41 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -313,8 +313,14 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
static int drbd_proc_open(struct inode *inode, struct file *file)
{
- if (try_module_get(THIS_MODULE))
- return single_open(file, drbd_seq_show, PDE(inode)->data);
+ int err;
+
+ if (try_module_get(THIS_MODULE)) {
+ err = single_open(file, drbd_seq_show, PDE_DATA(inode));
+ if (err)
+ module_put(THIS_MODULE);
+ return err;
+ }
return -ENODEV;
}
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 83c5ae0..4222aff 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -850,6 +850,7 @@ int drbd_connected(struct drbd_conf *mdev)
err = drbd_send_current_state(mdev);
clear_bit(USE_DEGR_WFC_T, &mdev->flags);
clear_bit(RESIZE_PENDING, &mdev->flags);
+ atomic_set(&mdev->ap_in_flight, 0);
mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
return err;
}
@@ -2266,7 +2267,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
- drbd_al_begin_io(mdev, &peer_req->i);
+ drbd_al_begin_io(mdev, &peer_req->i, true);
}
err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
@@ -2662,7 +2663,6 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
if (hg == -1 && mdev->state.role == R_PRIMARY) {
enum drbd_state_rv rv2;
- drbd_set_role(mdev, R_SECONDARY, 0);
/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
* we might be here in C_WF_REPORT_PARAMS which is transient.
* we do not need to wait for the after state change work either. */
@@ -3993,7 +3993,7 @@ static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
clear_bit(DISCARD_MY_DATA, &mdev->flags);
- drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
+ drbd_md_sync(mdev); /* update connected indicator, la_size_sect, ... */
return 0;
}
@@ -4660,8 +4660,8 @@ static int drbd_do_features(struct drbd_tconn *tconn)
#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
static int drbd_do_auth(struct drbd_tconn *tconn)
{
- dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
- dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
+ conn_err(tconn, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
+ conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
return -1;
}
#else
@@ -5258,9 +5258,11 @@ int drbd_asender(struct drbd_thread *thi)
bool ping_timeout_active = false;
struct net_conf *nc;
int ping_timeo, tcp_cork, ping_int;
+ struct sched_param param = { .sched_priority = 2 };
- current->policy = SCHED_RR; /* Make this a realtime task! */
- current->rt_priority = 2; /* more important than all other tasks */
+ rv = sched_setscheduler(current, SCHED_RR, &param);
+ if (rv < 0)
+ conn_err(tconn, "drbd_asender: ERROR set priority, ret=%d\n", rv);
while (get_t_state(thi) == RUNNING) {
drbd_thread_current_set_cpu(thi);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 2b8303a..c24379f 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -34,14 +34,14 @@
static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size);
/* Update disk stats at start of I/O request */
-static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req, struct bio *bio)
+static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
{
- const int rw = bio_data_dir(bio);
+ const int rw = bio_data_dir(req->master_bio);
int cpu;
cpu = part_stat_lock();
part_round_stats(cpu, &mdev->vdisk->part0);
part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
- part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio));
+ part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], req->i.size >> 9);
(void) cpu; /* The macro invocations above want the cpu argument, I do not like
the compiler warning about cpu only assigned but never used... */
part_inc_in_flight(&mdev->vdisk->part0, rw);
@@ -263,8 +263,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
else
root = &mdev->read_requests;
drbd_remove_request_interval(root, req);
- } else if (!(s & RQ_POSTPONED))
- D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
+ }
/* Before we can signal completion to the upper layers,
* we may need to close the current transfer log epoch.
@@ -755,6 +754,11 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
D_ASSERT(req->rq_state & RQ_NET_PENDING);
mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
break;
+
+ case QUEUE_AS_DRBD_BARRIER:
+ start_new_tl_epoch(mdev->tconn);
+ mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
+ break;
};
return rv;
@@ -861,8 +865,10 @@ static void maybe_pull_ahead(struct drbd_conf *mdev)
bool congested = false;
enum drbd_on_congestion on_congestion;
+ rcu_read_lock();
nc = rcu_dereference(tconn->net_conf);
on_congestion = nc ? nc->on_congestion : OC_BLOCK;
+ rcu_read_unlock();
if (on_congestion == OC_BLOCK ||
tconn->agreed_pro_version < 96)
return;
@@ -956,14 +962,8 @@ static int drbd_process_write_request(struct drbd_request *req)
struct drbd_conf *mdev = req->w.mdev;
int remote, send_oos;
- rcu_read_lock();
remote = drbd_should_do_remote(mdev->state);
- if (remote) {
- maybe_pull_ahead(mdev);
- remote = drbd_should_do_remote(mdev->state);
- }
send_oos = drbd_should_send_out_of_sync(mdev->state);
- rcu_read_unlock();
/* Need to replicate writes. Unless it is an empty flush,
* which is better mapped to a DRBD P_BARRIER packet,
@@ -975,8 +975,8 @@ static int drbd_process_write_request(struct drbd_request *req)
/* The only size==0 bios we expect are empty flushes. */
D_ASSERT(req->master_bio->bi_rw & REQ_FLUSH);
if (remote)
- start_new_tl_epoch(mdev->tconn);
- return 0;
+ _req_mod(req, QUEUE_AS_DRBD_BARRIER);
+ return remote;
}
if (!remote && !send_oos)
@@ -1020,12 +1020,24 @@ drbd_submit_req_private_bio(struct drbd_request *req)
bio_endio(bio, -EIO);
}
-void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
+static void drbd_queue_write(struct drbd_conf *mdev, struct drbd_request *req)
{
- const int rw = bio_rw(bio);
- struct bio_and_error m = { NULL, };
+ spin_lock(&mdev->submit.lock);
+ list_add_tail(&req->tl_requests, &mdev->submit.writes);
+ spin_unlock(&mdev->submit.lock);
+ queue_work(mdev->submit.wq, &mdev->submit.worker);
+}
+
+/* returns the new drbd_request pointer, if the caller is expected to
+ * drbd_send_and_submit() it (to save latency), or NULL if we queued the
+ * request on the submitter thread.
+ * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
+ */
+struct drbd_request *
+drbd_request_prepare(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
+{
+ const int rw = bio_data_dir(bio);
struct drbd_request *req;
- bool no_remote = false;
/* allocate outside of all locks; */
req = drbd_req_new(mdev, bio);
@@ -1035,7 +1047,7 @@ void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long
* if user cannot handle io errors, that's not our business. */
dev_err(DEV, "could not kmalloc() req\n");
bio_endio(bio, -ENOMEM);
- return;
+ return ERR_PTR(-ENOMEM);
}
req->start_time = start_time;
@@ -1044,28 +1056,40 @@ void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long
req->private_bio = NULL;
}
- /* For WRITES going to the local disk, grab a reference on the target
- * extent. This waits for any resync activity in the corresponding
- * resync extent to finish, and, if necessary, pulls in the target
- * extent into the activity log, which involves further disk io because
- * of transactional on-disk meta data updates.
- * Empty flushes don't need to go into the activity log, they can only
- * flush data for pending writes which are already in there. */
+ /* Update disk stats */
+ _drbd_start_io_acct(mdev, req);
+
if (rw == WRITE && req->private_bio && req->i.size
&& !test_bit(AL_SUSPENDED, &mdev->flags)) {
+ if (!drbd_al_begin_io_fastpath(mdev, &req->i)) {
+ drbd_queue_write(mdev, req);
+ return NULL;
+ }
req->rq_state |= RQ_IN_ACT_LOG;
- drbd_al_begin_io(mdev, &req->i);
}
+ return req;
+}
+
+static void drbd_send_and_submit(struct drbd_conf *mdev, struct drbd_request *req)
+{
+ const int rw = bio_rw(req->master_bio);
+ struct bio_and_error m = { NULL, };
+ bool no_remote = false;
+
spin_lock_irq(&mdev->tconn->req_lock);
if (rw == WRITE) {
/* This may temporarily give up the req_lock,
* but will re-aquire it before it returns here.
* Needs to be before the check on drbd_suspended() */
complete_conflicting_writes(req);
+ /* no more giving up req_lock from now on! */
+
+ /* check for congestion, and potentially stop sending
+ * full data updates, but start sending "dirty bits" only. */
+ maybe_pull_ahead(mdev);
}
- /* no more giving up req_lock from now on! */
if (drbd_suspended(mdev)) {
/* push back and retry: */
@@ -1078,9 +1102,6 @@ void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long
goto out;
}
- /* Update disk stats */
- _drbd_start_io_acct(mdev, req, bio);
-
/* We fail READ/READA early, if we can not serve it.
* We must do this before req is registered on any lists.
* Otherwise, drbd_req_complete() will queue failed READ for retry. */
@@ -1137,7 +1158,116 @@ out:
if (m.bio)
complete_master_bio(mdev, &m);
- return;
+}
+
+void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
+{
+ struct drbd_request *req = drbd_request_prepare(mdev, bio, start_time);
+ if (IS_ERR_OR_NULL(req))
+ return;
+ drbd_send_and_submit(mdev, req);
+}
+
+static void submit_fast_path(struct drbd_conf *mdev, struct list_head *incoming)
+{
+ struct drbd_request *req, *tmp;
+ list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
+ const int rw = bio_data_dir(req->master_bio);
+
+ if (rw == WRITE /* rw != WRITE should not even end up here! */
+ && req->private_bio && req->i.size
+ && !test_bit(AL_SUSPENDED, &mdev->flags)) {
+ if (!drbd_al_begin_io_fastpath(mdev, &req->i))
+ continue;
+
+ req->rq_state |= RQ_IN_ACT_LOG;
+ }
+
+ list_del_init(&req->tl_requests);
+ drbd_send_and_submit(mdev, req);
+ }
+}
+
+static bool prepare_al_transaction_nonblock(struct drbd_conf *mdev,
+ struct list_head *incoming,
+ struct list_head *pending)
+{
+ struct drbd_request *req, *tmp;
+ int wake = 0;
+ int err;
+
+ spin_lock_irq(&mdev->al_lock);
+ list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
+ err = drbd_al_begin_io_nonblock(mdev, &req->i);
+ if (err == -EBUSY)
+ wake = 1;
+ if (err)
+ continue;
+ req->rq_state |= RQ_IN_ACT_LOG;
+ list_move_tail(&req->tl_requests, pending);
+ }
+ spin_unlock_irq(&mdev->al_lock);
+ if (wake)
+ wake_up(&mdev->al_wait);
+
+ return !list_empty(pending);
+}
+
+void do_submit(struct work_struct *ws)
+{
+ struct drbd_conf *mdev = container_of(ws, struct drbd_conf, submit.worker);
+ LIST_HEAD(incoming);
+ LIST_HEAD(pending);
+ struct drbd_request *req, *tmp;
+
+ for (;;) {
+ spin_lock(&mdev->submit.lock);
+ list_splice_tail_init(&mdev->submit.writes, &incoming);
+ spin_unlock(&mdev->submit.lock);
+
+ submit_fast_path(mdev, &incoming);
+ if (list_empty(&incoming))
+ break;
+
+ wait_event(mdev->al_wait, prepare_al_transaction_nonblock(mdev, &incoming, &pending));
+ /* Maybe more was queued, while we prepared the transaction?
+ * Try to stuff them into this transaction as well.
+ * Be strictly non-blocking here, no wait_event, we already
+ * have something to commit.
+ * Stop if we don't make any more progress.
+ */
+ for (;;) {
+ LIST_HEAD(more_pending);
+ LIST_HEAD(more_incoming);
+ bool made_progress;
+
+ /* It is ok to look outside the lock,
+ * it's only an optimization anyway */
+ if (list_empty(&mdev->submit.writes))
+ break;
+
+ spin_lock(&mdev->submit.lock);
+ list_splice_tail_init(&mdev->submit.writes, &more_incoming);
+ spin_unlock(&mdev->submit.lock);
+
+ if (list_empty(&more_incoming))
+ break;
+
+ made_progress = prepare_al_transaction_nonblock(mdev, &more_incoming, &more_pending);
+
+ list_splice_tail_init(&more_pending, &pending);
+ list_splice_tail_init(&more_incoming, &incoming);
+
+ if (!made_progress)
+ break;
+ }
+ drbd_al_begin_io_commit(mdev, false);
+
+ list_for_each_entry_safe(req, tmp, &pending, tl_requests) {
+ list_del_init(&req->tl_requests);
+ drbd_send_and_submit(mdev, req);
+ }
+ }
}
void drbd_make_request(struct request_queue *q, struct bio *bio)
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index c08d229..978cb1a 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -88,6 +88,14 @@ enum drbd_req_event {
QUEUE_FOR_NET_READ,
QUEUE_FOR_SEND_OOS,
+ /* An empty flush is queued as P_BARRIER,
+ * which will cause it to complete "successfully",
+ * even if the local disk flush failed.
+ *
+ * Just like "real" requests, empty flushes (blkdev_issue_flush()) will
+ * only see an error if neither local nor remote data is reachable. */
+ QUEUE_AS_DRBD_BARRIER,
+
SEND_CANCELED,
SEND_FAILED,
HANDED_OVER_TO_NETWORK,
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index 0fe220c..90c5be2 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -570,6 +570,13 @@ is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
mdev->tconn->agreed_pro_version < 88)
rv = SS_NOT_SUPPORTED;
+ else if (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
+ rv = SS_NO_UP_TO_DATE_DISK;
+
+ else if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
+ ns.pdsk == D_UNKNOWN)
+ rv = SS_NEED_CONNECTION;
+
else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
rv = SS_CONNECTED_OUTDATES;
@@ -635,6 +642,10 @@ is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_t
&& os.conn < C_WF_REPORT_PARAMS)
rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
+ if (ns.conn == C_DISCONNECTING && ns.pdsk == D_OUTDATED &&
+ os.conn < C_CONNECTED && os.pdsk > D_OUTDATED)
+ rv = SS_OUTDATE_WO_CONN;
+
return rv;
}
@@ -1377,13 +1388,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
&drbd_bmio_set_n_write, &abw_start_sync,
"set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
- /* We are invalidating our self... */
- if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
- os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
- /* other bitmap operation expected during this phase */
- drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
- "set_n_write from invalidate", BM_LOCKED_MASK);
-
/* first half of local IO error, failure to attach,
* or administrative detach */
if (os.disk != D_FAILED && ns.disk == D_FAILED) {
@@ -1748,13 +1752,9 @@ _conn_rq_cond(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state
if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags))
return SS_CW_FAILED_BY_PEER;
- rv = tconn->cstate != C_WF_REPORT_PARAMS ? SS_CW_NO_NEED : SS_UNKNOWN_ERROR;
-
- if (rv == SS_UNKNOWN_ERROR)
- rv = conn_is_valid_transition(tconn, mask, val, 0);
-
- if (rv == SS_SUCCESS)
- rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
+ rv = conn_is_valid_transition(tconn, mask, val, 0);
+ if (rv == SS_SUCCESS && tconn->cstate == C_WF_REPORT_PARAMS)
+ rv = SS_UNKNOWN_ERROR; /* continue waiting */
return rv;
}
diff --git a/drivers/block/drbd/drbd_strings.c b/drivers/block/drbd/drbd_strings.c
index 9a664bd..58e08ff 100644
--- a/drivers/block/drbd/drbd_strings.c
+++ b/drivers/block/drbd/drbd_strings.c
@@ -89,6 +89,7 @@ static const char *drbd_state_sw_errors[] = {
[-SS_LOWER_THAN_OUTDATED] = "Disk state is lower than outdated",
[-SS_IN_TRANSIENT_STATE] = "In transient state, retry after next state change",
[-SS_CONCURRENT_ST_CHG] = "Concurrent state changes detected and aborted",
+ [-SS_OUTDATE_WO_CONN] = "Need a connection for a graceful disconnect/outdate peer",
[-SS_O_VOL_PEER_PRI] = "Other vol primary on peer not allowed by config",
};
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 424dc7b..891c0ec 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -89,7 +89,8 @@ void drbd_md_io_complete(struct bio *bio, int error)
md_io->done = 1;
wake_up(&mdev->misc_wait);
bio_put(bio);
- put_ldev(mdev);
+ if (mdev->ldev) /* special case: drbd_md_read() during drbd_adm_attach() */
+ put_ldev(mdev);
}
/* reads on behalf of the partner,
@@ -1410,7 +1411,7 @@ int w_restart_disk_io(struct drbd_work *w, int cancel)
struct drbd_conf *mdev = w->mdev;
if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
- drbd_al_begin_io(mdev, &req->i);
+ drbd_al_begin_io(mdev, &req->i, false);
drbd_req_make_private_bio(req, req->master_bio);
req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
@@ -1425,7 +1426,7 @@ static int _drbd_may_sync_now(struct drbd_conf *mdev)
int resync_after;
while (1) {
- if (!odev->ldev)
+ if (!odev->ldev || odev->state.disk == D_DISKLESS)
return 1;
rcu_read_lock();
resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after;
@@ -1433,7 +1434,7 @@ static int _drbd_may_sync_now(struct drbd_conf *mdev)
if (resync_after == -1)
return 1;
odev = minor_to_mdev(resync_after);
- if (!expect(odev))
+ if (!odev)
return 1;
if ((odev->state.conn >= C_SYNC_SOURCE &&
odev->state.conn <= C_PAUSED_SYNC_T) ||
@@ -1515,7 +1516,7 @@ enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor)
if (o_minor == -1)
return NO_ERROR;
- if (o_minor < -1 || minor_to_mdev(o_minor) == NULL)
+ if (o_minor < -1 || o_minor > MINORMASK)
return ERR_RESYNC_AFTER;
/* check for loops */
@@ -1524,6 +1525,15 @@ enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor)
if (odev == mdev)
return ERR_RESYNC_AFTER_CYCLE;
+ /* You are free to depend on diskless, non-existing,
+ * or not yet/no longer existing minors.
+ * We only reject dependency loops.
+ * We cannot follow the dependency chain beyond a detached or
+ * missing minor.
+ */
+ if (!odev || !odev->ldev || odev->state.disk == D_DISKLESS)
+ return NO_ERROR;
+
rcu_read_lock();
resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after;
rcu_read_unlock();
@@ -1652,7 +1662,9 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
clear_bit(B_RS_H_DONE, &mdev->flags);
write_lock_irq(&global_state_lock);
- if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
+ /* Did some connection breakage or IO error race with us? */
+ if (mdev->state.conn < C_CONNECTED
+ || !get_ldev_if_state(mdev, D_NEGOTIATING)) {
write_unlock_irq(&global_state_lock);
mutex_unlock(mdev->state_mutex);
return;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 2ddd64a..04ceb7e 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3601,7 +3601,7 @@ static void __init config_types(void)
pr_cont("\n");
}
-static int floppy_release(struct gendisk *disk, fmode_t mode)
+static void floppy_release(struct gendisk *disk, fmode_t mode)
{
int drive = (long)disk->private_data;
@@ -3615,8 +3615,6 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
opened_bdev[drive] = NULL;
mutex_unlock(&open_lock);
mutex_unlock(&floppy_mutex);
-
- return 0;
}
/*
@@ -3777,7 +3775,6 @@ static int __floppy_read_block_0(struct block_device *bdev)
bio_vec.bv_len = size;
bio_vec.bv_offset = 0;
bio.bi_vcnt = 1;
- bio.bi_idx = 0;
bio.bi_size = size;
bio.bi_bdev = bdev;
bio.bi_sector = 0;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index dfe7583..d92d50f 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -230,9 +230,11 @@ static int __do_lo_send_write(struct file *file,
ssize_t bw;
mm_segment_t old_fs = get_fs();
+ file_start_write(file);
set_fs(get_ds());
bw = file->f_op->write(file, buf, len, &pos);
set_fs(old_fs);
+ file_end_write(file);
if (likely(bw == len))
return 0;
printk(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
@@ -1516,7 +1518,7 @@ out:
return err;
}
-static int lo_release(struct gendisk *disk, fmode_t mode)
+static void lo_release(struct gendisk *disk, fmode_t mode)
{
struct loop_device *lo = disk->private_data;
int err;
@@ -1533,7 +1535,7 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
*/
err = loop_clr_fd(lo);
if (!err)
- goto out_unlocked;
+ return;
} else {
/*
* Otherwise keep thread (if running) and config,
@@ -1544,8 +1546,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
out:
mutex_unlock(&lo->lo_ctl_mutex);
-out_unlocked:
- return 0;
}
static const struct block_device_operations lo_fops = {
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 076ae7f..a56cfcd 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -780,6 +780,7 @@ static const struct block_device_operations mg_disk_ops = {
.getgeo = mg_getgeo
};
+#ifdef CONFIG_PM_SLEEP
static int mg_suspend(struct device *dev)
{
struct mg_drv_data *prv_data = dev->platform_data;
@@ -824,6 +825,7 @@ static int mg_resume(struct device *dev)
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(mg_pm, mg_suspend, mg_resume);
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 32c6780..847107e 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -728,7 +728,10 @@ static void mtip_async_complete(struct mtip_port *port,
atomic_set(&port->commands[tag].active, 0);
release_slot(port, tag);
- up(&port->cmd_slot);
+ if (unlikely(command->unaligned))
+ up(&port->cmd_slot_unal);
+ else
+ up(&port->cmd_slot);
}
/*
@@ -1560,10 +1563,12 @@ static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
}
#endif
+#ifdef MTIP_TRIM /* Disabling TRIM support temporarily */
/* Demux ID.DRAT & ID.RZAT to determine trim support */
if (port->identify[69] & (1 << 14) && port->identify[69] & (1 << 5))
port->dd->trim_supp = true;
else
+#endif
port->dd->trim_supp = false;
/* Set the identify buffer as valid. */
@@ -2557,7 +2562,7 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
*/
static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
int nsect, int nents, int tag, void *callback,
- void *data, int dir)
+ void *data, int dir, int unaligned)
{
struct host_to_dev_fis *fis;
struct mtip_port *port = dd->port;
@@ -2570,6 +2575,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
command->scatter_ents = nents;
+ command->unaligned = unaligned;
/*
* The number of retries for this command before it is
* reported as a failure to the upper layers.
@@ -2598,6 +2604,9 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
fis->res3 = 0;
fill_command_sg(dd, command, nents);
+ if (unaligned)
+ fis->device |= 1 << 7;
+
/* Populate the command header */
command->command_header->opts =
__force_bit2int cpu_to_le32(
@@ -2644,9 +2653,13 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
* return value
* None
*/
-static void mtip_hw_release_scatterlist(struct driver_data *dd, int tag)
+static void mtip_hw_release_scatterlist(struct driver_data *dd, int tag,
+ int unaligned)
{
+ struct semaphore *sem = unaligned ? &dd->port->cmd_slot_unal :
+ &dd->port->cmd_slot;
release_slot(dd->port, tag);
+ up(sem);
}
/*
@@ -2661,22 +2674,25 @@ static void mtip_hw_release_scatterlist(struct driver_data *dd, int tag)
* or NULL if no command slots are available.
*/
static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
- int *tag)
+ int *tag, int unaligned)
{
+ struct semaphore *sem = unaligned ? &dd->port->cmd_slot_unal :
+ &dd->port->cmd_slot;
+
/*
* It is possible that, even with this semaphore, a thread
* may think that no command slots are available. Therefore, we
* need to make an attempt to get_slot().
*/
- down(&dd->port->cmd_slot);
+ down(sem);
*tag = get_slot(dd->port);
if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
- up(&dd->port->cmd_slot);
+ up(sem);
return NULL;
}
if (unlikely(*tag < 0)) {
- up(&dd->port->cmd_slot);
+ up(sem);
return NULL;
}
@@ -3010,6 +3026,11 @@ static inline void hba_setup(struct driver_data *dd)
dd->mmio + HOST_HSORG);
}
+static int mtip_device_unaligned_constrained(struct driver_data *dd)
+{
+ return (dd->pdev->device == P420M_DEVICE_ID ? 1 : 0);
+}
+
/*
* Detect the details of the product, and store anything needed
* into the driver data structure. This includes product type and
@@ -3232,8 +3253,15 @@ static int mtip_hw_init(struct driver_data *dd)
for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
dd->work[i].port = dd->port;
+ /* Enable unaligned IO constraints for some devices */
+ if (mtip_device_unaligned_constrained(dd))
+ dd->unal_qdepth = MTIP_MAX_UNALIGNED_SLOTS;
+ else
+ dd->unal_qdepth = 0;
+
/* Counting semaphore to track command slot usage */
- sema_init(&dd->port->cmd_slot, num_command_slots - 1);
+ sema_init(&dd->port->cmd_slot, num_command_slots - 1 - dd->unal_qdepth);
+ sema_init(&dd->port->cmd_slot_unal, dd->unal_qdepth);
/* Spinlock to prevent concurrent issue */
for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
@@ -3836,7 +3864,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
struct scatterlist *sg;
struct bio_vec *bvec;
int nents = 0;
- int tag = 0;
+ int tag = 0, unaligned = 0;
if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
@@ -3872,7 +3900,15 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
return;
}
- sg = mtip_hw_get_scatterlist(dd, &tag);
+ if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 &&
+ dd->unal_qdepth) {
+ if (bio->bi_sector % 8 != 0) /* Unaligned on 4k boundaries */
+ unaligned = 1;
+ else if (bio_sectors(bio) % 8 != 0) /* Aligned start, but length not a multiple of 4k */
+ unaligned = 1;
+ }
+
+ sg = mtip_hw_get_scatterlist(dd, &tag, unaligned);
if (likely(sg != NULL)) {
blk_queue_bounce(queue, &bio);
@@ -3880,7 +3916,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
dev_warn(&dd->pdev->dev,
"Maximum number of SGL entries exceeded\n");
bio_io_error(bio);
- mtip_hw_release_scatterlist(dd, tag);
+ mtip_hw_release_scatterlist(dd, tag, unaligned);
return;
}
@@ -3900,7 +3936,8 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
tag,
bio_endio,
bio,
- bio_data_dir(bio));
+ bio_data_dir(bio),
+ unaligned);
} else
bio_io_error(bio);
}
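The write classification added above uses 8 sectors (8 x 512 B = 4 kB) as its alignment unit and only applies to small writes (at most 64 sectors) on devices where dd->unal_qdepth is non-zero. A minimal sketch of that decision, mirroring the checks in the hunk:

#include <stdio.h>

/* Mirror of the unaligned-write test in mtip_make_request() above:
 * small writes that start or end off a 4 kB boundary use the
 * depth-limited unaligned slot pool. */
static int is_unaligned_write(unsigned long long start_sector, unsigned int nr_sectors)
{
	if (nr_sectors > 64)
		return 0;              /* large IO: regular slot pool */
	if (start_sector % 8 != 0)
		return 1;              /* start is not 4 kB aligned */
	if (nr_sectors % 8 != 0)
		return 1;              /* length is not a multiple of 4 kB */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       is_unaligned_write(8, 8),   /* 0: aligned start, 4 kB length */
	       is_unaligned_write(9, 8),   /* 1: misaligned start */
	       is_unaligned_write(8, 7));  /* 1: odd length */
	return 0;
}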
@@ -4156,26 +4193,24 @@ static int mtip_block_remove(struct driver_data *dd)
*/
static int mtip_block_shutdown(struct driver_data *dd)
{
- dev_info(&dd->pdev->dev,
- "Shutting down %s ...\n", dd->disk->disk_name);
-
/* Delete our gendisk structure, and cleanup the blk queue. */
if (dd->disk) {
- if (dd->disk->queue)
+ dev_info(&dd->pdev->dev,
+ "Shutting down %s ...\n", dd->disk->disk_name);
+
+ if (dd->disk->queue) {
del_gendisk(dd->disk);
- else
+ blk_cleanup_queue(dd->queue);
+ } else
put_disk(dd->disk);
+ dd->disk = NULL;
+ dd->queue = NULL;
}
-
spin_lock(&rssd_index_lock);
ida_remove(&rssd_index_ida, dd->index);
spin_unlock(&rssd_index_lock);
- blk_cleanup_queue(dd->queue);
- dd->disk = NULL;
- dd->queue = NULL;
-
mtip_hw_shutdown(dd);
return 0;
}
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 8e8334c..3bb8a29 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -52,6 +52,9 @@
#define MTIP_FTL_REBUILD_MAGIC 0xED51
#define MTIP_FTL_REBUILD_TIMEOUT_MS 2400000
+/* unaligned IO handling */
+#define MTIP_MAX_UNALIGNED_SLOTS 8
+
/* Macro to extract the tag bit number from a tag value. */
#define MTIP_TAG_BIT(tag) (tag & 0x1F)
@@ -333,6 +336,8 @@ struct mtip_cmd {
int scatter_ents; /* Number of scatter list entries used */
+ int unaligned; /* command is unaligned on 4k boundary */
+
struct scatterlist sg[MTIP_MAX_SG]; /* Scatter list entries */
int retries; /* The number of retries left for this command. */
@@ -452,6 +457,10 @@ struct mtip_port {
* command slots available.
*/
struct semaphore cmd_slot;
+
+ /* Semaphore to control queue depth of unaligned IOs */
+ struct semaphore cmd_slot_unal;
+
/* Spinlock for working around command-issue bug. */
spinlock_t cmd_issue_lock[MTIP_MAX_SLOT_GROUPS];
};
@@ -502,6 +511,8 @@ struct driver_data {
int isr_binding;
+ int unal_qdepth; /* qdepth of unaligned IO queue */
+
struct list_head online_list; /* linkage for online list */
struct list_head remove_list; /* linkage for removing list */
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index ba2b6b5..e76bdc0 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -236,13 +236,12 @@ static int pcd_block_open(struct block_device *bdev, fmode_t mode)
return ret;
}
-static int pcd_block_release(struct gendisk *disk, fmode_t mode)
+static void pcd_block_release(struct gendisk *disk, fmode_t mode)
{
struct pcd_unit *cd = disk->private_data;
mutex_lock(&pcd_mutex);
cdrom_release(&cd->info, mode);
mutex_unlock(&pcd_mutex);
- return 0;
}
static int pcd_block_ioctl(struct block_device *bdev, fmode_t mode,
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 831e3ac..19ad8f0 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -783,7 +783,7 @@ static int pd_ioctl(struct block_device *bdev, fmode_t mode,
}
}
-static int pd_release(struct gendisk *p, fmode_t mode)
+static void pd_release(struct gendisk *p, fmode_t mode)
{
struct pd_unit *disk = p->private_data;
@@ -791,8 +791,6 @@ static int pd_release(struct gendisk *p, fmode_t mode)
if (!--disk->access && disk->removable)
pd_special_command(disk, pd_door_unlock);
mutex_unlock(&pd_mutex);
-
- return 0;
}
static unsigned int pd_check_events(struct gendisk *p, unsigned int clearing)
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index ec8f9ed..f5c86d5 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -211,7 +211,7 @@ static int pf_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
-static int pf_release(struct gendisk *disk, fmode_t mode);
+static void pf_release(struct gendisk *disk, fmode_t mode);
static int pf_detect(void);
static void do_pf_read(void);
@@ -360,14 +360,15 @@ static int pf_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, u
return 0;
}
-static int pf_release(struct gendisk *disk, fmode_t mode)
+static void pf_release(struct gendisk *disk, fmode_t mode)
{
struct pf_unit *pf = disk->private_data;
mutex_lock(&pf_mutex);
if (pf->access <= 0) {
mutex_unlock(&pf_mutex);
- return -EINVAL;
+ WARN_ON(1);
+ return;
}
pf->access--;
@@ -376,8 +377,6 @@ static int pf_release(struct gendisk *disk, fmode_t mode)
pf_lock(pf, 0);
mutex_unlock(&pf_mutex);
- return 0;
-
}
static unsigned int pf_check_events(struct gendisk *disk, unsigned int clearing)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 2e7de7a..3c08983 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -901,7 +901,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
pd->iosched.successive_reads += bio->bi_size >> 10;
else {
pd->iosched.successive_reads = 0;
- pd->iosched.last_write = bio->bi_sector + bio_sectors(bio);
+ pd->iosched.last_write = bio_end_sector(bio);
}
if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
if (pd->read_speed == pd->write_speed) {
@@ -948,31 +948,6 @@ static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_que
}
/*
- * Copy CD_FRAMESIZE bytes from src_bio into a destination page
- */
-static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs, struct page *dst_page, int dst_offs)
-{
- unsigned int copy_size = CD_FRAMESIZE;
-
- while (copy_size > 0) {
- struct bio_vec *src_bvl = bio_iovec_idx(src_bio, seg);
- void *vfrom = kmap_atomic(src_bvl->bv_page) +
- src_bvl->bv_offset + offs;
- void *vto = page_address(dst_page) + dst_offs;
- int len = min_t(int, copy_size, src_bvl->bv_len - offs);
-
- BUG_ON(len < 0);
- memcpy(vto, vfrom, len);
- kunmap_atomic(vfrom);
-
- seg++;
- offs = 0;
- dst_offs += len;
- copy_size -= len;
- }
-}
-
-/*
* Copy all data for this packet to pkt->pages[], so that
* a) The number of required segments for the write bio is minimized, which
* is necessary for some scsi controllers.
@@ -1181,16 +1156,15 @@ static int pkt_start_recovery(struct packet_data *pkt)
new_sector = new_block * (CD_FRAMESIZE >> 9);
pkt->sector = new_sector;
+ bio_reset(pkt->bio);
+ pkt->bio->bi_bdev = pd->bdev;
+ pkt->bio->bi_rw = REQ_WRITE;
pkt->bio->bi_sector = new_sector;
- pkt->bio->bi_next = NULL;
- pkt->bio->bi_flags = 1 << BIO_UPTODATE;
- pkt->bio->bi_idx = 0;
+ pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE;
+ pkt->bio->bi_vcnt = pkt->frames;
- BUG_ON(pkt->bio->bi_rw != REQ_WRITE);
- BUG_ON(pkt->bio->bi_vcnt != pkt->frames);
- BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE);
- BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write);
- BUG_ON(pkt->bio->bi_private != pkt);
+ pkt->bio->bi_end_io = pkt_end_io_packet_write;
+ pkt->bio->bi_private = pkt;
drop_super(sb);
return 1;
@@ -1325,55 +1299,35 @@ try_next_bio:
*/
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
- struct bio *bio;
int f;
- int frames_write;
struct bio_vec *bvec = pkt->w_bio->bi_io_vec;
+ bio_reset(pkt->w_bio);
+ pkt->w_bio->bi_sector = pkt->sector;
+ pkt->w_bio->bi_bdev = pd->bdev;
+ pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
+ pkt->w_bio->bi_private = pkt;
+
+ /* XXX: locking? */
for (f = 0; f < pkt->frames; f++) {
bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
+ if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
+ BUG();
}
+ VPRINTK(DRIVER_NAME": vcnt=%d\n", pkt->w_bio->bi_vcnt);
/*
* Fill-in bvec with data from orig_bios.
*/
- frames_write = 0;
spin_lock(&pkt->lock);
- bio_list_for_each(bio, &pkt->orig_bios) {
- int segment = bio->bi_idx;
- int src_offs = 0;
- int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
- int num_frames = bio->bi_size / CD_FRAMESIZE;
- BUG_ON(first_frame < 0);
- BUG_ON(first_frame + num_frames > pkt->frames);
- for (f = first_frame; f < first_frame + num_frames; f++) {
- struct bio_vec *src_bvl = bio_iovec_idx(bio, segment);
-
- while (src_offs >= src_bvl->bv_len) {
- src_offs -= src_bvl->bv_len;
- segment++;
- BUG_ON(segment >= bio->bi_vcnt);
- src_bvl = bio_iovec_idx(bio, segment);
- }
+ bio_copy_data(pkt->w_bio, pkt->orig_bios.head);
- if (src_bvl->bv_len - src_offs >= CD_FRAMESIZE) {
- bvec[f].bv_page = src_bvl->bv_page;
- bvec[f].bv_offset = src_bvl->bv_offset + src_offs;
- } else {
- pkt_copy_bio_data(bio, segment, src_offs,
- bvec[f].bv_page, bvec[f].bv_offset);
- }
- src_offs += CD_FRAMESIZE;
- frames_write++;
- }
- }
pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
spin_unlock(&pkt->lock);
VPRINTK("pkt_start_write: Writing %d frames for zone %llx\n",
- frames_write, (unsigned long long)pkt->sector);
- BUG_ON(frames_write != pkt->write_size);
+ pkt->write_size, (unsigned long long)pkt->sector);
if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
pkt_make_local_copy(pkt, bvec);
@@ -1383,16 +1337,6 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
}
/* Start the write request */
- bio_reset(pkt->w_bio);
- pkt->w_bio->bi_sector = pkt->sector;
- pkt->w_bio->bi_bdev = pd->bdev;
- pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
- pkt->w_bio->bi_private = pkt;
- for (f = 0; f < pkt->frames; f++)
- if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
- BUG();
- VPRINTK(DRIVER_NAME": vcnt=%d\n", pkt->w_bio->bi_vcnt);
-
atomic_set(&pkt->io_wait, 1);
pkt->w_bio->bi_rw = WRITE;
pkt_queue_bio(pd, pkt->w_bio);
@@ -2376,10 +2320,9 @@ out:
return ret;
}
-static int pkt_close(struct gendisk *disk, fmode_t mode)
+static void pkt_close(struct gendisk *disk, fmode_t mode)
{
struct pktcdvd_device *pd = disk->private_data;
- int ret = 0;
mutex_lock(&pktcdvd_mutex);
mutex_lock(&ctl_mutex);
@@ -2391,7 +2334,6 @@ static int pkt_close(struct gendisk *disk, fmode_t mode)
}
mutex_unlock(&ctl_mutex);
mutex_unlock(&pktcdvd_mutex);
- return ret;
}
@@ -2433,7 +2375,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
cloned_bio->bi_bdev = pd->bdev;
cloned_bio->bi_private = psd;
cloned_bio->bi_end_io = pkt_end_io_read_cloned;
- pd->stats.secs_r += bio->bi_size >> 9;
+ pd->stats.secs_r += bio_sectors(bio);
pkt_queue_bio(pd, cloned_bio);
return;
}
@@ -2454,7 +2396,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
zone = ZONE(bio->bi_sector, pd);
VPRINTK("pkt_make_request: start = %6llx stop = %6llx\n",
(unsigned long long)bio->bi_sector,
- (unsigned long long)(bio->bi_sector + bio_sectors(bio)));
+ (unsigned long long)bio_end_sector(bio));
/* Check if we have to split the bio */
{
@@ -2462,7 +2404,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
sector_t last_zone;
int first_sectors;
- last_zone = ZONE(bio->bi_sector + bio_sectors(bio) - 1, pd);
+ last_zone = ZONE(bio_end_sector(bio) - 1, pd);
if (last_zone != zone) {
BUG_ON(last_zone != zone + pd->settings.size);
first_sectors = last_zone - bio->bi_sector;
@@ -2648,7 +2590,7 @@ static int pkt_seq_show(struct seq_file *m, void *p)
static int pkt_seq_open(struct inode *inode, struct file *file)
{
- return single_open(file, pkt_seq_show, PDE(inode)->data);
+ return single_open(file, pkt_seq_show, PDE_DATA(inode));
}
static const struct file_operations pkt_proc_fops = {
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 75e112d..06a2e53 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -525,7 +525,7 @@ static int ps3vram_proc_show(struct seq_file *m, void *v)
static int ps3vram_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, ps3vram_proc_show, PDE(inode)->data);
+ return single_open(file, ps3vram_proc_show, PDE_DATA(inode));
}
static const struct file_operations ps3vram_proc_fops = {
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index b7b7a88..ca63104 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1,3 +1,4 @@
+
/*
rbd.c -- Export ceph rados objects as a Linux block device
@@ -32,12 +33,14 @@
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
+#include <linux/bsearch.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
+#include <linux/slab.h>
#include "rbd_types.h"
@@ -52,13 +55,6 @@
#define SECTOR_SHIFT 9
#define SECTOR_SIZE (1ULL << SECTOR_SHIFT)
-/* It might be useful to have these defined elsewhere */
-
-#define U8_MAX ((u8) (~0U))
-#define U16_MAX ((u16) (~0U))
-#define U32_MAX ((u32) (~0U))
-#define U64_MAX ((u64) (~0ULL))
-
#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"
@@ -72,6 +68,8 @@
#define RBD_SNAP_HEAD_NAME "-"
+#define BAD_SNAP_INDEX U32_MAX /* invalid index into snap array */
+
/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX 64
@@ -80,11 +78,14 @@
/* Feature bits */
-#define RBD_FEATURE_LAYERING 1
+#define RBD_FEATURE_LAYERING (1<<0)
+#define RBD_FEATURE_STRIPINGV2 (1<<1)
+#define RBD_FEATURES_ALL \
+ (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)
/* Features supported by this (client software) implementation. */
-#define RBD_FEATURES_ALL (0)
+#define RBD_FEATURES_SUPPORTED (RBD_FEATURES_ALL)
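A minimal sketch (not part of the patch) of how a feature mask would typically be checked against RBD_FEATURES_SUPPORTED; the helper name is hypothetical:

	static int rbd_check_features_example(u64 features)
	{
		u64 unsupported = features & ~RBD_FEATURES_SUPPORTED;

		if (unsupported) {
			pr_warn("rbd: image requires unsupported features 0x%llx\n",
				(unsigned long long)unsupported);
			return -ENXIO;
		}

		return 0;
	}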
/*
* An RBD device name will be "rbd#", where the "rbd" comes from
@@ -112,7 +113,8 @@ struct rbd_image_header {
char *snap_names;
u64 *snap_sizes;
- u64 obj_version;
+ u64 stripe_unit;
+ u64 stripe_count;
};
/*
@@ -142,13 +144,13 @@ struct rbd_image_header {
*/
struct rbd_spec {
u64 pool_id;
- char *pool_name;
+ const char *pool_name;
- char *image_id;
- char *image_name;
+ const char *image_id;
+ const char *image_name;
u64 snap_id;
- char *snap_name;
+ const char *snap_name;
struct kref kref;
};
@@ -174,13 +176,44 @@ enum obj_request_type {
OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};
+enum obj_req_flags {
+ OBJ_REQ_DONE, /* completion flag: not done = 0, done = 1 */
+ OBJ_REQ_IMG_DATA, /* object usage: standalone = 0, image = 1 */
+ OBJ_REQ_KNOWN, /* EXISTS flag valid: no = 0, yes = 1 */
+ OBJ_REQ_EXISTS, /* target exists: no = 0, yes = 1 */
+};
+
struct rbd_obj_request {
const char *object_name;
u64 offset; /* object start byte */
u64 length; /* bytes from offset */
+ unsigned long flags;
- struct rbd_img_request *img_request;
- struct list_head links; /* img_request->obj_requests */
+ /*
+ * An object request associated with an image will have its
+ * img_data flag set; a standalone object request will not.
+ *
+ * A standalone object request will have which == BAD_WHICH
+ * and a null obj_request pointer.
+ *
+ * An object request initiated in support of a layered image
+ * object (to check for its existence before a write) will
+ * have which == BAD_WHICH and a non-null obj_request pointer.
+ *
+ * Finally, an object request for rbd image data will have
+ * which != BAD_WHICH, and will have a non-null img_request
+ * pointer. The value of which will be in the range
+ * 0..(img_request->obj_request_count-1).
+ */
+ union {
+ struct rbd_obj_request *obj_request; /* STAT op */
+ struct {
+ struct rbd_img_request *img_request;
+ u64 img_offset;
+ /* links for img_request->obj_requests list */
+ struct list_head links;
+ };
+ };
u32 which; /* posn image request list */
enum obj_request_type type;
@@ -191,13 +224,12 @@ struct rbd_obj_request {
u32 page_count;
};
};
+ struct page **copyup_pages;
struct ceph_osd_request *osd_req;
u64 xferred; /* bytes transferred */
- u64 version;
int result;
- atomic_t done;
rbd_obj_callback_t callback;
struct completion completion;
@@ -205,19 +237,31 @@ struct rbd_obj_request {
struct kref kref;
};
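The comment inside the union above distinguishes three uses of this structure; a hypothetical, editor-added helper spells the same rules out in code (the enum and function are illustrative only):

	enum rbd_obj_req_kind {
		OBJ_KIND_STANDALONE,	/* which == BAD_WHICH, no backing request */
		OBJ_KIND_STAT,		/* which == BAD_WHICH, obj_request set */
		OBJ_KIND_IMG_DATA,	/* which != BAD_WHICH, img_request set */
	};

	static enum rbd_obj_req_kind rbd_obj_request_kind(struct rbd_obj_request *obj_request)
	{
		if (obj_request->which != BAD_WHICH)
			return OBJ_KIND_IMG_DATA;
		if (obj_request->obj_request)
			return OBJ_KIND_STAT;

		return OBJ_KIND_STANDALONE;
	}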
+enum img_req_flags {
+ IMG_REQ_WRITE, /* I/O direction: read = 0, write = 1 */
+ IMG_REQ_CHILD, /* initiator: block = 0, child image = 1 */
+ IMG_REQ_LAYERED, /* ENOENT handling: normal = 0, layered = 1 */
+};
+
struct rbd_img_request {
- struct request *rq;
struct rbd_device *rbd_dev;
u64 offset; /* starting image byte offset */
u64 length; /* byte count from offset */
- bool write_request; /* false for read */
+ unsigned long flags;
union {
+ u64 snap_id; /* for reads */
struct ceph_snap_context *snapc; /* for writes */
- u64 snap_id; /* for reads */
};
+ union {
+ struct request *rq; /* block request */
+ struct rbd_obj_request *obj_request; /* obj req initiator */
+ };
+ struct page **copyup_pages;
spinlock_t completion_lock;/* protects next_completion */
u32 next_completion;
rbd_img_callback_t callback;
+ u64 xferred;/* aggregate bytes transferred */
+ int result; /* first nonzero obj_request result */
u32 obj_request_count;
struct list_head obj_requests; /* rbd_obj_request structs */
@@ -232,15 +276,6 @@ struct rbd_img_request {
#define for_each_obj_request_safe(ireq, oreq, n) \
list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
-struct rbd_snap {
- struct device dev;
- const char *name;
- u64 size;
- struct list_head node;
- u64 id;
- u64 features;
-};
-
struct rbd_mapping {
u64 size;
u64 features;
@@ -276,6 +311,7 @@ struct rbd_device {
struct rbd_spec *parent_spec;
u64 parent_overlap;
+ struct rbd_device *parent;
/* protects updating the header */
struct rw_semaphore header_rwsem;
@@ -284,9 +320,6 @@ struct rbd_device {
struct list_head node;
- /* list of snapshots */
- struct list_head snaps;
-
/* sysfs related */
struct device dev;
unsigned long open_count; /* protected by lock */
@@ -312,16 +345,21 @@ static DEFINE_SPINLOCK(rbd_dev_list_lock);
static LIST_HEAD(rbd_client_list); /* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);
-static int rbd_dev_snaps_update(struct rbd_device *rbd_dev);
-static int rbd_dev_snaps_register(struct rbd_device *rbd_dev);
+/* Slab caches for frequently-allocated structures */
+
+static struct kmem_cache *rbd_img_request_cache;
+static struct kmem_cache *rbd_obj_request_cache;
+static struct kmem_cache *rbd_segment_name_cache;
-static void rbd_dev_release(struct device *dev);
-static void rbd_remove_snap_dev(struct rbd_snap *snap);
+static int rbd_img_request_submit(struct rbd_img_request *img_request);
+
+static void rbd_dev_device_release(struct device *dev);
static ssize_t rbd_add(struct bus_type *bus, const char *buf,
size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
size_t count);
+static int rbd_dev_image_probe(struct rbd_device *rbd_dev);
static struct bus_attribute rbd_bus_attrs[] = {
__ATTR(add, S_IWUSR, NULL, rbd_add),
@@ -383,8 +421,19 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
# define rbd_assert(expr) ((void) 0)
#endif /* !RBD_DEBUG */
-static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver);
-static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver);
+static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
+static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
+static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
+
+static int rbd_dev_refresh(struct rbd_device *rbd_dev);
+static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev);
+static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
+ u64 snap_id);
+static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
+ u8 *order, u64 *snap_size);
+static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
+ u64 *snap_features);
+static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
@@ -411,7 +460,7 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
return 0;
}
-static int rbd_release(struct gendisk *disk, fmode_t mode)
+static void rbd_release(struct gendisk *disk, fmode_t mode)
{
struct rbd_device *rbd_dev = disk->private_data;
unsigned long open_count_before;
@@ -424,8 +473,6 @@ static int rbd_release(struct gendisk *disk, fmode_t mode)
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
put_device(&rbd_dev->dev);
mutex_unlock(&ctl_mutex);
-
- return 0;
}
static const struct block_device_operations rbd_bd_ops = {
@@ -484,6 +531,13 @@ out_opt:
return ERR_PTR(ret);
}
+static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
+{
+ kref_get(&rbdc->kref);
+
+ return rbdc;
+}
+
/*
* Find a ceph client with specific addr and configuration. If
* found, bump its reference count.
@@ -499,7 +553,8 @@ static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
spin_lock(&rbd_client_list_lock);
list_for_each_entry(client_node, &rbd_client_list, node) {
if (!ceph_compare_options(ceph_opts, client_node->client)) {
- kref_get(&client_node->kref);
+ __rbd_get_client(client_node);
+
found = true;
break;
}
@@ -722,7 +777,6 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
header->snap_sizes[i] =
le64_to_cpu(ondisk->snaps[i].image_size);
} else {
- WARN_ON(ondisk->snap_names_len);
header->snap_names = NULL;
header->snap_sizes = NULL;
}
@@ -735,18 +789,13 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
/* Allocate and fill in the snapshot context */
header->image_size = le64_to_cpu(ondisk->image_size);
- size = sizeof (struct ceph_snap_context);
- size += snap_count * sizeof (header->snapc->snaps[0]);
- header->snapc = kzalloc(size, GFP_KERNEL);
+
+ header->snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
if (!header->snapc)
goto out_err;
-
- atomic_set(&header->snapc->nref, 1);
header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
- header->snapc->num_snaps = snap_count;
for (i = 0; i < snap_count; i++)
- header->snapc->snaps[i] =
- le64_to_cpu(ondisk->snaps[i].id);
+ header->snapc->snaps[i] = le64_to_cpu(ondisk->snaps[i].id);
return 0;
@@ -761,70 +810,174 @@ out_err:
return -ENOMEM;
}
-static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
+static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
+{
+ const char *snap_name;
+
+ rbd_assert(which < rbd_dev->header.snapc->num_snaps);
+
+ /* Skip over names until we find the one we are looking for */
+
+ snap_name = rbd_dev->header.snap_names;
+ while (which--)
+ snap_name += strlen(snap_name) + 1;
+
+ return kstrdup(snap_name, GFP_KERNEL);
+}
+
+/*
+ * Snapshot id comparison function for use with qsort()/bsearch().
+ * Note that the result is for snapshots in *descending* order.
+ */
+static int snapid_compare_reverse(const void *s1, const void *s2)
+{
+ u64 snap_id1 = *(u64 *)s1;
+ u64 snap_id2 = *(u64 *)s2;
+
+ if (snap_id1 < snap_id2)
+ return 1;
+ return snap_id1 == snap_id2 ? 0 : -1;
+}
+
+/*
+ * Search a snapshot context to see if the given snapshot id is
+ * present.
+ *
+ * Returns the position of the snapshot id in the array if it's found,
+ * or BAD_SNAP_INDEX otherwise.
+ *
+ * Note: The snapshot array is kept sorted (by the osd) in
+ * reverse order, highest snapshot id first.
+ */
+static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
- struct rbd_snap *snap;
+ struct ceph_snap_context *snapc = rbd_dev->header.snapc;
+ u64 *found;
+ found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
+ sizeof (snap_id), snapid_compare_reverse);
+
+ return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
+}
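Since the osd keeps snapshot ids sorted highest-first, the comparator above inverts the usual ordering; a minimal standalone sketch of the same lookup with hypothetical ids:

	static void rbd_snap_index_example(void)
	{
		u64 snaps[] = { 90, 75, 40, 12 };	/* descending, as the osd keeps them */
		u64 key = 40;
		u64 *found;

		found = bsearch(&key, snaps, ARRAY_SIZE(snaps), sizeof (key),
				snapid_compare_reverse);
		/* found points at snaps[2], so the reported index would be 2 */
	}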
+
+static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
+ u64 snap_id)
+{
+ u32 which;
+
+ which = rbd_dev_snap_index(rbd_dev, snap_id);
+ if (which == BAD_SNAP_INDEX)
+ return NULL;
+
+ return _rbd_dev_v1_snap_name(rbd_dev, which);
+}
+
+static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
+{
if (snap_id == CEPH_NOSNAP)
return RBD_SNAP_HEAD_NAME;
- list_for_each_entry(snap, &rbd_dev->snaps, node)
- if (snap_id == snap->id)
- return snap->name;
+ rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+ if (rbd_dev->image_format == 1)
+ return rbd_dev_v1_snap_name(rbd_dev, snap_id);
- return NULL;
+ return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
-static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
+static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
+ u64 *snap_size)
{
+ rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+ if (snap_id == CEPH_NOSNAP) {
+ *snap_size = rbd_dev->header.image_size;
+ } else if (rbd_dev->image_format == 1) {
+ u32 which;
- struct rbd_snap *snap;
+ which = rbd_dev_snap_index(rbd_dev, snap_id);
+ if (which == BAD_SNAP_INDEX)
+ return -ENOENT;
- list_for_each_entry(snap, &rbd_dev->snaps, node) {
- if (!strcmp(snap_name, snap->name)) {
- rbd_dev->spec->snap_id = snap->id;
- rbd_dev->mapping.size = snap->size;
- rbd_dev->mapping.features = snap->features;
+ *snap_size = rbd_dev->header.snap_sizes[which];
+ } else {
+ u64 size = 0;
+ int ret;
- return 0;
- }
+ ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
+ if (ret)
+ return ret;
+
+ *snap_size = size;
}
+ return 0;
+}
+
+static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
+ u64 *snap_features)
+{
+ rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+ if (snap_id == CEPH_NOSNAP) {
+ *snap_features = rbd_dev->header.features;
+ } else if (rbd_dev->image_format == 1) {
+ *snap_features = 0; /* No features for format 1 */
+ } else {
+ u64 features = 0;
+ int ret;
- return -ENOENT;
+ ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
+ if (ret)
+ return ret;
+
+ *snap_features = features;
+ }
+ return 0;
}
-static int rbd_dev_set_mapping(struct rbd_device *rbd_dev)
+static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
+ const char *snap_name = rbd_dev->spec->snap_name;
+ u64 snap_id;
+ u64 size = 0;
+ u64 features = 0;
int ret;
- if (!memcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME,
- sizeof (RBD_SNAP_HEAD_NAME))) {
- rbd_dev->spec->snap_id = CEPH_NOSNAP;
- rbd_dev->mapping.size = rbd_dev->header.image_size;
- rbd_dev->mapping.features = rbd_dev->header.features;
- ret = 0;
+ if (strcmp(snap_name, RBD_SNAP_HEAD_NAME)) {
+ snap_id = rbd_snap_id_by_name(rbd_dev, snap_name);
+ if (snap_id == CEPH_NOSNAP)
+ return -ENOENT;
} else {
- ret = snap_by_name(rbd_dev, rbd_dev->spec->snap_name);
- if (ret < 0)
- goto done;
- rbd_dev->mapping.read_only = true;
+ snap_id = CEPH_NOSNAP;
}
- set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
-done:
- return ret;
+ ret = rbd_snap_size(rbd_dev, snap_id, &size);
+ if (ret)
+ return ret;
+ ret = rbd_snap_features(rbd_dev, snap_id, &features);
+ if (ret)
+ return ret;
+
+ rbd_dev->mapping.size = size;
+ rbd_dev->mapping.features = features;
+
+ /* If we are mapping a snapshot it must be marked read-only */
+
+ if (snap_id != CEPH_NOSNAP)
+ rbd_dev->mapping.read_only = true;
+
+ return 0;
}
-static void rbd_header_free(struct rbd_image_header *header)
+static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
- kfree(header->object_prefix);
- header->object_prefix = NULL;
- kfree(header->snap_sizes);
- header->snap_sizes = NULL;
- kfree(header->snap_names);
- header->snap_names = NULL;
- ceph_put_snap_context(header->snapc);
- header->snapc = NULL;
+ rbd_dev->mapping.size = 0;
+ rbd_dev->mapping.features = 0;
+ rbd_dev->mapping.read_only = true;
+}
+
+static void rbd_dev_clear_mapping(struct rbd_device *rbd_dev)
+{
+ rbd_dev->mapping.size = 0;
+ rbd_dev->mapping.features = 0;
+ rbd_dev->mapping.read_only = true;
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
@@ -833,7 +986,7 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
u64 segment;
int ret;
- name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO);
+ name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
if (!name)
return NULL;
segment = offset >> rbd_dev->header.obj_order;
@@ -849,6 +1002,13 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
return name;
}
+static void rbd_segment_name_free(const char *name)
+{
+ /* The explicit cast here is needed to drop the const qualifier */
+
+ kmem_cache_free(rbd_segment_name_cache, (void *)name);
+}
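The segment number comes from shifting the image offset right by obj_order (see rbd_segment_name() above); a small worked sketch, assuming the common default order of 22 (4 MiB objects) and the usual mask-based in-segment offset, neither of which is stated in this hunk:

	static void rbd_segment_math_example(void)
	{
		u8 obj_order = 22;				/* assumed: 4 MiB objects */
		u64 offset = 10 * 1024 * 1024;			/* 10 MiB into the image */
		u64 segment = offset >> obj_order;			/* == 2 */
		u64 seg_off = offset & (((u64)1 << obj_order) - 1);	/* == 2 MiB */

		(void)segment;
		(void)seg_off;
	}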
+
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
@@ -921,6 +1081,37 @@ static void zero_bio_chain(struct bio *chain, int start_ofs)
}
/*
+ * Similar to zero_bio_chain(), zeros data defined by a page array,
+ * starting at the given byte offset from the start of the array and
+ * continuing up to the given end offset. The pages array is
+ * assumed to be big enough to hold all bytes up to the end.
+ */
+static void zero_pages(struct page **pages, u64 offset, u64 end)
+{
+ struct page **page = &pages[offset >> PAGE_SHIFT];
+
+ rbd_assert(end > offset);
+ rbd_assert(end - offset <= (u64)SIZE_MAX);
+ while (offset < end) {
+ size_t page_offset;
+ size_t length;
+ unsigned long flags;
+ void *kaddr;
+
+ page_offset = (size_t)(offset & ~PAGE_MASK);
+ length = min(PAGE_SIZE - page_offset, (size_t)(end - offset));
+ local_irq_save(flags);
+ kaddr = kmap_atomic(*page);
+ memset(kaddr + page_offset, 0, length);
+ kunmap_atomic(kaddr);
+ local_irq_restore(flags);
+
+ offset += length;
+ page++;
+ }
+}
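A quick usage sketch with hypothetical numbers (4 KiB pages assumed): clearing bytes 5000..8999 of a page array touches two pages, as traced in the comments:

	static void zero_pages_example(struct page **pages)
	{
		/* pages[1]: 3192 bytes cleared, starting at in-page offset 904 */
		/* pages[2]: first 808 bytes cleared */
		zero_pages(pages, 5000, 9000);
	}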
+
+/*
* Clone a portion of a bio, starting at the given byte offset
* and continuing for the number of bytes indicated.
*/
@@ -952,7 +1143,7 @@ static struct bio *bio_clone_range(struct bio *bio_src,
/* Find first affected segment... */
resid = offset;
- __bio_for_each_segment(bv, bio_src, idx, 0) {
+ bio_for_each_segment(bv, bio_src, idx) {
if (resid < bv->bv_len)
break;
resid -= bv->bv_len;
@@ -1064,6 +1255,77 @@ out_err:
return NULL;
}
+/*
+ * The default/initial value for all object request flags is 0. For
+ * each flag, once its value is set to 1 it is never reset to 0
+ * again.
+ */
+static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
+{
+ if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
+ struct rbd_device *rbd_dev;
+
+ rbd_dev = obj_request->img_request->rbd_dev;
+ rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
+ obj_request);
+ }
+}
+
+static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
+{
+ smp_mb();
+ return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
+}
+
+static void obj_request_done_set(struct rbd_obj_request *obj_request)
+{
+ if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
+ struct rbd_device *rbd_dev = NULL;
+
+ if (obj_request_img_data_test(obj_request))
+ rbd_dev = obj_request->img_request->rbd_dev;
+ rbd_warn(rbd_dev, "obj_request %p already marked done\n",
+ obj_request);
+ }
+}
+
+static bool obj_request_done_test(struct rbd_obj_request *obj_request)
+{
+ smp_mb();
+ return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
+}
+
+/*
+ * This sets the KNOWN flag after (possibly) setting the EXISTS
+ * flag. The latter is set based on the "exists" value provided.
+ *
+ * Note that for our purposes once an object exists it never goes
+ * away again. It's possible that the responses from two existence
+ * checks are separated by the creation of the target object, and
+ * the first ("doesn't exist") response arrives *after* the second
+ * ("does exist"). In that case we ignore the stale "doesn't exist"
+ * response that arrived last.
+ */
+static void obj_request_existence_set(struct rbd_obj_request *obj_request,
+ bool exists)
+{
+ if (exists)
+ set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
+ set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
+ smp_mb();
+}
+
+static bool obj_request_known_test(struct rbd_obj_request *obj_request)
+{
+ smp_mb();
+ return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
+}
+
+static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
+{
+ smp_mb();
+ return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
+}
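A hedged illustration (not part of the patch) of the ordering rule described above: once an object has been reported to exist, a stale "doesn't exist" answer arriving later has no effect.

	static void rbd_existence_race_example(struct rbd_obj_request *obj_request)
	{
		obj_request_existence_set(obj_request, true);	/* newer check: exists */
		obj_request_existence_set(obj_request, false);	/* stale reply arrives late */

		/* KNOWN and EXISTS both remain set */
		WARN_ON(!obj_request_known_test(obj_request));
		WARN_ON(!obj_request_exists_test(obj_request));
	}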
+
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
dout("%s: obj %p (was %d)\n", __func__, obj_request,
@@ -1101,9 +1363,11 @@ static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
{
rbd_assert(obj_request->img_request == NULL);
- rbd_obj_request_get(obj_request);
+ /* Image request now owns object's original reference */
obj_request->img_request = img_request;
obj_request->which = img_request->obj_request_count;
+ rbd_assert(!obj_request_img_data_test(obj_request));
+ obj_request_img_data_set(obj_request);
rbd_assert(obj_request->which != BAD_WHICH);
img_request->obj_request_count++;
list_add_tail(&obj_request->links, &img_request->obj_requests);
@@ -1123,6 +1387,7 @@ static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
img_request->obj_request_count--;
rbd_assert(obj_request->which == img_request->obj_request_count);
obj_request->which = BAD_WHICH;
+ rbd_assert(obj_request_img_data_test(obj_request));
rbd_assert(obj_request->img_request == img_request);
obj_request->img_request = NULL;
obj_request->callback = NULL;
@@ -1141,76 +1406,6 @@ static bool obj_request_type_valid(enum obj_request_type type)
}
}
-static struct ceph_osd_req_op *rbd_osd_req_op_create(u16 opcode, ...)
-{
- struct ceph_osd_req_op *op;
- va_list args;
- size_t size;
-
- op = kzalloc(sizeof (*op), GFP_NOIO);
- if (!op)
- return NULL;
- op->op = opcode;
- va_start(args, opcode);
- switch (opcode) {
- case CEPH_OSD_OP_READ:
- case CEPH_OSD_OP_WRITE:
- /* rbd_osd_req_op_create(READ, offset, length) */
- /* rbd_osd_req_op_create(WRITE, offset, length) */
- op->extent.offset = va_arg(args, u64);
- op->extent.length = va_arg(args, u64);
- if (opcode == CEPH_OSD_OP_WRITE)
- op->payload_len = op->extent.length;
- break;
- case CEPH_OSD_OP_STAT:
- break;
- case CEPH_OSD_OP_CALL:
- /* rbd_osd_req_op_create(CALL, class, method, data, datalen) */
- op->cls.class_name = va_arg(args, char *);
- size = strlen(op->cls.class_name);
- rbd_assert(size <= (size_t) U8_MAX);
- op->cls.class_len = size;
- op->payload_len = size;
-
- op->cls.method_name = va_arg(args, char *);
- size = strlen(op->cls.method_name);
- rbd_assert(size <= (size_t) U8_MAX);
- op->cls.method_len = size;
- op->payload_len += size;
-
- op->cls.argc = 0;
- op->cls.indata = va_arg(args, void *);
- size = va_arg(args, size_t);
- rbd_assert(size <= (size_t) U32_MAX);
- op->cls.indata_len = (u32) size;
- op->payload_len += size;
- break;
- case CEPH_OSD_OP_NOTIFY_ACK:
- case CEPH_OSD_OP_WATCH:
- /* rbd_osd_req_op_create(NOTIFY_ACK, cookie, version) */
- /* rbd_osd_req_op_create(WATCH, cookie, version, flag) */
- op->watch.cookie = va_arg(args, u64);
- op->watch.ver = va_arg(args, u64);
- op->watch.ver = cpu_to_le64(op->watch.ver);
- if (opcode == CEPH_OSD_OP_WATCH && va_arg(args, int))
- op->watch.flag = (u8) 1;
- break;
- default:
- rbd_warn(NULL, "unsupported opcode %hu\n", opcode);
- kfree(op);
- op = NULL;
- break;
- }
- va_end(args);
-
- return op;
-}
-
-static void rbd_osd_req_op_destroy(struct ceph_osd_req_op *op)
-{
- kfree(op);
-}
-
static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
struct rbd_obj_request *obj_request)
{
@@ -1221,7 +1416,24 @@ static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
+
dout("%s: img %p\n", __func__, img_request);
+
+ /*
+ * If no error occurred, compute the aggregate transfer
+ * count for the image request. We could instead use
+ * atomic64_cmpxchg() to update it as each object request
+ * completes; it's not clear offhand which way is better.
+ */
+ if (!img_request->result) {
+ struct rbd_obj_request *obj_request;
+ u64 xferred = 0;
+
+ for_each_obj_request(img_request, obj_request)
+ xferred += obj_request->xferred;
+ img_request->xferred = xferred;
+ }
+
if (img_request->callback)
img_request->callback(img_request);
else
@@ -1237,39 +1449,56 @@ static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
return wait_for_completion_interruptible(&obj_request->completion);
}
-static void obj_request_done_init(struct rbd_obj_request *obj_request)
+/*
+ * The default/initial value for all image request flags is 0. Each
+ * is conditionally set to 1 at image request initialization time
+ * and currently never change thereafter.
+ */
+static void img_request_write_set(struct rbd_img_request *img_request)
{
- atomic_set(&obj_request->done, 0);
- smp_wmb();
+ set_bit(IMG_REQ_WRITE, &img_request->flags);
+ smp_mb();
}
-static void obj_request_done_set(struct rbd_obj_request *obj_request)
+static bool img_request_write_test(struct rbd_img_request *img_request)
{
- int done;
+ smp_mb();
+ return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
+}
- done = atomic_inc_return(&obj_request->done);
- if (done > 1) {
- struct rbd_img_request *img_request = obj_request->img_request;
- struct rbd_device *rbd_dev;
+static void img_request_child_set(struct rbd_img_request *img_request)
+{
+ set_bit(IMG_REQ_CHILD, &img_request->flags);
+ smp_mb();
+}
- rbd_dev = img_request ? img_request->rbd_dev : NULL;
- rbd_warn(rbd_dev, "obj_request %p was already done\n",
- obj_request);
- }
+static bool img_request_child_test(struct rbd_img_request *img_request)
+{
+ smp_mb();
+ return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}
-static bool obj_request_done_test(struct rbd_obj_request *obj_request)
+static void img_request_layered_set(struct rbd_img_request *img_request)
+{
+ set_bit(IMG_REQ_LAYERED, &img_request->flags);
+ smp_mb();
+}
+
+static bool img_request_layered_test(struct rbd_img_request *img_request)
{
smp_mb();
- return atomic_read(&obj_request->done) != 0;
+ return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
+ u64 xferred = obj_request->xferred;
+ u64 length = obj_request->length;
+
dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
obj_request, obj_request->img_request, obj_request->result,
- obj_request->xferred, obj_request->length);
+ xferred, length);
/*
* ENOENT means a hole in the image. We zero-fill the
* entire length of the request. A short read also implies
@@ -1277,15 +1506,20 @@ rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
* update the xferred count to indicate the whole request
* was satisfied.
*/
- BUG_ON(obj_request->type != OBJ_REQUEST_BIO);
+ rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
if (obj_request->result == -ENOENT) {
- zero_bio_chain(obj_request->bio_list, 0);
+ if (obj_request->type == OBJ_REQUEST_BIO)
+ zero_bio_chain(obj_request->bio_list, 0);
+ else
+ zero_pages(obj_request->pages, 0, length);
obj_request->result = 0;
- obj_request->xferred = obj_request->length;
- } else if (obj_request->xferred < obj_request->length &&
- !obj_request->result) {
- zero_bio_chain(obj_request->bio_list, obj_request->xferred);
- obj_request->xferred = obj_request->length;
+ obj_request->xferred = length;
+ } else if (xferred < length && !obj_request->result) {
+ if (obj_request->type == OBJ_REQUEST_BIO)
+ zero_bio_chain(obj_request->bio_list, xferred);
+ else
+ zero_pages(obj_request->pages, xferred, length);
+ obj_request->xferred = length;
}
obj_request_done_set(obj_request);
}
@@ -1308,9 +1542,23 @@ static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
- dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request,
- obj_request->result, obj_request->xferred, obj_request->length);
- if (obj_request->img_request)
+ struct rbd_img_request *img_request = NULL;
+ struct rbd_device *rbd_dev = NULL;
+ bool layered = false;
+
+ if (obj_request_img_data_test(obj_request)) {
+ img_request = obj_request->img_request;
+ layered = img_request && img_request_layered_test(img_request);
+ rbd_dev = img_request->rbd_dev;
+ }
+
+ dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
+ obj_request, img_request, obj_request->result,
+ obj_request->xferred, obj_request->length);
+ if (layered && obj_request->result == -ENOENT &&
+ obj_request->img_offset < rbd_dev->parent_overlap)
+ rbd_img_parent_read(obj_request);
+ else if (img_request)
rbd_img_obj_request_read_callback(obj_request);
else
obj_request_done_set(obj_request);
@@ -1321,9 +1569,8 @@ static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
dout("%s: obj %p result %d %llu\n", __func__, obj_request,
obj_request->result, obj_request->length);
/*
- * There is no such thing as a successful short write.
- * Our xferred value is the number of bytes transferred
- * back. Set it to our originally-requested length.
+ * There is no such thing as a successful short write. Set
+ * the xferred count to our originally-requested length.
*/
obj_request->xferred = obj_request->length;
obj_request_done_set(obj_request);
@@ -1347,22 +1594,25 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
rbd_assert(osd_req == obj_request->osd_req);
- rbd_assert(!!obj_request->img_request ^
- (obj_request->which == BAD_WHICH));
+ if (obj_request_img_data_test(obj_request)) {
+ rbd_assert(obj_request->img_request);
+ rbd_assert(obj_request->which != BAD_WHICH);
+ } else {
+ rbd_assert(obj_request->which == BAD_WHICH);
+ }
if (osd_req->r_result < 0)
obj_request->result = osd_req->r_result;
- obj_request->version = le64_to_cpu(osd_req->r_reassert_version.version);
- WARN_ON(osd_req->r_num_ops != 1); /* For now */
+ BUG_ON(osd_req->r_num_ops > 2);
/*
* We support a 64-bit length, but ultimately it has to be
* passed to blk_end_request(), which takes an unsigned int.
*/
obj_request->xferred = osd_req->r_reply_op_len[0];
- rbd_assert(obj_request->xferred < (u64) UINT_MAX);
- opcode = osd_req->r_request_ops[0].op;
+ rbd_assert(obj_request->xferred < (u64)UINT_MAX);
+ opcode = osd_req->r_ops[0].op;
switch (opcode) {
case CEPH_OSD_OP_READ:
rbd_osd_read_callback(obj_request);
@@ -1388,28 +1638,49 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
rbd_obj_request_complete(obj_request);
}
+static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
+{
+ struct rbd_img_request *img_request = obj_request->img_request;
+ struct ceph_osd_request *osd_req = obj_request->osd_req;
+ u64 snap_id;
+
+ rbd_assert(osd_req != NULL);
+
+ snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
+ ceph_osdc_build_request(osd_req, obj_request->offset,
+ NULL, snap_id, NULL);
+}
+
+static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
+{
+ struct rbd_img_request *img_request = obj_request->img_request;
+ struct ceph_osd_request *osd_req = obj_request->osd_req;
+ struct ceph_snap_context *snapc;
+ struct timespec mtime = CURRENT_TIME;
+
+ rbd_assert(osd_req != NULL);
+
+ snapc = img_request ? img_request->snapc : NULL;
+ ceph_osdc_build_request(osd_req, obj_request->offset,
+ snapc, CEPH_NOSNAP, &mtime);
+}
+
static struct ceph_osd_request *rbd_osd_req_create(
struct rbd_device *rbd_dev,
bool write_request,
- struct rbd_obj_request *obj_request,
- struct ceph_osd_req_op *op)
+ struct rbd_obj_request *obj_request)
{
- struct rbd_img_request *img_request = obj_request->img_request;
struct ceph_snap_context *snapc = NULL;
struct ceph_osd_client *osdc;
struct ceph_osd_request *osd_req;
- struct timespec now;
- struct timespec *mtime;
- u64 snap_id = CEPH_NOSNAP;
- u64 offset = obj_request->offset;
- u64 length = obj_request->length;
- if (img_request) {
- rbd_assert(img_request->write_request == write_request);
- if (img_request->write_request)
+ if (obj_request_img_data_test(obj_request)) {
+ struct rbd_img_request *img_request = obj_request->img_request;
+
+ rbd_assert(write_request ==
+ img_request_write_test(img_request));
+ if (write_request)
snapc = img_request->snapc;
- else
- snap_id = img_request->snap_id;
}
/* Allocate and initialize the request, for the single op */
@@ -1419,31 +1690,10 @@ static struct ceph_osd_request *rbd_osd_req_create(
if (!osd_req)
return NULL; /* ENOMEM */
- rbd_assert(obj_request_type_valid(obj_request->type));
- switch (obj_request->type) {
- case OBJ_REQUEST_NODATA:
- break; /* Nothing to do */
- case OBJ_REQUEST_BIO:
- rbd_assert(obj_request->bio_list != NULL);
- osd_req->r_bio = obj_request->bio_list;
- break;
- case OBJ_REQUEST_PAGES:
- osd_req->r_pages = obj_request->pages;
- osd_req->r_num_pages = obj_request->page_count;
- osd_req->r_page_alignment = offset & ~PAGE_MASK;
- break;
- }
-
- if (write_request) {
+ if (write_request)
osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
- now = CURRENT_TIME;
- mtime = &now;
- } else {
+ else
osd_req->r_flags = CEPH_OSD_FLAG_READ;
- mtime = NULL; /* not needed for reads */
- offset = 0; /* These are not used... */
- length = 0; /* ...for osd read requests */
- }
osd_req->r_callback = rbd_osd_req_callback;
osd_req->r_priv = obj_request;
@@ -1454,14 +1704,51 @@ static struct ceph_osd_request *rbd_osd_req_create(
osd_req->r_file_layout = rbd_dev->layout; /* struct */
- /* osd_req will get its own reference to snapc (if non-null) */
+ return osd_req;
+}
+
+/*
+ * Create a copyup osd request based on the information in the
+ * object request supplied. A copyup request has two osd ops:
+ * a copyup method call and a "normal" write request.
+ */
+static struct ceph_osd_request *
+rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
+{
+ struct rbd_img_request *img_request;
+ struct ceph_snap_context *snapc;
+ struct rbd_device *rbd_dev;
+ struct ceph_osd_client *osdc;
+ struct ceph_osd_request *osd_req;
+
+ rbd_assert(obj_request_img_data_test(obj_request));
+ img_request = obj_request->img_request;
+ rbd_assert(img_request);
+ rbd_assert(img_request_write_test(img_request));
- ceph_osdc_build_request(osd_req, offset, length, 1, op,
- snapc, snap_id, mtime);
+ /* Allocate and initialize the request, for the two ops */
+
+ snapc = img_request->snapc;
+ rbd_dev = img_request->rbd_dev;
+ osdc = &rbd_dev->rbd_client->client->osdc;
+ osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
+ if (!osd_req)
+ return NULL; /* ENOMEM */
+
+ osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
+ osd_req->r_callback = rbd_osd_req_callback;
+ osd_req->r_priv = obj_request;
+
+ osd_req->r_oid_len = strlen(obj_request->object_name);
+ rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
+ memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
+
+ osd_req->r_file_layout = rbd_dev->layout; /* struct */
return osd_req;
}
+
static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
ceph_osdc_put_request(osd_req);
@@ -1480,18 +1767,23 @@ static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
rbd_assert(obj_request_type_valid(type));
size = strlen(object_name) + 1;
- obj_request = kzalloc(sizeof (*obj_request) + size, GFP_KERNEL);
- if (!obj_request)
+ name = kmalloc(size, GFP_KERNEL);
+ if (!name)
+ return NULL;
+
+ obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
+ if (!obj_request) {
+ kfree(name);
return NULL;
+ }
- name = (char *)(obj_request + 1);
obj_request->object_name = memcpy(name, object_name, size);
obj_request->offset = offset;
obj_request->length = length;
+ obj_request->flags = 0;
obj_request->which = BAD_WHICH;
obj_request->type = type;
INIT_LIST_HEAD(&obj_request->links);
- obj_request_done_init(obj_request);
init_completion(&obj_request->completion);
kref_init(&obj_request->kref);
@@ -1530,7 +1822,9 @@ static void rbd_obj_request_destroy(struct kref *kref)
break;
}
- kfree(obj_request);
+ kfree(obj_request->object_name);
+ obj_request->object_name = NULL;
+ kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/*
@@ -1541,37 +1835,40 @@ static void rbd_obj_request_destroy(struct kref *kref)
static struct rbd_img_request *rbd_img_request_create(
struct rbd_device *rbd_dev,
u64 offset, u64 length,
- bool write_request)
+ bool write_request,
+ bool child_request)
{
struct rbd_img_request *img_request;
- struct ceph_snap_context *snapc = NULL;
- img_request = kmalloc(sizeof (*img_request), GFP_ATOMIC);
+ img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
if (!img_request)
return NULL;
if (write_request) {
down_read(&rbd_dev->header_rwsem);
- snapc = ceph_get_snap_context(rbd_dev->header.snapc);
+ ceph_get_snap_context(rbd_dev->header.snapc);
up_read(&rbd_dev->header_rwsem);
- if (WARN_ON(!snapc)) {
- kfree(img_request);
- return NULL; /* Shouldn't happen */
- }
}
img_request->rq = NULL;
img_request->rbd_dev = rbd_dev;
img_request->offset = offset;
img_request->length = length;
- img_request->write_request = write_request;
- if (write_request)
- img_request->snapc = snapc;
- else
+ img_request->flags = 0;
+ if (write_request) {
+ img_request_write_set(img_request);
+ img_request->snapc = rbd_dev->header.snapc;
+ } else {
img_request->snap_id = rbd_dev->spec->snap_id;
+ }
+ if (child_request)
+ img_request_child_set(img_request);
+ if (rbd_dev->parent_spec)
+ img_request_layered_set(img_request);
spin_lock_init(&img_request->completion_lock);
img_request->next_completion = 0;
img_request->callback = NULL;
+ img_request->result = 0;
img_request->obj_request_count = 0;
INIT_LIST_HEAD(&img_request->obj_requests);
kref_init(&img_request->kref);
@@ -1600,78 +1897,204 @@ static void rbd_img_request_destroy(struct kref *kref)
rbd_img_obj_request_del(img_request, obj_request);
rbd_assert(img_request->obj_request_count == 0);
- if (img_request->write_request)
+ if (img_request_write_test(img_request))
ceph_put_snap_context(img_request->snapc);
- kfree(img_request);
+ if (img_request_child_test(img_request))
+ rbd_obj_request_put(img_request->obj_request);
+
+ kmem_cache_free(rbd_img_request_cache, img_request);
+}
+
+static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
+{
+ struct rbd_img_request *img_request;
+ unsigned int xferred;
+ int result;
+ bool more;
+
+ rbd_assert(obj_request_img_data_test(obj_request));
+ img_request = obj_request->img_request;
+
+ rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
+ xferred = (unsigned int)obj_request->xferred;
+ result = obj_request->result;
+ if (result) {
+ struct rbd_device *rbd_dev = img_request->rbd_dev;
+
+ rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
+ img_request_write_test(img_request) ? "write" : "read",
+ obj_request->length, obj_request->img_offset,
+ obj_request->offset);
+ rbd_warn(rbd_dev, " result %d xferred %x\n",
+ result, xferred);
+ if (!img_request->result)
+ img_request->result = result;
+ }
+
+ /* Image object requests don't own their page array */
+
+ if (obj_request->type == OBJ_REQUEST_PAGES) {
+ obj_request->pages = NULL;
+ obj_request->page_count = 0;
+ }
+
+ if (img_request_child_test(img_request)) {
+ rbd_assert(img_request->obj_request != NULL);
+ more = obj_request->which < img_request->obj_request_count - 1;
+ } else {
+ rbd_assert(img_request->rq != NULL);
+ more = blk_end_request(img_request->rq, result, xferred);
+ }
+
+ return more;
}
-static int rbd_img_request_fill_bio(struct rbd_img_request *img_request,
- struct bio *bio_list)
+static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
+{
+ struct rbd_img_request *img_request;
+ u32 which = obj_request->which;
+ bool more = true;
+
+ rbd_assert(obj_request_img_data_test(obj_request));
+ img_request = obj_request->img_request;
+
+ dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
+ rbd_assert(img_request != NULL);
+ rbd_assert(img_request->obj_request_count > 0);
+ rbd_assert(which != BAD_WHICH);
+ rbd_assert(which < img_request->obj_request_count);
+ rbd_assert(which >= img_request->next_completion);
+
+ spin_lock_irq(&img_request->completion_lock);
+ if (which != img_request->next_completion)
+ goto out;
+
+ for_each_obj_request_from(img_request, obj_request) {
+ rbd_assert(more);
+ rbd_assert(which < img_request->obj_request_count);
+
+ if (!obj_request_done_test(obj_request))
+ break;
+ more = rbd_img_obj_end_request(obj_request);
+ which++;
+ }
+
+ rbd_assert(more ^ (which == img_request->obj_request_count));
+ img_request->next_completion = which;
+out:
+ spin_unlock_irq(&img_request->completion_lock);
+
+ if (!more)
+ rbd_img_request_complete(img_request);
+}
+
+/*
+ * Split up an image request into one or more object requests, each
+ * to a different object. The "type" parameter indicates whether
+ * "data_desc" is the pointer to the head of a list of bio
+ * structures, or the base of a page array. In either case this
+ * function assumes data_desc describes memory sufficient to hold
+ * all data described by the image request.
+ */
+static int rbd_img_request_fill(struct rbd_img_request *img_request,
+ enum obj_request_type type,
+ void *data_desc)
{
struct rbd_device *rbd_dev = img_request->rbd_dev;
struct rbd_obj_request *obj_request = NULL;
struct rbd_obj_request *next_obj_request;
- unsigned int bio_offset;
- u64 image_offset;
+ bool write_request = img_request_write_test(img_request);
+ struct bio *bio_list;
+ unsigned int bio_offset = 0;
+ struct page **pages;
+ u64 img_offset;
u64 resid;
u16 opcode;
- dout("%s: img %p bio %p\n", __func__, img_request, bio_list);
+ dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
+ (int)type, data_desc);
- opcode = img_request->write_request ? CEPH_OSD_OP_WRITE
- : CEPH_OSD_OP_READ;
- bio_offset = 0;
- image_offset = img_request->offset;
- rbd_assert(image_offset == bio_list->bi_sector << SECTOR_SHIFT);
+ opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
+ img_offset = img_request->offset;
resid = img_request->length;
rbd_assert(resid > 0);
+
+ if (type == OBJ_REQUEST_BIO) {
+ bio_list = data_desc;
+ rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
+ } else {
+ rbd_assert(type == OBJ_REQUEST_PAGES);
+ pages = data_desc;
+ }
+
while (resid) {
+ struct ceph_osd_request *osd_req;
const char *object_name;
- unsigned int clone_size;
- struct ceph_osd_req_op *op;
u64 offset;
u64 length;
- object_name = rbd_segment_name(rbd_dev, image_offset);
+ object_name = rbd_segment_name(rbd_dev, img_offset);
if (!object_name)
goto out_unwind;
- offset = rbd_segment_offset(rbd_dev, image_offset);
- length = rbd_segment_length(rbd_dev, image_offset, resid);
+ offset = rbd_segment_offset(rbd_dev, img_offset);
+ length = rbd_segment_length(rbd_dev, img_offset, resid);
obj_request = rbd_obj_request_create(object_name,
- offset, length,
- OBJ_REQUEST_BIO);
- kfree(object_name); /* object request has its own copy */
+ offset, length, type);
+ /* object request has its own copy of the object name */
+ rbd_segment_name_free(object_name);
if (!obj_request)
goto out_unwind;
- rbd_assert(length <= (u64) UINT_MAX);
- clone_size = (unsigned int) length;
- obj_request->bio_list = bio_chain_clone_range(&bio_list,
- &bio_offset, clone_size,
- GFP_ATOMIC);
- if (!obj_request->bio_list)
- goto out_partial;
+ if (type == OBJ_REQUEST_BIO) {
+ unsigned int clone_size;
+
+ rbd_assert(length <= (u64)UINT_MAX);
+ clone_size = (unsigned int)length;
+ obj_request->bio_list =
+ bio_chain_clone_range(&bio_list,
+ &bio_offset,
+ clone_size,
+ GFP_ATOMIC);
+ if (!obj_request->bio_list)
+ goto out_partial;
+ } else {
+ unsigned int page_count;
+
+ obj_request->pages = pages;
+ page_count = (u32)calc_pages_for(offset, length);
+ obj_request->page_count = page_count;
+ if ((offset + length) & ~PAGE_MASK)
+ page_count--; /* more on last page */
+ pages += page_count;
+ }
- /*
- * Build up the op to use in building the osd
- * request. Note that the contents of the op are
- * copied by rbd_osd_req_create().
- */
- op = rbd_osd_req_op_create(opcode, offset, length);
- if (!op)
- goto out_partial;
- obj_request->osd_req = rbd_osd_req_create(rbd_dev,
- img_request->write_request,
- obj_request, op);
- rbd_osd_req_op_destroy(op);
- if (!obj_request->osd_req)
+ osd_req = rbd_osd_req_create(rbd_dev, write_request,
+ obj_request);
+ if (!osd_req)
goto out_partial;
- /* status and version are initially zero-filled */
+ obj_request->osd_req = osd_req;
+ obj_request->callback = rbd_img_obj_callback;
+
+ osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
+ 0, 0);
+ if (type == OBJ_REQUEST_BIO)
+ osd_req_op_extent_osd_data_bio(osd_req, 0,
+ obj_request->bio_list, length);
+ else
+ osd_req_op_extent_osd_data_pages(osd_req, 0,
+ obj_request->pages, length,
+ offset & ~PAGE_MASK, false, false);
+ if (write_request)
+ rbd_osd_req_format_write(obj_request);
+ else
+ rbd_osd_req_format_read(obj_request);
+
+ obj_request->img_offset = img_offset;
rbd_img_obj_request_add(img_request, obj_request);
- image_offset += length;
+ img_offset += length;
resid -= length;
}
@@ -1686,61 +2109,389 @@ out_unwind:
return -ENOMEM;
}
-static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
+static void
+rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request;
- u32 which = obj_request->which;
- bool more = true;
+ struct rbd_device *rbd_dev;
+ u64 length;
+ u32 page_count;
+ rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
+ rbd_assert(obj_request_img_data_test(obj_request));
img_request = obj_request->img_request;
+ rbd_assert(img_request);
- dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
+ rbd_dev = img_request->rbd_dev;
+ rbd_assert(rbd_dev);
+ length = (u64)1 << rbd_dev->header.obj_order;
+ page_count = (u32)calc_pages_for(0, length);
+
+ rbd_assert(obj_request->copyup_pages);
+ ceph_release_page_vector(obj_request->copyup_pages, page_count);
+ obj_request->copyup_pages = NULL;
+
+ /*
+ * We want the transfer count to reflect the size of the
+ * original write request. There is no such thing as a
+ * successful short write, so if the request was successful
+ * we can just set it to the originally-requested length.
+ */
+ if (!obj_request->result)
+ obj_request->xferred = obj_request->length;
+
+ /* Finish up with the normal image object callback */
+
+ rbd_img_obj_callback(obj_request);
+}
+
+static void
+rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
+{
+ struct rbd_obj_request *orig_request;
+ struct ceph_osd_request *osd_req;
+ struct ceph_osd_client *osdc;
+ struct rbd_device *rbd_dev;
+ struct page **pages;
+ int result;
+ u64 obj_size;
+ u64 xferred;
+
+ rbd_assert(img_request_child_test(img_request));
+
+ /* First get what we need from the image request */
+
+ pages = img_request->copyup_pages;
+ rbd_assert(pages != NULL);
+ img_request->copyup_pages = NULL;
+
+ orig_request = img_request->obj_request;
+ rbd_assert(orig_request != NULL);
+ rbd_assert(orig_request->type == OBJ_REQUEST_BIO);
+ result = img_request->result;
+ obj_size = img_request->length;
+ xferred = img_request->xferred;
+
+ rbd_dev = img_request->rbd_dev;
+ rbd_assert(rbd_dev);
+ rbd_assert(obj_size == (u64)1 << rbd_dev->header.obj_order);
+
+ rbd_img_request_put(img_request);
+
+ if (result)
+ goto out_err;
+
+ /* Allocate the new copyup osd request for the original request */
+
+ result = -ENOMEM;
+ rbd_assert(!orig_request->osd_req);
+ osd_req = rbd_osd_req_create_copyup(orig_request);
+ if (!osd_req)
+ goto out_err;
+ orig_request->osd_req = osd_req;
+ orig_request->copyup_pages = pages;
+
+ /* Initialize the copyup op */
+
+ osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
+ osd_req_op_cls_request_data_pages(osd_req, 0, pages, obj_size, 0,
+ false, false);
+
+ /* Then the original write request op */
+
+ osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
+ orig_request->offset,
+ orig_request->length, 0, 0);
+ osd_req_op_extent_osd_data_bio(osd_req, 1, orig_request->bio_list,
+ orig_request->length);
+
+ rbd_osd_req_format_write(orig_request);
+
+ /* All set, send it off. */
+
+ orig_request->callback = rbd_img_obj_copyup_callback;
+ osdc = &rbd_dev->rbd_client->client->osdc;
+ result = rbd_obj_request_submit(osdc, orig_request);
+ if (!result)
+ return;
+out_err:
+ /* Record the error code and complete the request */
+
+ orig_request->result = result;
+ orig_request->xferred = 0;
+ obj_request_done_set(orig_request);
+ rbd_obj_request_complete(orig_request);
+}
+
+/*
+ * Read from the parent image the range of data that covers the
+ * entire target of the given object request. This is used for
+ * satisfying a layered image write request when the target of an
+ * object request from the image request does not exist.
+ *
+ * A page array big enough to hold the returned data is allocated
+ * and supplied to rbd_img_request_fill() as the "data descriptor."
+ * When the read completes, this page array will be transferred to
+ * the original object request for the copyup operation.
+ *
+ * If an error occurs, record it as the result of the original
+ * object request and mark it done so it gets completed.
+ */
+static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
+{
+ struct rbd_img_request *img_request = NULL;
+ struct rbd_img_request *parent_request = NULL;
+ struct rbd_device *rbd_dev;
+ u64 img_offset;
+ u64 length;
+ struct page **pages = NULL;
+ u32 page_count;
+ int result;
+
+ rbd_assert(obj_request_img_data_test(obj_request));
+ rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
+
+ img_request = obj_request->img_request;
rbd_assert(img_request != NULL);
- rbd_assert(img_request->rq != NULL);
- rbd_assert(img_request->obj_request_count > 0);
- rbd_assert(which != BAD_WHICH);
- rbd_assert(which < img_request->obj_request_count);
- rbd_assert(which >= img_request->next_completion);
+ rbd_dev = img_request->rbd_dev;
+ rbd_assert(rbd_dev->parent != NULL);
- spin_lock_irq(&img_request->completion_lock);
- if (which != img_request->next_completion)
- goto out;
+ /*
+ * First things first. The original osd request is of no
+ * use to us any more; we'll need a new one that can hold
+ * the two ops in a copyup request. We'll get that later,
+ * but for now we can release the old one.
+ */
+ rbd_osd_req_destroy(obj_request->osd_req);
+ obj_request->osd_req = NULL;
- for_each_obj_request_from(img_request, obj_request) {
- unsigned int xferred;
- int result;
+ /*
+ * Determine the byte range covered by the object in the
+ * child image to which the original request was to be sent.
+ */
+ img_offset = obj_request->img_offset - obj_request->offset;
+ length = (u64)1 << rbd_dev->header.obj_order;
- rbd_assert(more);
- rbd_assert(which < img_request->obj_request_count);
+ /*
+ * There is no defined parent data beyond the parent
+ * overlap, so limit what we read at that boundary if
+ * necessary.
+ */
+ if (img_offset + length > rbd_dev->parent_overlap) {
+ rbd_assert(img_offset < rbd_dev->parent_overlap);
+ length = rbd_dev->parent_overlap - img_offset;
+ }
- if (!obj_request_done_test(obj_request))
- break;
+ /*
+ * Allocate a page array big enough to receive the data read
+ * from the parent.
+ */
+ page_count = (u32)calc_pages_for(0, length);
+ pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ if (IS_ERR(pages)) {
+ result = PTR_ERR(pages);
+ pages = NULL;
+ goto out_err;
+ }
- rbd_assert(obj_request->xferred <= (u64) UINT_MAX);
- xferred = (unsigned int) obj_request->xferred;
- result = (int) obj_request->result;
- if (result)
- rbd_warn(NULL, "obj_request %s result %d xferred %u\n",
- img_request->write_request ? "write" : "read",
- result, xferred);
+ result = -ENOMEM;
+ parent_request = rbd_img_request_create(rbd_dev->parent,
+ img_offset, length,
+ false, true);
+ if (!parent_request)
+ goto out_err;
+ rbd_obj_request_get(obj_request);
+ parent_request->obj_request = obj_request;
- more = blk_end_request(img_request->rq, result, xferred);
- which++;
+ result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
+ if (result)
+ goto out_err;
+ parent_request->copyup_pages = pages;
+
+ parent_request->callback = rbd_img_obj_parent_read_full_callback;
+ result = rbd_img_request_submit(parent_request);
+ if (!result)
+ return 0;
+
+ parent_request->copyup_pages = NULL;
+ parent_request->obj_request = NULL;
+ rbd_obj_request_put(obj_request);
+out_err:
+ if (pages)
+ ceph_release_page_vector(pages, page_count);
+ if (parent_request)
+ rbd_img_request_put(parent_request);
+ obj_request->result = result;
+ obj_request->xferred = 0;
+ obj_request_done_set(obj_request);
+
+ return result;
+}
+
+static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
+{
+ struct rbd_obj_request *orig_request;
+ int result;
+
+ rbd_assert(!obj_request_img_data_test(obj_request));
+
+ /*
+ * All we need from the object request is the original
+ * request and the result of the STAT op. Grab those, then
+ * we're done with the request.
+ */
+ orig_request = obj_request->obj_request;
+ obj_request->obj_request = NULL;
+ rbd_assert(orig_request);
+ rbd_assert(orig_request->img_request);
+
+ result = obj_request->result;
+ obj_request->result = 0;
+
+ dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
+ obj_request, orig_request, result,
+ obj_request->xferred, obj_request->length);
+ rbd_obj_request_put(obj_request);
+
+ rbd_assert(orig_request);
+ rbd_assert(orig_request->img_request);
+
+ /*
+ * Our only purpose here is to determine whether the object
+ * exists, and we don't want to treat non-existence as an
+ * error. If any other result comes back, transfer the
+ * error to the original request and complete it now.
+ */
+ if (!result) {
+ obj_request_existence_set(orig_request, true);
+ } else if (result == -ENOENT) {
+ obj_request_existence_set(orig_request, false);
+ } else if (result) {
+ orig_request->result = result;
+ goto out;
}
- rbd_assert(more ^ (which == img_request->obj_request_count));
- img_request->next_completion = which;
+ /*
+ * Resubmit the original request now that we have recorded
+ * whether the target object exists.
+ */
+ orig_request->result = rbd_img_obj_request_submit(orig_request);
out:
- spin_unlock_irq(&img_request->completion_lock);
+ if (orig_request->result)
+ rbd_obj_request_complete(orig_request);
+ rbd_obj_request_put(orig_request);
+}
- if (!more)
- rbd_img_request_complete(img_request);
+static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
+{
+ struct rbd_obj_request *stat_request;
+ struct rbd_device *rbd_dev;
+ struct ceph_osd_client *osdc;
+ struct page **pages = NULL;
+ u32 page_count;
+ size_t size;
+ int ret;
+
+ /*
+ * The response data for a STAT call consists of:
+ * le64 length;
+ * struct {
+ * le32 tv_sec;
+ * le32 tv_nsec;
+ * } mtime;
+ */
+ size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
+ page_count = (u32)calc_pages_for(0, size);
+ pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ ret = -ENOMEM;
+ stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
+ OBJ_REQUEST_PAGES);
+ if (!stat_request)
+ goto out;
+
+ rbd_obj_request_get(obj_request);
+ stat_request->obj_request = obj_request;
+ stat_request->pages = pages;
+ stat_request->page_count = page_count;
+
+ rbd_assert(obj_request->img_request);
+ rbd_dev = obj_request->img_request->rbd_dev;
+ stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
+ stat_request);
+ if (!stat_request->osd_req)
+ goto out;
+ stat_request->callback = rbd_img_obj_exists_callback;
+
+ osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
+ osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
+ false, false);
+ rbd_osd_req_format_read(stat_request);
+
+ osdc = &rbd_dev->rbd_client->client->osdc;
+ ret = rbd_obj_request_submit(osdc, stat_request);
+out:
+ if (ret)
+ rbd_obj_request_put(obj_request);
+
+ return ret;
+}
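The comment in rbd_img_obj_exists_submit() above spells out the STAT reply layout; a minimal sketch of decoding it from the first reply page (the helper itself is hypothetical, and the existence check only looks at the op's result, so this is purely illustrative):

	static void rbd_stat_reply_decode_example(struct page *reply_page,
						  u64 *length, struct timespec *mtime)
	{
		void *p = kmap_atomic(reply_page);

		*length = le64_to_cpu(*(__le64 *)p);			/* le64 length */
		mtime->tv_sec = le32_to_cpu(*(__le32 *)(p + 8));	/* le32 tv_sec */
		mtime->tv_nsec = le32_to_cpu(*(__le32 *)(p + 12));	/* le32 tv_nsec */

		kunmap_atomic(p);
	}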
+
+static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
+{
+ struct rbd_img_request *img_request;
+ struct rbd_device *rbd_dev;
+ bool known;
+
+ rbd_assert(obj_request_img_data_test(obj_request));
+
+ img_request = obj_request->img_request;
+ rbd_assert(img_request);
+ rbd_dev = img_request->rbd_dev;
+
+ /*
+ * Only writes to layered images need special handling.
+ * Reads and non-layered writes are simple object requests.
+ * Layered writes that start beyond the end of the overlap
+ * with the parent have no parent data, so they too are
+ * simple object requests. Finally, if the target object is
+ * known to already exist, its parent data has already been
+ * copied, so a write to the object can also be handled as a
+ * simple object request.
+ */
+ if (!img_request_write_test(img_request) ||
+ !img_request_layered_test(img_request) ||
+ rbd_dev->parent_overlap <= obj_request->img_offset ||
+ ((known = obj_request_known_test(obj_request)) &&
+ obj_request_exists_test(obj_request))) {
+
+ struct rbd_device *rbd_dev;
+ struct ceph_osd_client *osdc;
+
+ rbd_dev = obj_request->img_request->rbd_dev;
+ osdc = &rbd_dev->rbd_client->client->osdc;
+
+ return rbd_obj_request_submit(osdc, obj_request);
+ }
+
+ /*
+ * It's a layered write. The target object might exist but
+ * we may not know that yet. If we know it doesn't exist,
+ * start by reading the data for the full target object from
+ * the parent so we can use it for a copyup to the target.
+ */
+ if (known)
+ return rbd_img_obj_parent_read_full(obj_request);
+
+ /* We don't know whether the target exists. Go find out. */
+
+ return rbd_img_obj_exists_submit(obj_request);
}
static int rbd_img_request_submit(struct rbd_img_request *img_request)
{
- struct rbd_device *rbd_dev = img_request->rbd_dev;
- struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_obj_request *obj_request;
struct rbd_obj_request *next_obj_request;
@@ -1748,27 +2499,105 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request)
for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
int ret;
- obj_request->callback = rbd_img_obj_callback;
- ret = rbd_obj_request_submit(osdc, obj_request);
+ ret = rbd_img_obj_request_submit(obj_request);
if (ret)
return ret;
- /*
- * The image request has its own reference to each
- * of its object requests, so we can safely drop the
- * initial one here.
- */
- rbd_obj_request_put(obj_request);
}
return 0;
}
-static int rbd_obj_notify_ack(struct rbd_device *rbd_dev,
- u64 ver, u64 notify_id)
+static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
{
struct rbd_obj_request *obj_request;
- struct ceph_osd_req_op *op;
- struct ceph_osd_client *osdc;
+ struct rbd_device *rbd_dev;
+ u64 obj_end;
+
+ rbd_assert(img_request_child_test(img_request));
+
+ obj_request = img_request->obj_request;
+ rbd_assert(obj_request);
+ rbd_assert(obj_request->img_request);
+
+ obj_request->result = img_request->result;
+ if (obj_request->result)
+ goto out;
+
+ /*
+ * We need to zero anything beyond the parent overlap
+ * boundary. Since rbd_img_obj_request_read_callback()
+ * will zero anything beyond the end of a short read, an
+ * easy way to do this is to pretend the data from the
+ * parent came up short--ending at the overlap boundary.
+ */
+ rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
+ obj_end = obj_request->img_offset + obj_request->length;
+ rbd_dev = obj_request->img_request->rbd_dev;
+ if (obj_end > rbd_dev->parent_overlap) {
+ u64 xferred = 0;
+
+ if (obj_request->img_offset < rbd_dev->parent_overlap)
+ xferred = rbd_dev->parent_overlap -
+ obj_request->img_offset;
+
+ obj_request->xferred = min(img_request->xferred, xferred);
+ } else {
+ obj_request->xferred = img_request->xferred;
+ }
+out:
+ rbd_img_request_put(img_request);
+ rbd_img_obj_request_read_callback(obj_request);
+ rbd_obj_request_complete(obj_request);
+}
+
+static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
+{
+ struct rbd_device *rbd_dev;
+ struct rbd_img_request *img_request;
+ int result;
+
+ rbd_assert(obj_request_img_data_test(obj_request));
+ rbd_assert(obj_request->img_request != NULL);
+ rbd_assert(obj_request->result == (s32) -ENOENT);
+ rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
+
+ rbd_dev = obj_request->img_request->rbd_dev;
+ rbd_assert(rbd_dev->parent != NULL);
+ /* rbd_read_finish(obj_request, obj_request->length); */
+ img_request = rbd_img_request_create(rbd_dev->parent,
+ obj_request->img_offset,
+ obj_request->length,
+ false, true);
+ result = -ENOMEM;
+ if (!img_request)
+ goto out_err;
+
+ rbd_obj_request_get(obj_request);
+ img_request->obj_request = obj_request;
+
+ result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
+ obj_request->bio_list);
+ if (result)
+ goto out_err;
+
+ img_request->callback = rbd_img_parent_read_callback;
+ result = rbd_img_request_submit(img_request);
+ if (result)
+ goto out_err;
+
+ return;
+out_err:
+ if (img_request)
+ rbd_img_request_put(img_request);
+ obj_request->result = result;
+ obj_request->xferred = 0;
+ obj_request_done_set(obj_request);
+}
+
+static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
+{
+ struct rbd_obj_request *obj_request;
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
int ret;
obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
@@ -1777,17 +2606,15 @@ static int rbd_obj_notify_ack(struct rbd_device *rbd_dev,
return -ENOMEM;
ret = -ENOMEM;
- op = rbd_osd_req_op_create(CEPH_OSD_OP_NOTIFY_ACK, notify_id, ver);
- if (!op)
- goto out;
- obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
- obj_request, op);
- rbd_osd_req_op_destroy(op);
+ obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
if (!obj_request->osd_req)
goto out;
-
- osdc = &rbd_dev->rbd_client->client->osdc;
obj_request->callback = rbd_obj_request_put;
+
+ osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
+ notify_id, 0, 0);
+ rbd_osd_req_format_read(obj_request);
+
ret = rbd_obj_request_submit(osdc, obj_request);
out:
if (ret)
@@ -1799,21 +2626,16 @@ out:
static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
struct rbd_device *rbd_dev = (struct rbd_device *)data;
- u64 hver;
- int rc;
if (!rbd_dev)
return;
dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
- rbd_dev->header_name, (unsigned long long) notify_id,
- (unsigned int) opcode);
- rc = rbd_dev_refresh(rbd_dev, &hver);
- if (rc)
- rbd_warn(rbd_dev, "got notification but failed to "
- " update snaps: %d\n", rc);
+ rbd_dev->header_name, (unsigned long long)notify_id,
+ (unsigned int)opcode);
+ (void)rbd_dev_refresh(rbd_dev);
- rbd_obj_notify_ack(rbd_dev, hver, notify_id);
+ rbd_obj_notify_ack(rbd_dev, notify_id);
}
/*
@@ -1824,7 +2646,6 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_obj_request *obj_request;
- struct ceph_osd_req_op *op;
int ret;
rbd_assert(start ^ !!rbd_dev->watch_event);
@@ -1844,14 +2665,7 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
if (!obj_request)
goto out_cancel;
- op = rbd_osd_req_op_create(CEPH_OSD_OP_WATCH,
- rbd_dev->watch_event->cookie,
- rbd_dev->header.obj_version, start);
- if (!op)
- goto out_cancel;
- obj_request->osd_req = rbd_osd_req_create(rbd_dev, true,
- obj_request, op);
- rbd_osd_req_op_destroy(op);
+ obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
if (!obj_request->osd_req)
goto out_cancel;
@@ -1860,6 +2674,11 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
else
ceph_osdc_unregister_linger_request(osdc,
rbd_dev->watch_request->osd_req);
+
+ osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
+ rbd_dev->watch_event->cookie, 0, start);
+ rbd_osd_req_format_write(obj_request);
+
ret = rbd_obj_request_submit(osdc, obj_request);
if (ret)
goto out_cancel;
@@ -1899,40 +2718,38 @@ out_cancel:
}
/*
- * Synchronous osd object method call
+ * Synchronous osd object method call. Returns the number of bytes
+ * returned in the inbound buffer, or a negative error code.
*/
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
const char *object_name,
const char *class_name,
const char *method_name,
- const char *outbound,
+ const void *outbound,
size_t outbound_size,
- char *inbound,
- size_t inbound_size,
- u64 *version)
+ void *inbound,
+ size_t inbound_size)
{
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_obj_request *obj_request;
- struct ceph_osd_client *osdc;
- struct ceph_osd_req_op *op;
struct page **pages;
u32 page_count;
int ret;
/*
- * Method calls are ultimately read operations but they
- * don't involve object data (so no offset or length).
- * The result should placed into the inbound buffer
- * provided. They also supply outbound data--parameters for
- * the object method. Currently if this is present it will
- * be a snapshot id.
+ * Method calls are ultimately read operations. The result
+ * should be placed into the inbound buffer provided.  They
+ * also supply outbound data--parameters for the object
+ * method. Currently if this is present it will be a
+ * snapshot id.
*/
- page_count = (u32) calc_pages_for(0, inbound_size);
+ page_count = (u32)calc_pages_for(0, inbound_size);
pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
if (IS_ERR(pages))
return PTR_ERR(pages);
ret = -ENOMEM;
- obj_request = rbd_obj_request_create(object_name, 0, 0,
+ obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
OBJ_REQUEST_PAGES);
if (!obj_request)
goto out;
@@ -1940,17 +2757,29 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
obj_request->pages = pages;
obj_request->page_count = page_count;
- op = rbd_osd_req_op_create(CEPH_OSD_OP_CALL, class_name,
- method_name, outbound, outbound_size);
- if (!op)
- goto out;
- obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
- obj_request, op);
- rbd_osd_req_op_destroy(op);
+ obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
if (!obj_request->osd_req)
goto out;
- osdc = &rbd_dev->rbd_client->client->osdc;
+ osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
+ class_name, method_name);
+ if (outbound_size) {
+ struct ceph_pagelist *pagelist;
+
+ pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
+ if (!pagelist)
+ goto out;
+
+ ceph_pagelist_init(pagelist);
+ ceph_pagelist_append(pagelist, outbound, outbound_size);
+ osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
+ pagelist);
+ }
+ osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
+ obj_request->pages, inbound_size,
+ 0, false, false);
+ rbd_osd_req_format_read(obj_request);
+
ret = rbd_obj_request_submit(osdc, obj_request);
if (ret)
goto out;
@@ -1961,10 +2790,10 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
ret = obj_request->result;
if (ret < 0)
goto out;
- ret = 0;
+
+ rbd_assert(obj_request->xferred < (u64)INT_MAX);
+ ret = (int)obj_request->xferred;
ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
- if (version)
- *version = obj_request->version;
out:
if (obj_request)
rbd_obj_request_put(obj_request);
@@ -2034,18 +2863,22 @@ static void rbd_request_fn(struct request_queue *q)
}
result = -EINVAL;
- if (WARN_ON(offset && length > U64_MAX - offset + 1))
+ if (offset && length > U64_MAX - offset + 1) {
+ rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
+ offset, length);
goto end_request; /* Shouldn't happen */
+ }
result = -ENOMEM;
img_request = rbd_img_request_create(rbd_dev, offset, length,
- write_request);
+ write_request, false);
if (!img_request)
goto end_request;
img_request->rq = rq;
- result = rbd_img_request_fill_bio(img_request, rq->bio);
+ result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
+ rq->bio);
if (!result)
result = rbd_img_request_submit(img_request);
if (result)
@@ -2053,8 +2886,10 @@ static void rbd_request_fn(struct request_queue *q)
end_request:
spin_lock_irq(q->queue_lock);
if (result < 0) {
- rbd_warn(rbd_dev, "obj_request %s result %d\n",
- write_request ? "write" : "read", result);
+ rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
+ write_request ? "write" : "read",
+ length, offset, result);
+
__blk_end_request_all(rq, result);
}
}
@@ -2113,22 +2948,22 @@ static void rbd_free_disk(struct rbd_device *rbd_dev)
if (!disk)
return;
- if (disk->flags & GENHD_FL_UP)
+ rbd_dev->disk = NULL;
+ if (disk->flags & GENHD_FL_UP) {
del_gendisk(disk);
- if (disk->queue)
- blk_cleanup_queue(disk->queue);
+ if (disk->queue)
+ blk_cleanup_queue(disk->queue);
+ }
put_disk(disk);
}
static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
const char *object_name,
- u64 offset, u64 length,
- char *buf, u64 *version)
+ u64 offset, u64 length, void *buf)
{
- struct ceph_osd_req_op *op;
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_obj_request *obj_request;
- struct ceph_osd_client *osdc;
struct page **pages = NULL;
u32 page_count;
size_t size;
@@ -2148,16 +2983,19 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
obj_request->pages = pages;
obj_request->page_count = page_count;
- op = rbd_osd_req_op_create(CEPH_OSD_OP_READ, offset, length);
- if (!op)
- goto out;
- obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
- obj_request, op);
- rbd_osd_req_op_destroy(op);
+ obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
if (!obj_request->osd_req)
goto out;
- osdc = &rbd_dev->rbd_client->client->osdc;
+ osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
+ offset, length, 0, 0);
+ osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
+ obj_request->pages,
+ obj_request->length,
+ obj_request->offset & ~PAGE_MASK,
+ false, false);
+ rbd_osd_req_format_read(obj_request);
+
ret = rbd_obj_request_submit(osdc, obj_request);
if (ret)
goto out;
@@ -2172,10 +3010,8 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
size = (size_t) obj_request->xferred;
ceph_copy_from_page_vector(pages, buf, 0, size);
- rbd_assert(size <= (size_t) INT_MAX);
- ret = (int) size;
- if (version)
- *version = obj_request->version;
+ rbd_assert(size <= (size_t)INT_MAX);
+ ret = (int)size;
out:
if (obj_request)
rbd_obj_request_put(obj_request);
@@ -2196,7 +3032,7 @@ out:
* Returns a pointer-coded errno if a failure occurs.
*/
static struct rbd_image_header_ondisk *
-rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version)
+rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
{
struct rbd_image_header_ondisk *ondisk = NULL;
u32 snap_count = 0;
@@ -2224,11 +3060,10 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version)
return ERR_PTR(-ENOMEM);
ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
- 0, size,
- (char *) ondisk, version);
+ 0, size, ondisk);
if (ret < 0)
goto out_err;
- if (WARN_ON((size_t) ret < size)) {
+ if ((size_t)ret < size) {
ret = -ENXIO;
rbd_warn(rbd_dev, "short header read (want %zd got %d)",
size, ret);
@@ -2260,46 +3095,36 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
struct rbd_image_header *header)
{
struct rbd_image_header_ondisk *ondisk;
- u64 ver = 0;
int ret;
- ondisk = rbd_dev_v1_header_read(rbd_dev, &ver);
+ ondisk = rbd_dev_v1_header_read(rbd_dev);
if (IS_ERR(ondisk))
return PTR_ERR(ondisk);
ret = rbd_header_from_disk(header, ondisk);
- if (ret >= 0)
- header->obj_version = ver;
kfree(ondisk);
return ret;
}
-static void rbd_remove_all_snaps(struct rbd_device *rbd_dev)
-{
- struct rbd_snap *snap;
- struct rbd_snap *next;
-
- list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node)
- rbd_remove_snap_dev(snap);
-}
-
static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
{
- sector_t size;
-
if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
return;
- size = (sector_t) rbd_dev->header.image_size / SECTOR_SIZE;
- dout("setting size to %llu sectors", (unsigned long long) size);
- rbd_dev->mapping.size = (u64) size;
- set_capacity(rbd_dev->disk, size);
+ if (rbd_dev->mapping.size != rbd_dev->header.image_size) {
+ sector_t size;
+
+ rbd_dev->mapping.size = rbd_dev->header.image_size;
+ size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
+ dout("setting size to %llu sectors", (unsigned long long)size);
+ set_capacity(rbd_dev->disk, size);
+ }
}
/*
* only read the first part of the ondisk header, without the snaps info
*/
-static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver)
+static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev)
{
int ret;
struct rbd_image_header h;
@@ -2320,37 +3145,61 @@ static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver)
/* osd requests may still refer to snapc */
ceph_put_snap_context(rbd_dev->header.snapc);
- if (hver)
- *hver = h.obj_version;
- rbd_dev->header.obj_version = h.obj_version;
rbd_dev->header.image_size = h.image_size;
rbd_dev->header.snapc = h.snapc;
rbd_dev->header.snap_names = h.snap_names;
rbd_dev->header.snap_sizes = h.snap_sizes;
/* Free the extra copy of the object prefix */
- WARN_ON(strcmp(rbd_dev->header.object_prefix, h.object_prefix));
+ if (strcmp(rbd_dev->header.object_prefix, h.object_prefix))
+ rbd_warn(rbd_dev, "object prefix changed (ignoring)");
kfree(h.object_prefix);
- ret = rbd_dev_snaps_update(rbd_dev);
- if (!ret)
- ret = rbd_dev_snaps_register(rbd_dev);
-
up_write(&rbd_dev->header_rwsem);
return ret;
}
-static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver)
+/*
+ * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
+ * has disappeared from the (just updated) snapshot context.
+ */
+static void rbd_exists_validate(struct rbd_device *rbd_dev)
+{
+ u64 snap_id;
+
+ if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
+ return;
+
+ snap_id = rbd_dev->spec->snap_id;
+ if (snap_id == CEPH_NOSNAP)
+ return;
+
+ if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
+ clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
+}
+
+static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
+ u64 image_size;
int ret;
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+ image_size = rbd_dev->header.image_size;
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
if (rbd_dev->image_format == 1)
- ret = rbd_dev_v1_refresh(rbd_dev, hver);
+ ret = rbd_dev_v1_refresh(rbd_dev);
else
- ret = rbd_dev_v2_refresh(rbd_dev, hver);
+ ret = rbd_dev_v2_refresh(rbd_dev);
+
+ /* If it's a mapped snapshot, validate its EXISTS flag */
+
+ rbd_exists_validate(rbd_dev);
mutex_unlock(&ctl_mutex);
+ if (ret)
+ rbd_warn(rbd_dev, "got notification but failed to "
+			"update snaps: %d\n", ret);
+ if (image_size != rbd_dev->header.image_size)
+ revalidate_disk(rbd_dev->disk);
return ret;
}
@@ -2394,8 +3243,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
rbd_dev->disk = disk;
- set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
-
return 0;
out_disk:
put_disk(disk);
@@ -2416,13 +3263,9 @@ static ssize_t rbd_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- sector_t size;
-
- down_read(&rbd_dev->header_rwsem);
- size = get_capacity(rbd_dev->disk);
- up_read(&rbd_dev->header_rwsem);
- return sprintf(buf, "%llu\n", (unsigned long long) size * SECTOR_SIZE);
+ return sprintf(buf, "%llu\n",
+ (unsigned long long)rbd_dev->mapping.size);
}
/*
@@ -2435,7 +3278,7 @@ static ssize_t rbd_features_show(struct device *dev,
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "0x%016llx\n",
- (unsigned long long) rbd_dev->mapping.features);
+ (unsigned long long)rbd_dev->mapping.features);
}
static ssize_t rbd_major_show(struct device *dev,
@@ -2443,7 +3286,11 @@ static ssize_t rbd_major_show(struct device *dev,
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- return sprintf(buf, "%d\n", rbd_dev->major);
+ if (rbd_dev->major)
+ return sprintf(buf, "%d\n", rbd_dev->major);
+
+ return sprintf(buf, "(none)\n");
+
}
static ssize_t rbd_client_id_show(struct device *dev,
@@ -2469,7 +3316,7 @@ static ssize_t rbd_pool_id_show(struct device *dev,
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "%llu\n",
- (unsigned long long) rbd_dev->spec->pool_id);
+ (unsigned long long) rbd_dev->spec->pool_id);
}
static ssize_t rbd_name_show(struct device *dev,
@@ -2555,7 +3402,7 @@ static ssize_t rbd_image_refresh(struct device *dev,
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
int ret;
- ret = rbd_dev_refresh(rbd_dev, NULL);
+ ret = rbd_dev_refresh(rbd_dev);
return ret < 0 ? ret : size;
}
@@ -2606,71 +3453,6 @@ static struct device_type rbd_device_type = {
.release = rbd_sysfs_dev_release,
};
-
-/*
- sysfs - snapshots
-*/
-
-static ssize_t rbd_snap_size_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
-
- return sprintf(buf, "%llu\n", (unsigned long long)snap->size);
-}
-
-static ssize_t rbd_snap_id_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
-
- return sprintf(buf, "%llu\n", (unsigned long long)snap->id);
-}
-
-static ssize_t rbd_snap_features_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
-
- return sprintf(buf, "0x%016llx\n",
- (unsigned long long) snap->features);
-}
-
-static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
-static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
-static DEVICE_ATTR(snap_features, S_IRUGO, rbd_snap_features_show, NULL);
-
-static struct attribute *rbd_snap_attrs[] = {
- &dev_attr_snap_size.attr,
- &dev_attr_snap_id.attr,
- &dev_attr_snap_features.attr,
- NULL,
-};
-
-static struct attribute_group rbd_snap_attr_group = {
- .attrs = rbd_snap_attrs,
-};
-
-static void rbd_snap_dev_release(struct device *dev)
-{
- struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
- kfree(snap->name);
- kfree(snap);
-}
-
-static const struct attribute_group *rbd_snap_attr_groups[] = {
- &rbd_snap_attr_group,
- NULL
-};
-
-static struct device_type rbd_snap_device_type = {
- .groups = rbd_snap_attr_groups,
- .release = rbd_snap_dev_release,
-};
-
static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
kref_get(&spec->kref);
@@ -2694,8 +3476,6 @@ static struct rbd_spec *rbd_spec_alloc(void)
return NULL;
kref_init(&spec->kref);
- rbd_spec_put(rbd_spec_get(spec)); /* TEMPORARY */
-
return spec;
}
@@ -2722,7 +3502,6 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
spin_lock_init(&rbd_dev->lock);
rbd_dev->flags = 0;
INIT_LIST_HEAD(&rbd_dev->node);
- INIT_LIST_HEAD(&rbd_dev->snaps);
init_rwsem(&rbd_dev->header_rwsem);
rbd_dev->spec = spec;
@@ -2740,96 +3519,11 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
- rbd_spec_put(rbd_dev->parent_spec);
- kfree(rbd_dev->header_name);
rbd_put_client(rbd_dev->rbd_client);
rbd_spec_put(rbd_dev->spec);
kfree(rbd_dev);
}
-static bool rbd_snap_registered(struct rbd_snap *snap)
-{
- bool ret = snap->dev.type == &rbd_snap_device_type;
- bool reg = device_is_registered(&snap->dev);
-
- rbd_assert(!ret ^ reg);
-
- return ret;
-}
-
-static void rbd_remove_snap_dev(struct rbd_snap *snap)
-{
- list_del(&snap->node);
- if (device_is_registered(&snap->dev))
- device_unregister(&snap->dev);
-}
-
-static int rbd_register_snap_dev(struct rbd_snap *snap,
- struct device *parent)
-{
- struct device *dev = &snap->dev;
- int ret;
-
- dev->type = &rbd_snap_device_type;
- dev->parent = parent;
- dev->release = rbd_snap_dev_release;
- dev_set_name(dev, "%s%s", RBD_SNAP_DEV_NAME_PREFIX, snap->name);
- dout("%s: registering device for snapshot %s\n", __func__, snap->name);
-
- ret = device_register(dev);
-
- return ret;
-}
-
-static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev,
- const char *snap_name,
- u64 snap_id, u64 snap_size,
- u64 snap_features)
-{
- struct rbd_snap *snap;
- int ret;
-
- snap = kzalloc(sizeof (*snap), GFP_KERNEL);
- if (!snap)
- return ERR_PTR(-ENOMEM);
-
- ret = -ENOMEM;
- snap->name = kstrdup(snap_name, GFP_KERNEL);
- if (!snap->name)
- goto err;
-
- snap->id = snap_id;
- snap->size = snap_size;
- snap->features = snap_features;
-
- return snap;
-
-err:
- kfree(snap->name);
- kfree(snap);
-
- return ERR_PTR(ret);
-}
-
-static char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which,
- u64 *snap_size, u64 *snap_features)
-{
- char *snap_name;
-
- rbd_assert(which < rbd_dev->header.snapc->num_snaps);
-
- *snap_size = rbd_dev->header.snap_sizes[which];
- *snap_features = 0; /* No features for v1 */
-
- /* Skip over names until we find the one we are looking for */
-
- snap_name = rbd_dev->header.snap_names;
- while (which--)
- snap_name += strlen(snap_name) + 1;
-
- return snap_name;
-}
-
/*
* Get the size and object order for an image snapshot, or if
* snap_id is CEPH_NOSNAP, gets this information for the base
@@ -2847,18 +3541,21 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_size",
- (char *) &snapid, sizeof (snapid),
- (char *) &size_buf, sizeof (size_buf), NULL);
+ &snapid, sizeof (snapid),
+ &size_buf, sizeof (size_buf));
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
return ret;
+ if (ret < sizeof (size_buf))
+ return -ERANGE;
- *order = size_buf.order;
+ if (order)
+ *order = size_buf.order;
*snap_size = le64_to_cpu(size_buf.size);
dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
- (unsigned long long) snap_id, (unsigned int) *order,
- (unsigned long long) *snap_size);
+ (unsigned long long)snap_id, (unsigned int)*order,
+ (unsigned long long)*snap_size);
return 0;
}
@@ -2881,17 +3578,16 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
return -ENOMEM;
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
- "rbd", "get_object_prefix",
- NULL, 0,
- reply_buf, RBD_OBJ_PREFIX_LEN_MAX, NULL);
+ "rbd", "get_object_prefix", NULL, 0,
+ reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
goto out;
p = reply_buf;
rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
- p + RBD_OBJ_PREFIX_LEN_MAX,
- NULL, GFP_NOIO);
+ p + ret, NULL, GFP_NOIO);
+ ret = 0;
if (IS_ERR(rbd_dev->header.object_prefix)) {
ret = PTR_ERR(rbd_dev->header.object_prefix);
@@ -2899,7 +3595,6 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
} else {
dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
}
-
out:
kfree(reply_buf);
@@ -2913,29 +3608,30 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
struct {
__le64 features;
__le64 incompat;
- } features_buf = { 0 };
+ } __attribute__ ((packed)) features_buf = { 0 };
u64 incompat;
int ret;
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_features",
- (char *) &snapid, sizeof (snapid),
- (char *) &features_buf, sizeof (features_buf),
- NULL);
+ &snapid, sizeof (snapid),
+ &features_buf, sizeof (features_buf));
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
return ret;
+ if (ret < sizeof (features_buf))
+ return -ERANGE;
incompat = le64_to_cpu(features_buf.incompat);
- if (incompat & ~RBD_FEATURES_ALL)
+ if (incompat & ~RBD_FEATURES_SUPPORTED)
return -ENXIO;
*snap_features = le64_to_cpu(features_buf.features);
dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
- (unsigned long long) snap_id,
- (unsigned long long) *snap_features,
- (unsigned long long) le64_to_cpu(features_buf.incompat));
+ (unsigned long long)snap_id,
+ (unsigned long long)*snap_features,
+ (unsigned long long)le64_to_cpu(features_buf.incompat));
return 0;
}
@@ -2975,15 +3671,15 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
snapid = cpu_to_le64(CEPH_NOSNAP);
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_parent",
- (char *) &snapid, sizeof (snapid),
- (char *) reply_buf, size, NULL);
+ &snapid, sizeof (snapid),
+ reply_buf, size);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
goto out_err;
- ret = -ERANGE;
p = reply_buf;
- end = (char *) reply_buf + size;
+ end = reply_buf + ret;
+ ret = -ERANGE;
ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
if (parent_spec->pool_id == CEPH_NOPOOL)
goto out; /* No parent? No problem. */
@@ -2991,8 +3687,11 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
/* The ceph file layout needs to fit pool id in 32 bits */
ret = -EIO;
- if (WARN_ON(parent_spec->pool_id > (u64) U32_MAX))
- goto out;
+ if (parent_spec->pool_id > (u64)U32_MAX) {
+ rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
+ (unsigned long long)parent_spec->pool_id, U32_MAX);
+ goto out_err;
+ }
image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
if (IS_ERR(image_id)) {
@@ -3015,6 +3714,56 @@ out_err:
return ret;
}
+static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
+{
+ struct {
+ __le64 stripe_unit;
+ __le64 stripe_count;
+ } __attribute__ ((packed)) striping_info_buf = { 0 };
+ size_t size = sizeof (striping_info_buf);
+ void *p;
+ u64 obj_size;
+ u64 stripe_unit;
+ u64 stripe_count;
+ int ret;
+
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+ "rbd", "get_stripe_unit_count", NULL, 0,
+ (char *)&striping_info_buf, size);
+ dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
+ if (ret < 0)
+ return ret;
+ if (ret < size)
+ return -ERANGE;
+
+ /*
+ * We don't actually support the "fancy striping" feature
+ * (STRIPINGV2) yet, but if the striping sizes are the
+ * defaults the behavior is the same as before. So find
+ * out, and only fail if the image has non-default values.
+ */
+ ret = -EINVAL;
+ obj_size = (u64)1 << rbd_dev->header.obj_order;
+ p = &striping_info_buf;
+ stripe_unit = ceph_decode_64(&p);
+ if (stripe_unit != obj_size) {
+ rbd_warn(rbd_dev, "unsupported stripe unit "
+ "(got %llu want %llu)",
+ stripe_unit, obj_size);
+ return -EINVAL;
+ }
+ stripe_count = ceph_decode_64(&p);
+ if (stripe_count != 1) {
+ rbd_warn(rbd_dev, "unsupported stripe count "
+ "(got %llu want 1)", stripe_count);
+ return -EINVAL;
+ }
+ rbd_dev->header.stripe_unit = stripe_unit;
+ rbd_dev->header.stripe_count = stripe_count;
+
+ return 0;
+}
+
static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
size_t image_id_size;
@@ -3036,8 +3785,8 @@ static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
return NULL;
p = image_id;
- end = (char *) image_id + image_id_size;
- ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32) len);
+ end = image_id + image_id_size;
+ ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
reply_buf = kmalloc(size, GFP_KERNEL);
@@ -3047,11 +3796,12 @@ static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
"rbd", "dir_get_name",
image_id, image_id_size,
- (char *) reply_buf, size, NULL);
+ reply_buf, size);
if (ret < 0)
goto out;
p = reply_buf;
- end = (char *) reply_buf + size;
+ end = reply_buf + ret;
+
image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
if (IS_ERR(image_name))
image_name = NULL;
@@ -3064,69 +3814,134 @@ out:
return image_name;
}
+static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
+{
+ struct ceph_snap_context *snapc = rbd_dev->header.snapc;
+ const char *snap_name;
+ u32 which = 0;
+
+ /* Skip over names until we find the one we are looking for */
+
+ snap_name = rbd_dev->header.snap_names;
+ while (which < snapc->num_snaps) {
+ if (!strcmp(name, snap_name))
+ return snapc->snaps[which];
+ snap_name += strlen(snap_name) + 1;
+ which++;
+ }
+ return CEPH_NOSNAP;
+}
+
+static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
+{
+ struct ceph_snap_context *snapc = rbd_dev->header.snapc;
+ u32 which;
+ bool found = false;
+ u64 snap_id;
+
+ for (which = 0; !found && which < snapc->num_snaps; which++) {
+ const char *snap_name;
+
+ snap_id = snapc->snaps[which];
+ snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
+ if (IS_ERR(snap_name))
+ break;
+ found = !strcmp(name, snap_name);
+ kfree(snap_name);
+ }
+ return found ? snap_id : CEPH_NOSNAP;
+}
+
/*
- * When a parent image gets probed, we only have the pool, image,
- * and snapshot ids but not the names of any of them. This call
- * is made later to fill in those names. It has to be done after
- * rbd_dev_snaps_update() has completed because some of the
- * information (in particular, snapshot name) is not available
- * until then.
+ * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
+ * no snapshot by that name is found, or if an error occurs.
*/
-static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev)
+static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
- struct ceph_osd_client *osdc;
- const char *name;
- void *reply_buf = NULL;
+ if (rbd_dev->image_format == 1)
+ return rbd_v1_snap_id_by_name(rbd_dev, name);
+
+ return rbd_v2_snap_id_by_name(rbd_dev, name);
+}
+
+/*
+ * When an rbd image has a parent image, it is identified by the
+ * pool, image, and snapshot ids (not names). This function fills
+ * in the names for those ids. (It's OK if we can't figure out the
+ * name for an image id, but the pool and snapshot ids should always
+ * exist and have names.) All names in an rbd spec are dynamically
+ * allocated.
+ *
+ * When an image being mapped (not a parent) is probed, we have the
+ * pool name and pool id, image name and image id, and the snapshot
+ * name. The only thing we're missing is the snapshot id.
+ */
+static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
+{
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+ struct rbd_spec *spec = rbd_dev->spec;
+ const char *pool_name;
+ const char *image_name;
+ const char *snap_name;
int ret;
- if (rbd_dev->spec->pool_name)
- return 0; /* Already have the names */
+ /*
+ * An image being mapped will have the pool name (etc.), but
+ * we need to look up the snapshot id.
+ */
+ if (spec->pool_name) {
+ if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
+ u64 snap_id;
+
+ snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
+ if (snap_id == CEPH_NOSNAP)
+ return -ENOENT;
+ spec->snap_id = snap_id;
+ } else {
+ spec->snap_id = CEPH_NOSNAP;
+ }
- /* Look up the pool name */
+ return 0;
+ }
- osdc = &rbd_dev->rbd_client->client->osdc;
- name = ceph_pg_pool_name_by_id(osdc->osdmap, rbd_dev->spec->pool_id);
- if (!name) {
- rbd_warn(rbd_dev, "there is no pool with id %llu",
- rbd_dev->spec->pool_id); /* Really a BUG() */
+ /* Get the pool name; we have to make our own copy of this */
+
+ pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
+ if (!pool_name) {
+ rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
return -EIO;
}
-
- rbd_dev->spec->pool_name = kstrdup(name, GFP_KERNEL);
- if (!rbd_dev->spec->pool_name)
+ pool_name = kstrdup(pool_name, GFP_KERNEL);
+ if (!pool_name)
return -ENOMEM;
/* Fetch the image name; tolerate failure here */
- name = rbd_dev_image_name(rbd_dev);
- if (name)
- rbd_dev->spec->image_name = (char *) name;
- else
+ image_name = rbd_dev_image_name(rbd_dev);
+ if (!image_name)
rbd_warn(rbd_dev, "unable to get image name");
- /* Look up the snapshot name. */
+ /* Look up the snapshot name, and make a copy */
- name = rbd_snap_name(rbd_dev, rbd_dev->spec->snap_id);
- if (!name) {
- rbd_warn(rbd_dev, "no snapshot with id %llu",
- rbd_dev->spec->snap_id); /* Really a BUG() */
- ret = -EIO;
+ snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
+ if (!snap_name) {
+ ret = -ENOMEM;
goto out_err;
}
- rbd_dev->spec->snap_name = kstrdup(name, GFP_KERNEL);
- if(!rbd_dev->spec->snap_name)
- goto out_err;
+
+ spec->pool_name = pool_name;
+ spec->image_name = image_name;
+ spec->snap_name = snap_name;
return 0;
out_err:
- kfree(reply_buf);
- kfree(rbd_dev->spec->pool_name);
- rbd_dev->spec->pool_name = NULL;
+ kfree(image_name);
+ kfree(pool_name);
return ret;
}
-static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
+static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
size_t size;
int ret;
@@ -3151,16 +3966,15 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
return -ENOMEM;
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
- "rbd", "get_snapcontext",
- NULL, 0,
- reply_buf, size, ver);
+ "rbd", "get_snapcontext", NULL, 0,
+ reply_buf, size);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
goto out;
- ret = -ERANGE;
p = reply_buf;
- end = (char *) reply_buf + size;
+ end = reply_buf + ret;
+ ret = -ERANGE;
ceph_decode_64_safe(&p, end, seq, out);
ceph_decode_32_safe(&p, end, snap_count, out);
@@ -3177,37 +3991,33 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
}
if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
goto out;
+ ret = 0;
- size = sizeof (struct ceph_snap_context) +
- snap_count * sizeof (snapc->snaps[0]);
- snapc = kmalloc(size, GFP_KERNEL);
+ snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
if (!snapc) {
ret = -ENOMEM;
goto out;
}
-
- atomic_set(&snapc->nref, 1);
snapc->seq = seq;
- snapc->num_snaps = snap_count;
for (i = 0; i < snap_count; i++)
snapc->snaps[i] = ceph_decode_64(&p);
rbd_dev->header.snapc = snapc;
dout(" snap context seq = %llu, snap_count = %u\n",
- (unsigned long long) seq, (unsigned int) snap_count);
-
+ (unsigned long long)seq, (unsigned int)snap_count);
out:
kfree(reply_buf);
- return 0;
+ return ret;
}
-static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
+static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
+ u64 snap_id)
{
size_t size;
void *reply_buf;
- __le64 snap_id;
+ __le64 snapid;
int ret;
void *p;
void *end;
@@ -3218,236 +4028,52 @@ static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
if (!reply_buf)
return ERR_PTR(-ENOMEM);
- snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]);
+ snapid = cpu_to_le64(snap_id);
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_snapshot_name",
- (char *) &snap_id, sizeof (snap_id),
- reply_buf, size, NULL);
+ &snapid, sizeof (snapid),
+ reply_buf, size);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
- if (ret < 0)
+ if (ret < 0) {
+ snap_name = ERR_PTR(ret);
goto out;
+ }
p = reply_buf;
- end = (char *) reply_buf + size;
+ end = reply_buf + ret;
snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
- if (IS_ERR(snap_name)) {
- ret = PTR_ERR(snap_name);
+ if (IS_ERR(snap_name))
goto out;
- } else {
- dout(" snap_id 0x%016llx snap_name = %s\n",
- (unsigned long long) le64_to_cpu(snap_id), snap_name);
- }
- kfree(reply_buf);
- return snap_name;
+ dout(" snap_id 0x%016llx snap_name = %s\n",
+ (unsigned long long)snap_id, snap_name);
out:
kfree(reply_buf);
- return ERR_PTR(ret);
-}
-
-static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which,
- u64 *snap_size, u64 *snap_features)
-{
- u64 snap_id;
- u8 order;
- int ret;
-
- snap_id = rbd_dev->header.snapc->snaps[which];
- ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, &order, snap_size);
- if (ret)
- return ERR_PTR(ret);
- ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, snap_features);
- if (ret)
- return ERR_PTR(ret);
-
- return rbd_dev_v2_snap_name(rbd_dev, which);
-}
-
-static char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which,
- u64 *snap_size, u64 *snap_features)
-{
- if (rbd_dev->image_format == 1)
- return rbd_dev_v1_snap_info(rbd_dev, which,
- snap_size, snap_features);
- if (rbd_dev->image_format == 2)
- return rbd_dev_v2_snap_info(rbd_dev, which,
- snap_size, snap_features);
- return ERR_PTR(-EINVAL);
+ return snap_name;
}
-static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver)
+static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev)
{
int ret;
- __u8 obj_order;
down_write(&rbd_dev->header_rwsem);
- /* Grab old order first, to see if it changes */
-
- obj_order = rbd_dev->header.obj_order,
ret = rbd_dev_v2_image_size(rbd_dev);
if (ret)
goto out;
- if (rbd_dev->header.obj_order != obj_order) {
- ret = -EIO;
- goto out;
- }
rbd_update_mapping_size(rbd_dev);
- ret = rbd_dev_v2_snap_context(rbd_dev, hver);
+ ret = rbd_dev_v2_snap_context(rbd_dev);
dout("rbd_dev_v2_snap_context returned %d\n", ret);
if (ret)
goto out;
- ret = rbd_dev_snaps_update(rbd_dev);
- dout("rbd_dev_snaps_update returned %d\n", ret);
- if (ret)
- goto out;
- ret = rbd_dev_snaps_register(rbd_dev);
- dout("rbd_dev_snaps_register returned %d\n", ret);
out:
up_write(&rbd_dev->header_rwsem);
return ret;
}
-/*
- * Scan the rbd device's current snapshot list and compare it to the
- * newly-received snapshot context. Remove any existing snapshots
- * not present in the new snapshot context. Add a new snapshot for
- * any snaphots in the snapshot context not in the current list.
- * And verify there are no changes to snapshots we already know
- * about.
- *
- * Assumes the snapshots in the snapshot context are sorted by
- * snapshot id, highest id first. (Snapshots in the rbd_dev's list
- * are also maintained in that order.)
- */
-static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
-{
- struct ceph_snap_context *snapc = rbd_dev->header.snapc;
- const u32 snap_count = snapc->num_snaps;
- struct list_head *head = &rbd_dev->snaps;
- struct list_head *links = head->next;
- u32 index = 0;
-
- dout("%s: snap count is %u\n", __func__, (unsigned int) snap_count);
- while (index < snap_count || links != head) {
- u64 snap_id;
- struct rbd_snap *snap;
- char *snap_name;
- u64 snap_size = 0;
- u64 snap_features = 0;
-
- snap_id = index < snap_count ? snapc->snaps[index]
- : CEPH_NOSNAP;
- snap = links != head ? list_entry(links, struct rbd_snap, node)
- : NULL;
- rbd_assert(!snap || snap->id != CEPH_NOSNAP);
-
- if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) {
- struct list_head *next = links->next;
-
- /*
- * A previously-existing snapshot is not in
- * the new snap context.
- *
- * If the now missing snapshot is the one the
- * image is mapped to, clear its exists flag
- * so we can avoid sending any more requests
- * to it.
- */
- if (rbd_dev->spec->snap_id == snap->id)
- clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
- rbd_remove_snap_dev(snap);
- dout("%ssnap id %llu has been removed\n",
- rbd_dev->spec->snap_id == snap->id ?
- "mapped " : "",
- (unsigned long long) snap->id);
-
- /* Done with this list entry; advance */
-
- links = next;
- continue;
- }
-
- snap_name = rbd_dev_snap_info(rbd_dev, index,
- &snap_size, &snap_features);
- if (IS_ERR(snap_name))
- return PTR_ERR(snap_name);
-
- dout("entry %u: snap_id = %llu\n", (unsigned int) snap_count,
- (unsigned long long) snap_id);
- if (!snap || (snap_id != CEPH_NOSNAP && snap->id < snap_id)) {
- struct rbd_snap *new_snap;
-
- /* We haven't seen this snapshot before */
-
- new_snap = __rbd_add_snap_dev(rbd_dev, snap_name,
- snap_id, snap_size, snap_features);
- if (IS_ERR(new_snap)) {
- int err = PTR_ERR(new_snap);
-
- dout(" failed to add dev, error %d\n", err);
-
- return err;
- }
-
- /* New goes before existing, or at end of list */
-
- dout(" added dev%s\n", snap ? "" : " at end\n");
- if (snap)
- list_add_tail(&new_snap->node, &snap->node);
- else
- list_add_tail(&new_snap->node, head);
- } else {
- /* Already have this one */
-
- dout(" already present\n");
-
- rbd_assert(snap->size == snap_size);
- rbd_assert(!strcmp(snap->name, snap_name));
- rbd_assert(snap->features == snap_features);
-
- /* Done with this list entry; advance */
-
- links = links->next;
- }
-
- /* Advance to the next entry in the snapshot context */
-
- index++;
- }
- dout("%s: done\n", __func__);
-
- return 0;
-}
-
-/*
- * Scan the list of snapshots and register the devices for any that
- * have not already been registered.
- */
-static int rbd_dev_snaps_register(struct rbd_device *rbd_dev)
-{
- struct rbd_snap *snap;
- int ret = 0;
-
- dout("%s:\n", __func__);
- if (WARN_ON(!device_is_registered(&rbd_dev->dev)))
- return -EIO;
-
- list_for_each_entry(snap, &rbd_dev->snaps, node) {
- if (!rbd_snap_registered(snap)) {
- ret = rbd_register_snap_dev(snap, &rbd_dev->dev);
- if (ret < 0)
- break;
- }
- }
- dout("%s: returning %d\n", __func__, ret);
-
- return ret;
-}
-
static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
{
struct device *dev;
@@ -3459,7 +4085,7 @@ static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
dev->bus = &rbd_bus_type;
dev->type = &rbd_device_type;
dev->parent = &rbd_root_dev;
- dev->release = rbd_dev_release;
+ dev->release = rbd_dev_device_release;
dev_set_name(dev, "%d", rbd_dev->dev_id);
ret = device_register(dev);
@@ -3673,6 +4299,7 @@ static int rbd_add_parse_args(const char *buf,
size_t len;
char *options;
const char *mon_addrs;
+ char *snap_name;
size_t mon_addrs_size;
struct rbd_spec *spec = NULL;
struct rbd_options *rbd_opts = NULL;
@@ -3731,10 +4358,11 @@ static int rbd_add_parse_args(const char *buf,
ret = -ENAMETOOLONG;
goto out_err;
}
- spec->snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
- if (!spec->snap_name)
+ snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
+ if (!snap_name)
goto out_mem;
- *(spec->snap_name + len) = '\0';
+ *(snap_name + len) = '\0';
+ spec->snap_name = snap_name;
/* Initialize all rbd options to the defaults */
@@ -3788,15 +4416,19 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev)
size_t size;
char *object_name;
void *response;
- void *p;
+ char *image_id;
/*
* When probing a parent image, the image id is already
* known (and the image name likely is not). There's no
- * need to fetch the image id again in this case.
+ * need to fetch the image id again in this case. We
+ * do still need to set the image format though.
*/
- if (rbd_dev->spec->image_id)
+ if (rbd_dev->spec->image_id) {
+ rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
+
return 0;
+ }
/*
* First, see if the format 2 image id file exists, and if
@@ -3818,23 +4450,32 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev)
goto out;
}
+ /* If it doesn't exist we'll assume it's a format 1 image */
+
ret = rbd_obj_method_sync(rbd_dev, object_name,
- "rbd", "get_id",
- NULL, 0,
- response, RBD_IMAGE_ID_LEN_MAX, NULL);
+ "rbd", "get_id", NULL, 0,
+ response, RBD_IMAGE_ID_LEN_MAX);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
- if (ret < 0)
- goto out;
-
- p = response;
- rbd_dev->spec->image_id = ceph_extract_encoded_string(&p,
- p + RBD_IMAGE_ID_LEN_MAX,
+ if (ret == -ENOENT) {
+ image_id = kstrdup("", GFP_KERNEL);
+ ret = image_id ? 0 : -ENOMEM;
+ if (!ret)
+ rbd_dev->image_format = 1;
+ } else if (ret > sizeof (__le32)) {
+ void *p = response;
+
+ image_id = ceph_extract_encoded_string(&p, p + ret,
NULL, GFP_NOIO);
- if (IS_ERR(rbd_dev->spec->image_id)) {
- ret = PTR_ERR(rbd_dev->spec->image_id);
- rbd_dev->spec->image_id = NULL;
+ ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
+ if (!ret)
+ rbd_dev->image_format = 2;
} else {
- dout("image_id is %s\n", rbd_dev->spec->image_id);
+ ret = -EINVAL;
+ }
+
+ if (!ret) {
+ rbd_dev->spec->image_id = image_id;
+ dout("image_id is %s\n", image_id);
}
out:
kfree(response);
@@ -3843,27 +4484,30 @@ out:
return ret;
}
-static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
+/* Undo whatever state changes are made by v1 or v2 image probe */
+
+static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
- int ret;
- size_t size;
+ struct rbd_image_header *header;
- /* Version 1 images have no id; empty string is used */
+ rbd_dev_remove_parent(rbd_dev);
+ rbd_spec_put(rbd_dev->parent_spec);
+ rbd_dev->parent_spec = NULL;
+ rbd_dev->parent_overlap = 0;
- rbd_dev->spec->image_id = kstrdup("", GFP_KERNEL);
- if (!rbd_dev->spec->image_id)
- return -ENOMEM;
+ /* Free dynamic fields from the header, then zero it out */
- /* Record the header object name for this rbd image. */
+ header = &rbd_dev->header;
+ ceph_put_snap_context(header->snapc);
+ kfree(header->snap_sizes);
+ kfree(header->snap_names);
+ kfree(header->object_prefix);
+ memset(header, 0, sizeof (*header));
+}
- size = strlen(rbd_dev->spec->image_name) + sizeof (RBD_SUFFIX);
- rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
- if (!rbd_dev->header_name) {
- ret = -ENOMEM;
- goto out_err;
- }
- sprintf(rbd_dev->header_name, "%s%s",
- rbd_dev->spec->image_name, RBD_SUFFIX);
+static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
+{
+ int ret;
/* Populate rbd image metadata */
@@ -3876,8 +4520,6 @@ static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
rbd_dev->parent_spec = NULL;
rbd_dev->parent_overlap = 0;
- rbd_dev->image_format = 1;
-
dout("discovered version 1 image, header name is %s\n",
rbd_dev->header_name);
@@ -3894,43 +4536,45 @@ out_err:
static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
{
- size_t size;
int ret;
- u64 ver = 0;
-
- /*
- * Image id was filled in by the caller. Record the header
- * object name for this rbd image.
- */
- size = sizeof (RBD_HEADER_PREFIX) + strlen(rbd_dev->spec->image_id);
- rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
- if (!rbd_dev->header_name)
- return -ENOMEM;
- sprintf(rbd_dev->header_name, "%s%s",
- RBD_HEADER_PREFIX, rbd_dev->spec->image_id);
-
- /* Get the size and object order for the image */
ret = rbd_dev_v2_image_size(rbd_dev);
- if (ret < 0)
+ if (ret)
goto out_err;
/* Get the object prefix (a.k.a. block_name) for the image */
ret = rbd_dev_v2_object_prefix(rbd_dev);
- if (ret < 0)
+ if (ret)
goto out_err;
	/* Get and check the features for the image */
ret = rbd_dev_v2_features(rbd_dev);
- if (ret < 0)
+ if (ret)
goto out_err;
/* If the image supports layering, get the parent info */
if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
ret = rbd_dev_v2_parent_info(rbd_dev);
+ if (ret)
+ goto out_err;
+
+ /*
+ * Don't print a warning for parent images. We can
+		 * tell at this point because we won't know its pool
+ * name yet (just its pool id).
+ */
+ if (rbd_dev->spec->pool_name)
+ rbd_warn(rbd_dev, "WARNING: kernel layering "
+ "is EXPERIMENTAL!");
+ }
+
+ /* If the image supports fancy striping, get its parameters */
+
+ if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
+ ret = rbd_dev_v2_striping_info(rbd_dev);
if (ret < 0)
goto out_err;
}
@@ -3942,12 +4586,9 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
/* Get the snapshot context, plus the header version */
- ret = rbd_dev_v2_snap_context(rbd_dev, &ver);
+ ret = rbd_dev_v2_snap_context(rbd_dev);
if (ret)
goto out_err;
- rbd_dev->header.obj_version = ver;
-
- rbd_dev->image_format = 2;
dout("discovered version 2 image, header name is %s\n",
rbd_dev->header_name);
@@ -3965,22 +4606,54 @@ out_err:
return ret;
}
-static int rbd_dev_probe_finish(struct rbd_device *rbd_dev)
+static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
+ struct rbd_device *parent = NULL;
+ struct rbd_spec *parent_spec;
+ struct rbd_client *rbdc;
int ret;
- /* no need to lock here, as rbd_dev is not registered yet */
- ret = rbd_dev_snaps_update(rbd_dev);
- if (ret)
- return ret;
+ if (!rbd_dev->parent_spec)
+ return 0;
+ /*
+ * We need to pass a reference to the client and the parent
+ * spec when creating the parent rbd_dev. Images related by
+ * parent/child relationships always share both.
+ */
+ parent_spec = rbd_spec_get(rbd_dev->parent_spec);
+ rbdc = __rbd_get_client(rbd_dev->rbd_client);
- ret = rbd_dev_probe_update_spec(rbd_dev);
- if (ret)
- goto err_out_snaps;
+ ret = -ENOMEM;
+ parent = rbd_dev_create(rbdc, parent_spec);
+ if (!parent)
+ goto out_err;
+
+ ret = rbd_dev_image_probe(parent);
+ if (ret < 0)
+ goto out_err;
+ rbd_dev->parent = parent;
+
+ return 0;
+out_err:
+ if (parent) {
+ rbd_spec_put(rbd_dev->parent_spec);
+ kfree(rbd_dev->header_name);
+ rbd_dev_destroy(parent);
+ } else {
+ rbd_put_client(rbdc);
+ rbd_spec_put(parent_spec);
+ }
+
+ return ret;
+}
+
+static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
+{
+ int ret;
- ret = rbd_dev_set_mapping(rbd_dev);
+ ret = rbd_dev_mapping_set(rbd_dev);
if (ret)
- goto err_out_snaps;
+ return ret;
/* generate unique id: find highest unique id, add one */
rbd_dev_id_get(rbd_dev);
@@ -4007,54 +4680,81 @@ static int rbd_dev_probe_finish(struct rbd_device *rbd_dev)
if (ret)
goto err_out_disk;
- /*
- * At this point cleanup in the event of an error is the job
- * of the sysfs code (initiated by rbd_bus_del_dev()).
- */
- down_write(&rbd_dev->header_rwsem);
- ret = rbd_dev_snaps_register(rbd_dev);
- up_write(&rbd_dev->header_rwsem);
- if (ret)
- goto err_out_bus;
-
- ret = rbd_dev_header_watch_sync(rbd_dev, 1);
- if (ret)
- goto err_out_bus;
-
/* Everything's ready. Announce the disk to the world. */
+ set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
+ set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
add_disk(rbd_dev->disk);
pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
(unsigned long long) rbd_dev->mapping.size);
return ret;
-err_out_bus:
- /* this will also clean up rest of rbd_dev stuff */
- rbd_bus_del_dev(rbd_dev);
-
- return ret;
err_out_disk:
rbd_free_disk(rbd_dev);
err_out_blkdev:
unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
rbd_dev_id_put(rbd_dev);
-err_out_snaps:
- rbd_remove_all_snaps(rbd_dev);
+ rbd_dev_mapping_clear(rbd_dev);
return ret;
}
+static int rbd_dev_header_name(struct rbd_device *rbd_dev)
+{
+ struct rbd_spec *spec = rbd_dev->spec;
+ size_t size;
+
+ /* Record the header object name for this rbd image. */
+
+ rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+
+ if (rbd_dev->image_format == 1)
+ size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
+ else
+ size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);
+
+ rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
+ if (!rbd_dev->header_name)
+ return -ENOMEM;
+
+ if (rbd_dev->image_format == 1)
+ sprintf(rbd_dev->header_name, "%s%s",
+ spec->image_name, RBD_SUFFIX);
+ else
+ sprintf(rbd_dev->header_name, "%s%s",
+ RBD_HEADER_PREFIX, spec->image_id);
+ return 0;
+}
+
+static void rbd_dev_image_release(struct rbd_device *rbd_dev)
+{
+ int ret;
+
+ rbd_dev_unprobe(rbd_dev);
+ ret = rbd_dev_header_watch_sync(rbd_dev, 0);
+ if (ret)
+ rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
+ kfree(rbd_dev->header_name);
+ rbd_dev->header_name = NULL;
+ rbd_dev->image_format = 0;
+ kfree(rbd_dev->spec->image_id);
+ rbd_dev->spec->image_id = NULL;
+
+ rbd_dev_destroy(rbd_dev);
+}
+
/*
* Probe for the existence of the header object for the given rbd
* device. For format 2 images this includes determining the image
* id.
*/
-static int rbd_dev_probe(struct rbd_device *rbd_dev)
+static int rbd_dev_image_probe(struct rbd_device *rbd_dev)
{
int ret;
+ int tmp;
/*
* Get the id from the image id object. If it's not a
@@ -4063,18 +4763,48 @@ static int rbd_dev_probe(struct rbd_device *rbd_dev)
*/
ret = rbd_dev_image_id(rbd_dev);
if (ret)
+ return ret;
+ rbd_assert(rbd_dev->spec->image_id);
+ rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+
+ ret = rbd_dev_header_name(rbd_dev);
+ if (ret)
+ goto err_out_format;
+
+ ret = rbd_dev_header_watch_sync(rbd_dev, 1);
+ if (ret)
+ goto out_header_name;
+
+ if (rbd_dev->image_format == 1)
ret = rbd_dev_v1_probe(rbd_dev);
else
ret = rbd_dev_v2_probe(rbd_dev);
- if (ret) {
- dout("probe failed, returning %d\n", ret);
-
- return ret;
- }
+ if (ret)
+ goto err_out_watch;
- ret = rbd_dev_probe_finish(rbd_dev);
+ ret = rbd_dev_spec_update(rbd_dev);
if (ret)
- rbd_header_free(&rbd_dev->header);
+ goto err_out_probe;
+
+ ret = rbd_dev_probe_parent(rbd_dev);
+ if (!ret)
+ return 0;
+
+err_out_probe:
+ rbd_dev_unprobe(rbd_dev);
+err_out_watch:
+ tmp = rbd_dev_header_watch_sync(rbd_dev, 0);
+ if (tmp)
+ rbd_warn(rbd_dev, "unable to tear down watch request\n");
+out_header_name:
+ kfree(rbd_dev->header_name);
+ rbd_dev->header_name = NULL;
+err_out_format:
+ rbd_dev->image_format = 0;
+ kfree(rbd_dev->spec->image_id);
+ rbd_dev->spec->image_id = NULL;
+
+ dout("probe failed, returning %d\n", ret);
return ret;
}
@@ -4111,11 +4841,13 @@ static ssize_t rbd_add(struct bus_type *bus,
rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
if (rc < 0)
goto err_out_client;
- spec->pool_id = (u64) rc;
+ spec->pool_id = (u64)rc;
/* The ceph file layout needs to fit pool id in 32 bits */
- if (WARN_ON(spec->pool_id > (u64) U32_MAX)) {
+ if (spec->pool_id > (u64)U32_MAX) {
+ rbd_warn(NULL, "pool id too large (%llu > %u)\n",
+ (unsigned long long)spec->pool_id, U32_MAX);
rc = -EIO;
goto err_out_client;
}
@@ -4130,11 +4862,15 @@ static ssize_t rbd_add(struct bus_type *bus,
kfree(rbd_opts);
rbd_opts = NULL; /* done with this */
- rc = rbd_dev_probe(rbd_dev);
+ rc = rbd_dev_image_probe(rbd_dev);
if (rc < 0)
goto err_out_rbd_dev;
- return count;
+ rc = rbd_dev_device_setup(rbd_dev);
+ if (!rc)
+ return count;
+
+ rbd_dev_image_release(rbd_dev);
err_out_rbd_dev:
rbd_dev_destroy(rbd_dev);
err_out_client:
@@ -4149,7 +4885,7 @@ err_out_module:
dout("Error adding device %s\n", buf);
- return (ssize_t) rc;
+ return (ssize_t)rc;
}
static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
@@ -4169,27 +4905,43 @@ static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
return NULL;
}
-static void rbd_dev_release(struct device *dev)
+static void rbd_dev_device_release(struct device *dev)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- if (rbd_dev->watch_event)
- rbd_dev_header_watch_sync(rbd_dev, 0);
-
- /* clean up and free blkdev */
rbd_free_disk(rbd_dev);
+ clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
+ rbd_dev_clear_mapping(rbd_dev);
unregister_blkdev(rbd_dev->major, rbd_dev->name);
-
- /* release allocated disk header fields */
- rbd_header_free(&rbd_dev->header);
-
- /* done with the id, and with the rbd_dev */
+ rbd_dev->major = 0;
rbd_dev_id_put(rbd_dev);
- rbd_assert(rbd_dev->rbd_client != NULL);
- rbd_dev_destroy(rbd_dev);
+ rbd_dev_mapping_clear(rbd_dev);
+}
- /* release module ref */
- module_put(THIS_MODULE);
+static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
+{
+ while (rbd_dev->parent) {
+ struct rbd_device *first = rbd_dev;
+ struct rbd_device *second = first->parent;
+ struct rbd_device *third;
+
+ /*
+ * Walk down to the parent that has no grandparent and
+ * remove it.
+ */
+ while (second && (third = second->parent)) {
+ first = second;
+ second = third;
+ }
+ rbd_assert(second);
+ rbd_dev_image_release(second);
+ first->parent = NULL;
+ first->parent_overlap = 0;
+
+ rbd_assert(first->parent_spec);
+ rbd_spec_put(first->parent_spec);
+ first->parent_spec = NULL;
+ }
}
static ssize_t rbd_remove(struct bus_type *bus,
@@ -4197,13 +4949,13 @@ static ssize_t rbd_remove(struct bus_type *bus,
size_t count)
{
struct rbd_device *rbd_dev = NULL;
- int target_id, rc;
+ int target_id;
unsigned long ul;
- int ret = count;
+ int ret;
- rc = strict_strtoul(buf, 10, &ul);
- if (rc)
- return rc;
+ ret = strict_strtoul(buf, 10, &ul);
+ if (ret)
+ return ret;
/* convert to int; abort if we lost anything in the conversion */
target_id = (int) ul;
@@ -4226,10 +4978,10 @@ static ssize_t rbd_remove(struct bus_type *bus,
spin_unlock_irq(&rbd_dev->lock);
if (ret < 0)
goto done;
-
- rbd_remove_all_snaps(rbd_dev);
+ ret = count;
rbd_bus_del_dev(rbd_dev);
-
+ rbd_dev_image_release(rbd_dev);
+ module_put(THIS_MODULE);
done:
mutex_unlock(&ctl_mutex);
@@ -4261,6 +5013,56 @@ static void rbd_sysfs_cleanup(void)
device_unregister(&rbd_root_dev);
}
+static int rbd_slab_init(void)
+{
+ rbd_assert(!rbd_img_request_cache);
+ rbd_img_request_cache = kmem_cache_create("rbd_img_request",
+ sizeof (struct rbd_img_request),
+ __alignof__(struct rbd_img_request),
+ 0, NULL);
+ if (!rbd_img_request_cache)
+ return -ENOMEM;
+
+ rbd_assert(!rbd_obj_request_cache);
+ rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
+ sizeof (struct rbd_obj_request),
+ __alignof__(struct rbd_obj_request),
+ 0, NULL);
+ if (!rbd_obj_request_cache)
+ goto out_err;
+
+ rbd_assert(!rbd_segment_name_cache);
+ rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
+ MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
+ if (rbd_segment_name_cache)
+ return 0;
+out_err:
+ if (rbd_obj_request_cache) {
+ kmem_cache_destroy(rbd_obj_request_cache);
+ rbd_obj_request_cache = NULL;
+ }
+
+ kmem_cache_destroy(rbd_img_request_cache);
+ rbd_img_request_cache = NULL;
+
+ return -ENOMEM;
+}
+
+static void rbd_slab_exit(void)
+{
+ rbd_assert(rbd_segment_name_cache);
+ kmem_cache_destroy(rbd_segment_name_cache);
+ rbd_segment_name_cache = NULL;
+
+ rbd_assert(rbd_obj_request_cache);
+ kmem_cache_destroy(rbd_obj_request_cache);
+ rbd_obj_request_cache = NULL;
+
+ rbd_assert(rbd_img_request_cache);
+ kmem_cache_destroy(rbd_img_request_cache);
+ rbd_img_request_cache = NULL;
+}
+
static int __init rbd_init(void)
{
int rc;
@@ -4270,16 +5072,22 @@ static int __init rbd_init(void)
return -EINVAL;
}
- rc = rbd_sysfs_init();
+ rc = rbd_slab_init();
if (rc)
return rc;
- pr_info("loaded " RBD_DRV_NAME_LONG "\n");
- return 0;
+ rc = rbd_sysfs_init();
+ if (rc)
+ rbd_slab_exit();
+ else
+ pr_info("loaded " RBD_DRV_NAME_LONG "\n");
+
+ return rc;
}
static void __exit rbd_exit(void)
{
rbd_sysfs_cleanup();
+ rbd_slab_exit();
}
module_init(rbd_init);
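
The reworked rbd_dev_image_probe() above follows the usual kernel unwind idiom: error labels are laid out in reverse order of the setup steps, so a failure after step N tears down exactly the steps that already succeeded. A minimal sketch of that idiom (not from the patch), with hypothetical step_*()/undo_*() helpers standing in for the rbd-specific calls (image id lookup, header watch, parent probe):

static int step_one(void), step_two(void), step_three(void);
static void undo_one(void), undo_two(void);

static int probe_like_setup(void)
{
	int ret;

	ret = step_one();
	if (ret)
		return ret;		/* nothing set up yet, nothing to undo */

	ret = step_two();
	if (ret)
		goto err_undo_one;

	ret = step_three();
	if (ret)
		goto err_undo_two;

	return 0;			/* success: keep everything */

err_undo_two:
	undo_two();
err_undo_one:
	undo_one();
	return ret;
}
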
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 8766a22..2f445b7 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -673,7 +673,7 @@ static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
return ret;
}
-static int floppy_release(struct gendisk *disk, fmode_t mode)
+static void floppy_release(struct gendisk *disk, fmode_t mode)
{
struct floppy_state *fs = disk->private_data;
struct swim __iomem *base = fs->swd->base;
@@ -687,8 +687,6 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
if (fs->ref_count == 0)
swim_motor(base, OFF);
mutex_unlock(&swim_mutex);
-
- return 0;
}
static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 758f2ac..20e061c 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -251,7 +251,7 @@ static int fd_eject(struct floppy_state *fs);
static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long param);
static int floppy_open(struct block_device *bdev, fmode_t mode);
-static int floppy_release(struct gendisk *disk, fmode_t mode);
+static void floppy_release(struct gendisk *disk, fmode_t mode);
static unsigned int floppy_check_events(struct gendisk *disk,
unsigned int clearing);
static int floppy_revalidate(struct gendisk *disk);
@@ -1017,7 +1017,7 @@ static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
return ret;
}
-static int floppy_release(struct gendisk *disk, fmode_t mode)
+static void floppy_release(struct gendisk *disk, fmode_t mode)
{
struct floppy_state *fs = disk->private_data;
struct swim3 __iomem *sw = fs->swim3;
@@ -1029,7 +1029,6 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
swim3_select(fs, RELAX);
}
mutex_unlock(&swim3_mutex);
- return 0;
}
static unsigned int floppy_check_events(struct gendisk *disk,
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 8ad21a2..6472395 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -100,96 +100,103 @@ static inline struct virtblk_req *virtblk_alloc_req(struct virtio_blk *vblk,
return vbr;
}
-static void virtblk_add_buf_wait(struct virtio_blk *vblk,
- struct virtblk_req *vbr,
- unsigned long out,
- unsigned long in)
+static int __virtblk_add_req(struct virtqueue *vq,
+ struct virtblk_req *vbr,
+ struct scatterlist *data_sg,
+ bool have_data)
{
- DEFINE_WAIT(wait);
+ struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
+ unsigned int num_out = 0, num_in = 0;
+ int type = vbr->out_hdr.type & ~VIRTIO_BLK_T_OUT;
- for (;;) {
- prepare_to_wait_exclusive(&vblk->queue_wait, &wait,
- TASK_UNINTERRUPTIBLE);
+ sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
+ sgs[num_out++] = &hdr;
- spin_lock_irq(vblk->disk->queue->queue_lock);
- if (virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr,
- GFP_ATOMIC) < 0) {
- spin_unlock_irq(vblk->disk->queue->queue_lock);
- io_schedule();
- } else {
- virtqueue_kick(vblk->vq);
- spin_unlock_irq(vblk->disk->queue->queue_lock);
- break;
- }
+ /*
+ * If this is a packet command we need a couple of additional headers.
+ * Behind the normal outhdr we put a segment with the scsi command
+ * block, and before the normal inhdr we put the sense data and the
+ * inhdr with additional status information.
+ */
+ if (type == VIRTIO_BLK_T_SCSI_CMD) {
+ sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len);
+ sgs[num_out++] = &cmd;
+ }
+ if (have_data) {
+ if (vbr->out_hdr.type & VIRTIO_BLK_T_OUT)
+ sgs[num_out++] = data_sg;
+ else
+ sgs[num_out + num_in++] = data_sg;
}
- finish_wait(&vblk->queue_wait, &wait);
+ if (type == VIRTIO_BLK_T_SCSI_CMD) {
+ sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
+ sgs[num_out + num_in++] = &sense;
+ sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
+ sgs[num_out + num_in++] = &inhdr;
+ }
+
+ sg_init_one(&status, &vbr->status, sizeof(vbr->status));
+ sgs[num_out + num_in++] = &status;
+
+ return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
-static inline void virtblk_add_req(struct virtblk_req *vbr,
- unsigned int out, unsigned int in)
+static void virtblk_add_req(struct virtblk_req *vbr, bool have_data)
{
struct virtio_blk *vblk = vbr->vblk;
+ DEFINE_WAIT(wait);
+ int ret;
spin_lock_irq(vblk->disk->queue->queue_lock);
- if (unlikely(virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr,
- GFP_ATOMIC) < 0)) {
+ while (unlikely((ret = __virtblk_add_req(vblk->vq, vbr, vbr->sg,
+ have_data)) < 0)) {
+ prepare_to_wait_exclusive(&vblk->queue_wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+
spin_unlock_irq(vblk->disk->queue->queue_lock);
- virtblk_add_buf_wait(vblk, vbr, out, in);
- return;
+ io_schedule();
+ spin_lock_irq(vblk->disk->queue->queue_lock);
+
+ finish_wait(&vblk->queue_wait, &wait);
}
+
virtqueue_kick(vblk->vq);
spin_unlock_irq(vblk->disk->queue->queue_lock);
}
-static int virtblk_bio_send_flush(struct virtblk_req *vbr)
+static void virtblk_bio_send_flush(struct virtblk_req *vbr)
{
- unsigned int out = 0, in = 0;
-
vbr->flags |= VBLK_IS_FLUSH;
vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
vbr->out_hdr.sector = 0;
vbr->out_hdr.ioprio = 0;
- sg_set_buf(&vbr->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
- sg_set_buf(&vbr->sg[out + in++], &vbr->status, sizeof(vbr->status));
-
- virtblk_add_req(vbr, out, in);
- return 0;
+ virtblk_add_req(vbr, false);
}
-static int virtblk_bio_send_data(struct virtblk_req *vbr)
+static void virtblk_bio_send_data(struct virtblk_req *vbr)
{
struct virtio_blk *vblk = vbr->vblk;
- unsigned int num, out = 0, in = 0;
struct bio *bio = vbr->bio;
+ bool have_data;
vbr->flags &= ~VBLK_IS_FLUSH;
vbr->out_hdr.type = 0;
vbr->out_hdr.sector = bio->bi_sector;
vbr->out_hdr.ioprio = bio_prio(bio);
- sg_set_buf(&vbr->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
-
- num = blk_bio_map_sg(vblk->disk->queue, bio, vbr->sg + out);
-
- sg_set_buf(&vbr->sg[num + out + in++], &vbr->status,
- sizeof(vbr->status));
-
- if (num) {
- if (bio->bi_rw & REQ_WRITE) {
+ if (blk_bio_map_sg(vblk->disk->queue, bio, vbr->sg)) {
+ have_data = true;
+ if (bio->bi_rw & REQ_WRITE)
vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
- out += num;
- } else {
+ else
vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
- in += num;
- }
- }
+ } else
+ have_data = false;
- virtblk_add_req(vbr, out, in);
-
- return 0;
+ virtblk_add_req(vbr, have_data);
}
static void virtblk_bio_send_data_work(struct work_struct *work)
@@ -298,7 +305,7 @@ static void virtblk_done(struct virtqueue *vq)
static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
struct request *req)
{
- unsigned long num, out = 0, in = 0;
+ unsigned int num;
struct virtblk_req *vbr;
vbr = virtblk_alloc_req(vblk, GFP_ATOMIC);
@@ -335,40 +342,15 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
}
}
- sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
-
- /*
- * If this is a packet command we need a couple of additional headers.
- * Behind the normal outhdr we put a segment with the scsi command
- * block, and before the normal inhdr we put the sense data and the
- * inhdr with additional status information before the normal inhdr.
- */
- if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC)
- sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);
-
- num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);
-
- if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) {
- sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
- sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
- sizeof(vbr->in_hdr));
- }
-
- sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
- sizeof(vbr->status));
-
+ num = blk_rq_map_sg(q, vbr->req, vblk->sg);
if (num) {
- if (rq_data_dir(vbr->req) == WRITE) {
+ if (rq_data_dir(vbr->req) == WRITE)
vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
- out += num;
- } else {
+ else
vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
- in += num;
- }
}
- if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr,
- GFP_ATOMIC) < 0) {
+ if (__virtblk_add_req(vblk->vq, vbr, vblk->sg, num) < 0) {
mempool_free(vbr, vblk->pool);
return false;
}
@@ -539,6 +521,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
struct virtio_device *vdev = vblk->vdev;
struct request_queue *q = vblk->disk->queue;
char cap_str_2[10], cap_str_10[10];
+ char *envp[] = { "RESIZE=1", NULL };
u64 capacity, size;
mutex_lock(&vblk->config_lock);
@@ -568,6 +551,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
set_capacity(vblk->disk, capacity);
revalidate_disk(vblk->disk);
+ kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
done:
mutex_unlock(&vblk->config_lock);
}
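
The key convention behind __virtblk_add_req() above is virtqueue_add_sgs(): the caller passes an array of scatterlist pointers with every driver-to-device ("out") entry listed before every device-to-driver ("in") entry, plus a count of each. A minimal sketch of a caller (not from the patch; the function name is made up) mirroring the flush case, one request header out and one status byte in:

#include <linux/scatterlist.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>

static int example_queue_flush(struct virtqueue *vq,
			       struct virtio_blk_outhdr *hdr,
			       u8 *status, void *cookie)
{
	struct scatterlist out, in;
	struct scatterlist *sgs[] = { &out, &in };

	sg_init_one(&out, hdr, sizeof(*hdr));		/* driver -> device */
	sg_init_one(&in, status, sizeof(*status));	/* device -> driver */

	/* one "out" sg, one "in" sg; 'cookie' comes back from virtqueue_get_buf() */
	return virtqueue_add_sgs(vq, sgs, 1, 1, cookie, GFP_ATOMIC);
}
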
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index a894f88..d89ef86 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1617,7 +1617,7 @@ out:
return err;
}
-static int blkif_release(struct gendisk *disk, fmode_t mode)
+static void blkif_release(struct gendisk *disk, fmode_t mode)
{
struct blkfront_info *info = disk->private_data;
struct block_device *bdev;
@@ -1658,7 +1658,6 @@ static int blkif_release(struct gendisk *disk, fmode_t mode)
out:
bdput(bdev);
mutex_unlock(&blkfront_mutex);
- return 0;
}
static const struct block_device_operations xlvbd_block_fops =
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 1f38643..f8ef15f 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -915,7 +915,7 @@ static int ace_open(struct block_device *bdev, fmode_t mode)
return 0;
}
-static int ace_release(struct gendisk *disk, fmode_t mode)
+static void ace_release(struct gendisk *disk, fmode_t mode)
{
struct ace_device *ace = disk->private_data;
unsigned long flags;
@@ -932,7 +932,6 @@ static int ace_release(struct gendisk *disk, fmode_t mode)
}
spin_unlock_irqrestore(&ace->lock, flags);
mutex_unlock(&xsysace_mutex);
- return 0;
}
static int ace_getgeo(struct block_device *bdev, struct hd_geometry *geo)
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index a22e3f8..5a95baf 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -309,20 +309,18 @@ err_out:
return rc;
}
-static int
+static void
z2_release(struct gendisk *disk, fmode_t mode)
{
mutex_lock(&z2ram_mutex);
if ( current_device == -1 ) {
mutex_unlock(&z2ram_mutex);
- return 0;
+ return;
}
mutex_unlock(&z2ram_mutex);
/*
* FIXME: unmap memory
*/
-
- return 0;
}
static const struct block_device_operations z2_fops =
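
The swim, swim3, xen-blkfront, xsysace and z2ram hunks above (and the gdrom hunk further down) are the same mechanical conversion: the block layer no longer uses the return value of the release method, so it now returns void. A minimal sketch of what a converted driver's method and block_device_operations entry look like (names hypothetical, not taken from any of these drivers):

#include <linux/blkdev.h>
#include <linux/module.h>

static void example_release(struct gendisk *disk, fmode_t mode)
{
	/* drop driver-private references; there is no status to return */
}

static const struct block_device_operations example_fops = {
	.owner   = THIS_MODULE,
	.release = example_release,
};
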
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 0f51ed6..b05ecab 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -4,6 +4,13 @@
menu "Bus devices"
+config MVEBU_MBUS
+ bool
+ depends on PLAT_ORION
+ help
+ Driver needed for the MBus configuration on Marvell EBU SoCs
+ (Kirkwood, Dove, Orion5x, MV78XX0 and Armada 370/XP).
+
config OMAP_OCP2SCP
tristate "OMAP OCP2SCP DRIVER"
depends on ARCH_OMAP2PLUS
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 45d997c..3c7b53c 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -2,6 +2,7 @@
# Makefile for the bus drivers.
#
+obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o
obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
# Interconnect bus driver for OMAP SoCs.
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
new file mode 100644
index 0000000..8740f46
--- /dev/null
+++ b/drivers/bus/mvebu-mbus.c
@@ -0,0 +1,870 @@
+/*
+ * Address map functions for Marvell EBU SoCs (Kirkwood, Armada
+ * 370/XP, Dove, Orion5x and MV78xx0)
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * The Marvell EBU SoCs have a configurable physical address space:
+ * the physical address at which certain devices (PCIe, NOR, NAND,
+ * etc.) sit can be configured. The configuration takes place through
+ * two sets of registers:
+ *
+ * - One to configure the access of the CPU to the devices. Depending
+ * on the family, there are between 8 and 20 configurable windows;
+ * each can be used to create a physical memory window that maps to a
+ * specific device. Devices are identified by a tuple (target,
+ * attribute).
+ *
+ * - One to configure the access of the CPU to the SDRAM. There are
+ * either 2 (for Dove) or 4 (for other families) windows to map the
+ * SDRAM into the physical address space.
+ *
+ * This driver:
+ *
+ * - Reads out the SDRAM address decoding windows at initialization
+ * time, and fills the mvebu_mbus_dram_info structure with this
+ * information. The exported function mv_mbus_dram_info() allows
+ * device drivers to retrieve this information about the SDRAM
+ * address decoding windows. This is because devices also have their
+ * own windows (configured through registers that are part of each
+ * device register space), and therefore the drivers for Marvell
+ * devices have to configure those device -> SDRAM windows to ensure
+ * that DMA works properly.
+ *
+ * - Provides an API for platform code or device drivers to
+ * dynamically add or remove address decoding windows for the CPU ->
+ * device accesses. This API is mvebu_mbus_add_window(),
+ * mvebu_mbus_add_window_remap_flags() and
+ * mvebu_mbus_del_window(). Since the (target, attribute) values
+ * differ from one SoC family to another, the API uses a 'const char
+ * *' string to identify devices, and this driver is responsible for
+ * knowing the mapping between the name of a device and its
+ * corresponding (target, attribute) in the current SoC family.
+ *
+ * - Provides a debugfs interface in /sys/kernel/debug/mvebu-mbus/ to
+ * see the list of CPU -> SDRAM windows and their configuration
+ * (file 'sdram') and the list of CPU -> device windows and their
+ * configuration (file 'devices').
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mbus.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/debugfs.h>
+
+/*
+ * DDR target is the same on all platforms.
+ */
+#define TARGET_DDR 0
+
+/*
+ * CPU Address Decode Windows registers
+ */
+#define WIN_CTRL_OFF 0x0000
+#define WIN_CTRL_ENABLE BIT(0)
+#define WIN_CTRL_TGT_MASK 0xf0
+#define WIN_CTRL_TGT_SHIFT 4
+#define WIN_CTRL_ATTR_MASK 0xff00
+#define WIN_CTRL_ATTR_SHIFT 8
+#define WIN_CTRL_SIZE_MASK 0xffff0000
+#define WIN_CTRL_SIZE_SHIFT 16
+#define WIN_BASE_OFF 0x0004
+#define WIN_BASE_LOW 0xffff0000
+#define WIN_BASE_HIGH 0xf
+#define WIN_REMAP_LO_OFF 0x0008
+#define WIN_REMAP_LOW 0xffff0000
+#define WIN_REMAP_HI_OFF 0x000c
+
+#define ATTR_HW_COHERENCY (0x1 << 4)
+
+#define DDR_BASE_CS_OFF(n) (0x0000 + ((n) << 3))
+#define DDR_BASE_CS_HIGH_MASK 0xf
+#define DDR_BASE_CS_LOW_MASK 0xff000000
+#define DDR_SIZE_CS_OFF(n) (0x0004 + ((n) << 3))
+#define DDR_SIZE_ENABLED BIT(0)
+#define DDR_SIZE_CS_MASK 0x1c
+#define DDR_SIZE_CS_SHIFT 2
+#define DDR_SIZE_MASK 0xff000000
+
+#define DOVE_DDR_BASE_CS_OFF(n) ((n) << 4)
+
+struct mvebu_mbus_mapping {
+ const char *name;
+ u8 target;
+ u8 attr;
+ u8 attrmask;
+};
+
+/*
+ * Masks used for the 'attrmask' field of mvebu_mbus_mapping. They
+ * are used to recover the real attribute value, discarding the special bits
+ * used to select a PCI MEM region or a PCI WA region. This allows the
+ * debugfs code to reverse-match the name of a device from its
+ * target/attr values.
+ *
+ * For all devices except PCI, all bits of 'attr' must be
+ * considered. For most SoCs, only bit 3 should be ignored (it selects
+ * between PCI MEM and PCI I/O). On Orion5x, however, the special
+ * bit 5 additionally selects a PCI WA region.
+ */
+#define MAPDEF_NOMASK 0xff
+#define MAPDEF_PCIMASK 0xf7
+#define MAPDEF_ORIONPCIMASK 0xd7
+
+/* Macro used to define one mvebu_mbus_mapping entry */
+#define MAPDEF(__n, __t, __a, __m) \
+ { .name = __n, .target = __t, .attr = __a, .attrmask = __m }
+
+struct mvebu_mbus_state;
+
+struct mvebu_mbus_soc_data {
+ unsigned int num_wins;
+ unsigned int num_remappable_wins;
+ unsigned int (*win_cfg_offset)(const int win);
+ void (*setup_cpu_target)(struct mvebu_mbus_state *s);
+ int (*show_cpu_target)(struct mvebu_mbus_state *s,
+ struct seq_file *seq, void *v);
+ const struct mvebu_mbus_mapping *map;
+};
+
+struct mvebu_mbus_state {
+ void __iomem *mbuswins_base;
+ void __iomem *sdramwins_base;
+ struct dentry *debugfs_root;
+ struct dentry *debugfs_sdram;
+ struct dentry *debugfs_devs;
+ const struct mvebu_mbus_soc_data *soc;
+ int hw_io_coherency;
+};
+
+static struct mvebu_mbus_state mbus_state;
+
+static struct mbus_dram_target_info mvebu_mbus_dram_info;
+const struct mbus_dram_target_info *mv_mbus_dram_info(void)
+{
+ return &mvebu_mbus_dram_info;
+}
+EXPORT_SYMBOL_GPL(mv_mbus_dram_info);
+
+/*
+ * Functions to manipulate the address decoding windows
+ */
+
+static void mvebu_mbus_read_window(struct mvebu_mbus_state *mbus,
+ int win, int *enabled, u64 *base,
+ u32 *size, u8 *target, u8 *attr,
+ u64 *remap)
+{
+ void __iomem *addr = mbus->mbuswins_base +
+ mbus->soc->win_cfg_offset(win);
+ u32 basereg = readl(addr + WIN_BASE_OFF);
+ u32 ctrlreg = readl(addr + WIN_CTRL_OFF);
+
+ if (!(ctrlreg & WIN_CTRL_ENABLE)) {
+ *enabled = 0;
+ return;
+ }
+
+ *enabled = 1;
+ *base = ((u64)basereg & WIN_BASE_HIGH) << 32;
+ *base |= (basereg & WIN_BASE_LOW);
+ *size = (ctrlreg | ~WIN_CTRL_SIZE_MASK) + 1;
+
+ if (target)
+ *target = (ctrlreg & WIN_CTRL_TGT_MASK) >> WIN_CTRL_TGT_SHIFT;
+
+ if (attr)
+ *attr = (ctrlreg & WIN_CTRL_ATTR_MASK) >> WIN_CTRL_ATTR_SHIFT;
+
+ if (remap) {
+ if (win < mbus->soc->num_remappable_wins) {
+ u32 remap_low = readl(addr + WIN_REMAP_LO_OFF);
+ u32 remap_hi = readl(addr + WIN_REMAP_HI_OFF);
+ *remap = ((u64)remap_hi << 32) | remap_low;
+ } else
+ *remap = 0;
+ }
+}
+
+static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus,
+ int win)
+{
+ void __iomem *addr;
+
+ addr = mbus->mbuswins_base + mbus->soc->win_cfg_offset(win);
+
+ writel(0, addr + WIN_BASE_OFF);
+ writel(0, addr + WIN_CTRL_OFF);
+ if (win < mbus->soc->num_remappable_wins) {
+ writel(0, addr + WIN_REMAP_LO_OFF);
+ writel(0, addr + WIN_REMAP_HI_OFF);
+ }
+}
+
+/* Checks whether the given window number is available */
+static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus,
+ const int win)
+{
+ void __iomem *addr = mbus->mbuswins_base +
+ mbus->soc->win_cfg_offset(win);
+ u32 ctrl = readl(addr + WIN_CTRL_OFF);
+ return !(ctrl & WIN_CTRL_ENABLE);
+}
+
+/*
+ * Checks whether the given (base, base+size) area doesn't overlap an
+ * existing region
+ */
+static int mvebu_mbus_window_conflicts(struct mvebu_mbus_state *mbus,
+ phys_addr_t base, size_t size,
+ u8 target, u8 attr)
+{
+ u64 end = (u64)base + size;
+ int win;
+
+ for (win = 0; win < mbus->soc->num_wins; win++) {
+ u64 wbase, wend;
+ u32 wsize;
+ u8 wtarget, wattr;
+ int enabled;
+
+ mvebu_mbus_read_window(mbus, win,
+ &enabled, &wbase, &wsize,
+ &wtarget, &wattr, NULL);
+
+ if (!enabled)
+ continue;
+
+ wend = wbase + wsize;
+
+ /*
+ * Check if the current window overlaps with the
+ * proposed physical range
+ */
+ if ((u64)base < wend && end > wbase)
+ return 0;
+
+ /*
+ * Check if target/attribute conflicts
+ */
+ if (target == wtarget && attr == wattr)
+ return 0;
+ }
+
+ return 1;
+}
+
+static int mvebu_mbus_find_window(struct mvebu_mbus_state *mbus,
+ phys_addr_t base, size_t size)
+{
+ int win;
+
+ for (win = 0; win < mbus->soc->num_wins; win++) {
+ u64 wbase;
+ u32 wsize;
+ int enabled;
+
+ mvebu_mbus_read_window(mbus, win,
+ &enabled, &wbase, &wsize,
+ NULL, NULL, NULL);
+
+ if (!enabled)
+ continue;
+
+ if (base == wbase && size == wsize)
+ return win;
+ }
+
+ return -ENODEV;
+}
+
+static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus,
+ int win, phys_addr_t base, size_t size,
+ phys_addr_t remap, u8 target,
+ u8 attr)
+{
+ void __iomem *addr = mbus->mbuswins_base +
+ mbus->soc->win_cfg_offset(win);
+ u32 ctrl, remap_addr;
+
+ ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) |
+ (attr << WIN_CTRL_ATTR_SHIFT) |
+ (target << WIN_CTRL_TGT_SHIFT) |
+ WIN_CTRL_ENABLE;
+
+ writel(base & WIN_BASE_LOW, addr + WIN_BASE_OFF);
+ writel(ctrl, addr + WIN_CTRL_OFF);
+ if (win < mbus->soc->num_remappable_wins) {
+ if (remap == MVEBU_MBUS_NO_REMAP)
+ remap_addr = base;
+ else
+ remap_addr = remap;
+ writel(remap_addr & WIN_REMAP_LOW, addr + WIN_REMAP_LO_OFF);
+ writel(0, addr + WIN_REMAP_HI_OFF);
+ }
+
+ return 0;
+}
+
+static int mvebu_mbus_alloc_window(struct mvebu_mbus_state *mbus,
+ phys_addr_t base, size_t size,
+ phys_addr_t remap, u8 target,
+ u8 attr)
+{
+ int win;
+
+ if (remap == MVEBU_MBUS_NO_REMAP) {
+ for (win = mbus->soc->num_remappable_wins;
+ win < mbus->soc->num_wins; win++)
+ if (mvebu_mbus_window_is_free(mbus, win))
+ return mvebu_mbus_setup_window(mbus, win, base,
+ size, remap,
+ target, attr);
+ }
+
+
+ for (win = 0; win < mbus->soc->num_wins; win++)
+ if (mvebu_mbus_window_is_free(mbus, win))
+ return mvebu_mbus_setup_window(mbus, win, base, size,
+ remap, target, attr);
+
+ return -ENOMEM;
+}
+
+/*
+ * Debugfs debugging
+ */
+
+/* Common function used for Kirkwood, Armada 370/XP, Orion 5x and MV78xx0 */
+static int mvebu_sdram_debug_show_orion(struct mvebu_mbus_state *mbus,
+ struct seq_file *seq, void *v)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ u32 basereg = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
+ u32 sizereg = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
+ u64 base;
+ u32 size;
+
+ if (!(sizereg & DDR_SIZE_ENABLED)) {
+ seq_printf(seq, "[%d] disabled\n", i);
+ continue;
+ }
+
+ base = ((u64)basereg & DDR_BASE_CS_HIGH_MASK) << 32;
+ base |= basereg & DDR_BASE_CS_LOW_MASK;
+ size = (sizereg | ~DDR_SIZE_MASK);
+
+ seq_printf(seq, "[%d] %016llx - %016llx : cs%d\n",
+ i, (unsigned long long)base,
+ (unsigned long long)base + size + 1,
+ (sizereg & DDR_SIZE_CS_MASK) >> DDR_SIZE_CS_SHIFT);
+ }
+
+ return 0;
+}
+
+/* Special function for Dove */
+static int mvebu_sdram_debug_show_dove(struct mvebu_mbus_state *mbus,
+ struct seq_file *seq, void *v)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i));
+ u64 base;
+ u32 size;
+
+ if (!(map & 1)) {
+ seq_printf(seq, "[%d] disabled\n", i);
+ continue;
+ }
+
+ base = map & 0xff800000;
+ size = 0x100000 << (((map & 0x000f0000) >> 16) - 4);
+
+ seq_printf(seq, "[%d] %016llx - %016llx : cs%d\n",
+ i, (unsigned long long)base,
+ (unsigned long long)base + size, i);
+ }
+
+ return 0;
+}
+
+static int mvebu_sdram_debug_show(struct seq_file *seq, void *v)
+{
+ struct mvebu_mbus_state *mbus = &mbus_state;
+ return mbus->soc->show_cpu_target(mbus, seq, v);
+}
+
+static int mvebu_sdram_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mvebu_sdram_debug_show, inode->i_private);
+}
+
+static const struct file_operations mvebu_sdram_debug_fops = {
+ .open = mvebu_sdram_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int mvebu_devs_debug_show(struct seq_file *seq, void *v)
+{
+ struct mvebu_mbus_state *mbus = &mbus_state;
+ int win;
+
+ for (win = 0; win < mbus->soc->num_wins; win++) {
+ u64 wbase, wremap;
+ u32 wsize;
+ u8 wtarget, wattr;
+ int enabled, i;
+ const char *name;
+
+ mvebu_mbus_read_window(mbus, win,
+ &enabled, &wbase, &wsize,
+ &wtarget, &wattr, &wremap);
+
+ if (!enabled) {
+ seq_printf(seq, "[%02d] disabled\n", win);
+ continue;
+ }
+
+
+ for (i = 0; mbus->soc->map[i].name; i++)
+ if (mbus->soc->map[i].target == wtarget &&
+ mbus->soc->map[i].attr ==
+ (wattr & mbus->soc->map[i].attrmask))
+ break;
+
+ name = mbus->soc->map[i].name ?: "unknown";
+
+ seq_printf(seq, "[%02d] %016llx - %016llx : %s",
+ win, (unsigned long long)wbase,
+ (unsigned long long)(wbase + wsize), name);
+
+ if (win < mbus->soc->num_remappable_wins) {
+ seq_printf(seq, " (remap %016llx)\n",
+ (unsigned long long)wremap);
+ } else
+ seq_printf(seq, "\n");
+ }
+
+ return 0;
+}
+
+static int mvebu_devs_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mvebu_devs_debug_show, inode->i_private);
+}
+
+static const struct file_operations mvebu_devs_debug_fops = {
+ .open = mvebu_devs_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*
+ * SoC-specific functions and definitions
+ */
+
+static unsigned int orion_mbus_win_offset(int win)
+{
+ return win << 4;
+}
+
+static unsigned int armada_370_xp_mbus_win_offset(int win)
+{
+ /* The register layout is a bit annoying and the below code
+ * tries to cope with it.
+ * - At offset 0x0, there are the registers for the first 8
+ * windows, with 4 registers of 32 bits per window (ctrl,
+ * base, remap low, remap high)
+ * - Then at offset 0x80, there is a hole of 0x10 bytes for
+ * the internal registers base address and internal units
+ * sync barrier register.
+ * - Then at offset 0x90, there are the registers for 12
+ * windows, with only 2 registers of 32 bits per window
+ * (ctrl, base).
+ */
+ if (win < 8)
+ return win << 4;
+ else
+ return 0x90 + ((win - 8) << 3);
+}
+
+static unsigned int mv78xx0_mbus_win_offset(int win)
+{
+ if (win < 8)
+ return win << 4;
+ else
+ return 0x900 + ((win - 8) << 4);
+}
+
+static void __init
+mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
+{
+ int i;
+ int cs;
+
+ mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
+
+ for (i = 0, cs = 0; i < 4; i++) {
+ u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
+ u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
+
+ /*
+ * We only take care of entries for which the chip
+ * select is enabled, and that don't have high base
+ * address bits set (devices can only access the lower
+ * 32 bits of the address space).
+ */
+ if ((size & DDR_SIZE_ENABLED) &&
+ !(base & DDR_BASE_CS_HIGH_MASK)) {
+ struct mbus_dram_window *w;
+
+ w = &mvebu_mbus_dram_info.cs[cs++];
+ w->cs_index = i;
+ w->mbus_attr = 0xf & ~(1 << i);
+ if (mbus->hw_io_coherency)
+ w->mbus_attr |= ATTR_HW_COHERENCY;
+ w->base = base & DDR_BASE_CS_LOW_MASK;
+ w->size = (size | ~DDR_SIZE_MASK) + 1;
+ }
+ }
+ mvebu_mbus_dram_info.num_cs = cs;
+}
+
+static void __init
+mvebu_mbus_dove_setup_cpu_target(struct mvebu_mbus_state *mbus)
+{
+ int i;
+ int cs;
+
+ mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
+
+ for (i = 0, cs = 0; i < 2; i++) {
+ u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i));
+
+ /*
+ * Chip select enabled?
+ */
+ if (map & 1) {
+ struct mbus_dram_window *w;
+
+ w = &mvebu_mbus_dram_info.cs[cs++];
+ w->cs_index = i;
+ w->mbus_attr = 0; /* CS address decoding done inside */
+ /* the DDR controller, no need to */
+ /* provide attributes */
+ w->base = map & 0xff800000;
+ w->size = 0x100000 << (((map & 0x000f0000) >> 16) - 4);
+ }
+ }
+
+ mvebu_mbus_dram_info.num_cs = cs;
+}
+
+static const struct mvebu_mbus_mapping armada_370_map[] = {
+ MAPDEF("bootrom", 1, 0xe0, MAPDEF_NOMASK),
+ MAPDEF("devbus-boot", 1, 0x2f, MAPDEF_NOMASK),
+ MAPDEF("devbus-cs0", 1, 0x3e, MAPDEF_NOMASK),
+ MAPDEF("devbus-cs1", 1, 0x3d, MAPDEF_NOMASK),
+ MAPDEF("devbus-cs2", 1, 0x3b, MAPDEF_NOMASK),
+ MAPDEF("devbus-cs3", 1, 0x37, MAPDEF_NOMASK),
+ MAPDEF("pcie0.0", 4, 0xe0, MAPDEF_PCIMASK),
+ MAPDEF("pcie1.0", 8, 0xe0, MAPDEF_PCIMASK),
+ {},
+};
+
+static const struct mvebu_mbus_soc_data armada_370_mbus_data = {
+ .num_wins = 20,
+ .num_remappable_wins = 8,
+ .win_cfg_offset = armada_370_xp_mbus_win_offset,
+ .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_orion,
+ .map = armada_370_map,
+};
+
+static const struct mvebu_mbus_mapping armada_xp_map[] = {
+ MAPDEF("bootrom", 1, 0x1d, MAPDEF_NOMASK),
+ MAPDEF("devbus-boot", 1, 0x2f, MAPDEF_NOMASK),
+ MAPDEF("devbus-cs0", 1, 0x3e, MAPDEF_NOMASK),
+ MAPDEF("devbus-cs1", 1, 0x3d, MAPDEF_NOMASK),
+ MAPDEF("devbus-cs2", 1, 0x3b, MAPDEF_NOMASK),
+ MAPDEF("devbus-cs3", 1, 0x37, MAPDEF_NOMASK),
+ MAPDEF("pcie0.0", 4, 0xe0, MAPDEF_PCIMASK),
+ MAPDEF("pcie0.1", 4, 0xd0, MAPDEF_PCIMASK),
+ MAPDEF("pcie0.2", 4, 0xb0, MAPDEF_PCIMASK),
+ MAPDEF("pcie0.3", 4, 0x70, MAPDEF_PCIMASK),
+ MAPDEF("pcie1.0", 8, 0xe0, MAPDEF_PCIMASK),
+ MAPDEF("pcie1.1", 8, 0xd0, MAPDEF_PCIMASK),
+ MAPDEF("pcie1.2", 8, 0xb0, MAPDEF_PCIMASK),
+ MAPDEF("pcie1.3", 8, 0x70, MAPDEF_PCIMASK),
+ MAPDEF("pcie2.0", 4, 0xf0, MAPDEF_PCIMASK),
+ MAPDEF("pcie3.0", 8, 0xf0, MAPDEF_PCIMASK),
+ {},
+};
+
+static const struct mvebu_mbus_soc_data armada_xp_mbus_data = {
+ .num_wins = 20,
+ .num_remappable_wins = 8,
+ .win_cfg_offset = armada_370_xp_mbus_win_offset,
+ .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_orion,
+ .map = armada_xp_map,
+};
+
+static const struct mvebu_mbus_mapping kirkwood_map[] = {
+ MAPDEF("pcie0.0", 4, 0xe0, MAPDEF_PCIMASK),
+ MAPDEF("pcie1.0", 4, 0xd0, MAPDEF_PCIMASK),
+ MAPDEF("sram", 3, 0x01, MAPDEF_NOMASK),
+ MAPDEF("nand", 1, 0x2f, MAPDEF_NOMASK),
+ {},
+};
+
+static const struct mvebu_mbus_soc_data kirkwood_mbus_data = {
+ .num_wins = 8,
+ .num_remappable_wins = 4,
+ .win_cfg_offset = orion_mbus_win_offset,
+ .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_orion,
+ .map = kirkwood_map,
+};
+
+static const struct mvebu_mbus_mapping dove_map[] = {
+ MAPDEF("pcie0.0", 0x4, 0xe0, MAPDEF_PCIMASK),
+ MAPDEF("pcie1.0", 0x8, 0xe0, MAPDEF_PCIMASK),
+ MAPDEF("cesa", 0x3, 0x01, MAPDEF_NOMASK),
+ MAPDEF("bootrom", 0x1, 0xfd, MAPDEF_NOMASK),
+ MAPDEF("scratchpad", 0xd, 0x0, MAPDEF_NOMASK),
+ {},
+};
+
+static const struct mvebu_mbus_soc_data dove_mbus_data = {
+ .num_wins = 8,
+ .num_remappable_wins = 4,
+ .win_cfg_offset = orion_mbus_win_offset,
+ .setup_cpu_target = mvebu_mbus_dove_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_dove,
+ .map = dove_map,
+};
+
+static const struct mvebu_mbus_mapping orion5x_map[] = {
+ MAPDEF("pcie0.0", 4, 0x51, MAPDEF_ORIONPCIMASK),
+ MAPDEF("pci0.0", 3, 0x51, MAPDEF_ORIONPCIMASK),
+ MAPDEF("devbus-boot", 1, 0x0f, MAPDEF_NOMASK),
+ MAPDEF("devbus-cs0", 1, 0x1e, MAPDEF_NOMASK),
+ MAPDEF("devbus-cs1", 1, 0x1d, MAPDEF_NOMASK),
+ MAPDEF("devbus-cs2", 1, 0x1b, MAPDEF_NOMASK),
+ MAPDEF("sram", 0, 0x00, MAPDEF_NOMASK),
+ {},
+};
+
+/*
+ * Some variants of Orion5x have 4 remappable windows, some other have
+ * only two of them.
+ */
+static const struct mvebu_mbus_soc_data orion5x_4win_mbus_data = {
+ .num_wins = 8,
+ .num_remappable_wins = 4,
+ .win_cfg_offset = orion_mbus_win_offset,
+ .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_orion,
+ .map = orion5x_map,
+};
+
+static const struct mvebu_mbus_soc_data orion5x_2win_mbus_data = {
+ .num_wins = 8,
+ .num_remappable_wins = 2,
+ .win_cfg_offset = orion_mbus_win_offset,
+ .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_orion,
+ .map = orion5x_map,
+};
+
+static const struct mvebu_mbus_mapping mv78xx0_map[] = {
+ MAPDEF("pcie0.0", 4, 0xe0, MAPDEF_PCIMASK),
+ MAPDEF("pcie0.1", 4, 0xd0, MAPDEF_PCIMASK),
+ MAPDEF("pcie0.2", 4, 0xb0, MAPDEF_PCIMASK),
+ MAPDEF("pcie0.3", 4, 0x70, MAPDEF_PCIMASK),
+ MAPDEF("pcie1.0", 8, 0xe0, MAPDEF_PCIMASK),
+ MAPDEF("pcie1.1", 8, 0xd0, MAPDEF_PCIMASK),
+ MAPDEF("pcie1.2", 8, 0xb0, MAPDEF_PCIMASK),
+ MAPDEF("pcie1.3", 8, 0x70, MAPDEF_PCIMASK),
+ MAPDEF("pcie2.0", 4, 0xf0, MAPDEF_PCIMASK),
+ MAPDEF("pcie3.0", 8, 0xf0, MAPDEF_PCIMASK),
+ {},
+};
+
+static const struct mvebu_mbus_soc_data mv78xx0_mbus_data = {
+ .num_wins = 14,
+ .num_remappable_wins = 8,
+ .win_cfg_offset = mv78xx0_mbus_win_offset,
+ .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_orion,
+ .map = mv78xx0_map,
+};
+
+/*
+ * The driver doesn't yet have a DT binding because the details of
+ * the binding still need to be sorted out. However, as a
+ * preparation, we already use of_device_id to match a SoC description
+ * string against the SoC specific details of this driver.
+ */
+static const struct of_device_id of_mvebu_mbus_ids[] = {
+ { .compatible = "marvell,armada370-mbus",
+ .data = &armada_370_mbus_data, },
+ { .compatible = "marvell,armadaxp-mbus",
+ .data = &armada_xp_mbus_data, },
+ { .compatible = "marvell,kirkwood-mbus",
+ .data = &kirkwood_mbus_data, },
+ { .compatible = "marvell,dove-mbus",
+ .data = &dove_mbus_data, },
+ { .compatible = "marvell,orion5x-88f5281-mbus",
+ .data = &orion5x_4win_mbus_data, },
+ { .compatible = "marvell,orion5x-88f5182-mbus",
+ .data = &orion5x_2win_mbus_data, },
+ { .compatible = "marvell,orion5x-88f5181-mbus",
+ .data = &orion5x_2win_mbus_data, },
+ { .compatible = "marvell,orion5x-88f6183-mbus",
+ .data = &orion5x_4win_mbus_data, },
+ { .compatible = "marvell,mv78xx0-mbus",
+ .data = &mv78xx0_mbus_data, },
+ { },
+};
+
+/*
+ * Public API of the driver
+ */
+int mvebu_mbus_add_window_remap_flags(const char *devname, phys_addr_t base,
+ size_t size, phys_addr_t remap,
+ unsigned int flags)
+{
+ struct mvebu_mbus_state *s = &mbus_state;
+ u8 target, attr;
+ int i;
+
+ if (!s->soc->map)
+ return -ENODEV;
+
+ for (i = 0; s->soc->map[i].name; i++)
+ if (!strcmp(s->soc->map[i].name, devname))
+ break;
+
+ if (!s->soc->map[i].name) {
+ pr_err("mvebu-mbus: unknown device '%s'\n", devname);
+ return -ENODEV;
+ }
+
+ target = s->soc->map[i].target;
+ attr = s->soc->map[i].attr;
+
+ if (flags == MVEBU_MBUS_PCI_MEM)
+ attr |= 0x8;
+ else if (flags == MVEBU_MBUS_PCI_WA)
+ attr |= 0x28;
+
+ if (!mvebu_mbus_window_conflicts(s, base, size, target, attr)) {
+ pr_err("mvebu-mbus: cannot add window '%s', conflicts with another window\n",
+ devname);
+ return -EINVAL;
+ }
+
+ return mvebu_mbus_alloc_window(s, base, size, remap, target, attr);
+
+}
+
+int mvebu_mbus_add_window(const char *devname, phys_addr_t base, size_t size)
+{
+ return mvebu_mbus_add_window_remap_flags(devname, base, size,
+ MVEBU_MBUS_NO_REMAP, 0);
+}
+
+int mvebu_mbus_del_window(phys_addr_t base, size_t size)
+{
+ int win;
+
+ win = mvebu_mbus_find_window(&mbus_state, base, size);
+ if (win < 0)
+ return win;
+
+ mvebu_mbus_disable_window(&mbus_state, win);
+ return 0;
+}
+
+static __init int mvebu_mbus_debugfs_init(void)
+{
+ struct mvebu_mbus_state *s = &mbus_state;
+
+ /*
+ * If no base has been initialized, it doesn't make sense to
+ * register the debugfs entries. We may be on a multiplatform
+ * kernel that isn't running a Marvell EBU SoC.
+ */
+ if (!s->mbuswins_base)
+ return 0;
+
+ s->debugfs_root = debugfs_create_dir("mvebu-mbus", NULL);
+ if (s->debugfs_root) {
+ s->debugfs_sdram = debugfs_create_file("sdram", S_IRUGO,
+ s->debugfs_root, NULL,
+ &mvebu_sdram_debug_fops);
+ s->debugfs_devs = debugfs_create_file("devices", S_IRUGO,
+ s->debugfs_root, NULL,
+ &mvebu_devs_debug_fops);
+ }
+
+ return 0;
+}
+fs_initcall(mvebu_mbus_debugfs_init);
+
+int __init mvebu_mbus_init(const char *soc, phys_addr_t mbuswins_phys_base,
+ size_t mbuswins_size,
+ phys_addr_t sdramwins_phys_base,
+ size_t sdramwins_size)
+{
+ struct mvebu_mbus_state *mbus = &mbus_state;
+ const struct of_device_id *of_id;
+ int win;
+
+ for (of_id = of_mvebu_mbus_ids; of_id->compatible; of_id++)
+ if (!strcmp(of_id->compatible, soc))
+ break;
+
+ if (!of_id->compatible) {
+ pr_err("mvebu-mbus: could not find a matching SoC family\n");
+ return -ENODEV;
+ }
+
+ mbus->soc = of_id->data;
+
+ mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size);
+ if (!mbus->mbuswins_base)
+ return -ENOMEM;
+
+ mbus->sdramwins_base = ioremap(sdramwins_phys_base, sdramwins_size);
+ if (!mbus->sdramwins_base) {
+ iounmap(mbus_state.mbuswins_base);
+ return -ENOMEM;
+ }
+
+ if (of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric"))
+ mbus->hw_io_coherency = 1;
+
+ for (win = 0; win < mbus->soc->num_wins; win++)
+ mvebu_mbus_disable_window(mbus, win);
+
+ mbus->soc->setup_cpu_target(mbus);
+
+ return 0;
+}
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index d59cdcb..4afcb65 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -503,12 +503,11 @@ static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
return ret;
}
-static int gdrom_bdops_release(struct gendisk *disk, fmode_t mode)
+static void gdrom_bdops_release(struct gendisk *disk, fmode_t mode)
{
mutex_lock(&gdrom_mutex);
cdrom_release(gd.cd_info, mode);
mutex_unlock(&gdrom_mutex);
- return 0;
}
static unsigned int gdrom_bdops_check_events(struct gendisk *disk,
diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c
index 24ffd8c..0fae529 100644
--- a/drivers/char/ds1620.c
+++ b/drivers/char/ds1620.c
@@ -6,6 +6,7 @@
#include <linux/miscdevice.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/mutex.h>
@@ -329,9 +330,7 @@ ds1620_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
#ifdef THERM_USE_PROC
-static int
-proc_therm_ds1620_read(char *buf, char **start, off_t offset,
- int len, int *eof, void *unused)
+static int ds1620_proc_therm_show(struct seq_file *m, void *v)
{
struct therm th;
int temp;
@@ -339,17 +338,25 @@ proc_therm_ds1620_read(char *buf, char **start, off_t offset,
ds1620_read_state(&th);
temp = cvt_9_to_int(ds1620_in(THERM_READ_TEMP, 9));
- len = sprintf(buf, "Thermostat: HI %i.%i, LOW %i.%i; "
- "temperature: %i.%i C, fan %s\n",
- th.hi >> 1, th.hi & 1 ? 5 : 0,
- th.lo >> 1, th.lo & 1 ? 5 : 0,
- temp >> 1, temp & 1 ? 5 : 0,
- fan_state[netwinder_get_fan()]);
+ seq_printf(m, "Thermostat: HI %i.%i, LOW %i.%i; temperature: %i.%i C, fan %s\n",
+ th.hi >> 1, th.hi & 1 ? 5 : 0,
+ th.lo >> 1, th.lo & 1 ? 5 : 0,
+ temp >> 1, temp & 1 ? 5 : 0,
+ fan_state[netwinder_get_fan()]);
+ return 0;
+}
- return len;
+static int ds1620_proc_therm_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ds1620_proc_therm_show, NULL);
}
-static struct proc_dir_entry *proc_therm_ds1620;
+static const struct file_operations ds1620_proc_therm_fops = {
+ .open = ds1620_proc_therm_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#endif
static const struct file_operations ds1620_fops = {
@@ -397,10 +404,7 @@ static int __init ds1620_init(void)
return ret;
#ifdef THERM_USE_PROC
- proc_therm_ds1620 = create_proc_entry("therm", 0, NULL);
- if (proc_therm_ds1620)
- proc_therm_ds1620->read_proc = proc_therm_ds1620_read;
- else
+ if (!proc_create("therm", 0, NULL, &ds1620_proc_therm_fops))
printk(KERN_ERR "therm: unable to register /proc/therm\n");
#endif
diff --git a/drivers/char/efirtc.c b/drivers/char/efirtc.c
index a082d00..e39e740 100644
--- a/drivers/char/efirtc.c
+++ b/drivers/char/efirtc.c
@@ -34,6 +34,7 @@
#include <linux/init.h>
#include <linux/rtc.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/efi.h>
#include <linux/uaccess.h>
@@ -296,12 +297,10 @@ static struct miscdevice efi_rtc_dev= {
/*
* We export RAW EFI information to /proc/driver/efirtc
*/
-static int
-efi_rtc_get_status(char *buf)
+static int efi_rtc_proc_show(struct seq_file *m, void *v)
{
efi_time_t eft, alm;
efi_time_cap_t cap;
- char *p = buf;
efi_bool_t enabled, pending;
unsigned long flags;
@@ -316,64 +315,63 @@ efi_rtc_get_status(char *buf)
spin_unlock_irqrestore(&efi_rtc_lock,flags);
- p += sprintf(p,
- "Time : %u:%u:%u.%09u\n"
- "Date : %u-%u-%u\n"
- "Daylight : %u\n",
- eft.hour, eft.minute, eft.second, eft.nanosecond,
- eft.year, eft.month, eft.day,
- eft.daylight);
+ seq_printf(m,
+ "Time : %u:%u:%u.%09u\n"
+ "Date : %u-%u-%u\n"
+ "Daylight : %u\n",
+ eft.hour, eft.minute, eft.second, eft.nanosecond,
+ eft.year, eft.month, eft.day,
+ eft.daylight);
if (eft.timezone == EFI_UNSPECIFIED_TIMEZONE)
- p += sprintf(p, "Timezone : unspecified\n");
+ seq_puts(m, "Timezone : unspecified\n");
else
/* XXX fixme: convert to string? */
- p += sprintf(p, "Timezone : %u\n", eft.timezone);
+ seq_printf(m, "Timezone : %u\n", eft.timezone);
- p += sprintf(p,
- "Alarm Time : %u:%u:%u.%09u\n"
- "Alarm Date : %u-%u-%u\n"
- "Alarm Daylight : %u\n"
- "Enabled : %s\n"
- "Pending : %s\n",
- alm.hour, alm.minute, alm.second, alm.nanosecond,
- alm.year, alm.month, alm.day,
- alm.daylight,
- enabled == 1 ? "yes" : "no",
- pending == 1 ? "yes" : "no");
+ seq_printf(m,
+ "Alarm Time : %u:%u:%u.%09u\n"
+ "Alarm Date : %u-%u-%u\n"
+ "Alarm Daylight : %u\n"
+ "Enabled : %s\n"
+ "Pending : %s\n",
+ alm.hour, alm.minute, alm.second, alm.nanosecond,
+ alm.year, alm.month, alm.day,
+ alm.daylight,
+ enabled == 1 ? "yes" : "no",
+ pending == 1 ? "yes" : "no");
if (eft.timezone == EFI_UNSPECIFIED_TIMEZONE)
- p += sprintf(p, "Timezone : unspecified\n");
+ seq_puts(m, "Timezone : unspecified\n");
else
/* XXX fixme: convert to string? */
- p += sprintf(p, "Timezone : %u\n", alm.timezone);
+ seq_printf(m, "Timezone : %u\n", alm.timezone);
/*
* now prints the capabilities
*/
- p += sprintf(p,
- "Resolution : %u\n"
- "Accuracy : %u\n"
- "SetstoZero : %u\n",
- cap.resolution, cap.accuracy, cap.sets_to_zero);
+ seq_printf(m,
+ "Resolution : %u\n"
+ "Accuracy : %u\n"
+ "SetstoZero : %u\n",
+ cap.resolution, cap.accuracy, cap.sets_to_zero);
- return p - buf;
+ return 0;
}
-static int
-efi_rtc_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int efi_rtc_proc_open(struct inode *inode, struct file *file)
{
- int len = efi_rtc_get_status(page);
- if (len <= off+count) *eof = 1;
- *start = page + off;
- len -= off;
- if (len>count) len = count;
- if (len<0) len = 0;
- return len;
+ return single_open(file, efi_rtc_proc_show, NULL);
}
+static const struct file_operations efi_rtc_proc_fops = {
+ .open = efi_rtc_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int __init
efi_rtc_init(void)
{
@@ -389,8 +387,7 @@ efi_rtc_init(void)
return ret;
}
- dir = create_proc_read_entry ("driver/efirtc", 0, NULL,
- efi_rtc_read_proc, NULL);
+ dir = proc_create("driver/efirtc", 0, NULL, &efi_rtc_proc_fops);
if (dir == NULL) {
printk(KERN_ERR "efirtc: can't create /proc/driver/efirtc.\n");
misc_deregister(&efi_rtc_dev);
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index 21cb980..4f94375 100644
--- a/drivers/char/genrtc.c
+++ b/drivers/char/genrtc.c
@@ -52,6 +52,7 @@
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
@@ -386,18 +387,15 @@ static int gen_rtc_release(struct inode *inode, struct file *file)
* Info exported via "/proc/driver/rtc".
*/
-static int gen_rtc_proc_output(char *buf)
+static int gen_rtc_proc_show(struct seq_file *m, void *v)
{
- char *p;
struct rtc_time tm;
unsigned int flags;
struct rtc_pll_info pll;
- p = buf;
-
flags = get_rtc_time(&tm);
- p += sprintf(p,
+ seq_printf(m,
"rtc_time\t: %02d:%02d:%02d\n"
"rtc_date\t: %04d-%02d-%02d\n"
"rtc_epoch\t: %04u\n",
@@ -406,23 +404,23 @@ static int gen_rtc_proc_output(char *buf)
tm.tm_hour = tm.tm_min = tm.tm_sec = 0;
- p += sprintf(p, "alarm\t\t: ");
+ seq_puts(m, "alarm\t\t: ");
if (tm.tm_hour <= 24)
- p += sprintf(p, "%02d:", tm.tm_hour);
+ seq_printf(m, "%02d:", tm.tm_hour);
else
- p += sprintf(p, "**:");
+ seq_puts(m, "**:");
if (tm.tm_min <= 59)
- p += sprintf(p, "%02d:", tm.tm_min);
+ seq_printf(m, "%02d:", tm.tm_min);
else
- p += sprintf(p, "**:");
+ seq_puts(m, "**:");
if (tm.tm_sec <= 59)
- p += sprintf(p, "%02d\n", tm.tm_sec);
+ seq_printf(m, "%02d\n", tm.tm_sec);
else
- p += sprintf(p, "**\n");
+ seq_puts(m, "**\n");
- p += sprintf(p,
+ seq_printf(m,
"DST_enable\t: %s\n"
"BCD\t\t: %s\n"
"24hr\t\t: %s\n"
@@ -442,7 +440,7 @@ static int gen_rtc_proc_output(char *buf)
0L /* freq */,
(flags & RTC_BATT_BAD) ? "bad" : "okay");
if (!get_rtc_pll(&pll))
- p += sprintf(p,
+ seq_printf(m,
"PLL adjustment\t: %d\n"
"PLL max +ve adjustment\t: %d\n"
"PLL max -ve adjustment\t: %d\n"
@@ -455,26 +453,26 @@ static int gen_rtc_proc_output(char *buf)
pll.pll_posmult,
pll.pll_negmult,
pll.pll_clock);
- return p - buf;
+ return 0;
}
-static int gen_rtc_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int gen_rtc_proc_open(struct inode *inode, struct file *file)
{
- int len = gen_rtc_proc_output (page);
- if (len <= off+count) *eof = 1;
- *start = page + off;
- len -= off;
- if (len>count) len = count;
- if (len<0) len = 0;
- return len;
+ return single_open(file, gen_rtc_proc_show, NULL);
}
+static const struct file_operations gen_rtc_proc_fops = {
+ .open = gen_rtc_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int __init gen_rtc_proc_init(void)
{
struct proc_dir_entry *r;
- r = create_proc_read_entry("driver/rtc", 0, NULL, gen_rtc_read_proc, NULL);
+ r = proc_create("driver/rtc", 0, NULL, &gen_rtc_proc_fops);
if (!r)
return -ENOMEM;
return 0;
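
The ds1620, efirtc and genrtc hunks above are the same conversion: the legacy read_proc callback, which had to manage the output buffer and the *start/*eof bookkeeping by hand, becomes a seq_file show() routine that only prints, with single_open() and proc_create() supplying the rest. A minimal sketch of the resulting boilerplate (names hypothetical):

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "value\t: %d\n", 42);	/* just print; no buffer handling */
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_proc_show, NULL);
}

static const struct file_operations example_proc_fops = {
	.open		= example_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* registered at init time with:
 *	proc_create("driver/example", 0, NULL, &example_proc_fops);
 */
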
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index c5a0262..2f9dbf7 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -86,6 +86,18 @@ config HW_RANDOM_BCM63XX
If unsure, say Y.
+config HW_RANDOM_BCM2835
+ tristate "Broadcom BCM2835 Random Number Generator support"
+ depends on HW_RANDOM && ARCH_BCM2835
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on the Broadcom BCM2835 SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bcm2835-rng.
+
+ If unsure, say Y.
config HW_RANDOM_GEODE
tristate "AMD Geode HW Random Number Generator support"
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 1fd7eec..bed467c 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -26,3 +26,4 @@ obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o
obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
+obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
new file mode 100644
index 0000000..eb7f147
--- /dev/null
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -0,0 +1,113 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ * Copyright (c) 2013 Lubomir Rintel
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License ("GPL")
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/hw_random.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+
+#define RNG_CTRL 0x0
+#define RNG_STATUS 0x4
+#define RNG_DATA 0x8
+
+/* enable rng */
+#define RNG_RBGEN 0x1
+
+/* the initial numbers generated are "less random" so they will be discarded */
+#define RNG_WARMUP_COUNT 0x40000
+
+static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
+ bool wait)
+{
+ void __iomem *rng_base = (void __iomem *)rng->priv;
+
+ while ((__raw_readl(rng_base + RNG_STATUS) >> 24) == 0) {
+ if (!wait)
+ return 0;
+ cpu_relax();
+ }
+
+ *(u32 *)buf = __raw_readl(rng_base + RNG_DATA);
+ return sizeof(u32);
+}
+
+static struct hwrng bcm2835_rng_ops = {
+ .name = "bcm2835",
+ .read = bcm2835_rng_read,
+};
+
+static int bcm2835_rng_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ void __iomem *rng_base;
+ int err;
+
+ /* map peripheral */
+ rng_base = of_iomap(np, 0);
+ if (!rng_base) {
+ dev_err(dev, "failed to remap rng regs");
+ return -ENODEV;
+ }
+ bcm2835_rng_ops.priv = (unsigned long)rng_base;
+
+ /* register driver */
+ err = hwrng_register(&bcm2835_rng_ops);
+ if (err) {
+ dev_err(dev, "hwrng registration failed\n");
+ iounmap(rng_base);
+ } else {
+ dev_info(dev, "hwrng registered\n");
+
+ /* set warm-up count & enable */
+ __raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS);
+ __raw_writel(RNG_RBGEN, rng_base + RNG_CTRL);
+ }
+ return err;
+}
+
+static int bcm2835_rng_remove(struct platform_device *pdev)
+{
+ void __iomem *rng_base = (void __iomem *)bcm2835_rng_ops.priv;
+
+ /* disable rng hardware */
+ __raw_writel(0, rng_base + RNG_CTRL);
+
+ /* unregister driver */
+ hwrng_unregister(&bcm2835_rng_ops);
+ iounmap(rng_base);
+
+ return 0;
+}
+
+static const struct of_device_id bcm2835_rng_of_match[] = {
+ { .compatible = "brcm,bcm2835-rng", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);
+
+static struct platform_driver bcm2835_rng_driver = {
+ .driver = {
+ .name = "bcm2835-rng",
+ .owner = THIS_MODULE,
+ .of_match_table = bcm2835_rng_of_match,
+ },
+ .probe = bcm2835_rng_probe,
+ .remove = bcm2835_rng_remove,
+};
+module_platform_driver(bcm2835_rng_driver);
+
+MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
+MODULE_DESCRIPTION("BCM2835 Random Number Generator (RNG) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index ac47631..402ccfb 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -144,6 +144,7 @@ static int exynos_rng_remove(struct platform_device *pdev)
return 0;
}
+#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_RUNTIME)
static int exynos_rng_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -161,7 +162,7 @@ static int exynos_rng_runtime_resume(struct device *dev)
return clk_prepare_enable(exynos_rng->clk);
}
-
+#endif
static UNIVERSAL_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_runtime_suspend,
exynos_rng_runtime_resume, NULL);
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c
index 895d0b8..4ca35e8 100644
--- a/drivers/char/hw_random/mxc-rnga.c
+++ b/drivers/char/hw_random/mxc-rnga.c
@@ -142,7 +142,7 @@ static void mxc_rnga_cleanup(struct hwrng *rng)
static int __init mxc_rnga_probe(struct platform_device *pdev)
{
int err = -ENODEV;
- struct resource *res, *mem;
+ struct resource *res;
struct mxc_rng *mxc_rng;
mxc_rng = devm_kzalloc(&pdev->dev, sizeof(struct mxc_rng),
@@ -172,15 +172,9 @@ static int __init mxc_rnga_probe(struct platform_device *pdev)
goto err_region;
}
- mem = request_mem_region(res->start, resource_size(res), pdev->name);
- if (mem == NULL) {
- err = -EBUSY;
- goto err_region;
- }
-
- mxc_rng->mem = ioremap(res->start, resource_size(res));
- if (!mxc_rng->mem) {
- err = -ENOMEM;
+ mxc_rng->mem = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mxc_rng->mem)) {
+ err = PTR_ERR(mxc_rng->mem);
goto err_ioremap;
}
@@ -195,8 +189,6 @@ static int __init mxc_rnga_probe(struct platform_device *pdev)
return 0;
err_ioremap:
- release_mem_region(res->start, resource_size(res));
-
err_region:
clk_disable_unprepare(mxc_rng->clk);
@@ -206,15 +198,10 @@ out:
static int __exit mxc_rnga_remove(struct platform_device *pdev)
{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct mxc_rng *mxc_rng = platform_get_drvdata(pdev);
hwrng_unregister(&mxc_rng->rng);
- iounmap(mxc_rng->mem);
-
- release_mem_region(res->start, resource_size(res));
-
clk_disable_unprepare(mxc_rng->clk);
return 0;
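
The mxc-rnga hunks above replace the request_mem_region()/ioremap() pair with devm_ioremap_resource(), which both claims and maps the region and releases it automatically when the device is unbound; that is why the remove path drops its iounmap() and release_mem_region() calls. A minimal sketch of the idiom (function name made up):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* also covers a missing resource */

	/* use 'base'; no explicit iounmap()/release_mem_region() on remove */
	return 0;
}
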
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index 849db19..3e75737 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -23,127 +23,209 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <linux/hw_random.h>
#include <linux/io.h>
+#include <linux/slab.h>
#include <linux/timeriomem-rng.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/completion.h>
-static struct timeriomem_rng_data *timeriomem_rng_data;
+struct timeriomem_rng_private_data {
+ void __iomem *io_base;
+ unsigned int expires;
+ unsigned int period;
+ unsigned int present:1;
-static void timeriomem_rng_trigger(unsigned long);
-static DEFINE_TIMER(timeriomem_rng_timer, timeriomem_rng_trigger, 0, 0);
+ struct timer_list timer;
+ struct completion completion;
+
+ struct hwrng timeriomem_rng_ops;
+};
+
+#define to_rng_priv(rng) \
+ ((struct timeriomem_rng_private_data *)rng->priv)
/*
* return 1 if we have data, otherwise return 0
*/
static int timeriomem_rng_data_present(struct hwrng *rng, int wait)
{
- if (rng->priv == 0)
- return 1;
+ struct timeriomem_rng_private_data *priv = to_rng_priv(rng);
- if (!wait || timeriomem_rng_data->present)
- return timeriomem_rng_data->present;
+ if (!wait || priv->present)
+ return priv->present;
- wait_for_completion(&timeriomem_rng_data->completion);
+ wait_for_completion(&priv->completion);
return 1;
}
static int timeriomem_rng_data_read(struct hwrng *rng, u32 *data)
{
+ struct timeriomem_rng_private_data *priv = to_rng_priv(rng);
unsigned long cur;
s32 delay;
- *data = readl(timeriomem_rng_data->address);
+ *data = readl(priv->io_base);
- if (rng->priv != 0) {
- cur = jiffies;
+ cur = jiffies;
- delay = cur - timeriomem_rng_timer.expires;
- delay = rng->priv - (delay % rng->priv);
+ delay = cur - priv->expires;
+ delay = priv->period - (delay % priv->period);
- timeriomem_rng_timer.expires = cur + delay;
- timeriomem_rng_data->present = 0;
+ priv->expires = cur + delay;
+ priv->present = 0;
- init_completion(&timeriomem_rng_data->completion);
- add_timer(&timeriomem_rng_timer);
- }
+ INIT_COMPLETION(priv->completion);
+ mod_timer(&priv->timer, priv->expires);
return 4;
}
-static void timeriomem_rng_trigger(unsigned long dummy)
+static void timeriomem_rng_trigger(unsigned long data)
{
- timeriomem_rng_data->present = 1;
- complete(&timeriomem_rng_data->completion);
-}
+ struct timeriomem_rng_private_data *priv
+ = (struct timeriomem_rng_private_data *)data;
-static struct hwrng timeriomem_rng_ops = {
- .name = "timeriomem",
- .data_present = timeriomem_rng_data_present,
- .data_read = timeriomem_rng_data_read,
- .priv = 0,
-};
+ priv->present = 1;
+ complete(&priv->completion);
+}
static int timeriomem_rng_probe(struct platform_device *pdev)
{
+ struct timeriomem_rng_data *pdata = pdev->dev.platform_data;
+ struct timeriomem_rng_private_data *priv;
struct resource *res;
- int ret;
+ int err = 0;
+ int period;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!pdev->dev.of_node && !pdata) {
+ dev_err(&pdev->dev, "timeriomem_rng_data is missing\n");
+ return -EINVAL;
+ }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
- return -ENOENT;
+ return -ENXIO;
- timeriomem_rng_data = pdev->dev.platform_data;
+ if (res->start % 4 != 0 || resource_size(res) != 4) {
+ dev_err(&pdev->dev,
+ "address must be four bytes wide and aligned\n");
+ return -EINVAL;
+ }
- timeriomem_rng_data->address = ioremap(res->start, resource_size(res));
- if (!timeriomem_rng_data->address)
- return -EIO;
+ /* Allocate memory for the device structure (and zero it) */
+ priv = kzalloc(sizeof(struct timeriomem_rng_private_data), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&pdev->dev, "failed to allocate device structure.\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ if (pdev->dev.of_node) {
+ int i;
+
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "period", &i))
+ period = i;
+ else {
+ dev_err(&pdev->dev, "missing period\n");
+ err = -EINVAL;
+ goto out_free;
+ }
+ } else
+ period = pdata->period;
+
+ priv->period = usecs_to_jiffies(period);
+ if (priv->period < 1) {
+ dev_err(&pdev->dev, "period is less than one jiffy\n");
+ err = -EINVAL;
+ goto out_free;
+ }
- if (timeriomem_rng_data->period != 0
- && usecs_to_jiffies(timeriomem_rng_data->period) > 0) {
- timeriomem_rng_timer.expires = jiffies;
+ priv->expires = jiffies;
+ priv->present = 1;
- timeriomem_rng_ops.priv = usecs_to_jiffies(
- timeriomem_rng_data->period);
+ init_completion(&priv->completion);
+ complete(&priv->completion);
+
+ setup_timer(&priv->timer, timeriomem_rng_trigger, (unsigned long)priv);
+
+ priv->timeriomem_rng_ops.name = dev_name(&pdev->dev);
+ priv->timeriomem_rng_ops.data_present = timeriomem_rng_data_present;
+ priv->timeriomem_rng_ops.data_read = timeriomem_rng_data_read;
+ priv->timeriomem_rng_ops.priv = (unsigned long)priv;
+
+ if (!request_mem_region(res->start, resource_size(res),
+ dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "request_mem_region failed\n");
+ err = -EBUSY;
+ goto out_timer;
}
- timeriomem_rng_data->present = 1;
- ret = hwrng_register(&timeriomem_rng_ops);
- if (ret)
- goto failed;
+ priv->io_base = ioremap(res->start, resource_size(res));
+ if (priv->io_base == NULL) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ err = -EIO;
+ goto out_release_io;
+ }
+
+ err = hwrng_register(&priv->timeriomem_rng_ops);
+ if (err) {
+ dev_err(&pdev->dev, "problem registering\n");
+ goto out;
+ }
dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n",
- timeriomem_rng_data->address,
- timeriomem_rng_data->period);
+ priv->io_base, period);
return 0;
-failed:
- dev_err(&pdev->dev, "problem registering\n");
- iounmap(timeriomem_rng_data->address);
-
- return ret;
+out:
+ iounmap(priv->io_base);
+out_release_io:
+ release_mem_region(res->start, resource_size(res));
+out_timer:
+ del_timer_sync(&priv->timer);
+out_free:
+ platform_set_drvdata(pdev, NULL);
+ kfree(priv);
+ return err;
}
static int timeriomem_rng_remove(struct platform_device *pdev)
{
- del_timer_sync(&timeriomem_rng_timer);
- hwrng_unregister(&timeriomem_rng_ops);
+ struct timeriomem_rng_private_data *priv = platform_get_drvdata(pdev);
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iounmap(timeriomem_rng_data->address);
+ hwrng_unregister(&priv->timeriomem_rng_ops);
+
+ del_timer_sync(&priv->timer);
+ iounmap(priv->io_base);
+ release_mem_region(res->start, resource_size(res));
+ platform_set_drvdata(pdev, NULL);
+ kfree(priv);
return 0;
}
+static const struct of_device_id timeriomem_rng_match[] = {
+ { .compatible = "timeriomem_rng" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, timeriomem_rng_match);
+
static struct platform_driver timeriomem_rng_driver = {
.driver = {
.name = "timeriomem_rng",
.owner = THIS_MODULE,
+ .of_match_table = timeriomem_rng_match,
},
.probe = timeriomem_rng_probe,
.remove = timeriomem_rng_remove,
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 6bf4d47..ef46a9c 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -47,7 +47,7 @@ static void register_buffer(u8 *buf, size_t size)
sg_init_one(&sg, buf, size);
/* There should always be room for one buffer. */
- if (virtqueue_add_buf(vq, &sg, 0, 1, buf, GFP_KERNEL) < 0)
+ if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL) < 0)
BUG();
virtqueue_kick(vq);
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 053201b..4d439d2 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -1917,7 +1917,7 @@ static int smi_ipmb_proc_show(struct seq_file *m, void *v)
static int smi_ipmb_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, smi_ipmb_proc_show, PDE(inode)->data);
+ return single_open(file, smi_ipmb_proc_show, PDE_DATA(inode));
}
static const struct file_operations smi_ipmb_proc_ops = {
@@ -1938,7 +1938,7 @@ static int smi_version_proc_show(struct seq_file *m, void *v)
static int smi_version_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, smi_version_proc_show, PDE(inode)->data);
+ return single_open(file, smi_version_proc_show, PDE_DATA(inode));
}
static const struct file_operations smi_version_proc_ops = {
@@ -2013,7 +2013,7 @@ static int smi_stats_proc_show(struct seq_file *m, void *v)
static int smi_stats_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, smi_stats_proc_show, PDE(inode)->data);
+ return single_open(file, smi_stats_proc_show, PDE_DATA(inode));
}
static const struct file_operations smi_stats_proc_ops = {
@@ -4541,7 +4541,7 @@ static void __exit cleanup_ipmi(void)
del_timer_sync(&ipmi_timer);
#ifdef CONFIG_PROC_FS
- remove_proc_entry(proc_ipmi_root->name, NULL);
+ proc_remove(proc_ipmi_root);
#endif /* CONFIG_PROC_FS */
driver_unregister(&ipmidriver.driver);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 0ac9b45..313538a 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2839,7 +2839,7 @@ static int smi_type_proc_show(struct seq_file *m, void *v)
static int smi_type_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, smi_type_proc_show, PDE(inode)->data);
+ return single_open(file, smi_type_proc_show, PDE_DATA(inode));
}
static const struct file_operations smi_type_proc_ops = {
@@ -2882,7 +2882,7 @@ static int smi_si_stats_proc_show(struct seq_file *m, void *v)
static int smi_si_stats_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, smi_si_stats_proc_show, PDE(inode)->data);
+ return single_open(file, smi_si_stats_proc_show, PDE_DATA(inode));
}
static const struct file_operations smi_si_stats_proc_ops = {
@@ -2910,7 +2910,7 @@ static int smi_params_proc_show(struct seq_file *m, void *v)
static int smi_params_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, smi_params_proc_show, PDE(inode)->data);
+ return single_open(file, smi_params_proc_show, PDE_DATA(inode));
}
static const struct file_operations smi_params_proc_ops = {
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 2c644af..1ccbe94 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -28,6 +28,7 @@
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
+#include <linux/aio.h>
#include <asm/uaccess.h>
@@ -627,6 +628,18 @@ static ssize_t write_null(struct file *file, const char __user *buf,
return count;
}
+static ssize_t aio_read_null(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ return 0;
+}
+
+static ssize_t aio_write_null(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ return iov_length(iov, nr_segs);
+}
+
static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
struct splice_desc *sd)
{
@@ -670,6 +683,24 @@ static ssize_t read_zero(struct file *file, char __user *buf,
return written ? written : -EFAULT;
}
+static ssize_t aio_read_zero(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ size_t written = 0;
+ unsigned long i;
+ ssize_t ret;
+
+ for (i = 0; i < nr_segs; i++) {
+ ret = read_zero(iocb->ki_filp, iov[i].iov_base, iov[i].iov_len,
+ &pos);
+ if (ret < 0)
+ break;
+ written += ret;
+ }
+
+ return written ? written : -EFAULT;
+}
+
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
@@ -738,6 +769,7 @@ static int open_port(struct inode *inode, struct file *filp)
#define full_lseek null_lseek
#define write_zero write_null
#define read_full read_zero
+#define aio_write_zero aio_write_null
#define open_mem open_port
#define open_kmem open_mem
#define open_oldmem open_mem
@@ -766,6 +798,8 @@ static const struct file_operations null_fops = {
.llseek = null_lseek,
.read = read_null,
.write = write_null,
+ .aio_read = aio_read_null,
+ .aio_write = aio_write_null,
.splice_write = splice_write_null,
};
@@ -782,6 +816,8 @@ static const struct file_operations zero_fops = {
.llseek = zero_lseek,
.read = read_zero,
.write = write_zero,
+ .aio_read = aio_read_zero,
+ .aio_write = aio_write_zero,
.mmap = mmap_zero,
};
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index ce5f3fc..1b456fe 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -78,8 +78,8 @@ struct ports_driver_data {
};
static struct ports_driver_data pdrvdata;
-DEFINE_SPINLOCK(pdrvdata_lock);
-DECLARE_COMPLETION(early_console_added);
+static DEFINE_SPINLOCK(pdrvdata_lock);
+static DECLARE_COMPLETION(early_console_added);
/* This struct holds information that's relevant only for console ports */
struct console {
@@ -503,7 +503,7 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
sg_init_one(sg, buf->buf, buf->size);
- ret = virtqueue_add_buf(vq, sg, 0, 1, buf, GFP_ATOMIC);
+ ret = virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC);
virtqueue_kick(vq);
if (!ret)
ret = vq->num_free;
@@ -572,7 +572,7 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
sg_init_one(sg, &cpkt, sizeof(cpkt));
spin_lock(&portdev->c_ovq_lock);
- if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) {
+ if (virtqueue_add_outbuf(vq, sg, 1, &cpkt, GFP_ATOMIC) == 0) {
virtqueue_kick(vq);
while (!virtqueue_get_buf(vq, &len))
cpu_relax();
@@ -622,7 +622,7 @@ static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
reclaim_consumed_buffers(port);
- err = virtqueue_add_buf(out_vq, sg, nents, 0, data, GFP_ATOMIC);
+ err = virtqueue_add_outbuf(out_vq, sg, nents, data, GFP_ATOMIC);
/* Tell Host to go! */
virtqueue_kick(out_vq);
@@ -1040,7 +1040,7 @@ static int port_fops_open(struct inode *inode, struct file *filp)
spin_lock_irq(&port->inbuf_lock);
if (port->guest_connected) {
spin_unlock_irq(&port->inbuf_lock);
- ret = -EMFILE;
+ ret = -EBUSY;
goto out;
}
@@ -1202,7 +1202,7 @@ int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
return hvc_instantiate(0, 0, &hv_ops);
}
-int init_port_console(struct port *port)
+static int init_port_console(struct port *port)
{
int ret;
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index e7f7fe9..137d3e7 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_ARCH_U8500) += ux500/
obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
obj-$(CONFIG_ARCH_ZYNQ) += clk-zynq.o
obj-$(CONFIG_ARCH_TEGRA) += tegra/
+obj-$(CONFIG_PLAT_SAMSUNG) += samsung/
obj-$(CONFIG_X86) += x86/
diff --git a/drivers/clk/mmp/clk-mmp2.c b/drivers/clk/mmp/clk-mmp2.c
index ade4358..d1f1a19 100644
--- a/drivers/clk/mmp/clk-mmp2.c
+++ b/drivers/clk/mmp/clk-mmp2.c
@@ -221,7 +221,7 @@ void __init mmp2_clk_init(void)
clk = mmp_clk_register_apbc("gpio", "vctcxo",
apbc_base + APBC_GPIO, 10, 0, &clk_lock);
- clk_register_clkdev(clk, NULL, "pxa-gpio");
+ clk_register_clkdev(clk, NULL, "mmp2-gpio");
clk = mmp_clk_register_apbc("kpc", "clk32",
apbc_base + APBC_KPC, 10, 0, &clk_lock);
diff --git a/drivers/clk/mmp/clk-pxa168.c b/drivers/clk/mmp/clk-pxa168.c
index e8d036c..28b3b51 100644
--- a/drivers/clk/mmp/clk-pxa168.c
+++ b/drivers/clk/mmp/clk-pxa168.c
@@ -172,7 +172,7 @@ void __init pxa168_clk_init(void)
clk = mmp_clk_register_apbc("gpio", "vctcxo",
apbc_base + APBC_GPIO, 10, 0, &clk_lock);
- clk_register_clkdev(clk, NULL, "pxa-gpio");
+ clk_register_clkdev(clk, NULL, "mmp-gpio");
clk = mmp_clk_register_apbc("kpc", "clk32",
apbc_base + APBC_KPC, 10, 0, &clk_lock);
diff --git a/drivers/clk/mmp/clk-pxa910.c b/drivers/clk/mmp/clk-pxa910.c
index 7048c31..6ec0569 100644
--- a/drivers/clk/mmp/clk-pxa910.c
+++ b/drivers/clk/mmp/clk-pxa910.c
@@ -177,7 +177,7 @@ void __init pxa910_clk_init(void)
clk = mmp_clk_register_apbc("gpio", "vctcxo",
apbc_base + APBC_GPIO, 10, 0, &clk_lock);
- clk_register_clkdev(clk, NULL, "pxa-gpio");
+ clk_register_clkdev(clk, NULL, "mmp-gpio");
clk = mmp_clk_register_apbc("kpc", "clk32",
apbc_base + APBC_KPC, 10, 0, &clk_lock);
diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c
index b5c06f9..f6a7487 100644
--- a/drivers/clk/mxs/clk-imx23.c
+++ b/drivers/clk/mxs/clk-imx23.c
@@ -15,12 +15,15 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <mach/common.h>
-#include <mach/mx23.h>
+#include <linux/of_address.h>
#include "clk.h"
-#define DIGCTRL MX23_IO_ADDRESS(MX23_DIGCTL_BASE_ADDR)
-#define CLKCTRL MX23_IO_ADDRESS(MX23_CLKCTRL_BASE_ADDR)
+static void __iomem *clkctrl;
+static void __iomem *digctrl;
+
+#define CLKCTRL clkctrl
+#define DIGCTRL digctrl
+
#define PLLCTRL0 (CLKCTRL + 0x0000)
#define CPU (CLKCTRL + 0x0020)
#define HBUS (CLKCTRL + 0x0030)
@@ -48,10 +51,10 @@ static void __init clk_misc_init(void)
u32 val;
/* Gate off cpu clock in WFI for power saving */
- __mxs_setl(1 << BP_CPU_INTERRUPT_WAIT, CPU);
+ writel_relaxed(1 << BP_CPU_INTERRUPT_WAIT, CPU + SET);
/* Clear BYPASS for SAIF */
- __mxs_clrl(1 << BP_CLKSEQ_BYPASS_SAIF, CLKSEQ);
+ writel_relaxed(1 << BP_CLKSEQ_BYPASS_SAIF, CLKSEQ + CLR);
/* SAIF has to use frac div for functional operation */
val = readl_relaxed(SAIF);
@@ -62,14 +65,14 @@ static void __init clk_misc_init(void)
* Source ssp clock from ref_io than ref_xtal,
* as ref_xtal only provides 24 MHz as maximum.
*/
- __mxs_clrl(1 << BP_CLKSEQ_BYPASS_SSP, CLKSEQ);
+ writel_relaxed(1 << BP_CLKSEQ_BYPASS_SSP, CLKSEQ + CLR);
/*
* 480 MHz seems too high to be ssp clock source directly,
* so set frac to get a 288 MHz ref_io.
*/
- __mxs_clrl(0x3f << BP_FRAC_IOFRAC, FRAC);
- __mxs_setl(30 << BP_FRAC_IOFRAC, FRAC);
+ writel_relaxed(0x3f << BP_FRAC_IOFRAC, FRAC + CLR);
+ writel_relaxed(30 << BP_FRAC_IOFRAC, FRAC + SET);
}
static const char *sel_pll[] __initconst = { "pll", "ref_xtal", };
@@ -101,6 +104,14 @@ int __init mx23_clocks_init(void)
struct device_node *np;
u32 i;
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx23-digctl");
+ digctrl = of_iomap(np, 0);
+ WARN_ON(!digctrl);
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx23-clkctrl");
+ clkctrl = of_iomap(np, 0);
+ WARN_ON(!clkctrl);
+
clk_misc_init();
clks[ref_xtal] = mxs_clk_fixed("ref_xtal", 24000000);
@@ -153,19 +164,12 @@ int __init mx23_clocks_init(void)
return PTR_ERR(clks[i]);
}
- np = of_find_compatible_node(NULL, NULL, "fsl,imx23-clkctrl");
- if (np) {
- clk_data.clks = clks;
- clk_data.clk_num = ARRAY_SIZE(clks);
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
- }
-
- clk_register_clkdev(clks[clk32k], NULL, "timrot");
+ clk_data.clks = clks;
+ clk_data.clk_num = ARRAY_SIZE(clks);
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
clk_prepare_enable(clks[clks_init_on[i]]);
- mxs_timer_init();
-
return 0;
}
diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c
index 76ce6c6..d0e5eed 100644
--- a/drivers/clk/mxs/clk-imx28.c
+++ b/drivers/clk/mxs/clk-imx28.c
@@ -15,11 +15,12 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <mach/common.h>
-#include <mach/mx28.h>
+#include <linux/of_address.h>
#include "clk.h"
-#define CLKCTRL MX28_IO_ADDRESS(MX28_CLKCTRL_BASE_ADDR)
+static void __iomem *clkctrl;
+#define CLKCTRL clkctrl
+
#define PLL0CTRL0 (CLKCTRL + 0x0000)
#define PLL1CTRL0 (CLKCTRL + 0x0020)
#define PLL2CTRL0 (CLKCTRL + 0x0040)
@@ -53,7 +54,8 @@
#define BP_FRAC0_IO1FRAC 16
#define BP_FRAC0_IO0FRAC 24
-#define DIGCTRL MX28_IO_ADDRESS(MX28_DIGCTL_BASE_ADDR)
+static void __iomem *digctrl;
+#define DIGCTRL digctrl
#define BP_SAIF_CLKMUX 10
/*
@@ -72,8 +74,8 @@ int mxs_saif_clkmux_select(unsigned int clkmux)
if (clkmux > 0x3)
return -EINVAL;
- __mxs_clrl(0x3 << BP_SAIF_CLKMUX, DIGCTRL);
- __mxs_setl(clkmux << BP_SAIF_CLKMUX, DIGCTRL);
+ writel_relaxed(0x3 << BP_SAIF_CLKMUX, DIGCTRL + CLR);
+ writel_relaxed(clkmux << BP_SAIF_CLKMUX, DIGCTRL + SET);
return 0;
}
@@ -83,13 +85,13 @@ static void __init clk_misc_init(void)
u32 val;
/* Gate off cpu clock in WFI for power saving */
- __mxs_setl(1 << BP_CPU_INTERRUPT_WAIT, CPU);
+ writel_relaxed(1 << BP_CPU_INTERRUPT_WAIT, CPU + SET);
/* 0 is a bad default value for a divider */
- __mxs_setl(1 << BP_ENET_DIV_TIME, ENET);
+ writel_relaxed(1 << BP_ENET_DIV_TIME, ENET + SET);
/* Clear BYPASS for SAIF */
- __mxs_clrl(0x3 << BP_CLKSEQ_BYPASS_SAIF0, CLKSEQ);
+ writel_relaxed(0x3 << BP_CLKSEQ_BYPASS_SAIF0, CLKSEQ + CLR);
/* SAIF has to use frac div for functional operation */
val = readl_relaxed(SAIF0);
@@ -109,7 +111,7 @@ static void __init clk_misc_init(void)
* Source ssp clock from ref_io than ref_xtal,
* as ref_xtal only provides 24 MHz as maximum.
*/
- __mxs_clrl(0xf << BP_CLKSEQ_BYPASS_SSP0, CLKSEQ);
+ writel_relaxed(0xf << BP_CLKSEQ_BYPASS_SSP0, CLKSEQ + CLR);
/*
* 480 MHz seems too high to be ssp clock source directly,
@@ -156,6 +158,14 @@ int __init mx28_clocks_init(void)
struct device_node *np;
u32 i;
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx28-digctl");
+ digctrl = of_iomap(np, 0);
+ WARN_ON(!digctrl);
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx28-clkctrl");
+ clkctrl = of_iomap(np, 0);
+ WARN_ON(!clkctrl);
+
clk_misc_init();
clks[ref_xtal] = mxs_clk_fixed("ref_xtal", 24000000);
@@ -231,20 +241,14 @@ int __init mx28_clocks_init(void)
return PTR_ERR(clks[i]);
}
- np = of_find_compatible_node(NULL, NULL, "fsl,imx28-clkctrl");
- if (np) {
- clk_data.clks = clks;
- clk_data.clk_num = ARRAY_SIZE(clks);
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
- }
+ clk_data.clks = clks;
+ clk_data.clk_num = ARRAY_SIZE(clks);
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
- clk_register_clkdev(clks[xbus], NULL, "timrot");
clk_register_clkdev(clks[enet_out], NULL, "enet_out");
for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
clk_prepare_enable(clks[clks_init_on[i]]);
- mxs_timer_init();
-
return 0;
}
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
new file mode 100644
index 0000000..b7c232e
--- /dev/null
+++ b/drivers/clk/samsung/Makefile
@@ -0,0 +1,8 @@
+#
+# Samsung Clock specific Makefile
+#
+
+obj-$(CONFIG_COMMON_CLK) += clk.o clk-pll.o
+obj-$(CONFIG_ARCH_EXYNOS4) += clk-exynos4.o
+obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o
+obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
new file mode 100644
index 0000000..d0940e6
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -0,0 +1,1082 @@
+/*
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2013 Linaro Ltd.
+ * Author: Thomas Abraham <thomas.ab@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for all Exynos4 SoCs.
+*/
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include "clk.h"
+#include "clk-pll.h"
+
+/* Exynos4 clock controller register offsets */
+#define SRC_LEFTBUS 0x4200
+#define DIV_LEFTBUS 0x4500
+#define GATE_IP_LEFTBUS 0x4800
+#define E4X12_GATE_IP_IMAGE 0x4930
+#define SRC_RIGHTBUS 0x8200
+#define DIV_RIGHTBUS 0x8500
+#define GATE_IP_RIGHTBUS 0x8800
+#define E4X12_GATE_IP_PERIR 0x8960
+#define EPLL_LOCK 0xc010
+#define VPLL_LOCK 0xc020
+#define EPLL_CON0 0xc110
+#define EPLL_CON1 0xc114
+#define EPLL_CON2 0xc118
+#define VPLL_CON0 0xc120
+#define VPLL_CON1 0xc124
+#define VPLL_CON2 0xc128
+#define SRC_TOP0 0xc210
+#define SRC_TOP1 0xc214
+#define SRC_CAM 0xc220
+#define SRC_TV 0xc224
+#define SRC_MFC 0xc228
+#define SRC_G3D 0xc22c
+#define E4210_SRC_IMAGE 0xc230
+#define SRC_LCD0 0xc234
+#define E4210_SRC_LCD1 0xc238
+#define E4X12_SRC_ISP 0xc238
+#define SRC_MAUDIO 0xc23c
+#define SRC_FSYS 0xc240
+#define SRC_PERIL0 0xc250
+#define SRC_PERIL1 0xc254
+#define E4X12_SRC_CAM1 0xc258
+#define SRC_MASK_TOP 0xc310
+#define SRC_MASK_CAM 0xc320
+#define SRC_MASK_TV 0xc324
+#define SRC_MASK_LCD0 0xc334
+#define E4210_SRC_MASK_LCD1 0xc338
+#define E4X12_SRC_MASK_ISP 0xc338
+#define SRC_MASK_MAUDIO 0xc33c
+#define SRC_MASK_FSYS 0xc340
+#define SRC_MASK_PERIL0 0xc350
+#define SRC_MASK_PERIL1 0xc354
+#define DIV_TOP 0xc510
+#define DIV_CAM 0xc520
+#define DIV_TV 0xc524
+#define DIV_MFC 0xc528
+#define DIV_G3D 0xc52c
+#define DIV_IMAGE 0xc530
+#define DIV_LCD0 0xc534
+#define E4210_DIV_LCD1 0xc538
+#define E4X12_DIV_ISP 0xc538
+#define DIV_MAUDIO 0xc53c
+#define DIV_FSYS0 0xc540
+#define DIV_FSYS1 0xc544
+#define DIV_FSYS2 0xc548
+#define DIV_FSYS3 0xc54c
+#define DIV_PERIL0 0xc550
+#define DIV_PERIL1 0xc554
+#define DIV_PERIL2 0xc558
+#define DIV_PERIL3 0xc55c
+#define DIV_PERIL4 0xc560
+#define DIV_PERIL5 0xc564
+#define E4X12_DIV_CAM1 0xc568
+#define GATE_SCLK_CAM 0xc820
+#define GATE_IP_CAM 0xc920
+#define GATE_IP_TV 0xc924
+#define GATE_IP_MFC 0xc928
+#define GATE_IP_G3D 0xc92c
+#define E4210_GATE_IP_IMAGE 0xc930
+#define GATE_IP_LCD0 0xc934
+#define E4210_GATE_IP_LCD1 0xc938
+#define E4X12_GATE_IP_ISP 0xc938
+#define E4X12_GATE_IP_MAUDIO 0xc93c
+#define GATE_IP_FSYS 0xc940
+#define GATE_IP_GPS 0xc94c
+#define GATE_IP_PERIL 0xc950
+#define E4210_GATE_IP_PERIR 0xc960
+#define GATE_BLOCK 0xc970
+#define E4X12_MPLL_CON0 0x10108
+#define SRC_DMC 0x10200
+#define SRC_MASK_DMC 0x10300
+#define DIV_DMC0 0x10500
+#define DIV_DMC1 0x10504
+#define GATE_IP_DMC 0x10900
+#define APLL_CON0 0x14100
+#define E4210_MPLL_CON0 0x14108
+#define SRC_CPU 0x14200
+#define DIV_CPU0 0x14500
+#define DIV_CPU1 0x14504
+#define GATE_SCLK_CPU 0x14800
+#define GATE_IP_CPU 0x14900
+#define E4X12_DIV_ISP0 0x18300
+#define E4X12_DIV_ISP1 0x18304
+#define E4X12_GATE_ISP0 0x18800
+#define E4X12_GATE_ISP1 0x18804
+
+/* the exynos4 soc type */
+enum exynos4_soc {
+ EXYNOS4210,
+ EXYNOS4X12,
+};
+
+/*
+ * Let each supported clock get a unique id. This id is used to lookup the clock
+ * for device tree based platforms. The clocks are categorized into three
+ * sections: core, sclk gate and bus interface gate clocks.
+ *
+ * When adding a new clock to this list, it is advised to choose a clock
+ * category and add it to the end of that category. That is because the
+ * device tree source file is referring to these ids and any change in the
+ * sequence number of existing clocks will require corresponding change in the
+ * device tree files. This limitation would go away when pre-processor support
+ * for dtc would be available.
+ */
+enum exynos4_clks {
+ none,
+
+ /* core clocks */
+ xxti, xusbxti, fin_pll, fout_apll, fout_mpll, fout_epll, fout_vpll,
+ sclk_apll, sclk_mpll, sclk_epll, sclk_vpll, arm_clk, aclk200, aclk100,
+ aclk160, aclk133, mout_mpll_user_t, mout_mpll_user_c, mout_core,
+ mout_apll, /* 20 */
+
+ /* gate for special clocks (sclk) */
+ sclk_fimc0 = 128, sclk_fimc1, sclk_fimc2, sclk_fimc3, sclk_cam0,
+ sclk_cam1, sclk_csis0, sclk_csis1, sclk_hdmi, sclk_mixer, sclk_dac,
+ sclk_pixel, sclk_fimd0, sclk_mdnie0, sclk_mdnie_pwm0, sclk_mipi0,
+ sclk_audio0, sclk_mmc0, sclk_mmc1, sclk_mmc2, sclk_mmc3, sclk_mmc4,
+ sclk_sata, sclk_uart0, sclk_uart1, sclk_uart2, sclk_uart3, sclk_uart4,
+ sclk_audio1, sclk_audio2, sclk_spdif, sclk_spi0, sclk_spi1, sclk_spi2,
+ sclk_slimbus, sclk_fimd1, sclk_mipi1, sclk_pcm1, sclk_pcm2, sclk_i2s1,
+ sclk_i2s2, sclk_mipihsi, sclk_mfc, sclk_pcm0, sclk_g3d, sclk_pwm_isp,
+ sclk_spi0_isp, sclk_spi1_isp, sclk_uart_isp,
+
+ /* gate clocks */
+ fimc0 = 256, fimc1, fimc2, fimc3, csis0, csis1, jpeg, smmu_fimc0,
+ smmu_fimc1, smmu_fimc2, smmu_fimc3, smmu_jpeg, vp, mixer, tvenc, hdmi,
+ smmu_tv, mfc, smmu_mfcl, smmu_mfcr, g3d, g2d, rotator, mdma, smmu_g2d,
+ smmu_rotator, smmu_mdma, fimd0, mie0, mdnie0, dsim0, smmu_fimd0, fimd1,
+ mie1, dsim1, smmu_fimd1, pdma0, pdma1, pcie_phy, sata_phy, tsi, sdmmc0,
+ sdmmc1, sdmmc2, sdmmc3, sdmmc4, sata, sromc, usb_host, usb_device, pcie,
+ onenand, nfcon, smmu_pcie, gps, smmu_gps, uart0, uart1, uart2, uart3,
+ uart4, i2c0, i2c1, i2c2, i2c3, i2c4, i2c5, i2c6, i2c7, i2c_hdmi, tsadc,
+ spi0, spi1, spi2, i2s1, i2s2, pcm0, i2s0, pcm1, pcm2, pwm, slimbus,
+ spdif, ac97, modemif, chipid, sysreg, hdmi_cec, mct, wdt, rtc, keyif,
+ audss, mipi_hsi, mdma2, pixelasyncm0, pixelasyncm1, fimc_lite0,
+ fimc_lite1, ppmuispx, ppmuispmx, fimc_isp, fimc_drc, fimc_fd, mcuisp,
+ gicisp, smmu_isp, smmu_drc, smmu_fd, smmu_lite0, smmu_lite1, mcuctl_isp,
+ mpwm_isp, i2c0_isp, i2c1_isp, mtcadc_isp, pwm_isp, wdt_isp, uart_isp,
+ asyncaxim, smmu_ispcx, spi0_isp, spi1_isp, pwm_isp_sclk, spi0_isp_sclk,
+ spi1_isp_sclk, uart_isp_sclk,
+
+ /* mux clocks */
+ mout_fimc0 = 384, mout_fimc1, mout_fimc2, mout_fimc3, mout_cam0,
+ mout_cam1, mout_csis0, mout_csis1, mout_g3d0, mout_g3d1, mout_g3d,
+ aclk400_mcuisp,
+
+ /* div clocks */
+ div_isp0 = 450, div_isp1, div_mcuisp0, div_mcuisp1, div_aclk200,
+ div_aclk400_mcuisp,
+
+ nr_clks,
+};
+
+/*
+ * list of controller registers to be saved and restored during a
+ * suspend/resume cycle.
+ */
+static __initdata unsigned long exynos4210_clk_save[] = {
+ E4210_SRC_IMAGE,
+ E4210_SRC_LCD1,
+ E4210_SRC_MASK_LCD1,
+ E4210_DIV_LCD1,
+ E4210_GATE_IP_IMAGE,
+ E4210_GATE_IP_LCD1,
+ E4210_GATE_IP_PERIR,
+ E4210_MPLL_CON0,
+};
+
+static __initdata unsigned long exynos4x12_clk_save[] = {
+ E4X12_GATE_IP_IMAGE,
+ E4X12_GATE_IP_PERIR,
+ E4X12_SRC_CAM1,
+ E4X12_DIV_ISP,
+ E4X12_DIV_CAM1,
+ E4X12_MPLL_CON0,
+};
+
+static __initdata unsigned long exynos4_clk_regs[] = {
+ SRC_LEFTBUS,
+ DIV_LEFTBUS,
+ GATE_IP_LEFTBUS,
+ SRC_RIGHTBUS,
+ DIV_RIGHTBUS,
+ GATE_IP_RIGHTBUS,
+ EPLL_CON0,
+ EPLL_CON1,
+ EPLL_CON2,
+ VPLL_CON0,
+ VPLL_CON1,
+ VPLL_CON2,
+ SRC_TOP0,
+ SRC_TOP1,
+ SRC_CAM,
+ SRC_TV,
+ SRC_MFC,
+ SRC_G3D,
+ SRC_LCD0,
+ SRC_MAUDIO,
+ SRC_FSYS,
+ SRC_PERIL0,
+ SRC_PERIL1,
+ SRC_MASK_TOP,
+ SRC_MASK_CAM,
+ SRC_MASK_TV,
+ SRC_MASK_LCD0,
+ SRC_MASK_MAUDIO,
+ SRC_MASK_FSYS,
+ SRC_MASK_PERIL0,
+ SRC_MASK_PERIL1,
+ DIV_TOP,
+ DIV_CAM,
+ DIV_TV,
+ DIV_MFC,
+ DIV_G3D,
+ DIV_IMAGE,
+ DIV_LCD0,
+ DIV_MAUDIO,
+ DIV_FSYS0,
+ DIV_FSYS1,
+ DIV_FSYS2,
+ DIV_FSYS3,
+ DIV_PERIL0,
+ DIV_PERIL1,
+ DIV_PERIL2,
+ DIV_PERIL3,
+ DIV_PERIL4,
+ DIV_PERIL5,
+ GATE_SCLK_CAM,
+ GATE_IP_CAM,
+ GATE_IP_TV,
+ GATE_IP_MFC,
+ GATE_IP_G3D,
+ GATE_IP_LCD0,
+ GATE_IP_FSYS,
+ GATE_IP_GPS,
+ GATE_IP_PERIL,
+ GATE_BLOCK,
+ SRC_MASK_DMC,
+ SRC_DMC,
+ DIV_DMC0,
+ DIV_DMC1,
+ GATE_IP_DMC,
+ APLL_CON0,
+ SRC_CPU,
+ DIV_CPU0,
+ DIV_CPU1,
+ GATE_SCLK_CPU,
+ GATE_IP_CPU,
+};
+
+/* list of all parent clocks */
+PNAME(mout_apll_p) = { "fin_pll", "fout_apll", };
+PNAME(mout_mpll_p) = { "fin_pll", "fout_mpll", };
+PNAME(mout_epll_p) = { "fin_pll", "fout_epll", };
+PNAME(mout_vpllsrc_p) = { "fin_pll", "sclk_hdmi24m", };
+PNAME(mout_vpll_p) = { "fin_pll", "fout_vpll", };
+PNAME(sclk_evpll_p) = { "sclk_epll", "sclk_vpll", };
+PNAME(mout_mfc_p) = { "mout_mfc0", "mout_mfc1", };
+PNAME(mout_g3d_p) = { "mout_g3d0", "mout_g3d1", };
+PNAME(mout_g2d_p) = { "mout_g2d0", "mout_g2d1", };
+PNAME(mout_hdmi_p) = { "sclk_pixel", "sclk_hdmiphy", };
+PNAME(mout_jpeg_p) = { "mout_jpeg0", "mout_jpeg1", };
+PNAME(mout_spdif_p) = { "sclk_audio0", "sclk_audio1", "sclk_audio2",
+ "spdif_extclk", };
+PNAME(mout_onenand_p) = {"aclk133", "aclk160", };
+PNAME(mout_onenand1_p) = {"mout_onenand", "sclk_vpll", };
+
+/* Exynos 4210-specific parent groups */
+PNAME(sclk_vpll_p4210) = { "mout_vpllsrc", "fout_vpll", };
+PNAME(mout_core_p4210) = { "mout_apll", "sclk_mpll", };
+PNAME(sclk_ampll_p4210) = { "sclk_mpll", "sclk_apll", };
+PNAME(group1_p4210) = { "xxti", "xusbxti", "sclk_hdmi24m",
+ "sclk_usbphy0", "none", "sclk_hdmiphy",
+ "sclk_mpll", "sclk_epll", "sclk_vpll", };
+PNAME(mout_audio0_p4210) = { "cdclk0", "none", "sclk_hdmi24m",
+ "sclk_usbphy0", "xxti", "xusbxti", "sclk_mpll",
+ "sclk_epll", "sclk_vpll" };
+PNAME(mout_audio1_p4210) = { "cdclk1", "none", "sclk_hdmi24m",
+ "sclk_usbphy0", "xxti", "xusbxti", "sclk_mpll",
+ "sclk_epll", "sclk_vpll", };
+PNAME(mout_audio2_p4210) = { "cdclk2", "none", "sclk_hdmi24m",
+ "sclk_usbphy0", "xxti", "xusbxti", "sclk_mpll",
+ "sclk_epll", "sclk_vpll", };
+PNAME(mout_mixer_p4210) = { "sclk_dac", "sclk_hdmi", };
+PNAME(mout_dac_p4210) = { "sclk_vpll", "sclk_hdmiphy", };
+
+/* Exynos 4x12-specific parent groups */
+PNAME(mout_mpll_user_p4x12) = { "fin_pll", "sclk_mpll", };
+PNAME(mout_core_p4x12) = { "mout_apll", "mout_mpll_user_c", };
+PNAME(sclk_ampll_p4x12) = { "mout_mpll_user_t", "sclk_apll", };
+PNAME(group1_p4x12) = { "xxti", "xusbxti", "sclk_hdmi24m", "sclk_usbphy0",
+ "none", "sclk_hdmiphy", "mout_mpll_user_t",
+ "sclk_epll", "sclk_vpll", };
+PNAME(mout_audio0_p4x12) = { "cdclk0", "none", "sclk_hdmi24m",
+ "sclk_usbphy0", "xxti", "xusbxti",
+ "mout_mpll_user_t", "sclk_epll", "sclk_vpll" };
+PNAME(mout_audio1_p4x12) = { "cdclk1", "none", "sclk_hdmi24m",
+ "sclk_usbphy0", "xxti", "xusbxti",
+ "mout_mpll_user_t", "sclk_epll", "sclk_vpll", };
+PNAME(mout_audio2_p4x12) = { "cdclk2", "none", "sclk_hdmi24m",
+ "sclk_usbphy0", "xxti", "xusbxti",
+ "mout_mpll_user_t", "sclk_epll", "sclk_vpll", };
+PNAME(aclk_p4412) = { "mout_mpll_user_t", "sclk_apll", };
+PNAME(mout_user_aclk400_mcuisp_p4x12) = {"fin_pll", "div_aclk400_mcuisp", };
+PNAME(mout_user_aclk200_p4x12) = {"fin_pll", "div_aclk200", };
+PNAME(mout_user_aclk266_gps_p4x12) = {"fin_pll", "div_aclk266_gps", };
+
+/* fixed rate clocks generated outside the soc */
+struct samsung_fixed_rate_clock exynos4_fixed_rate_ext_clks[] __initdata = {
+ FRATE(xxti, "xxti", NULL, CLK_IS_ROOT, 0),
+ FRATE(xusbxti, "xusbxti", NULL, CLK_IS_ROOT, 0),
+};
+
+/* fixed rate clocks generated inside the soc */
+struct samsung_fixed_rate_clock exynos4_fixed_rate_clks[] __initdata = {
+ FRATE(none, "sclk_hdmi24m", NULL, CLK_IS_ROOT, 24000000),
+ FRATE(none, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000),
+ FRATE(none, "sclk_usbphy0", NULL, CLK_IS_ROOT, 48000000),
+};
+
+struct samsung_fixed_rate_clock exynos4210_fixed_rate_clks[] __initdata = {
+ FRATE(none, "sclk_usbphy1", NULL, CLK_IS_ROOT, 48000000),
+};
+
+/* list of mux clocks supported in all exynos4 soc's */
+struct samsung_mux_clock exynos4_mux_clks[] __initdata = {
+ MUX_F(mout_apll, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
+ CLK_SET_RATE_PARENT, 0),
+ MUX(none, "mout_hdmi", mout_hdmi_p, SRC_TV, 0, 1),
+ MUX(none, "mout_mfc1", sclk_evpll_p, SRC_MFC, 4, 1),
+ MUX(none, "mout_mfc", mout_mfc_p, SRC_MFC, 8, 1),
+ MUX_F(mout_g3d1, "mout_g3d1", sclk_evpll_p, SRC_G3D, 4, 1,
+ CLK_SET_RATE_PARENT, 0),
+ MUX_F(mout_g3d, "mout_g3d", mout_g3d_p, SRC_G3D, 8, 1,
+ CLK_SET_RATE_PARENT, 0),
+ MUX(none, "mout_spdif", mout_spdif_p, SRC_PERIL1, 8, 2),
+ MUX(none, "mout_onenand1", mout_onenand1_p, SRC_TOP0, 0, 1),
+ MUX_A(sclk_epll, "sclk_epll", mout_epll_p, SRC_TOP0, 4, 1, "sclk_epll"),
+ MUX(none, "mout_onenand", mout_onenand_p, SRC_TOP0, 28, 1),
+};
+
+/* list of mux clocks supported in exynos4210 soc */
+struct samsung_mux_clock exynos4210_mux_clks[] __initdata = {
+ MUX(none, "mout_aclk200", sclk_ampll_p4210, SRC_TOP0, 12, 1),
+ MUX(none, "mout_aclk100", sclk_ampll_p4210, SRC_TOP0, 16, 1),
+ MUX(none, "mout_aclk160", sclk_ampll_p4210, SRC_TOP0, 20, 1),
+ MUX(none, "mout_aclk133", sclk_ampll_p4210, SRC_TOP0, 24, 1),
+ MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP1, 0, 1),
+ MUX(none, "mout_mixer", mout_mixer_p4210, SRC_TV, 4, 1),
+ MUX(none, "mout_dac", mout_dac_p4210, SRC_TV, 8, 1),
+ MUX(none, "mout_g2d0", sclk_ampll_p4210, E4210_SRC_IMAGE, 0, 1),
+ MUX(none, "mout_g2d1", sclk_evpll_p, E4210_SRC_IMAGE, 4, 1),
+ MUX(none, "mout_g2d", mout_g2d_p, E4210_SRC_IMAGE, 8, 1),
+ MUX(none, "mout_fimd1", group1_p4210, E4210_SRC_LCD1, 0, 4),
+ MUX(none, "mout_mipi1", group1_p4210, E4210_SRC_LCD1, 12, 4),
+ MUX_A(sclk_mpll, "sclk_mpll", mout_mpll_p, SRC_CPU, 8, 1, "sclk_mpll"),
+ MUX_A(mout_core, "mout_core", mout_core_p4210,
+ SRC_CPU, 16, 1, "mout_core"),
+ MUX_A(sclk_vpll, "sclk_vpll", sclk_vpll_p4210,
+ SRC_TOP0, 8, 1, "sclk_vpll"),
+ MUX(mout_fimc0, "mout_fimc0", group1_p4210, SRC_CAM, 0, 4),
+ MUX(mout_fimc1, "mout_fimc1", group1_p4210, SRC_CAM, 4, 4),
+ MUX(mout_fimc2, "mout_fimc2", group1_p4210, SRC_CAM, 8, 4),
+ MUX(mout_fimc3, "mout_fimc3", group1_p4210, SRC_CAM, 12, 4),
+ MUX(mout_cam0, "mout_cam0", group1_p4210, SRC_CAM, 16, 4),
+ MUX(mout_cam1, "mout_cam1", group1_p4210, SRC_CAM, 20, 4),
+ MUX(mout_csis0, "mout_csis0", group1_p4210, SRC_CAM, 24, 4),
+ MUX(mout_csis1, "mout_csis1", group1_p4210, SRC_CAM, 28, 4),
+ MUX(none, "mout_mfc0", sclk_ampll_p4210, SRC_MFC, 0, 1),
+ MUX_F(mout_g3d0, "mout_g3d0", sclk_ampll_p4210, SRC_G3D, 0, 1,
+ CLK_SET_RATE_PARENT, 0),
+ MUX(none, "mout_fimd0", group1_p4210, SRC_LCD0, 0, 4),
+ MUX(none, "mout_mipi0", group1_p4210, SRC_LCD0, 12, 4),
+ MUX(none, "mout_audio0", mout_audio0_p4210, SRC_MAUDIO, 0, 4),
+ MUX(none, "mout_mmc0", group1_p4210, SRC_FSYS, 0, 4),
+ MUX(none, "mout_mmc1", group1_p4210, SRC_FSYS, 4, 4),
+ MUX(none, "mout_mmc2", group1_p4210, SRC_FSYS, 8, 4),
+ MUX(none, "mout_mmc3", group1_p4210, SRC_FSYS, 12, 4),
+ MUX(none, "mout_mmc4", group1_p4210, SRC_FSYS, 16, 4),
+ MUX(none, "mout_sata", sclk_ampll_p4210, SRC_FSYS, 24, 1),
+ MUX(none, "mout_uart0", group1_p4210, SRC_PERIL0, 0, 4),
+ MUX(none, "mout_uart1", group1_p4210, SRC_PERIL0, 4, 4),
+ MUX(none, "mout_uart2", group1_p4210, SRC_PERIL0, 8, 4),
+ MUX(none, "mout_uart3", group1_p4210, SRC_PERIL0, 12, 4),
+ MUX(none, "mout_uart4", group1_p4210, SRC_PERIL0, 16, 4),
+ MUX(none, "mout_audio1", mout_audio1_p4210, SRC_PERIL1, 0, 4),
+ MUX(none, "mout_audio2", mout_audio2_p4210, SRC_PERIL1, 4, 4),
+ MUX(none, "mout_spi0", group1_p4210, SRC_PERIL1, 16, 4),
+ MUX(none, "mout_spi1", group1_p4210, SRC_PERIL1, 20, 4),
+ MUX(none, "mout_spi2", group1_p4210, SRC_PERIL1, 24, 4),
+};
+
+/* list of mux clocks supported in exynos4x12 soc */
+struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = {
+ MUX(mout_mpll_user_c, "mout_mpll_user_c", mout_mpll_user_p4x12,
+ SRC_CPU, 24, 1),
+ MUX(none, "mout_aclk266_gps", aclk_p4412, SRC_TOP1, 4, 1),
+ MUX(none, "mout_aclk400_mcuisp", aclk_p4412, SRC_TOP1, 8, 1),
+ MUX(mout_mpll_user_t, "mout_mpll_user_t", mout_mpll_user_p4x12,
+ SRC_TOP1, 12, 1),
+ MUX(none, "mout_user_aclk266_gps", mout_user_aclk266_gps_p4x12,
+ SRC_TOP1, 16, 1),
+ MUX(aclk200, "aclk200", mout_user_aclk200_p4x12, SRC_TOP1, 20, 1),
+ MUX(aclk400_mcuisp, "aclk400_mcuisp", mout_user_aclk400_mcuisp_p4x12,
+ SRC_TOP1, 24, 1),
+ MUX(none, "mout_aclk200", aclk_p4412, SRC_TOP0, 12, 1),
+ MUX(none, "mout_aclk100", aclk_p4412, SRC_TOP0, 16, 1),
+ MUX(none, "mout_aclk160", aclk_p4412, SRC_TOP0, 20, 1),
+ MUX(none, "mout_aclk133", aclk_p4412, SRC_TOP0, 24, 1),
+ MUX(none, "mout_mdnie0", group1_p4x12, SRC_LCD0, 4, 4),
+ MUX(none, "mout_mdnie_pwm0", group1_p4x12, SRC_LCD0, 8, 4),
+ MUX(none, "mout_sata", sclk_ampll_p4x12, SRC_FSYS, 24, 1),
+ MUX(none, "mout_jpeg0", sclk_ampll_p4x12, E4X12_SRC_CAM1, 0, 1),
+ MUX(none, "mout_jpeg1", sclk_evpll_p, E4X12_SRC_CAM1, 4, 1),
+ MUX(none, "mout_jpeg", mout_jpeg_p, E4X12_SRC_CAM1, 8, 1),
+ MUX_A(sclk_mpll, "sclk_mpll", mout_mpll_p,
+ SRC_DMC, 12, 1, "sclk_mpll"),
+ MUX_A(sclk_vpll, "sclk_vpll", mout_vpll_p,
+ SRC_TOP0, 8, 1, "sclk_vpll"),
+ MUX(mout_core, "mout_core", mout_core_p4x12, SRC_CPU, 16, 1),
+ MUX(mout_fimc0, "mout_fimc0", group1_p4x12, SRC_CAM, 0, 4),
+ MUX(mout_fimc1, "mout_fimc1", group1_p4x12, SRC_CAM, 4, 4),
+ MUX(mout_fimc2, "mout_fimc2", group1_p4x12, SRC_CAM, 8, 4),
+ MUX(mout_fimc3, "mout_fimc3", group1_p4x12, SRC_CAM, 12, 4),
+ MUX(mout_cam0, "mout_cam0", group1_p4x12, SRC_CAM, 16, 4),
+ MUX(mout_cam1, "mout_cam1", group1_p4x12, SRC_CAM, 20, 4),
+ MUX(mout_csis0, "mout_csis0", group1_p4x12, SRC_CAM, 24, 4),
+ MUX(mout_csis1, "mout_csis1", group1_p4x12, SRC_CAM, 28, 4),
+ MUX(none, "mout_mfc0", sclk_ampll_p4x12, SRC_MFC, 0, 1),
+ MUX_F(mout_g3d0, "mout_g3d0", sclk_ampll_p4x12, SRC_G3D, 0, 1,
+ CLK_SET_RATE_PARENT, 0),
+ MUX(none, "mout_fimd0", group1_p4x12, SRC_LCD0, 0, 4),
+ MUX(none, "mout_mipi0", group1_p4x12, SRC_LCD0, 12, 4),
+ MUX(none, "mout_audio0", mout_audio0_p4x12, SRC_MAUDIO, 0, 4),
+ MUX(none, "mout_mmc0", group1_p4x12, SRC_FSYS, 0, 4),
+ MUX(none, "mout_mmc1", group1_p4x12, SRC_FSYS, 4, 4),
+ MUX(none, "mout_mmc2", group1_p4x12, SRC_FSYS, 8, 4),
+ MUX(none, "mout_mmc3", group1_p4x12, SRC_FSYS, 12, 4),
+ MUX(none, "mout_mmc4", group1_p4x12, SRC_FSYS, 16, 4),
+ MUX(none, "mout_mipihsi", aclk_p4412, SRC_FSYS, 24, 1),
+ MUX(none, "mout_uart0", group1_p4x12, SRC_PERIL0, 0, 4),
+ MUX(none, "mout_uart1", group1_p4x12, SRC_PERIL0, 4, 4),
+ MUX(none, "mout_uart2", group1_p4x12, SRC_PERIL0, 8, 4),
+ MUX(none, "mout_uart3", group1_p4x12, SRC_PERIL0, 12, 4),
+ MUX(none, "mout_uart4", group1_p4x12, SRC_PERIL0, 16, 4),
+ MUX(none, "mout_audio1", mout_audio1_p4x12, SRC_PERIL1, 0, 4),
+ MUX(none, "mout_audio2", mout_audio2_p4x12, SRC_PERIL1, 4, 4),
+ MUX(none, "mout_spi0", group1_p4x12, SRC_PERIL1, 16, 4),
+ MUX(none, "mout_spi1", group1_p4x12, SRC_PERIL1, 20, 4),
+ MUX(none, "mout_spi2", group1_p4x12, SRC_PERIL1, 24, 4),
+ MUX(none, "mout_pwm_isp", group1_p4x12, E4X12_SRC_ISP, 0, 4),
+ MUX(none, "mout_spi0_isp", group1_p4x12, E4X12_SRC_ISP, 4, 4),
+ MUX(none, "mout_spi1_isp", group1_p4x12, E4X12_SRC_ISP, 8, 4),
+ MUX(none, "mout_uart_isp", group1_p4x12, E4X12_SRC_ISP, 12, 4),
+};
+
+/* list of divider clocks supported in all exynos4 soc's */
+struct samsung_div_clock exynos4_div_clks[] __initdata = {
+ DIV(none, "div_core", "mout_core", DIV_CPU0, 0, 3),
+ DIV(none, "div_core2", "div_core", DIV_CPU0, 28, 3),
+ DIV(none, "div_fimc0", "mout_fimc0", DIV_CAM, 0, 4),
+ DIV(none, "div_fimc1", "mout_fimc1", DIV_CAM, 4, 4),
+ DIV(none, "div_fimc2", "mout_fimc2", DIV_CAM, 8, 4),
+ DIV(none, "div_fimc3", "mout_fimc3", DIV_CAM, 12, 4),
+ DIV(none, "div_cam0", "mout_cam0", DIV_CAM, 16, 4),
+ DIV(none, "div_cam1", "mout_cam1", DIV_CAM, 20, 4),
+ DIV(none, "div_csis0", "mout_csis0", DIV_CAM, 24, 4),
+ DIV(none, "div_csis1", "mout_csis1", DIV_CAM, 28, 4),
+ DIV(sclk_mfc, "sclk_mfc", "mout_mfc", DIV_MFC, 0, 4),
+ DIV_F(none, "div_g3d", "mout_g3d", DIV_G3D, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV(none, "div_fimd0", "mout_fimd0", DIV_LCD0, 0, 4),
+ DIV(none, "div_mipi0", "mout_mipi0", DIV_LCD0, 16, 4),
+ DIV(none, "div_audio0", "mout_audio0", DIV_MAUDIO, 0, 4),
+ DIV(sclk_pcm0, "sclk_pcm0", "sclk_audio0", DIV_MAUDIO, 4, 8),
+ DIV(none, "div_mmc0", "mout_mmc0", DIV_FSYS1, 0, 4),
+ DIV(none, "div_mmc1", "mout_mmc1", DIV_FSYS1, 16, 4),
+ DIV(none, "div_mmc2", "mout_mmc2", DIV_FSYS2, 0, 4),
+ DIV(none, "div_mmc3", "mout_mmc3", DIV_FSYS2, 16, 4),
+ DIV(sclk_pixel, "sclk_pixel", "sclk_vpll", DIV_TV, 0, 4),
+ DIV(aclk100, "aclk100", "mout_aclk100", DIV_TOP, 4, 4),
+ DIV(aclk160, "aclk160", "mout_aclk160", DIV_TOP, 8, 3),
+ DIV(aclk133, "aclk133", "mout_aclk133", DIV_TOP, 12, 3),
+ DIV(none, "div_onenand", "mout_onenand1", DIV_TOP, 16, 3),
+ DIV(sclk_slimbus, "sclk_slimbus", "sclk_epll", DIV_PERIL3, 4, 4),
+ DIV(sclk_pcm1, "sclk_pcm1", "sclk_audio1", DIV_PERIL4, 4, 8),
+ DIV(sclk_pcm2, "sclk_pcm2", "sclk_audio2", DIV_PERIL4, 20, 8),
+ DIV(sclk_i2s1, "sclk_i2s1", "sclk_audio1", DIV_PERIL5, 0, 6),
+ DIV(sclk_i2s2, "sclk_i2s2", "sclk_audio2", DIV_PERIL5, 8, 6),
+ DIV(none, "div_mmc4", "mout_mmc4", DIV_FSYS3, 0, 4),
+ DIV(none, "div_mmc_pre4", "div_mmc4", DIV_FSYS3, 8, 8),
+ DIV(none, "div_uart0", "mout_uart0", DIV_PERIL0, 0, 4),
+ DIV(none, "div_uart1", "mout_uart1", DIV_PERIL0, 4, 4),
+ DIV(none, "div_uart2", "mout_uart2", DIV_PERIL0, 8, 4),
+ DIV(none, "div_uart3", "mout_uart3", DIV_PERIL0, 12, 4),
+ DIV(none, "div_uart4", "mout_uart4", DIV_PERIL0, 16, 4),
+ DIV(none, "div_spi0", "mout_spi0", DIV_PERIL1, 0, 4),
+ DIV(none, "div_spi_pre0", "div_spi0", DIV_PERIL1, 8, 8),
+ DIV(none, "div_spi1", "mout_spi1", DIV_PERIL1, 16, 4),
+ DIV(none, "div_spi_pre1", "div_spi1", DIV_PERIL1, 24, 8),
+ DIV(none, "div_spi2", "mout_spi2", DIV_PERIL2, 0, 4),
+ DIV(none, "div_spi_pre2", "div_spi2", DIV_PERIL2, 8, 8),
+ DIV(none, "div_audio1", "mout_audio1", DIV_PERIL4, 0, 4),
+ DIV(none, "div_audio2", "mout_audio2", DIV_PERIL4, 16, 4),
+ DIV_A(arm_clk, "arm_clk", "div_core2", DIV_CPU0, 28, 3, "arm_clk"),
+ DIV_A(sclk_apll, "sclk_apll", "mout_apll",
+ DIV_CPU0, 24, 3, "sclk_apll"),
+ DIV_F(none, "div_mipi_pre0", "div_mipi0", DIV_LCD0, 20, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(none, "div_mmc_pre0", "div_mmc0", DIV_FSYS1, 8, 8,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(none, "div_mmc_pre1", "div_mmc1", DIV_FSYS1, 24, 8,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(none, "div_mmc_pre2", "div_mmc2", DIV_FSYS2, 8, 8,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(none, "div_mmc_pre3", "div_mmc3", DIV_FSYS2, 24, 8,
+ CLK_SET_RATE_PARENT, 0),
+};
+
+/* list of divider clocks supported in exynos4210 soc */
+struct samsung_div_clock exynos4210_div_clks[] __initdata = {
+ DIV(aclk200, "aclk200", "mout_aclk200", DIV_TOP, 0, 3),
+ DIV(none, "div_g2d", "mout_g2d", DIV_IMAGE, 0, 4),
+ DIV(none, "div_fimd1", "mout_fimd1", E4210_DIV_LCD1, 0, 4),
+ DIV(none, "div_mipi1", "mout_mipi1", E4210_DIV_LCD1, 16, 4),
+ DIV(none, "div_sata", "mout_sata", DIV_FSYS0, 20, 4),
+ DIV_F(none, "div_mipi_pre1", "div_mipi1", E4210_DIV_LCD1, 20, 4,
+ CLK_SET_RATE_PARENT, 0),
+};
+
+/* list of divider clocks supported in exynos4x12 soc */
+struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
+ DIV(none, "div_mdnie0", "mout_mdnie0", DIV_LCD0, 4, 4),
+ DIV(none, "div_mdnie_pwm0", "mout_mdnie_pwm0", DIV_LCD0, 8, 4),
+ DIV(none, "div_mdnie_pwm_pre0", "div_mdnie_pwm0", DIV_LCD0, 12, 4),
+ DIV(none, "div_mipihsi", "mout_mipihsi", DIV_FSYS0, 20, 4),
+ DIV(none, "div_jpeg", "mout_jpeg", E4X12_DIV_CAM1, 0, 4),
+ DIV(div_aclk200, "div_aclk200", "mout_aclk200", DIV_TOP, 0, 3),
+ DIV(none, "div_aclk266_gps", "mout_aclk266_gps", DIV_TOP, 20, 3),
+ DIV(div_aclk400_mcuisp, "div_aclk400_mcuisp", "mout_aclk400_mcuisp",
+ DIV_TOP, 24, 3),
+ DIV(none, "div_pwm_isp", "mout_pwm_isp", E4X12_DIV_ISP, 0, 4),
+ DIV(none, "div_spi0_isp", "mout_spi0_isp", E4X12_DIV_ISP, 4, 4),
+ DIV(none, "div_spi0_isp_pre", "div_spi0_isp", E4X12_DIV_ISP, 8, 8),
+ DIV(none, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4),
+ DIV(none, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8),
+ DIV(none, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4),
+ DIV(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3),
+ DIV(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3),
+ DIV(none, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3),
+ DIV(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1, 4, 3),
+ DIV(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, 8, 3),
+};
+
+/* list of gate clocks supported in all exynos4 soc's */
+struct samsung_gate_clock exynos4_gate_clks[] __initdata = {
+ /*
+ * After all Exynos4 based platforms are migrated to use device tree,
+ * the device name and clock alias names specified below for some
+ * of the clocks can be removed.
+ */
+ GATE(sclk_hdmi, "sclk_hdmi", "mout_hdmi", SRC_MASK_TV, 0, 0, 0),
+ GATE(sclk_spdif, "sclk_spdif", "mout_spdif", SRC_MASK_PERIL1, 8, 0, 0),
+ GATE(jpeg, "jpeg", "aclk160", GATE_IP_CAM, 6, 0, 0),
+ GATE(mie0, "mie0", "aclk160", GATE_IP_LCD0, 1, 0, 0),
+ GATE(dsim0, "dsim0", "aclk160", GATE_IP_LCD0, 3, 0, 0),
+ GATE(fimd1, "fimd1", "aclk160", E4210_GATE_IP_LCD1, 0, 0, 0),
+ GATE(mie1, "mie1", "aclk160", E4210_GATE_IP_LCD1, 1, 0, 0),
+ GATE(dsim1, "dsim1", "aclk160", E4210_GATE_IP_LCD1, 3, 0, 0),
+ GATE(smmu_fimd1, "smmu_fimd1", "aclk160", E4210_GATE_IP_LCD1, 4, 0, 0),
+ GATE(tsi, "tsi", "aclk133", GATE_IP_FSYS, 4, 0, 0),
+ GATE(sromc, "sromc", "aclk133", GATE_IP_FSYS, 11, 0, 0),
+ GATE(sclk_g3d, "sclk_g3d", "div_g3d", GATE_IP_G3D, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(usb_device, "usb_device", "aclk133", GATE_IP_FSYS, 13, 0, 0),
+ GATE(onenand, "onenand", "aclk133", GATE_IP_FSYS, 15, 0, 0),
+ GATE(nfcon, "nfcon", "aclk133", GATE_IP_FSYS, 16, 0, 0),
+ GATE(gps, "gps", "aclk133", GATE_IP_GPS, 0, 0, 0),
+ GATE(smmu_gps, "smmu_gps", "aclk133", GATE_IP_GPS, 1, 0, 0),
+ GATE(slimbus, "slimbus", "aclk100", GATE_IP_PERIL, 25, 0, 0),
+ GATE(sclk_cam0, "sclk_cam0", "div_cam0", GATE_SCLK_CAM, 4,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_cam1, "sclk_cam1", "div_cam1", GATE_SCLK_CAM, 5,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_mipi0, "sclk_mipi0", "div_mipi_pre0",
+ SRC_MASK_LCD0, 12, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_audio0, "sclk_audio0", "div_audio0", SRC_MASK_MAUDIO, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_audio1, "sclk_audio1", "div_audio1", SRC_MASK_PERIL1, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE_D(vp, "s5p-mixer", "vp", "aclk160", GATE_IP_TV, 0, 0, 0),
+ GATE_D(mixer, "s5p-mixer", "mixer", "aclk160", GATE_IP_TV, 1, 0, 0),
+ GATE_D(hdmi, "exynos4-hdmi", "hdmi", "aclk160", GATE_IP_TV, 3, 0, 0),
+ GATE_A(pwm, "pwm", "aclk100", GATE_IP_PERIL, 24, 0, 0, "timers"),
+ GATE_A(sdmmc4, "sdmmc4", "aclk133", GATE_IP_FSYS, 9, 0, 0, "biu"),
+ GATE_A(usb_host, "usb_host", "aclk133",
+ GATE_IP_FSYS, 12, 0, 0, "usbhost"),
+ GATE_DA(sclk_fimc0, "exynos4-fimc.0", "sclk_fimc0", "div_fimc0",
+ SRC_MASK_CAM, 0, CLK_SET_RATE_PARENT, 0, "sclk_fimc"),
+ GATE_DA(sclk_fimc1, "exynos4-fimc.1", "sclk_fimc1", "div_fimc1",
+ SRC_MASK_CAM, 4, CLK_SET_RATE_PARENT, 0, "sclk_fimc"),
+ GATE_DA(sclk_fimc2, "exynos4-fimc.2", "sclk_fimc2", "div_fimc2",
+ SRC_MASK_CAM, 8, CLK_SET_RATE_PARENT, 0, "sclk_fimc"),
+ GATE_DA(sclk_fimc3, "exynos4-fimc.3", "sclk_fimc3", "div_fimc3",
+ SRC_MASK_CAM, 12, CLK_SET_RATE_PARENT, 0, "sclk_fimc"),
+ GATE_DA(sclk_csis0, "s5p-mipi-csis.0", "sclk_csis0", "div_csis0",
+ SRC_MASK_CAM, 24, CLK_SET_RATE_PARENT, 0, "sclk_csis"),
+ GATE_DA(sclk_csis1, "s5p-mipi-csis.1", "sclk_csis1", "div_csis1",
+ SRC_MASK_CAM, 28, CLK_SET_RATE_PARENT, 0, "sclk_csis"),
+ GATE_DA(sclk_fimd0, "exynos4-fb.0", "sclk_fimd0", "div_fimd0",
+ SRC_MASK_LCD0, 0, CLK_SET_RATE_PARENT, 0, "sclk_fimd"),
+ GATE_DA(sclk_mmc0, "exynos4-sdhci.0", "sclk_mmc0", "div_mmc_pre0",
+ SRC_MASK_FSYS, 0, CLK_SET_RATE_PARENT, 0,
+ "mmc_busclk.2"),
+ GATE_DA(sclk_mmc1, "exynos4-sdhci.1", "sclk_mmc1", "div_mmc_pre1",
+ SRC_MASK_FSYS, 4, CLK_SET_RATE_PARENT, 0,
+ "mmc_busclk.2"),
+ GATE_DA(sclk_mmc2, "exynos4-sdhci.2", "sclk_mmc2", "div_mmc_pre2",
+ SRC_MASK_FSYS, 8, CLK_SET_RATE_PARENT, 0,
+ "mmc_busclk.2"),
+ GATE_DA(sclk_mmc3, "exynos4-sdhci.3", "sclk_mmc3", "div_mmc_pre3",
+ SRC_MASK_FSYS, 12, CLK_SET_RATE_PARENT, 0,
+ "mmc_busclk.2"),
+ GATE_DA(sclk_mmc4, NULL, "sclk_mmc4", "div_mmc_pre4",
+ SRC_MASK_FSYS, 16, CLK_SET_RATE_PARENT, 0, "ciu"),
+ GATE_DA(sclk_uart0, "exynos4210-uart.0", "uclk0", "div_uart0",
+ SRC_MASK_PERIL0, 0, CLK_SET_RATE_PARENT,
+ 0, "clk_uart_baud0"),
+ GATE_DA(sclk_uart1, "exynos4210-uart.1", "uclk1", "div_uart1",
+ SRC_MASK_PERIL0, 4, CLK_SET_RATE_PARENT,
+ 0, "clk_uart_baud0"),
+ GATE_DA(sclk_uart2, "exynos4210-uart.2", "uclk2", "div_uart2",
+ SRC_MASK_PERIL0, 8, CLK_SET_RATE_PARENT,
+ 0, "clk_uart_baud0"),
+ GATE_DA(sclk_uart3, "exynos4210-uart.3", "uclk3", "div_uart3",
+ SRC_MASK_PERIL0, 12, CLK_SET_RATE_PARENT,
+ 0, "clk_uart_baud0"),
+ GATE_DA(sclk_uart4, "exynos4210-uart.4", "uclk4", "div_uart4",
+ SRC_MASK_PERIL0, 16, CLK_SET_RATE_PARENT,
+ 0, "clk_uart_baud0"),
+ GATE(sclk_audio2, "sclk_audio2", "div_audio2", SRC_MASK_PERIL1, 4,
+ CLK_SET_RATE_PARENT, 0),
+ GATE_DA(sclk_spi0, "exynos4210-spi.0", "sclk_spi0", "div_spi_pre0",
+ SRC_MASK_PERIL1, 16, CLK_SET_RATE_PARENT,
+ 0, "spi_busclk0"),
+ GATE_DA(sclk_spi1, "exynos4210-spi.1", "sclk_spi1", "div_spi_pre1",
+ SRC_MASK_PERIL1, 20, CLK_SET_RATE_PARENT,
+ 0, "spi_busclk0"),
+ GATE_DA(sclk_spi2, "exynos4210-spi.2", "sclk_spi2", "div_spi_pre2",
+ SRC_MASK_PERIL1, 24, CLK_SET_RATE_PARENT,
+ 0, "spi_busclk0"),
+ GATE_DA(fimc0, "exynos4-fimc.0", "fimc0", "aclk160",
+ GATE_IP_CAM, 0, 0, 0, "fimc"),
+ GATE_DA(fimc1, "exynos4-fimc.1", "fimc1", "aclk160",
+ GATE_IP_CAM, 1, 0, 0, "fimc"),
+ GATE_DA(fimc2, "exynos4-fimc.2", "fimc2", "aclk160",
+ GATE_IP_CAM, 2, 0, 0, "fimc"),
+ GATE_DA(fimc3, "exynos4-fimc.3", "fimc3", "aclk160",
+ GATE_IP_CAM, 3, 0, 0, "fimc"),
+ GATE_DA(csis0, "s5p-mipi-csis.0", "csis0", "aclk160",
+ GATE_IP_CAM, 4, 0, 0, "fimc"),
+ GATE_DA(csis1, "s5p-mipi-csis.1", "csis1", "aclk160",
+ GATE_IP_CAM, 5, 0, 0, "fimc"),
+ GATE_DA(smmu_fimc0, "exynos-sysmmu.5", "smmu_fimc0", "aclk160",
+ GATE_IP_CAM, 7, 0, 0, "sysmmu"),
+ GATE_DA(smmu_fimc1, "exynos-sysmmu.6", "smmu_fimc1", "aclk160",
+ GATE_IP_CAM, 8, 0, 0, "sysmmu"),
+ GATE_DA(smmu_fimc2, "exynos-sysmmu.7", "smmu_fimc2", "aclk160",
+ GATE_IP_CAM, 9, 0, 0, "sysmmu"),
+ GATE_DA(smmu_fimc3, "exynos-sysmmu.8", "smmu_fimc3", "aclk160",
+ GATE_IP_CAM, 10, 0, 0, "sysmmu"),
+ GATE_DA(smmu_jpeg, "exynos-sysmmu.3", "smmu_jpeg", "aclk160",
+ GATE_IP_CAM, 11, 0, 0, "sysmmu"),
+ GATE(pixelasyncm0, "pxl_async0", "aclk160", GATE_IP_CAM, 17, 0, 0),
+ GATE(pixelasyncm1, "pxl_async1", "aclk160", GATE_IP_CAM, 18, 0, 0),
+ GATE_DA(smmu_tv, "exynos-sysmmu.2", "smmu_tv", "aclk160",
+ GATE_IP_TV, 4, 0, 0, "sysmmu"),
+ GATE_DA(mfc, "s5p-mfc", "mfc", "aclk100", GATE_IP_MFC, 0, 0, 0, "mfc"),
+ GATE_DA(smmu_mfcl, "exynos-sysmmu.0", "smmu_mfcl", "aclk100",
+ GATE_IP_MFC, 1, 0, 0, "sysmmu"),
+ GATE_DA(smmu_mfcr, "exynos-sysmmu.1", "smmu_mfcr", "aclk100",
+ GATE_IP_MFC, 2, 0, 0, "sysmmu"),
+ GATE_DA(fimd0, "exynos4-fb.0", "fimd0", "aclk160",
+ GATE_IP_LCD0, 0, 0, 0, "fimd"),
+ GATE_DA(smmu_fimd0, "exynos-sysmmu.10", "smmu_fimd0", "aclk160",
+ GATE_IP_LCD0, 4, 0, 0, "sysmmu"),
+ GATE_DA(pdma0, "dma-pl330.0", "pdma0", "aclk133",
+ GATE_IP_FSYS, 0, 0, 0, "dma"),
+ GATE_DA(pdma1, "dma-pl330.1", "pdma1", "aclk133",
+ GATE_IP_FSYS, 1, 0, 0, "dma"),
+ GATE_DA(sdmmc0, "exynos4-sdhci.0", "sdmmc0", "aclk133",
+ GATE_IP_FSYS, 5, 0, 0, "hsmmc"),
+ GATE_DA(sdmmc1, "exynos4-sdhci.1", "sdmmc1", "aclk133",
+ GATE_IP_FSYS, 6, 0, 0, "hsmmc"),
+ GATE_DA(sdmmc2, "exynos4-sdhci.2", "sdmmc2", "aclk133",
+ GATE_IP_FSYS, 7, 0, 0, "hsmmc"),
+ GATE_DA(sdmmc3, "exynos4-sdhci.3", "sdmmc3", "aclk133",
+ GATE_IP_FSYS, 8, 0, 0, "hsmmc"),
+ GATE_DA(uart0, "exynos4210-uart.0", "uart0", "aclk100",
+ GATE_IP_PERIL, 0, 0, 0, "uart"),
+ GATE_DA(uart1, "exynos4210-uart.1", "uart1", "aclk100",
+ GATE_IP_PERIL, 1, 0, 0, "uart"),
+ GATE_DA(uart2, "exynos4210-uart.2", "uart2", "aclk100",
+ GATE_IP_PERIL, 2, 0, 0, "uart"),
+ GATE_DA(uart3, "exynos4210-uart.3", "uart3", "aclk100",
+ GATE_IP_PERIL, 3, 0, 0, "uart"),
+ GATE_DA(uart4, "exynos4210-uart.4", "uart4", "aclk100",
+ GATE_IP_PERIL, 4, 0, 0, "uart"),
+ GATE_DA(i2c0, "s3c2440-i2c.0", "i2c0", "aclk100",
+ GATE_IP_PERIL, 6, 0, 0, "i2c"),
+ GATE_DA(i2c1, "s3c2440-i2c.1", "i2c1", "aclk100",
+ GATE_IP_PERIL, 7, 0, 0, "i2c"),
+ GATE_DA(i2c2, "s3c2440-i2c.2", "i2c2", "aclk100",
+ GATE_IP_PERIL, 8, 0, 0, "i2c"),
+ GATE_DA(i2c3, "s3c2440-i2c.3", "i2c3", "aclk100",
+ GATE_IP_PERIL, 9, 0, 0, "i2c"),
+ GATE_DA(i2c4, "s3c2440-i2c.4", "i2c4", "aclk100",
+ GATE_IP_PERIL, 10, 0, 0, "i2c"),
+ GATE_DA(i2c5, "s3c2440-i2c.5", "i2c5", "aclk100",
+ GATE_IP_PERIL, 11, 0, 0, "i2c"),
+ GATE_DA(i2c6, "s3c2440-i2c.6", "i2c6", "aclk100",
+ GATE_IP_PERIL, 12, 0, 0, "i2c"),
+ GATE_DA(i2c7, "s3c2440-i2c.7", "i2c7", "aclk100",
+ GATE_IP_PERIL, 13, 0, 0, "i2c"),
+ GATE_DA(i2c_hdmi, "s3c2440-hdmiphy-i2c", "i2c-hdmi", "aclk100",
+ GATE_IP_PERIL, 14, 0, 0, "i2c"),
+ GATE_DA(spi0, "exynos4210-spi.0", "spi0", "aclk100",
+ GATE_IP_PERIL, 16, 0, 0, "spi"),
+ GATE_DA(spi1, "exynos4210-spi.1", "spi1", "aclk100",
+ GATE_IP_PERIL, 17, 0, 0, "spi"),
+ GATE_DA(spi2, "exynos4210-spi.2", "spi2", "aclk100",
+ GATE_IP_PERIL, 18, 0, 0, "spi"),
+ GATE_DA(i2s1, "samsung-i2s.1", "i2s1", "aclk100",
+ GATE_IP_PERIL, 20, 0, 0, "iis"),
+ GATE_DA(i2s2, "samsung-i2s.2", "i2s2", "aclk100",
+ GATE_IP_PERIL, 21, 0, 0, "iis"),
+ GATE_DA(pcm1, "samsung-pcm.1", "pcm1", "aclk100",
+ GATE_IP_PERIL, 22, 0, 0, "pcm"),
+ GATE_DA(pcm2, "samsung-pcm.2", "pcm2", "aclk100",
+ GATE_IP_PERIL, 23, 0, 0, "pcm"),
+ GATE_DA(spdif, "samsung-spdif", "spdif", "aclk100",
+ GATE_IP_PERIL, 26, 0, 0, "spdif"),
+ GATE_DA(ac97, "samsung-ac97", "ac97", "aclk100",
+ GATE_IP_PERIL, 27, 0, 0, "ac97"),
+};
+
+/* list of gate clocks supported in exynos4210 soc */
+struct samsung_gate_clock exynos4210_gate_clks[] __initdata = {
+ GATE(tvenc, "tvenc", "aclk160", GATE_IP_TV, 2, 0, 0),
+ GATE(g2d, "g2d", "aclk200", E4210_GATE_IP_IMAGE, 0, 0, 0),
+ GATE(rotator, "rotator", "aclk200", E4210_GATE_IP_IMAGE, 1, 0, 0),
+ GATE(mdma, "mdma", "aclk200", E4210_GATE_IP_IMAGE, 2, 0, 0),
+ GATE(smmu_g2d, "smmu_g2d", "aclk200", E4210_GATE_IP_IMAGE, 3, 0, 0),
+ GATE(smmu_mdma, "smmu_mdma", "aclk200", E4210_GATE_IP_IMAGE, 5, 0, 0),
+ GATE(pcie_phy, "pcie_phy", "aclk133", GATE_IP_FSYS, 2, 0, 0),
+ GATE(sata_phy, "sata_phy", "aclk133", GATE_IP_FSYS, 3, 0, 0),
+ GATE(sata, "sata", "aclk133", GATE_IP_FSYS, 10, 0, 0),
+ GATE(pcie, "pcie", "aclk133", GATE_IP_FSYS, 14, 0, 0),
+ GATE(smmu_pcie, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0),
+ GATE(modemif, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0),
+ GATE(chipid, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0),
+ GATE(sysreg, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0),
+ GATE(hdmi_cec, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0, 0),
+ GATE(smmu_rotator, "smmu_rotator", "aclk200",
+ E4210_GATE_IP_IMAGE, 4, 0, 0),
+ GATE(sclk_mipi1, "sclk_mipi1", "div_mipi_pre1",
+ E4210_SRC_MASK_LCD1, 12, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_sata, "sclk_sata", "div_sata",
+ SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_mixer, "sclk_mixer", "mout_mixer", SRC_MASK_TV, 4, 0, 0),
+ GATE(sclk_dac, "sclk_dac", "mout_dac", SRC_MASK_TV, 8, 0, 0),
+ GATE_A(tsadc, "tsadc", "aclk100", GATE_IP_PERIL, 15, 0, 0, "adc"),
+ GATE_A(mct, "mct", "aclk100", E4210_GATE_IP_PERIR, 13, 0, 0, "mct"),
+ GATE_A(wdt, "watchdog", "aclk100", E4210_GATE_IP_PERIR, 14, 0, 0, "watchdog"),
+ GATE_A(rtc, "rtc", "aclk100", E4210_GATE_IP_PERIR, 15, 0, 0, "rtc"),
+ GATE_A(keyif, "keyif", "aclk100", E4210_GATE_IP_PERIR, 16, 0, 0, "keypad"),
+ GATE_DA(sclk_fimd1, "exynos4-fb.1", "sclk_fimd1", "div_fimd1",
+ E4210_SRC_MASK_LCD1, 0, CLK_SET_RATE_PARENT, 0, "sclk_fimd"),
+};
+
+/* list of gate clocks supported in exynos4x12 soc */
+struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
+ GATE(audss, "audss", "sclk_epll", E4X12_GATE_IP_MAUDIO, 0, 0, 0),
+ GATE(mdnie0, "mdnie0", "aclk160", GATE_IP_LCD0, 2, 0, 0),
+ GATE(rotator, "rotator", "aclk200", E4X12_GATE_IP_IMAGE, 1, 0, 0),
+ GATE(mdma2, "mdma2", "aclk200", E4X12_GATE_IP_IMAGE, 2, 0, 0),
+ GATE(smmu_mdma, "smmu_mdma", "aclk200", E4X12_GATE_IP_IMAGE, 5, 0, 0),
+ GATE(mipi_hsi, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0),
+ GATE(chipid, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0),
+ GATE(sysreg, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1, 0, 0),
+ GATE(hdmi_cec, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0, 0),
+ GATE(sclk_mdnie0, "sclk_mdnie0", "div_mdnie0",
+ SRC_MASK_LCD0, 4, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_mdnie_pwm0, "sclk_mdnie_pwm0", "div_mdnie_pwm_pre0",
+ SRC_MASK_LCD0, 8, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_mipihsi, "sclk_mipihsi", "div_mipihsi",
+ SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
+ GATE(smmu_rotator, "smmu_rotator", "aclk200",
+ E4X12_GATE_IP_IMAGE, 4, 0, 0),
+ GATE_A(mct, "mct", "aclk100", E4X12_GATE_IP_PERIR, 13, 0, 0, "mct"),
+ GATE_A(rtc, "rtc", "aclk100", E4X12_GATE_IP_PERIR, 15, 0, 0, "rtc"),
+ GATE_A(keyif, "keyif", "aclk100",
+ E4X12_GATE_IP_PERIR, 16, 0, 0, "keypad"),
+ GATE(sclk_pwm_isp, "sclk_pwm_isp", "div_pwm_isp",
+ E4X12_SRC_MASK_ISP, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_spi0_isp, "sclk_spi0_isp", "div_spi0_isp_pre",
+ E4X12_SRC_MASK_ISP, 4, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_spi1_isp, "sclk_spi1_isp", "div_spi1_isp_pre",
+ E4X12_SRC_MASK_ISP, 8, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_uart_isp, "sclk_uart_isp", "div_uart_isp",
+ E4X12_SRC_MASK_ISP, 12, CLK_SET_RATE_PARENT, 0),
+ GATE(pwm_isp_sclk, "pwm_isp_sclk", "sclk_pwm_isp",
+ E4X12_GATE_IP_ISP, 0, 0, 0),
+ GATE(spi0_isp_sclk, "spi0_isp_sclk", "sclk_spi0_isp",
+ E4X12_GATE_IP_ISP, 1, 0, 0),
+ GATE(spi1_isp_sclk, "spi1_isp_sclk", "sclk_spi1_isp",
+ E4X12_GATE_IP_ISP, 2, 0, 0),
+ GATE(uart_isp_sclk, "uart_isp_sclk", "sclk_uart_isp",
+ E4X12_GATE_IP_ISP, 3, 0, 0),
+ GATE_A(wdt, "watchdog", "aclk100",
+ E4X12_GATE_IP_PERIR, 14, 0, 0, "watchdog"),
+ GATE_DA(pcm0, "samsung-pcm.0", "pcm0", "aclk100",
+ E4X12_GATE_IP_MAUDIO, 2, 0, 0, "pcm"),
+ GATE_DA(i2s0, "samsung-i2s.0", "i2s0", "aclk100",
+ E4X12_GATE_IP_MAUDIO, 3, 0, 0, "iis"),
+ GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(fimc_fd, "fd", "aclk200", E4X12_GATE_ISP0, 2,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(fimc_lite0, "lite0", "aclk200", E4X12_GATE_ISP0, 3,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(fimc_lite1, "lite1", "aclk200", E4X12_GATE_ISP0, 4,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(mcuisp, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(gicisp, "gicisp", "aclk200", E4X12_GATE_ISP0, 7,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(smmu_isp, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(smmu_drc, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(smmu_fd, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(smmu_lite0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(smmu_lite1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(ppmuispmx, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(ppmuispx, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(mcuctl_isp, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(mpwm_isp, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(i2c0_isp, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(i2c1_isp, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(mtcadc_isp, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(pwm_isp, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(wdt_isp, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(uart_isp, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(asyncaxim, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(smmu_ispcx, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(spi0_isp, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13,
+ CLK_IGNORE_UNUSED, 0),
+};
+
+/*
+ * The parent of the fin_pll clock is selected by the XOM[0] bit. This bit
+ * resides in chipid register space, outside of the clock controller memory
+ * mapped space. So to determine the parent of fin_pll clock, the chipid
+ * controller is first remapped and the value of XOM[0] bit is read to
+ * determine the parent clock.
+ */
+static unsigned long exynos4_get_xom(void)
+{
+ unsigned long xom = 0;
+ void __iomem *chipid_base;
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "samsung,exynos4210-chipid");
+ if (np) {
+ chipid_base = of_iomap(np, 0);
+
+ if (chipid_base)
+ xom = readl(chipid_base + 8);
+
+ iounmap(chipid_base);
+ }
+
+ return xom;
+}
+
+static void __init exynos4_clk_register_finpll(unsigned long xom)
+{
+ struct samsung_fixed_rate_clock fclk;
+ struct clk *clk;
+ unsigned long finpll_f = 24000000;
+ char *parent_name;
+
+ parent_name = xom & 1 ? "xusbxti" : "xxti";
+ clk = clk_get(NULL, parent_name);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to lookup parent clock %s, assuming "
+ "fin_pll clock frequency is 24MHz\n", __func__,
+ parent_name);
+ } else {
+ finpll_f = clk_get_rate(clk);
+ }
+
+ fclk.id = fin_pll;
+ fclk.name = "fin_pll";
+ fclk.parent_name = NULL;
+ fclk.flags = CLK_IS_ROOT;
+ fclk.fixed_rate = finpll_f;
+ samsung_clk_register_fixed_rate(&fclk, 1);
+}
+
+/*
+ * This function allows non-dt platforms to specify the clock speed of the
+ * xxti and xusbxti clocks. These clocks are then registered with the specified
+ * clock speed.
+ */
+void __init exynos4_clk_register_fixed_ext(unsigned long xxti_f,
+ unsigned long xusbxti_f)
+{
+ exynos4_fixed_rate_ext_clks[0].fixed_rate = xxti_f;
+ exynos4_fixed_rate_ext_clks[1].fixed_rate = xusbxti_f;
+ samsung_clk_register_fixed_rate(exynos4_fixed_rate_ext_clks,
+ ARRAY_SIZE(exynos4_fixed_rate_ext_clks));
+}
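+
+/*
+ * Illustrative sketch only (not part of this patch): a non-dt machine whose
+ * xxti and xusbxti pins are both fed by, say, a 24 MHz crystal would pass the
+ * rates from its platform clock initialization code with a call such as
+ *
+ *	exynos4_clk_register_fixed_ext(24000000, 24000000);
+ *
+ * The 24 MHz values are an assumption used purely for illustration.
+ */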
+
+static __initdata struct of_device_id ext_clk_match[] = {
+ { .compatible = "samsung,clock-xxti", .data = (void *)0, },
+ { .compatible = "samsung,clock-xusbxti", .data = (void *)1, },
+ {},
+};
+
+/* register exynos4 clocks */
+void __init exynos4_clk_init(struct device_node *np,
+ enum exynos4_soc exynos4_soc, void __iomem *reg_base, unsigned long xom)
+{
+ struct clk *apll, *mpll, *epll, *vpll;
+
+ if (np) {
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ panic("%s: failed to map registers\n", __func__);
+ }
+
+ if (exynos4_soc == EXYNOS4210)
+ samsung_clk_init(np, reg_base, nr_clks,
+ exynos4_clk_regs, ARRAY_SIZE(exynos4_clk_regs),
+ exynos4210_clk_save, ARRAY_SIZE(exynos4210_clk_save));
+ else
+ samsung_clk_init(np, reg_base, nr_clks,
+ exynos4_clk_regs, ARRAY_SIZE(exynos4_clk_regs),
+ exynos4x12_clk_save, ARRAY_SIZE(exynos4x12_clk_save));
+
+ if (np)
+ samsung_clk_of_register_fixed_ext(exynos4_fixed_rate_ext_clks,
+ ARRAY_SIZE(exynos4_fixed_rate_ext_clks),
+ ext_clk_match);
+
+ exynos4_clk_register_finpll(xom);
+
+ if (exynos4_soc == EXYNOS4210) {
+ apll = samsung_clk_register_pll45xx("fout_apll", "fin_pll",
+ reg_base + APLL_CON0, pll_4508);
+ mpll = samsung_clk_register_pll45xx("fout_mpll", "fin_pll",
+ reg_base + E4210_MPLL_CON0, pll_4508);
+ epll = samsung_clk_register_pll46xx("fout_epll", "fin_pll",
+ reg_base + EPLL_CON0, pll_4600);
+ vpll = samsung_clk_register_pll46xx("fout_vpll", "mout_vpllsrc",
+ reg_base + VPLL_CON0, pll_4650c);
+ } else {
+ apll = samsung_clk_register_pll35xx("fout_apll", "fin_pll",
+ reg_base + APLL_CON0);
+ mpll = samsung_clk_register_pll35xx("fout_mpll", "fin_pll",
+ reg_base + E4X12_MPLL_CON0);
+ epll = samsung_clk_register_pll36xx("fout_epll", "fin_pll",
+ reg_base + EPLL_CON0);
+ vpll = samsung_clk_register_pll36xx("fout_vpll", "fin_pll",
+ reg_base + VPLL_CON0);
+ }
+
+ samsung_clk_add_lookup(apll, fout_apll);
+ samsung_clk_add_lookup(mpll, fout_mpll);
+ samsung_clk_add_lookup(epll, fout_epll);
+ samsung_clk_add_lookup(vpll, fout_vpll);
+
+ samsung_clk_register_fixed_rate(exynos4_fixed_rate_clks,
+ ARRAY_SIZE(exynos4_fixed_rate_clks));
+ samsung_clk_register_mux(exynos4_mux_clks,
+ ARRAY_SIZE(exynos4_mux_clks));
+ samsung_clk_register_div(exynos4_div_clks,
+ ARRAY_SIZE(exynos4_div_clks));
+ samsung_clk_register_gate(exynos4_gate_clks,
+ ARRAY_SIZE(exynos4_gate_clks));
+
+ if (exynos4_soc == EXYNOS4210) {
+ samsung_clk_register_fixed_rate(exynos4210_fixed_rate_clks,
+ ARRAY_SIZE(exynos4210_fixed_rate_clks));
+ samsung_clk_register_mux(exynos4210_mux_clks,
+ ARRAY_SIZE(exynos4210_mux_clks));
+ samsung_clk_register_div(exynos4210_div_clks,
+ ARRAY_SIZE(exynos4210_div_clks));
+ samsung_clk_register_gate(exynos4210_gate_clks,
+ ARRAY_SIZE(exynos4210_gate_clks));
+ } else {
+ samsung_clk_register_mux(exynos4x12_mux_clks,
+ ARRAY_SIZE(exynos4x12_mux_clks));
+ samsung_clk_register_div(exynos4x12_div_clks,
+ ARRAY_SIZE(exynos4x12_div_clks));
+ samsung_clk_register_gate(exynos4x12_gate_clks,
+ ARRAY_SIZE(exynos4x12_gate_clks));
+ }
+
+ pr_info("%s clocks: sclk_apll = %ld, sclk_mpll = %ld\n"
+ "\tsclk_epll = %ld, sclk_vpll = %ld, arm_clk = %ld\n",
+ exynos4_soc == EXYNOS4210 ? "Exynos4210" : "Exynos4x12",
+ _get_rate("sclk_apll"), _get_rate("sclk_mpll"),
+ _get_rate("sclk_epll"), _get_rate("sclk_vpll"),
+ _get_rate("arm_clk"));
+}
+
+static void __init exynos4210_clk_init(struct device_node *np)
+{
+ exynos4_clk_init(np, EXYNOS4210, NULL, exynos4_get_xom());
+}
+CLK_OF_DECLARE(exynos4210_clk, "samsung,exynos4210-clock", exynos4210_clk_init);
+
+static void __init exynos4412_clk_init(struct device_node *np)
+{
+ exynos4_clk_init(np, EXYNOS4X12, NULL, exynos4_get_xom());
+}
+CLK_OF_DECLARE(exynos4412_clk, "samsung,exynos4412-clock", exynos4412_clk_init);
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
new file mode 100644
index 0000000..5c97e75
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -0,0 +1,522 @@
+/*
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2013 Linaro Ltd.
+ * Author: Thomas Abraham <thomas.ab@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for Exynos5250 SoC.
+*/
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include "clk.h"
+#include "clk-pll.h"
+
+#define SRC_CPU 0x200
+#define DIV_CPU0 0x500
+#define SRC_CORE1 0x4204
+#define SRC_TOP0 0x10210
+#define SRC_TOP2 0x10218
+#define SRC_GSCL 0x10220
+#define SRC_DISP1_0 0x1022c
+#define SRC_MAU 0x10240
+#define SRC_FSYS 0x10244
+#define SRC_GEN 0x10248
+#define SRC_PERIC0 0x10250
+#define SRC_PERIC1 0x10254
+#define SRC_MASK_GSCL 0x10320
+#define SRC_MASK_DISP1_0 0x1032c
+#define SRC_MASK_MAU 0x10334
+#define SRC_MASK_FSYS 0x10340
+#define SRC_MASK_GEN 0x10344
+#define SRC_MASK_PERIC0 0x10350
+#define SRC_MASK_PERIC1 0x10354
+#define DIV_TOP0 0x10510
+#define DIV_TOP1 0x10514
+#define DIV_GSCL 0x10520
+#define DIV_DISP1_0 0x1052c
+#define DIV_GEN 0x1053c
+#define DIV_MAU 0x10544
+#define DIV_FSYS0 0x10548
+#define DIV_FSYS1 0x1054c
+#define DIV_FSYS2 0x10550
+#define DIV_PERIC0 0x10558
+#define DIV_PERIC1 0x1055c
+#define DIV_PERIC2 0x10560
+#define DIV_PERIC3 0x10564
+#define DIV_PERIC4 0x10568
+#define DIV_PERIC5 0x1056c
+#define GATE_IP_GSCL 0x10920
+#define GATE_IP_MFC 0x1092c
+#define GATE_IP_GEN 0x10934
+#define GATE_IP_FSYS 0x10944
+#define GATE_IP_PERIC 0x10950
+#define GATE_IP_PERIS 0x10960
+#define SRC_CDREX 0x20200
+#define PLL_DIV2_SEL 0x20a24
+#define GATE_IP_DISP1 0x10928
+
+/*
+ * Let each supported clock get a unique id. This id is used to look up the
+ * clock on device tree based platforms. The clocks are categorized into three
+ * sections: core, sclk gate and bus interface gate clocks.
+ *
+ * When adding a new clock to this list, it is advised to choose a clock
+ * category and add it to the end of that category. This is because the
+ * device tree source files refer to these ids, and any change in the
+ * sequence numbers of existing clocks would require a corresponding change
+ * in the device tree files. This limitation will go away once preprocessor
+ * support for dtc is available.
+ */
+enum exynos5250_clks {
+ none,
+
+ /* core clocks */
+ fin_pll,
+
+ /* gate for special clocks (sclk) */
+ sclk_cam_bayer = 128, sclk_cam0, sclk_cam1, sclk_gscl_wa, sclk_gscl_wb,
+ sclk_fimd1, sclk_mipi1, sclk_dp, sclk_hdmi, sclk_pixel, sclk_audio0,
+ sclk_mmc0, sclk_mmc1, sclk_mmc2, sclk_mmc3, sclk_sata, sclk_usb3,
+ sclk_jpeg, sclk_uart0, sclk_uart1, sclk_uart2, sclk_uart3, sclk_pwm,
+ sclk_audio1, sclk_audio2, sclk_spdif, sclk_spi0, sclk_spi1, sclk_spi2,
+
+ /* gate clocks */
+ gscl0 = 256, gscl1, gscl2, gscl3, gscl_wa, gscl_wb, smmu_gscl0,
+ smmu_gscl1, smmu_gscl2, smmu_gscl3, mfc, smmu_mfcl, smmu_mfcr, rotator,
+ jpeg, mdma1, smmu_rotator, smmu_jpeg, smmu_mdma1, pdma0, pdma1, sata,
+ usbotg, mipi_hsi, sdmmc0, sdmmc1, sdmmc2, sdmmc3, sromc, usb2, usb3,
+ sata_phyctrl, sata_phyi2c, uart0, uart1, uart2, uart3, uart4, i2c0,
+ i2c1, i2c2, i2c3, i2c4, i2c5, i2c6, i2c7, i2c_hdmi, adc, spi0, spi1,
+ spi2, i2s1, i2s2, pcm1, pcm2, pwm, spdif, ac97, hsi2c0, hsi2c1, hsi2c2,
+ hsi2c3, chipid, sysreg, pmu, cmu_top, cmu_core, cmu_mem, tzpc0, tzpc1,
+ tzpc2, tzpc3, tzpc4, tzpc5, tzpc6, tzpc7, tzpc8, tzpc9, hdmi_cec, mct,
+ wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi,
+
+ nr_clks,
+};
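+
+/*
+ * Illustrative note (a sketch, not part of the binding added by this patch):
+ * a consumer node in the device tree refers to one of the ids above by
+ * number, e.g. the gscl0 gate clock (id 256) would be requested with
+ * "clocks = <&clock 256>;". The &clock label is hypothetical and depends on
+ * the board dts.
+ */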
+
+/*
+ * list of controller registers to be saved and restored during a
+ * suspend/resume cycle.
+ */
+static __initdata unsigned long exynos5250_clk_regs[] = {
+ SRC_CPU,
+ DIV_CPU0,
+ SRC_CORE1,
+ SRC_TOP0,
+ SRC_TOP2,
+ SRC_GSCL,
+ SRC_DISP1_0,
+ SRC_MAU,
+ SRC_FSYS,
+ SRC_GEN,
+ SRC_PERIC0,
+ SRC_PERIC1,
+ SRC_MASK_GSCL,
+ SRC_MASK_DISP1_0,
+ SRC_MASK_MAU,
+ SRC_MASK_FSYS,
+ SRC_MASK_GEN,
+ SRC_MASK_PERIC0,
+ SRC_MASK_PERIC1,
+ DIV_TOP0,
+ DIV_TOP1,
+ DIV_GSCL,
+ DIV_DISP1_0,
+ DIV_GEN,
+ DIV_MAU,
+ DIV_FSYS0,
+ DIV_FSYS1,
+ DIV_FSYS2,
+ DIV_PERIC0,
+ DIV_PERIC1,
+ DIV_PERIC2,
+ DIV_PERIC3,
+ DIV_PERIC4,
+ DIV_PERIC5,
+ GATE_IP_GSCL,
+ GATE_IP_MFC,
+ GATE_IP_GEN,
+ GATE_IP_FSYS,
+ GATE_IP_PERIC,
+ GATE_IP_PERIS,
+ SRC_CDREX,
+ PLL_DIV2_SEL,
+ GATE_IP_DISP1,
+};
+
+/* lists of parent clock names */
+PNAME(mout_apll_p) = { "fin_pll", "fout_apll", };
+PNAME(mout_cpu_p) = { "mout_apll", "mout_mpll", };
+PNAME(mout_mpll_fout_p) = { "fout_mplldiv2", "fout_mpll" };
+PNAME(mout_mpll_p) = { "fin_pll", "mout_mpll_fout" };
+PNAME(mout_bpll_fout_p) = { "fout_bplldiv2", "fout_bpll" };
+PNAME(mout_bpll_p) = { "fin_pll", "mout_bpll_fout" };
+PNAME(mout_vpllsrc_p) = { "fin_pll", "sclk_hdmi27m" };
+PNAME(mout_vpll_p) = { "mout_vpllsrc", "fout_vpll" };
+PNAME(mout_cpll_p) = { "fin_pll", "fout_cpll" };
+PNAME(mout_epll_p) = { "fin_pll", "fout_epll" };
+PNAME(mout_mpll_user_p) = { "fin_pll", "sclk_mpll" };
+PNAME(mout_bpll_user_p) = { "fin_pll", "sclk_bpll" };
+PNAME(mout_aclk166_p) = { "sclk_cpll", "sclk_mpll_user" };
+PNAME(mout_aclk200_p) = { "sclk_mpll_user", "sclk_bpll_user" };
+PNAME(mout_hdmi_p) = { "div_hdmi_pixel", "sclk_hdmiphy" };
+PNAME(mout_usb3_p) = { "sclk_mpll_user", "sclk_cpll" };
+PNAME(mout_group1_p) = { "fin_pll", "fin_pll", "sclk_hdmi27m",
+ "sclk_dptxphy", "sclk_uhostphy", "sclk_hdmiphy",
+ "sclk_mpll_user", "sclk_epll", "sclk_vpll",
+ "sclk_cpll" };
+PNAME(mout_audio0_p) = { "cdclk0", "fin_pll", "sclk_hdmi27m", "sclk_dptxphy",
+ "sclk_uhostphy", "sclk_hdmiphy",
+ "sclk_mpll_user", "sclk_epll", "sclk_vpll",
+ "sclk_cpll" };
+PNAME(mout_audio1_p) = { "cdclk1", "fin_pll", "sclk_hdmi27m", "sclk_dptxphy",
+ "sclk_uhostphy", "sclk_hdmiphy",
+ "sclk_mpll_user", "sclk_epll", "sclk_vpll",
+ "sclk_cpll" };
+PNAME(mout_audio2_p) = { "cdclk2", "fin_pll", "sclk_hdmi27m", "sclk_dptxphy",
+ "sclk_uhostphy", "sclk_hdmiphy",
+ "sclk_mpll_user", "sclk_epll", "sclk_vpll",
+ "sclk_cpll" };
+PNAME(mout_spdif_p) = { "sclk_audio0", "sclk_audio1", "sclk_audio2",
+ "spdif_extclk" };
+
+/* fixed rate clocks generated outside the soc */
+struct samsung_fixed_rate_clock exynos5250_fixed_rate_ext_clks[] __initdata = {
+ FRATE(fin_pll, "fin_pll", NULL, CLK_IS_ROOT, 0),
+};
+
+/* fixed rate clocks generated inside the soc */
+struct samsung_fixed_rate_clock exynos5250_fixed_rate_clks[] __initdata = {
+ FRATE(none, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
+ FRATE(none, "sclk_hdmi27m", NULL, CLK_IS_ROOT, 27000000),
+ FRATE(none, "sclk_dptxphy", NULL, CLK_IS_ROOT, 24000000),
+ FRATE(none, "sclk_uhostphy", NULL, CLK_IS_ROOT, 48000000),
+};
+
+struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __initdata = {
+ FFACTOR(none, "fout_mplldiv2", "fout_mpll", 1, 2, 0),
+ FFACTOR(none, "fout_bplldiv2", "fout_bpll", 1, 2, 0),
+};
+
+struct samsung_mux_clock exynos5250_mux_clks[] __initdata = {
+ MUX(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1),
+ MUX(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1),
+ MUX(none, "mout_mpll_fout", mout_mpll_fout_p, PLL_DIV2_SEL, 4, 1),
+ MUX(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1),
+ MUX(none, "mout_bpll_fout", mout_bpll_fout_p, PLL_DIV2_SEL, 0, 1),
+ MUX(none, "sclk_bpll", mout_bpll_p, SRC_CDREX, 0, 1),
+ MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1),
+ MUX(none, "sclk_vpll", mout_vpll_p, SRC_TOP2, 16, 1),
+ MUX(none, "sclk_epll", mout_epll_p, SRC_TOP2, 12, 1),
+ MUX(none, "sclk_cpll", mout_cpll_p, SRC_TOP2, 8, 1),
+ MUX(none, "sclk_mpll_user", mout_mpll_user_p, SRC_TOP2, 20, 1),
+ MUX(none, "sclk_bpll_user", mout_bpll_user_p, SRC_TOP2, 24, 1),
+ MUX(none, "mout_aclk166", mout_aclk166_p, SRC_TOP0, 8, 1),
+ MUX(none, "mout_aclk333", mout_aclk166_p, SRC_TOP0, 16, 1),
+ MUX(none, "mout_aclk200", mout_aclk200_p, SRC_TOP0, 12, 1),
+ MUX(none, "mout_cam_bayer", mout_group1_p, SRC_GSCL, 12, 4),
+ MUX(none, "mout_cam0", mout_group1_p, SRC_GSCL, 16, 4),
+ MUX(none, "mout_cam1", mout_group1_p, SRC_GSCL, 20, 4),
+ MUX(none, "mout_gscl_wa", mout_group1_p, SRC_GSCL, 24, 4),
+ MUX(none, "mout_gscl_wb", mout_group1_p, SRC_GSCL, 28, 4),
+ MUX(none, "mout_fimd1", mout_group1_p, SRC_DISP1_0, 0, 4),
+ MUX(none, "mout_mipi1", mout_group1_p, SRC_DISP1_0, 12, 4),
+ MUX(none, "mout_dp", mout_group1_p, SRC_DISP1_0, 16, 4),
+ MUX(none, "mout_hdmi", mout_hdmi_p, SRC_DISP1_0, 20, 1),
+ MUX(none, "mout_audio0", mout_audio0_p, SRC_MAU, 0, 4),
+ MUX(none, "mout_mmc0", mout_group1_p, SRC_FSYS, 0, 4),
+ MUX(none, "mout_mmc1", mout_group1_p, SRC_FSYS, 4, 4),
+ MUX(none, "mout_mmc2", mout_group1_p, SRC_FSYS, 8, 4),
+ MUX(none, "mout_mmc3", mout_group1_p, SRC_FSYS, 12, 4),
+ MUX(none, "mout_sata", mout_aclk200_p, SRC_FSYS, 24, 1),
+ MUX(none, "mout_usb3", mout_usb3_p, SRC_FSYS, 28, 1),
+ MUX(none, "mout_jpeg", mout_group1_p, SRC_GEN, 0, 4),
+ MUX(none, "mout_uart0", mout_group1_p, SRC_PERIC0, 0, 4),
+ MUX(none, "mout_uart1", mout_group1_p, SRC_PERIC0, 4, 4),
+ MUX(none, "mout_uart2", mout_group1_p, SRC_PERIC0, 8, 4),
+ MUX(none, "mout_uart3", mout_group1_p, SRC_PERIC0, 12, 4),
+ MUX(none, "mout_pwm", mout_group1_p, SRC_PERIC0, 24, 4),
+ MUX(none, "mout_audio1", mout_audio1_p, SRC_PERIC1, 0, 4),
+ MUX(none, "mout_audio2", mout_audio2_p, SRC_PERIC1, 4, 4),
+ MUX(none, "mout_spdif", mout_spdif_p, SRC_PERIC1, 8, 2),
+ MUX(none, "mout_spi0", mout_group1_p, SRC_PERIC1, 16, 4),
+ MUX(none, "mout_spi1", mout_group1_p, SRC_PERIC1, 20, 4),
+ MUX(none, "mout_spi2", mout_group1_p, SRC_PERIC1, 24, 4),
+};
+
+struct samsung_div_clock exynos5250_div_clks[] __initdata = {
+ DIV(none, "div_arm", "mout_cpu", DIV_CPU0, 0, 3),
+ DIV(none, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3),
+ DIV(none, "aclk66_pre", "sclk_mpll_user", DIV_TOP1, 24, 3),
+ DIV(none, "aclk66", "aclk66_pre", DIV_TOP0, 0, 3),
+ DIV(none, "aclk266", "sclk_mpll_user", DIV_TOP0, 16, 3),
+ DIV(none, "aclk166", "mout_aclk166", DIV_TOP0, 8, 3),
+ DIV(none, "aclk333", "mout_aclk333", DIV_TOP0, 20, 3),
+ DIV(none, "aclk200", "mout_aclk200", DIV_TOP0, 12, 3),
+ DIV(none, "div_cam_bayer", "mout_cam_bayer", DIV_GSCL, 12, 4),
+ DIV(none, "div_cam0", "mout_cam0", DIV_GSCL, 16, 4),
+ DIV(none, "div_cam1", "mout_cam1", DIV_GSCL, 20, 4),
+ DIV(none, "div_gscl_wa", "mout_gscl_wa", DIV_GSCL, 24, 4),
+ DIV(none, "div_gscl_wb", "mout_gscl_wb", DIV_GSCL, 28, 4),
+ DIV(none, "div_fimd1", "mout_fimd1", DIV_DISP1_0, 0, 4),
+ DIV(none, "div_mipi1", "mout_mipi1", DIV_DISP1_0, 16, 4),
+ DIV(none, "div_dp", "mout_dp", DIV_DISP1_0, 24, 4),
+ DIV(none, "div_jpeg", "mout_jpeg", DIV_GEN, 4, 4),
+ DIV(none, "div_audio0", "mout_audio0", DIV_MAU, 0, 4),
+ DIV(none, "div_pcm0", "sclk_audio0", DIV_MAU, 4, 8),
+ DIV(none, "div_sata", "mout_sata", DIV_FSYS0, 20, 4),
+ DIV(none, "div_usb3", "mout_usb3", DIV_FSYS0, 24, 4),
+ DIV(none, "div_mmc0", "mout_mmc0", DIV_FSYS1, 0, 4),
+ DIV(none, "div_mmc1", "mout_mmc1", DIV_FSYS1, 16, 4),
+ DIV(none, "div_mmc2", "mout_mmc2", DIV_FSYS2, 0, 4),
+ DIV(none, "div_mmc3", "mout_mmc3", DIV_FSYS2, 16, 4),
+ DIV(none, "div_uart0", "mout_uart0", DIV_PERIC0, 0, 4),
+ DIV(none, "div_uart1", "mout_uart1", DIV_PERIC0, 4, 4),
+ DIV(none, "div_uart2", "mout_uart2", DIV_PERIC0, 8, 4),
+ DIV(none, "div_uart3", "mout_uart3", DIV_PERIC0, 12, 4),
+ DIV(none, "div_spi0", "mout_spi0", DIV_PERIC1, 0, 4),
+ DIV(none, "div_spi1", "mout_spi1", DIV_PERIC1, 16, 4),
+ DIV(none, "div_spi2", "mout_spi2", DIV_PERIC2, 0, 4),
+ DIV(none, "div_pwm", "mout_pwm", DIV_PERIC3, 0, 4),
+ DIV(none, "div_audio1", "mout_audio1", DIV_PERIC4, 0, 4),
+ DIV(none, "div_pcm1", "sclk_audio1", DIV_PERIC4, 4, 8),
+ DIV(none, "div_audio2", "mout_audio2", DIV_PERIC4, 16, 4),
+ DIV(none, "div_pcm2", "sclk_audio2", DIV_PERIC4, 20, 8),
+ DIV(none, "div_i2s1", "sclk_audio1", DIV_PERIC5, 0, 6),
+ DIV(none, "div_i2s2", "sclk_audio2", DIV_PERIC5, 8, 6),
+ DIV(sclk_pixel, "div_hdmi_pixel", "sclk_vpll", DIV_DISP1_0, 28, 4),
+ DIV_A(none, "armclk", "div_arm", DIV_CPU0, 28, 3, "armclk"),
+ DIV_F(none, "div_mipi1_pre", "div_mipi1",
+ DIV_DISP1_0, 20, 4, CLK_SET_RATE_PARENT, 0),
+ DIV_F(none, "div_mmc_pre0", "div_mmc0",
+ DIV_FSYS1, 8, 8, CLK_SET_RATE_PARENT, 0),
+ DIV_F(none, "div_mmc_pre1", "div_mmc1",
+ DIV_FSYS1, 24, 8, CLK_SET_RATE_PARENT, 0),
+ DIV_F(none, "div_mmc_pre2", "div_mmc2",
+ DIV_FSYS2, 8, 8, CLK_SET_RATE_PARENT, 0),
+ DIV_F(none, "div_mmc_pre3", "div_mmc3",
+ DIV_FSYS2, 24, 8, CLK_SET_RATE_PARENT, 0),
+ DIV_F(none, "div_spi_pre0", "div_spi0",
+ DIV_PERIC1, 8, 8, CLK_SET_RATE_PARENT, 0),
+ DIV_F(none, "div_spi_pre1", "div_spi1",
+ DIV_PERIC1, 24, 8, CLK_SET_RATE_PARENT, 0),
+ DIV_F(none, "div_spi_pre2", "div_spi2",
+ DIV_PERIC2, 8, 8, CLK_SET_RATE_PARENT, 0),
+};
+
+struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
+ GATE(gscl0, "gscl0", "none", GATE_IP_GSCL, 0, 0, 0),
+ GATE(gscl1, "gscl1", "none", GATE_IP_GSCL, 1, 0, 0),
+ GATE(gscl2, "gscl2", "aclk266", GATE_IP_GSCL, 2, 0, 0),
+ GATE(gscl3, "gscl3", "aclk266", GATE_IP_GSCL, 3, 0, 0),
+ GATE(gscl_wa, "gscl_wa", "div_gscl_wa", GATE_IP_GSCL, 5, 0, 0),
+ GATE(gscl_wb, "gscl_wb", "div_gscl_wb", GATE_IP_GSCL, 6, 0, 0),
+ GATE(smmu_gscl0, "smmu_gscl0", "aclk266", GATE_IP_GSCL, 7, 0, 0),
+ GATE(smmu_gscl1, "smmu_gscl1", "aclk266", GATE_IP_GSCL, 8, 0, 0),
+ GATE(smmu_gscl2, "smmu_gscl2", "aclk266", GATE_IP_GSCL, 9, 0, 0),
+ GATE(smmu_gscl3, "smmu_gscl3", "aclk266", GATE_IP_GSCL, 10, 0, 0),
+ GATE(mfc, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0),
+ GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 1, 0, 0),
+ GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 2, 0, 0),
+ GATE(rotator, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0),
+ GATE(jpeg, "jpeg", "aclk166", GATE_IP_GEN, 2, 0, 0),
+ GATE(mdma1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0),
+ GATE(smmu_rotator, "smmu_rotator", "aclk266", GATE_IP_GEN, 6, 0, 0),
+ GATE(smmu_jpeg, "smmu_jpeg", "aclk166", GATE_IP_GEN, 7, 0, 0),
+ GATE(smmu_mdma1, "smmu_mdma1", "aclk266", GATE_IP_GEN, 9, 0, 0),
+ GATE(pdma0, "pdma0", "aclk200", GATE_IP_FSYS, 1, 0, 0),
+ GATE(pdma1, "pdma1", "aclk200", GATE_IP_FSYS, 2, 0, 0),
+ GATE(sata, "sata", "aclk200", GATE_IP_FSYS, 6, 0, 0),
+ GATE(usbotg, "usbotg", "aclk200", GATE_IP_FSYS, 7, 0, 0),
+ GATE(mipi_hsi, "mipi_hsi", "aclk200", GATE_IP_FSYS, 8, 0, 0),
+ GATE(sdmmc0, "sdmmc0", "aclk200", GATE_IP_FSYS, 12, 0, 0),
+ GATE(sdmmc1, "sdmmc1", "aclk200", GATE_IP_FSYS, 13, 0, 0),
+ GATE(sdmmc2, "sdmmc2", "aclk200", GATE_IP_FSYS, 14, 0, 0),
+ GATE(sdmmc3, "sdmmc3", "aclk200", GATE_IP_FSYS, 15, 0, 0),
+ GATE(sromc, "sromc", "aclk200", GATE_IP_FSYS, 17, 0, 0),
+ GATE(usb2, "usb2", "aclk200", GATE_IP_FSYS, 18, 0, 0),
+ GATE(usb3, "usb3", "aclk200", GATE_IP_FSYS, 19, 0, 0),
+ GATE(sata_phyctrl, "sata_phyctrl", "aclk200", GATE_IP_FSYS, 24, 0, 0),
+ GATE(sata_phyi2c, "sata_phyi2c", "aclk200", GATE_IP_FSYS, 25, 0, 0),
+ GATE(uart0, "uart0", "aclk66", GATE_IP_PERIC, 0, 0, 0),
+ GATE(uart1, "uart1", "aclk66", GATE_IP_PERIC, 1, 0, 0),
+ GATE(uart2, "uart2", "aclk66", GATE_IP_PERIC, 2, 0, 0),
+ GATE(uart3, "uart3", "aclk66", GATE_IP_PERIC, 3, 0, 0),
+ GATE(uart4, "uart4", "aclk66", GATE_IP_PERIC, 4, 0, 0),
+ GATE(i2c0, "i2c0", "aclk66", GATE_IP_PERIC, 6, 0, 0),
+ GATE(i2c1, "i2c1", "aclk66", GATE_IP_PERIC, 7, 0, 0),
+ GATE(i2c2, "i2c2", "aclk66", GATE_IP_PERIC, 8, 0, 0),
+ GATE(i2c3, "i2c3", "aclk66", GATE_IP_PERIC, 9, 0, 0),
+ GATE(i2c4, "i2c4", "aclk66", GATE_IP_PERIC, 10, 0, 0),
+ GATE(i2c5, "i2c5", "aclk66", GATE_IP_PERIC, 11, 0, 0),
+ GATE(i2c6, "i2c6", "aclk66", GATE_IP_PERIC, 12, 0, 0),
+ GATE(i2c7, "i2c7", "aclk66", GATE_IP_PERIC, 13, 0, 0),
+ GATE(i2c_hdmi, "i2c_hdmi", "aclk66", GATE_IP_PERIC, 14, 0, 0),
+ GATE(adc, "adc", "aclk66", GATE_IP_PERIC, 15, 0, 0),
+ GATE(spi0, "spi0", "aclk66", GATE_IP_PERIC, 16, 0, 0),
+ GATE(spi1, "spi1", "aclk66", GATE_IP_PERIC, 17, 0, 0),
+ GATE(spi2, "spi2", "aclk66", GATE_IP_PERIC, 18, 0, 0),
+ GATE(i2s1, "i2s1", "aclk66", GATE_IP_PERIC, 20, 0, 0),
+ GATE(i2s2, "i2s2", "aclk66", GATE_IP_PERIC, 21, 0, 0),
+ GATE(pcm1, "pcm1", "aclk66", GATE_IP_PERIC, 22, 0, 0),
+ GATE(pcm2, "pcm2", "aclk66", GATE_IP_PERIC, 23, 0, 0),
+ GATE(pwm, "pwm", "aclk66", GATE_IP_PERIC, 24, 0, 0),
+ GATE(spdif, "spdif", "aclk66", GATE_IP_PERIC, 26, 0, 0),
+ GATE(ac97, "ac97", "aclk66", GATE_IP_PERIC, 27, 0, 0),
+ GATE(hsi2c0, "hsi2c0", "aclk66", GATE_IP_PERIC, 28, 0, 0),
+ GATE(hsi2c1, "hsi2c1", "aclk66", GATE_IP_PERIC, 29, 0, 0),
+ GATE(hsi2c2, "hsi2c2", "aclk66", GATE_IP_PERIC, 30, 0, 0),
+ GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0),
+ GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0),
+ GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0),
+ GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, 0, 0),
+ GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0),
+ GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0),
+ GATE(tzpc2, "tzpc2", "aclk66", GATE_IP_PERIS, 8, 0, 0),
+ GATE(tzpc3, "tzpc3", "aclk66", GATE_IP_PERIS, 9, 0, 0),
+ GATE(tzpc4, "tzpc4", "aclk66", GATE_IP_PERIS, 10, 0, 0),
+ GATE(tzpc5, "tzpc5", "aclk66", GATE_IP_PERIS, 11, 0, 0),
+ GATE(tzpc6, "tzpc6", "aclk66", GATE_IP_PERIS, 12, 0, 0),
+ GATE(tzpc7, "tzpc7", "aclk66", GATE_IP_PERIS, 13, 0, 0),
+ GATE(tzpc8, "tzpc8", "aclk66", GATE_IP_PERIS, 14, 0, 0),
+ GATE(tzpc9, "tzpc9", "aclk66", GATE_IP_PERIS, 15, 0, 0),
+ GATE(hdmi_cec, "hdmi_cec", "aclk66", GATE_IP_PERIS, 16, 0, 0),
+ GATE(mct, "mct", "aclk66", GATE_IP_PERIS, 18, 0, 0),
+ GATE(wdt, "wdt", "aclk66", GATE_IP_PERIS, 19, 0, 0),
+ GATE(rtc, "rtc", "aclk66", GATE_IP_PERIS, 20, 0, 0),
+ GATE(tmu, "tmu", "aclk66", GATE_IP_PERIS, 21, 0, 0),
+ GATE(cmu_top, "cmu_top", "aclk66",
+ GATE_IP_PERIS, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(cmu_core, "cmu_core", "aclk66",
+ GATE_IP_PERIS, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(cmu_mem, "cmu_mem", "aclk66",
+ GATE_IP_PERIS, 5, CLK_IGNORE_UNUSED, 0),
+ GATE(sclk_cam_bayer, "sclk_cam_bayer", "div_cam_bayer",
+ SRC_MASK_GSCL, 12, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_cam0, "sclk_cam0", "div_cam0",
+ SRC_MASK_GSCL, 16, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_cam1, "sclk_cam1", "div_cam1",
+ SRC_MASK_GSCL, 20, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_gscl_wa, "sclk_gscl_wa", "div_gscl_wa",
+ SRC_MASK_GSCL, 24, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_gscl_wb, "sclk_gscl_wb", "div_gscl_wb",
+ SRC_MASK_GSCL, 28, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_fimd1, "sclk_fimd1", "div_fimd1",
+ SRC_MASK_DISP1_0, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_mipi1, "sclk_mipi1", "div_mipi1",
+ SRC_MASK_DISP1_0, 12, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_dp, "sclk_dp", "div_dp",
+ SRC_MASK_DISP1_0, 16, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_hdmi, "sclk_hdmi", "mout_hdmi",
+ SRC_MASK_DISP1_0, 20, 0, 0),
+ GATE(sclk_audio0, "sclk_audio0", "div_audio0",
+ SRC_MASK_MAU, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_mmc0, "sclk_mmc0", "div_mmc_pre0",
+ SRC_MASK_FSYS, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_mmc1, "sclk_mmc1", "div_mmc_pre1",
+ SRC_MASK_FSYS, 4, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_mmc2, "sclk_mmc2", "div_mmc_pre2",
+ SRC_MASK_FSYS, 8, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_mmc3, "sclk_mmc3", "div_mmc_pre3",
+ SRC_MASK_FSYS, 12, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_sata, "sclk_sata", "div_sata",
+ SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_usb3, "sclk_usb3", "div_usb3",
+ SRC_MASK_FSYS, 28, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_jpeg, "sclk_jpeg", "div_jpeg",
+ SRC_MASK_GEN, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_uart0, "sclk_uart0", "div_uart0",
+ SRC_MASK_PERIC0, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_uart1, "sclk_uart1", "div_uart1",
+ SRC_MASK_PERIC0, 4, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_uart2, "sclk_uart2", "div_uart2",
+ SRC_MASK_PERIC0, 8, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_uart3, "sclk_uart3", "div_uart3",
+ SRC_MASK_PERIC0, 12, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_pwm, "sclk_pwm", "div_pwm",
+ SRC_MASK_PERIC0, 24, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_audio1, "sclk_audio1", "div_audio1",
+ SRC_MASK_PERIC1, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_audio2, "sclk_audio2", "div_audio2",
+ SRC_MASK_PERIC1, 4, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_spdif, "sclk_spdif", "mout_spdif",
+ SRC_MASK_PERIC1, 8, 0, 0),
+ GATE(sclk_spi0, "sclk_spi0", "div_spi_pre0",
+ SRC_MASK_PERIC1, 16, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_spi1, "sclk_spi1", "div_spi_pre1",
+ SRC_MASK_PERIC1, 20, CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_spi2, "sclk_spi2", "div_spi_pre2",
+ SRC_MASK_PERIC1, 24, CLK_SET_RATE_PARENT, 0),
+ GATE(fimd1, "fimd1", "aclk200", GATE_IP_DISP1, 0, 0, 0),
+ GATE(mie1, "mie1", "aclk200", GATE_IP_DISP1, 1, 0, 0),
+ GATE(dsim0, "dsim0", "aclk200", GATE_IP_DISP1, 3, 0, 0),
+ GATE(dp, "dp", "aclk200", GATE_IP_DISP1, 4, 0, 0),
+ GATE(mixer, "mixer", "aclk200", GATE_IP_DISP1, 5, 0, 0),
+ GATE(hdmi, "hdmi", "aclk200", GATE_IP_DISP1, 6, 0, 0),
+};
+
+static __initdata struct of_device_id ext_clk_match[] = {
+ { .compatible = "samsung,clock-xxti", .data = (void *)0, },
+ { },
+};
+
+/* register exynos5250 clocks */
+void __init exynos5250_clk_init(struct device_node *np)
+{
+ void __iomem *reg_base;
+ struct clk *apll, *mpll, *epll, *vpll, *bpll, *gpll, *cpll;
+
+ if (np) {
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ panic("%s: failed to map registers\n", __func__);
+ } else {
+ panic("%s: unable to determine soc\n", __func__);
+ }
+
+ samsung_clk_init(np, reg_base, nr_clks,
+ exynos5250_clk_regs, ARRAY_SIZE(exynos5250_clk_regs),
+ NULL, 0);
+ samsung_clk_of_register_fixed_ext(exynos5250_fixed_rate_ext_clks,
+ ARRAY_SIZE(exynos5250_fixed_rate_ext_clks),
+ ext_clk_match);
+
+ apll = samsung_clk_register_pll35xx("fout_apll", "fin_pll",
+ reg_base + 0x100);
+ mpll = samsung_clk_register_pll35xx("fout_mpll", "fin_pll",
+ reg_base + 0x4100);
+ bpll = samsung_clk_register_pll35xx("fout_bpll", "fin_pll",
+ reg_base + 0x20110);
+ gpll = samsung_clk_register_pll35xx("fout_gpll", "fin_pll",
+ reg_base + 0x10150);
+ cpll = samsung_clk_register_pll35xx("fout_cpll", "fin_pll",
+ reg_base + 0x10120);
+ epll = samsung_clk_register_pll36xx("fout_epll", "fin_pll",
+ reg_base + 0x10130);
+ vpll = samsung_clk_register_pll36xx("fout_vpll", "mout_vpllsrc",
+ reg_base + 0x10140);
+
+ samsung_clk_register_fixed_rate(exynos5250_fixed_rate_clks,
+ ARRAY_SIZE(exynos5250_fixed_rate_clks));
+ samsung_clk_register_fixed_factor(exynos5250_fixed_factor_clks,
+ ARRAY_SIZE(exynos5250_fixed_factor_clks));
+ samsung_clk_register_mux(exynos5250_mux_clks,
+ ARRAY_SIZE(exynos5250_mux_clks));
+ samsung_clk_register_div(exynos5250_div_clks,
+ ARRAY_SIZE(exynos5250_div_clks));
+ samsung_clk_register_gate(exynos5250_gate_clks,
+ ARRAY_SIZE(exynos5250_gate_clks));
+
+ pr_info("Exynos5250: clock setup completed, armclk=%ld\n",
+ _get_rate("armclk"));
+}
+CLK_OF_DECLARE(exynos5250_clk, "samsung,exynos5250-clock", exynos5250_clk_init);
diff --git a/drivers/clk/samsung/clk-exynos5440.c b/drivers/clk/samsung/clk-exynos5440.c
new file mode 100644
index 0000000..7d54341
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos5440.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * Author: Thomas Abraham <thomas.ab@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for Exynos5440 SoC.
+*/
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include "clk.h"
+#include "clk-pll.h"
+
+#define CLKEN_OV_VAL 0xf8
+#define CPU_CLK_STATUS 0xfc
+#define MISC_DOUT1 0x558
+
+/*
+ * Let each supported clock get a unique id. This id is used to look up the
+ * clock on device tree based platforms.
+ */
+enum exynos5440_clks {
+ none, xtal, arm_clk,
+
+ spi_baud = 16, pb0_250, pr0_250, pr1_250, b_250, b_125, b_200, sata,
+ usb, gmac0, cs250, pb0_250_o, pr0_250_o, pr1_250_o, b_250_o, b_125_o,
+ b_200_o, sata_o, usb_o, gmac0_o, cs250_o,
+
+ nr_clks,
+};
+
+/* parent clock name list */
+PNAME(mout_armclk_p) = { "cplla", "cpllb" };
+PNAME(mout_spi_p) = { "div125", "div200" };
+
+/* fixed rate clocks generated outside the soc */
+struct samsung_fixed_rate_clock exynos5440_fixed_rate_ext_clks[] __initdata = {
+ FRATE(none, "xtal", NULL, CLK_IS_ROOT, 0),
+};
+
+/* fixed rate clocks */
+struct samsung_fixed_rate_clock exynos5440_fixed_rate_clks[] __initdata = {
+ FRATE(none, "ppll", NULL, CLK_IS_ROOT, 1000000000),
+ FRATE(none, "usb_phy0", NULL, CLK_IS_ROOT, 60000000),
+ FRATE(none, "usb_phy1", NULL, CLK_IS_ROOT, 60000000),
+ FRATE(none, "usb_ohci12", NULL, CLK_IS_ROOT, 12000000),
+ FRATE(none, "usb_ohci48", NULL, CLK_IS_ROOT, 48000000),
+};
+
+/* fixed factor clocks */
+struct samsung_fixed_factor_clock exynos5440_fixed_factor_clks[] __initdata = {
+ FFACTOR(none, "div250", "ppll", 1, 4, 0),
+ FFACTOR(none, "div200", "ppll", 1, 5, 0),
+ FFACTOR(none, "div125", "div250", 1, 2, 0),
+};
+
+/* mux clocks */
+struct samsung_mux_clock exynos5440_mux_clks[] __initdata = {
+ MUX(none, "mout_spi", mout_spi_p, MISC_DOUT1, 5, 1),
+ MUX_A(arm_clk, "arm_clk", mout_armclk_p,
+ CPU_CLK_STATUS, 0, 1, "armclk"),
+};
+
+/* divider clocks */
+struct samsung_div_clock exynos5440_div_clks[] __initdata = {
+ DIV(spi_baud, "div_spi", "mout_spi", MISC_DOUT1, 3, 2),
+};
+
+/* gate clocks */
+struct samsung_gate_clock exynos5440_gate_clks[] __initdata = {
+ GATE(pb0_250, "pb0_250", "div250", CLKEN_OV_VAL, 3, 0, 0),
+ GATE(pr0_250, "pr0_250", "div250", CLKEN_OV_VAL, 4, 0, 0),
+ GATE(pr1_250, "pr1_250", "div250", CLKEN_OV_VAL, 5, 0, 0),
+ GATE(b_250, "b_250", "div250", CLKEN_OV_VAL, 9, 0, 0),
+ GATE(b_125, "b_125", "div125", CLKEN_OV_VAL, 10, 0, 0),
+ GATE(b_200, "b_200", "div200", CLKEN_OV_VAL, 11, 0, 0),
+ GATE(sata, "sata", "div200", CLKEN_OV_VAL, 12, 0, 0),
+ GATE(usb, "usb", "div200", CLKEN_OV_VAL, 13, 0, 0),
+ GATE(gmac0, "gmac0", "div200", CLKEN_OV_VAL, 14, 0, 0),
+ GATE(cs250, "cs250", "div250", CLKEN_OV_VAL, 19, 0, 0),
+ GATE(pb0_250_o, "pb0_250_o", "pb0_250", CLKEN_OV_VAL, 3, 0, 0),
+ GATE(pr0_250_o, "pr0_250_o", "pr0_250", CLKEN_OV_VAL, 4, 0, 0),
+ GATE(pr1_250_o, "pr1_250_o", "pr1_250", CLKEN_OV_VAL, 5, 0, 0),
+ GATE(b_250_o, "b_250_o", "b_250", CLKEN_OV_VAL, 9, 0, 0),
+ GATE(b_125_o, "b_125_o", "b_125", CLKEN_OV_VAL, 10, 0, 0),
+ GATE(b_200_o, "b_200_o", "b_200", CLKEN_OV_VAL, 11, 0, 0),
+ GATE(sata_o, "sata_o", "sata", CLKEN_OV_VAL, 12, 0, 0),
+ GATE(usb_o, "usb_o", "usb", CLKEN_OV_VAL, 13, 0, 0),
+ GATE(gmac0_o, "gmac0_o", "gmac0", CLKEN_OV_VAL, 14, 0, 0),
+ GATE(cs250_o, "cs250_o", "cs250", CLKEN_OV_VAL, 19, 0, 0),
+};
+
+static __initdata struct of_device_id ext_clk_match[] = {
+ { .compatible = "samsung,clock-xtal", .data = (void *)0, },
+ {},
+};
+
+/* register exynos5440 clocks */
+void __init exynos5440_clk_init(struct device_node *np)
+{
+ void __iomem *reg_base;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: failed to map clock controller registers,"
+ " aborting clock initialization\n", __func__);
+ return;
+ }
+
+ samsung_clk_init(np, reg_base, nr_clks, NULL, 0, NULL, 0);
+ samsung_clk_of_register_fixed_ext(exynos5440_fixed_rate_ext_clks,
+ ARRAY_SIZE(exynos5440_fixed_rate_ext_clks), ext_clk_match);
+
+ samsung_clk_register_pll2550x("cplla", "xtal", reg_base + 0x1c, 0x10);
+ samsung_clk_register_pll2550x("cpllb", "xtal", reg_base + 0x20, 0x10);
+
+ samsung_clk_register_fixed_rate(exynos5440_fixed_rate_clks,
+ ARRAY_SIZE(exynos5440_fixed_rate_clks));
+ samsung_clk_register_fixed_factor(exynos5440_fixed_factor_clks,
+ ARRAY_SIZE(exynos5440_fixed_factor_clks));
+ samsung_clk_register_mux(exynos5440_mux_clks,
+ ARRAY_SIZE(exynos5440_mux_clks));
+ samsung_clk_register_div(exynos5440_div_clks,
+ ARRAY_SIZE(exynos5440_div_clks));
+ samsung_clk_register_gate(exynos5440_gate_clks,
+ ARRAY_SIZE(exynos5440_gate_clks));
+
+ pr_info("Exynos5440: arm_clk = %ldHz\n", _get_rate("armclk"));
+ pr_info("exynos5440 clock initialization complete\n");
+}
+CLK_OF_DECLARE(exynos5440_clk, "samsung,exynos5440-clock", exynos5440_clk_init);
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
new file mode 100644
index 0000000..89135f6
--- /dev/null
+++ b/drivers/clk/samsung/clk-pll.c
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2013 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file contains the utility functions to register the pll clocks.
+*/
+
+#include <linux/errno.h>
+#include "clk.h"
+#include "clk-pll.h"
+
+/*
+ * PLL35xx Clock Type
+ */
+
+#define PLL35XX_MDIV_MASK (0x3FF)
+#define PLL35XX_PDIV_MASK (0x3F)
+#define PLL35XX_SDIV_MASK (0x7)
+#define PLL35XX_MDIV_SHIFT (16)
+#define PLL35XX_PDIV_SHIFT (8)
+#define PLL35XX_SDIV_SHIFT (0)
+
+struct samsung_clk_pll35xx {
+ struct clk_hw hw;
+ const void __iomem *con_reg;
+};
+
+#define to_clk_pll35xx(_hw) container_of(_hw, struct samsung_clk_pll35xx, hw)
+
+static unsigned long samsung_pll35xx_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll35xx *pll = to_clk_pll35xx(hw);
+ u32 mdiv, pdiv, sdiv, pll_con;
+ u64 fvco = parent_rate;
+
+ pll_con = __raw_readl(pll->con_reg);
+ mdiv = (pll_con >> PLL35XX_MDIV_SHIFT) & PLL35XX_MDIV_MASK;
+ pdiv = (pll_con >> PLL35XX_PDIV_SHIFT) & PLL35XX_PDIV_MASK;
+ sdiv = (pll_con >> PLL35XX_SDIV_SHIFT) & PLL35XX_SDIV_MASK;
+
+ fvco *= mdiv;
+ do_div(fvco, (pdiv << sdiv));
+
+ return (unsigned long)fvco;
+}
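+
+/*
+ * Worked example with illustrative values (not taken from any particular
+ * board): for a 24 MHz fin_pll and MDIV = 200, PDIV = 3, SDIV = 1 the code
+ * above computes
+ *
+ *	fout = 24 MHz * 200 / (3 << 1) = 800 MHz
+ */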
+
+static const struct clk_ops samsung_pll35xx_clk_ops = {
+ .recalc_rate = samsung_pll35xx_recalc_rate,
+};
+
+struct clk * __init samsung_clk_register_pll35xx(const char *name,
+ const char *pname, const void __iomem *con_reg)
+{
+ struct samsung_clk_pll35xx *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll) {
+ pr_err("%s: could not allocate pll clk %s\n", __func__, name);
+ return NULL;
+ }
+
+ init.name = name;
+ init.ops = &samsung_pll35xx_clk_ops;
+ init.flags = CLK_GET_RATE_NOCACHE;
+ init.parent_names = &pname;
+ init.num_parents = 1;
+
+ pll->hw.init = &init;
+ pll->con_reg = con_reg;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register pll clock %s\n", __func__,
+ name);
+ kfree(pll);
+ }
+
+ if (clk_register_clkdev(clk, name, NULL))
+ pr_err("%s: failed to register lookup for %s", __func__, name);
+
+ return clk;
+}
+
+/*
+ * PLL36xx Clock Type
+ */
+
+#define PLL36XX_KDIV_MASK (0xFFFF)
+#define PLL36XX_MDIV_MASK (0x1FF)
+#define PLL36XX_PDIV_MASK (0x3F)
+#define PLL36XX_SDIV_MASK (0x7)
+#define PLL36XX_MDIV_SHIFT (16)
+#define PLL36XX_PDIV_SHIFT (8)
+#define PLL36XX_SDIV_SHIFT (0)
+
+struct samsung_clk_pll36xx {
+ struct clk_hw hw;
+ const void __iomem *con_reg;
+};
+
+#define to_clk_pll36xx(_hw) container_of(_hw, struct samsung_clk_pll36xx, hw)
+
+static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll36xx *pll = to_clk_pll36xx(hw);
+ u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con1;
+ u64 fvco = parent_rate;
+
+ pll_con0 = __raw_readl(pll->con_reg);
+ pll_con1 = __raw_readl(pll->con_reg + 4);
+ mdiv = (pll_con0 >> PLL36XX_MDIV_SHIFT) & PLL36XX_MDIV_MASK;
+ pdiv = (pll_con0 >> PLL36XX_PDIV_SHIFT) & PLL36XX_PDIV_MASK;
+ sdiv = (pll_con0 >> PLL36XX_SDIV_SHIFT) & PLL36XX_SDIV_MASK;
+ kdiv = pll_con1 & PLL36XX_KDIV_MASK;
+
+ fvco *= (mdiv << 16) + kdiv;
+ do_div(fvco, (pdiv << sdiv));
+ fvco >>= 16;
+
+ return (unsigned long)fvco;
+}
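+
+/*
+ * Worked example with illustrative values: for a 24 MHz fin_pll and
+ * MDIV = 90, KDIV = 32768 (i.e. a fractional part of 0.5), PDIV = 3,
+ * SDIV = 1 the code above computes
+ *
+ *	fout = 24 MHz * (90 + 32768/65536) / (3 << 1) = 362 MHz
+ */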
+
+static const struct clk_ops samsung_pll36xx_clk_ops = {
+ .recalc_rate = samsung_pll36xx_recalc_rate,
+};
+
+struct clk * __init samsung_clk_register_pll36xx(const char *name,
+ const char *pname, const void __iomem *con_reg)
+{
+ struct samsung_clk_pll36xx *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll) {
+ pr_err("%s: could not allocate pll clk %s\n", __func__, name);
+ return NULL;
+ }
+
+ init.name = name;
+ init.ops = &samsung_pll36xx_clk_ops;
+ init.flags = CLK_GET_RATE_NOCACHE;
+ init.parent_names = &pname;
+ init.num_parents = 1;
+
+ pll->hw.init = &init;
+ pll->con_reg = con_reg;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register pll clock %s\n", __func__,
+ name);
+ kfree(pll);
+ }
+
+ if (clk_register_clkdev(clk, name, NULL))
+ pr_err("%s: failed to register lookup for %s", __func__, name);
+
+ return clk;
+}
+
+/*
+ * PLL45xx Clock Type
+ */
+
+#define PLL45XX_MDIV_MASK (0x3FF)
+#define PLL45XX_PDIV_MASK (0x3F)
+#define PLL45XX_SDIV_MASK (0x7)
+#define PLL45XX_MDIV_SHIFT (16)
+#define PLL45XX_PDIV_SHIFT (8)
+#define PLL45XX_SDIV_SHIFT (0)
+
+struct samsung_clk_pll45xx {
+ struct clk_hw hw;
+ enum pll45xx_type type;
+ const void __iomem *con_reg;
+};
+
+#define to_clk_pll45xx(_hw) container_of(_hw, struct samsung_clk_pll45xx, hw)
+
+static unsigned long samsung_pll45xx_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll45xx *pll = to_clk_pll45xx(hw);
+ u32 mdiv, pdiv, sdiv, pll_con;
+ u64 fvco = parent_rate;
+
+ pll_con = __raw_readl(pll->con_reg);
+ mdiv = (pll_con >> PLL45XX_MDIV_SHIFT) & PLL45XX_MDIV_MASK;
+ pdiv = (pll_con >> PLL45XX_PDIV_SHIFT) & PLL45XX_PDIV_MASK;
+ sdiv = (pll_con >> PLL45XX_SDIV_SHIFT) & PLL45XX_SDIV_MASK;
+
+ if (pll->type == pll_4508)
+ sdiv = sdiv - 1;
+
+ fvco *= mdiv;
+ do_div(fvco, (pdiv << sdiv));
+
+ return (unsigned long)fvco;
+}
+
+static const struct clk_ops samsung_pll45xx_clk_ops = {
+ .recalc_rate = samsung_pll45xx_recalc_rate,
+};
+
+struct clk * __init samsung_clk_register_pll45xx(const char *name,
+ const char *pname, const void __iomem *con_reg,
+ enum pll45xx_type type)
+{
+ struct samsung_clk_pll45xx *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll) {
+ pr_err("%s: could not allocate pll clk %s\n", __func__, name);
+ return NULL;
+ }
+
+ init.name = name;
+ init.ops = &samsung_pll45xx_clk_ops;
+ init.flags = CLK_GET_RATE_NOCACHE;
+ init.parent_names = &pname;
+ init.num_parents = 1;
+
+ pll->hw.init = &init;
+ pll->con_reg = con_reg;
+ pll->type = type;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register pll clock %s\n", __func__,
+ name);
+ kfree(pll);
+ }
+
+ if (clk_register_clkdev(clk, name, NULL))
+ pr_err("%s: failed to register lookup for %s", __func__, name);
+
+ return clk;
+}
+
+/*
+ * PLL46xx Clock Type
+ */
+
+#define PLL46XX_MDIV_MASK (0x1FF)
+#define PLL46XX_PDIV_MASK (0x3F)
+#define PLL46XX_SDIV_MASK (0x7)
+#define PLL46XX_MDIV_SHIFT (16)
+#define PLL46XX_PDIV_SHIFT (8)
+#define PLL46XX_SDIV_SHIFT (0)
+
+#define PLL46XX_KDIV_MASK (0xFFFF)
+#define PLL4650C_KDIV_MASK (0xFFF)
+#define PLL46XX_KDIV_SHIFT (0)
+
+struct samsung_clk_pll46xx {
+ struct clk_hw hw;
+ enum pll46xx_type type;
+ const void __iomem *con_reg;
+};
+
+#define to_clk_pll46xx(_hw) container_of(_hw, struct samsung_clk_pll46xx, hw)
+
+static unsigned long samsung_pll46xx_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll46xx *pll = to_clk_pll46xx(hw);
+ u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con1, shift;
+ u64 fvco = parent_rate;
+
+ pll_con0 = __raw_readl(pll->con_reg);
+ pll_con1 = __raw_readl(pll->con_reg + 4);
+ mdiv = (pll_con0 >> PLL46XX_MDIV_SHIFT) & PLL46XX_MDIV_MASK;
+ pdiv = (pll_con0 >> PLL46XX_PDIV_SHIFT) & PLL46XX_PDIV_MASK;
+ sdiv = (pll_con0 >> PLL46XX_SDIV_SHIFT) & PLL46XX_SDIV_MASK;
+ kdiv = pll->type == pll_4650c ? pll_con1 & PLL4650C_KDIV_MASK :
+ pll_con1 & PLL46XX_KDIV_MASK;
+
+ shift = pll->type == pll_4600 ? 16 : 10;
+ fvco *= (mdiv << shift) + kdiv;
+ do_div(fvco, (pdiv << sdiv));
+ fvco >>= shift;
+
+ return (unsigned long)fvco;
+}
+
+static const struct clk_ops samsung_pll46xx_clk_ops = {
+ .recalc_rate = samsung_pll46xx_recalc_rate,
+};
+
+struct clk * __init samsung_clk_register_pll46xx(const char *name,
+ const char *pname, const void __iomem *con_reg,
+ enum pll46xx_type type)
+{
+ struct samsung_clk_pll46xx *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll) {
+ pr_err("%s: could not allocate pll clk %s\n", __func__, name);
+ return NULL;
+ }
+
+ init.name = name;
+ init.ops = &samsung_pll46xx_clk_ops;
+ init.flags = CLK_GET_RATE_NOCACHE;
+ init.parent_names = &pname;
+ init.num_parents = 1;
+
+ pll->hw.init = &init;
+ pll->con_reg = con_reg;
+ pll->type = type;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register pll clock %s\n", __func__,
+ name);
+ kfree(pll);
+ }
+
+ if (clk_register_clkdev(clk, name, NULL))
+ pr_err("%s: failed to register lookup for %s", __func__, name);
+
+ return clk;
+}
+
+/*
+ * PLL2550x Clock Type
+ */
+
+#define PLL2550X_R_MASK (0x1)
+#define PLL2550X_P_MASK (0x3F)
+#define PLL2550X_M_MASK (0x3FF)
+#define PLL2550X_S_MASK (0x7)
+#define PLL2550X_R_SHIFT (20)
+#define PLL2550X_P_SHIFT (14)
+#define PLL2550X_M_SHIFT (4)
+#define PLL2550X_S_SHIFT (0)
+
+struct samsung_clk_pll2550x {
+ struct clk_hw hw;
+ const void __iomem *reg_base;
+ unsigned long offset;
+};
+
+#define to_clk_pll2550x(_hw) container_of(_hw, struct samsung_clk_pll2550x, hw)
+
+static unsigned long samsung_pll2550x_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll2550x *pll = to_clk_pll2550x(hw);
+ u32 r, p, m, s, pll_stat;
+ u64 fvco = parent_rate;
+
+ pll_stat = __raw_readl(pll->reg_base + pll->offset * 3);
+ r = (pll_stat >> PLL2550X_R_SHIFT) & PLL2550X_R_MASK;
+ if (!r)
+ return 0;
+ p = (pll_stat >> PLL2550X_P_SHIFT) & PLL2550X_P_MASK;
+ m = (pll_stat >> PLL2550X_M_SHIFT) & PLL2550X_M_MASK;
+ s = (pll_stat >> PLL2550X_S_SHIFT) & PLL2550X_S_MASK;
+
+ fvco *= m;
+ do_div(fvco, (p << s));
+
+ return (unsigned long)fvco;
+}
+
+static const struct clk_ops samsung_pll2550x_clk_ops = {
+ .recalc_rate = samsung_pll2550x_recalc_rate,
+};
+
+struct clk * __init samsung_clk_register_pll2550x(const char *name,
+ const char *pname, const void __iomem *reg_base,
+ const unsigned long offset)
+{
+ struct samsung_clk_pll2550x *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll) {
+ pr_err("%s: could not allocate pll clk %s\n", __func__, name);
+ return NULL;
+ }
+
+ init.name = name;
+ init.ops = &samsung_pll2550x_clk_ops;
+ init.flags = CLK_GET_RATE_NOCACHE;
+ init.parent_names = &pname;
+ init.num_parents = 1;
+
+ pll->hw.init = &init;
+ pll->reg_base = reg_base;
+ pll->offset = offset;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register pll clock %s\n", __func__,
+ name);
+ kfree(pll);
+ }
+
+ if (clk_register_clkdev(clk, name, NULL))
+ pr_err("%s: failed to register lookup for %s", __func__, name);
+
+ return clk;
+}
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
new file mode 100644
index 0000000..f33786e
--- /dev/null
+++ b/drivers/clk/samsung/clk-pll.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2013 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for all PLLs in Samsung platforms
+*/
+
+#ifndef __SAMSUNG_CLK_PLL_H
+#define __SAMSUNG_CLK_PLL_H
+
+enum pll45xx_type {
+ pll_4500,
+ pll_4502,
+ pll_4508
+};
+
+enum pll46xx_type {
+ pll_4600,
+ pll_4650,
+ pll_4650c,
+};
+
+extern struct clk * __init samsung_clk_register_pll35xx(const char *name,
+ const char *pname, const void __iomem *con_reg);
+extern struct clk * __init samsung_clk_register_pll36xx(const char *name,
+ const char *pname, const void __iomem *con_reg);
+extern struct clk * __init samsung_clk_register_pll45xx(const char *name,
+ const char *pname, const void __iomem *con_reg,
+ enum pll45xx_type type);
+extern struct clk * __init samsung_clk_register_pll46xx(const char *name,
+ const char *pname, const void __iomem *con_reg,
+ enum pll46xx_type type);
+extern struct clk * __init samsung_clk_register_pll2550x(const char *name,
+ const char *pname, const void __iomem *reg_base,
+ const unsigned long offset);
+
+#endif /* __SAMSUNG_CLK_PLL_H */
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
new file mode 100644
index 0000000..cd3c40a
--- /dev/null
+++ b/drivers/clk/samsung/clk.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2013 Linaro Ltd.
+ * Author: Thomas Abraham <thomas.ab@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file includes utility functions to register clocks to common
+ * clock framework for Samsung platforms.
+*/
+
+#include <linux/syscore_ops.h>
+#include "clk.h"
+
+static DEFINE_SPINLOCK(lock);
+static struct clk **clk_table;
+static void __iomem *reg_base;
+#ifdef CONFIG_OF
+static struct clk_onecell_data clk_data;
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static struct samsung_clk_reg_dump *reg_dump;
+static unsigned long nr_reg_dump;
+
+static int samsung_clk_suspend(void)
+{
+ struct samsung_clk_reg_dump *rd = reg_dump;
+ unsigned long i;
+
+ for (i = 0; i < nr_reg_dump; i++, rd++)
+ rd->value = __raw_readl(reg_base + rd->offset);
+
+ return 0;
+}
+
+static void samsung_clk_resume(void)
+{
+ struct samsung_clk_reg_dump *rd = reg_dump;
+ unsigned long i;
+
+ for (i = 0; i < nr_reg_dump; i++, rd++)
+ __raw_writel(rd->value, reg_base + rd->offset);
+}
+
+static struct syscore_ops samsung_clk_syscore_ops = {
+ .suspend = samsung_clk_suspend,
+ .resume = samsung_clk_resume,
+};
+#endif /* CONFIG_PM_SLEEP */
+
+/* set up the essentials required to support clock lookup using ccf */
+void __init samsung_clk_init(struct device_node *np, void __iomem *base,
+ unsigned long nr_clks, unsigned long *rdump,
+ unsigned long nr_rdump, unsigned long *soc_rdump,
+ unsigned long nr_soc_rdump)
+{
+ reg_base = base;
+
+#ifdef CONFIG_PM_SLEEP
+ if (rdump && nr_rdump) {
+ unsigned int idx;
+ reg_dump = kzalloc(sizeof(struct samsung_clk_reg_dump)
+ * (nr_rdump + nr_soc_rdump), GFP_KERNEL);
+ if (!reg_dump) {
+ pr_err("%s: memory alloc for register dump failed\n",
+ __func__);
+ return;
+ }
+
+ for (idx = 0; idx < nr_rdump; idx++)
+ reg_dump[idx].offset = rdump[idx];
+ for (idx = 0; idx < nr_soc_rdump; idx++)
+ reg_dump[nr_rdump + idx].offset = soc_rdump[idx];
+ nr_reg_dump = nr_rdump + nr_soc_rdump;
+ register_syscore_ops(&samsung_clk_syscore_ops);
+ }
+#endif
+
+ clk_table = kzalloc(sizeof(struct clk *) * nr_clks, GFP_KERNEL);
+ if (!clk_table)
+ panic("could not allocate clock lookup table\n");
+
+ if (!np)
+ return;
+
+#ifdef CONFIG_OF
+ clk_data.clks = clk_table;
+ clk_data.clk_num = nr_clks;
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+#endif
+}
+
+/* add a clock instance to the clock lookup table used for dt based lookup */
+void samsung_clk_add_lookup(struct clk *clk, unsigned int id)
+{
+ if (clk_table && id)
+ clk_table[id] = clk;
+}
+
+/* register a list of aliases */
+void __init samsung_clk_register_alias(struct samsung_clock_alias *list,
+ unsigned int nr_clk)
+{
+ struct clk *clk;
+ unsigned int idx, ret;
+
+ if (!clk_table) {
+ pr_err("%s: clock table missing\n", __func__);
+ return;
+ }
+
+ for (idx = 0; idx < nr_clk; idx++, list++) {
+ if (!list->id) {
+ pr_err("%s: clock id missing for index %d\n", __func__,
+ idx);
+ continue;
+ }
+
+ clk = clk_table[list->id];
+ if (!clk) {
+ pr_err("%s: failed to find clock %d\n", __func__,
+ list->id);
+ continue;
+ }
+
+ ret = clk_register_clkdev(clk, list->alias, list->dev_name);
+ if (ret)
+ pr_err("%s: failed to register lookup %s\n",
+ __func__, list->alias);
+ }
+}
+
+/* register a list of fixed clocks */
+void __init samsung_clk_register_fixed_rate(
+ struct samsung_fixed_rate_clock *list, unsigned int nr_clk)
+{
+ struct clk *clk;
+ unsigned int idx, ret;
+
+ for (idx = 0; idx < nr_clk; idx++, list++) {
+ clk = clk_register_fixed_rate(NULL, list->name,
+ list->parent_name, list->flags, list->fixed_rate);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n", __func__,
+ list->name);
+ continue;
+ }
+
+ samsung_clk_add_lookup(clk, list->id);
+
+ /*
+ * Unconditionally add a clock lookup for the fixed rate clocks.
+ * There are not many of these on any Samsung platform.
+ */
+ ret = clk_register_clkdev(clk, list->name, NULL);
+ if (ret)
+ pr_err("%s: failed to register clock lookup for %s",
+ __func__, list->name);
+ }
+}
+
+/* register a list of fixed factor clocks */
+void __init samsung_clk_register_fixed_factor(
+ struct samsung_fixed_factor_clock *list, unsigned int nr_clk)
+{
+ struct clk *clk;
+ unsigned int idx;
+
+ for (idx = 0; idx < nr_clk; idx++, list++) {
+ clk = clk_register_fixed_factor(NULL, list->name,
+ list->parent_name, list->flags, list->mult, list->div);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n", __func__,
+ list->name);
+ continue;
+ }
+
+ samsung_clk_add_lookup(clk, list->id);
+ }
+}
+
+/* register a list of mux clocks */
+void __init samsung_clk_register_mux(struct samsung_mux_clock *list,
+ unsigned int nr_clk)
+{
+ struct clk *clk;
+ unsigned int idx, ret;
+
+ for (idx = 0; idx < nr_clk; idx++, list++) {
+ clk = clk_register_mux(NULL, list->name, list->parent_names,
+ list->num_parents, list->flags, reg_base + list->offset,
+ list->shift, list->width, list->mux_flags, &lock);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n", __func__,
+ list->name);
+ continue;
+ }
+
+ samsung_clk_add_lookup(clk, list->id);
+
+ /* register a clock lookup only if a clock alias is specified */
+ if (list->alias) {
+ ret = clk_register_clkdev(clk, list->alias,
+ list->dev_name);
+ if (ret)
+ pr_err("%s: failed to register lookup %s\n",
+ __func__, list->alias);
+ }
+ }
+}
+
+/* register a list of div clocks */
+void __init samsung_clk_register_div(struct samsung_div_clock *list,
+ unsigned int nr_clk)
+{
+ struct clk *clk;
+ unsigned int idx, ret;
+
+ for (idx = 0; idx < nr_clk; idx++, list++) {
+ if (list->table)
+ clk = clk_register_divider_table(NULL, list->name,
+ list->parent_name, list->flags,
+ reg_base + list->offset, list->shift,
+ list->width, list->div_flags,
+ list->table, &lock);
+ else
+ clk = clk_register_divider(NULL, list->name,
+ list->parent_name, list->flags,
+ reg_base + list->offset, list->shift,
+ list->width, list->div_flags, &lock);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n", __func__,
+ list->name);
+ continue;
+ }
+
+ samsung_clk_add_lookup(clk, list->id);
+
+ /* register a clock lookup only if a clock alias is specified */
+ if (list->alias) {
+ ret = clk_register_clkdev(clk, list->alias,
+ list->dev_name);
+ if (ret)
+ pr_err("%s: failed to register lookup %s\n",
+ __func__, list->alias);
+ }
+ }
+}
+
+/* register a list of gate clocks */
+void __init samsung_clk_register_gate(struct samsung_gate_clock *list,
+ unsigned int nr_clk)
+{
+ struct clk *clk;
+ unsigned int idx, ret;
+
+ for (idx = 0; idx < nr_clk; idx++, list++) {
+ clk = clk_register_gate(NULL, list->name, list->parent_name,
+ list->flags, reg_base + list->offset,
+ list->bit_idx, list->gate_flags, &lock);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n", __func__,
+ list->name);
+ continue;
+ }
+
+ /* register a clock lookup only if a clock alias is specified */
+ if (list->alias) {
+ ret = clk_register_clkdev(clk, list->alias,
+ list->dev_name);
+ if (ret)
+ pr_err("%s: failed to register lookup %s\n",
+ __func__, list->alias);
+ }
+
+ samsung_clk_add_lookup(clk, list->id);
+ }
+}
+
+/*
+ * obtain the clock speed of all external fixed clock sources from device
+ * tree and register them
+ */
+#ifdef CONFIG_OF
+void __init samsung_clk_of_register_fixed_ext(
+ struct samsung_fixed_rate_clock *fixed_rate_clk,
+ unsigned int nr_fixed_rate_clk,
+ struct of_device_id *clk_matches)
+{
+ const struct of_device_id *match;
+ struct device_node *np;
+ u32 freq;
+
+ for_each_matching_node_and_match(np, clk_matches, &match) {
+ if (of_property_read_u32(np, "clock-frequency", &freq))
+ continue;
+ fixed_rate_clk[(u32)match->data].fixed_rate = freq;
+ }
+ samsung_clk_register_fixed_rate(fixed_rate_clk, nr_fixed_rate_clk);
+}
+#endif
+
+/* utility function to get the rate of a specified clock */
+unsigned long _get_rate(const char *clk_name)
+{
+ struct clk *clk;
+ unsigned long rate;
+
+ clk = clk_get(NULL, clk_name);
+ if (IS_ERR(clk)) {
+ pr_err("%s: could not find clock %s\n", __func__, clk_name);
+ return 0;
+ }
+ rate = clk_get_rate(clk);
+ clk_put(clk);
+ return rate;
+}
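The helpers above are meant to be driven by per-SoC tables. Below is a minimal sketch of how an SoC clock file might call samsung_clk_init() and samsung_clk_of_register_fixed_ext(); the clock name, id, 256-clock count, 24 MHz rate and "samsung,clock-xxti" compatible are illustrative assumptions, not part of this patch.

static struct samsung_fixed_rate_clock example_fixed_rate_ext_clks[] __initdata = {
	/* index 0 matches the (void *)0 stored in .data below */
	FRATE(0, "fin_pll", NULL, CLK_IS_ROOT, 24000000),
};

static struct of_device_id example_ext_clk_match[] __initdata = {
	{ .compatible = "samsung,clock-xxti", .data = (void *)0, },
	{ /* sentinel */ },
};

static void __init example_soc_clk_init(struct device_node *np, void __iomem *base)
{
	/* 256 is a made-up upper bound on the number of clock ids */
	samsung_clk_init(np, base, 256, NULL, 0, NULL, 0);
	samsung_clk_of_register_fixed_ext(example_fixed_rate_ext_clks,
			ARRAY_SIZE(example_fixed_rate_ext_clks),
			example_ext_clk_match);
}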
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
new file mode 100644
index 0000000..e4ad6ea
--- /dev/null
+++ b/drivers/clk/samsung/clk.h
@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2013 Linaro Ltd.
+ * Author: Thomas Abraham <thomas.ab@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for all Samsung platforms
+*/
+
+#ifndef __SAMSUNG_CLK_H
+#define __SAMSUNG_CLK_H
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+/**
+ * struct samsung_clock_alias: information about a clock alias
+ * @id: platform specific id of the clock.
+ * @dev_name: name of the device to which this clock belongs.
+ * @alias: optional clock alias name to be assigned to this clock.
+ */
+struct samsung_clock_alias {
+ unsigned int id;
+ const char *dev_name;
+ const char *alias;
+};
+
+#define ALIAS(_id, dname, a) \
+ { \
+ .id = _id, \
+ .dev_name = dname, \
+ .alias = a, \
+ }
+
+/**
+ * struct samsung_fixed_rate_clock: information about fixed-rate clock
+ * @id: platform specific id of the clock.
+ * @name: name of this fixed-rate clock.
+ * @parent_name: optional parent clock name.
+ * @flags: optional fixed-rate clock flags.
+ * @fixed_rate: fixed clock rate of this clock.
+ */
+struct samsung_fixed_rate_clock {
+ unsigned int id;
+ char *name;
+ const char *parent_name;
+ unsigned long flags;
+ unsigned long fixed_rate;
+};
+
+#define FRATE(_id, cname, pname, f, frate) \
+ { \
+ .id = _id, \
+ .name = cname, \
+ .parent_name = pname, \
+ .flags = f, \
+ .fixed_rate = frate, \
+ }
+
+/**
+ * struct samsung_fixed_factor_clock: information about fixed-factor clock
+ * @id: platform specific id of the clock.
+ * @name: name of this fixed-factor clock.
+ * @parent_name: parent clock name.
+ * @mult: fixed multiplication factor.
+ * @div: fixed division factor.
+ * @flags: optional fixed-factor clock flags.
+ */
+struct samsung_fixed_factor_clock {
+ unsigned int id;
+ char *name;
+ const char *parent_name;
+ unsigned long mult;
+ unsigned long div;
+ unsigned long flags;
+};
+
+#define FFACTOR(_id, cname, pname, m, d, f) \
+ { \
+ .id = _id, \
+ .name = cname, \
+ .parent_name = pname, \
+ .mult = m, \
+ .div = d, \
+ .flags = f, \
+ }
+
+/**
+ * struct samsung_mux_clock: information about mux clock
+ * @id: platform specific id of the clock.
+ * @dev_name: name of the device to which this clock belongs.
+ * @name: name of this mux clock.
+ * @parent_names: array of pointer to parent clock names.
+ * @num_parents: number of parents listed in @parent_names.
+ * @flags: optional flags for basic clock.
+ * @offset: offset of the register for configuring the mux.
+ * @shift: starting bit location of the mux control bit-field in the register at @offset.
+ * @width: width of the mux control bit-field in the register at @offset.
+ * @mux_flags: flags for mux-type clock.
+ * @alias: optional clock alias name to be assigned to this clock.
+ */
+struct samsung_mux_clock {
+ unsigned int id;
+ const char *dev_name;
+ const char *name;
+ const char **parent_names;
+ u8 num_parents;
+ unsigned long flags;
+ unsigned long offset;
+ u8 shift;
+ u8 width;
+ u8 mux_flags;
+ const char *alias;
+};
+
+#define __MUX(_id, dname, cname, pnames, o, s, w, f, mf, a) \
+ { \
+ .id = _id, \
+ .dev_name = dname, \
+ .name = cname, \
+ .parent_names = pnames, \
+ .num_parents = ARRAY_SIZE(pnames), \
+ .flags = f, \
+ .offset = o, \
+ .shift = s, \
+ .width = w, \
+ .mux_flags = mf, \
+ .alias = a, \
+ }
+
+#define MUX(_id, cname, pnames, o, s, w) \
+ __MUX(_id, NULL, cname, pnames, o, s, w, 0, 0, NULL)
+
+#define MUX_A(_id, cname, pnames, o, s, w, a) \
+ __MUX(_id, NULL, cname, pnames, o, s, w, 0, 0, a)
+
+#define MUX_F(_id, cname, pnames, o, s, w, f, mf) \
+ __MUX(_id, NULL, cname, pnames, o, s, w, f, mf, NULL)
+
+/**
+ * struct samsung_div_clock: information about div clock
+ * @id: platform specific id of the clock.
+ * @dev_name: name of the device to which this clock belongs.
+ * @name: name of this div clock.
+ * @parent_name: name of the parent clock.
+ * @flags: optional flags for basic clock.
+ * @offset: offset of the register for configuring the div.
+ * @shift: starting bit location of the div control bit-field in the register at @offset.
+ * @width: width of the div control bit-field in the register at @offset.
+ * @div_flags: flags for div-type clock.
+ * @alias: optional clock alias name to be assigned to this clock.
+ * @table: optional array of fixed divider/value pairs for this divider.
+ */
+struct samsung_div_clock {
+ unsigned int id;
+ const char *dev_name;
+ const char *name;
+ const char *parent_name;
+ unsigned long flags;
+ unsigned long offset;
+ u8 shift;
+ u8 width;
+ u8 div_flags;
+ const char *alias;
+ struct clk_div_table *table;
+};
+
+#define __DIV(_id, dname, cname, pname, o, s, w, f, df, a, t) \
+ { \
+ .id = _id, \
+ .dev_name = dname, \
+ .name = cname, \
+ .parent_name = pname, \
+ .flags = f, \
+ .offset = o, \
+ .shift = s, \
+ .width = w, \
+ .div_flags = df, \
+ .alias = a, \
+ .table = t, \
+ }
+
+#define DIV(_id, cname, pname, o, s, w) \
+ __DIV(_id, NULL, cname, pname, o, s, w, 0, 0, NULL, NULL)
+
+#define DIV_A(_id, cname, pname, o, s, w, a) \
+ __DIV(_id, NULL, cname, pname, o, s, w, 0, 0, a, NULL)
+
+#define DIV_F(_id, cname, pname, o, s, w, f, df) \
+ __DIV(_id, NULL, cname, pname, o, s, w, f, df, NULL, NULL)
+
+#define DIV_T(_id, cname, pname, o, s, w, t) \
+ __DIV(_id, NULL, cname, pname, o, s, w, 0, 0, NULL, t)
+
+/**
+ * struct samsung_gate_clock: information about gate clock
+ * @id: platform specific id of the clock.
+ * @dev_name: name of the device to which this clock belongs.
+ * @name: name of this gate clock.
+ * @parent_name: name of the parent clock.
+ * @flags: optional flags for basic clock.
+ * @offset: offset of the register for configuring the gate.
+ * @bit_idx: bit index of the gate control bit-field in the register at @offset.
+ * @gate_flags: flags for gate-type clock.
+ * @alias: optional clock alias name to be assigned to this clock.
+ */
+struct samsung_gate_clock {
+ unsigned int id;
+ const char *dev_name;
+ const char *name;
+ const char *parent_name;
+ unsigned long flags;
+ unsigned long offset;
+ u8 bit_idx;
+ u8 gate_flags;
+ const char *alias;
+};
+
+#define __GATE(_id, dname, cname, pname, o, b, f, gf, a) \
+ { \
+ .id = _id, \
+ .dev_name = dname, \
+ .name = cname, \
+ .parent_name = pname, \
+ .flags = f, \
+ .offset = o, \
+ .bit_idx = b, \
+ .gate_flags = gf, \
+ .alias = a, \
+ }
+
+#define GATE(_id, cname, pname, o, b, f, gf) \
+ __GATE(_id, NULL, cname, pname, o, b, f, gf, NULL)
+
+#define GATE_A(_id, cname, pname, o, b, f, gf, a) \
+ __GATE(_id, NULL, cname, pname, o, b, f, gf, a)
+
+#define GATE_D(_id, dname, cname, pname, o, b, f, gf) \
+ __GATE(_id, dname, cname, pname, o, b, f, gf, NULL)
+
+#define GATE_DA(_id, dname, cname, pname, o, b, f, gf, a) \
+ __GATE(_id, dname, cname, pname, o, b, f, gf, a)
+
+#define PNAME(x) static const char *x[] __initdata
+
+/**
+ * struct samsung_clk_reg_dump: register dump of clock controller registers.
+ * @offset: clock register offset from the controller base address.
+ * @value: the value of the clock register at @offset.
+ */
+struct samsung_clk_reg_dump {
+ u32 offset;
+ u32 value;
+};
+
+extern void __init samsung_clk_init(struct device_node *np, void __iomem *base,
+ unsigned long nr_clks, unsigned long *rdump,
+ unsigned long nr_rdump, unsigned long *soc_rdump,
+ unsigned long nr_soc_rdump);
+extern void __init samsung_clk_of_register_fixed_ext(
+ struct samsung_fixed_rate_clock *fixed_rate_clk,
+ unsigned int nr_fixed_rate_clk,
+ struct of_device_id *clk_matches);
+
+extern void samsung_clk_add_lookup(struct clk *clk, unsigned int id);
+
+extern void samsung_clk_register_alias(struct samsung_clock_alias *list,
+ unsigned int nr_clk);
+extern void __init samsung_clk_register_fixed_rate(
+ struct samsung_fixed_rate_clock *clk_list, unsigned int nr_clk);
+extern void __init samsung_clk_register_fixed_factor(
+ struct samsung_fixed_factor_clock *list, unsigned int nr_clk);
+extern void __init samsung_clk_register_mux(struct samsung_mux_clock *clk_list,
+ unsigned int nr_clk);
+extern void __init samsung_clk_register_div(struct samsung_div_clock *clk_list,
+ unsigned int nr_clk);
+extern void __init samsung_clk_register_gate(
+ struct samsung_gate_clock *clk_list, unsigned int nr_clk);
+
+extern unsigned long _get_rate(const char *clk_name);
+
+#endif /* __SAMSUNG_CLK_H */
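To make the table macros above concrete, here is a hedged sketch of how a platform might declare mux/div/gate tables and pass them to the clk.c helpers; every name, clock id, register offset and bit position below is invented for illustration only.

PNAME(mout_example_p) = { "fin_pll", "fout_apll" };

static struct samsung_mux_clock example_mux_clks[] __initdata = {
	MUX(1, "mout_example", mout_example_p, 0x0200, 0, 1),
};

static struct samsung_div_clock example_div_clks[] __initdata = {
	DIV(2, "div_example", "mout_example", 0x0500, 0, 3),
};

static struct samsung_gate_clock example_gate_clks[] __initdata = {
	GATE(3, "gate_example", "div_example", 0x0900, 0, 0, 0),
};

static void __init example_register_tables(void)
{
	samsung_clk_register_mux(example_mux_clks, ARRAY_SIZE(example_mux_clks));
	samsung_clk_register_div(example_div_clks, ARRAY_SIZE(example_div_clks));
	samsung_clk_register_gate(example_gate_clks, ARRAY_SIZE(example_gate_clks));
}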
diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
index 2c855a6..bd11315 100644
--- a/drivers/clk/socfpga/clk.c
+++ b/drivers/clk/socfpga/clk.c
@@ -1,5 +1,6 @@
/*
- * Copyright (C) 2012 Altera Corporation <www.altera.com>
+ * Copyright 2011-2012 Calxeda, Inc.
+ * Copyright (C) 2012-2013 Altera Corporation <www.altera.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -11,41 +12,161 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
+ * Based on clk-highbank.c
+ *
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of.h>
-#define SOCFPGA_OSC1_CLK 10000000
-#define SOCFPGA_MPU_CLK 800000000
-#define SOCFPGA_MAIN_QSPI_CLK 432000000
-#define SOCFPGA_MAIN_NAND_SDMMC_CLK 250000000
-#define SOCFPGA_S2F_USR_CLK 125000000
+/* Clock Manager offsets */
+#define CLKMGR_CTRL 0x0
+#define CLKMGR_BYPASS 0x4
-void __init socfpga_init_clocks(void)
+/* Clock bypass bits */
+#define MAINPLL_BYPASS (1<<0)
+#define SDRAMPLL_BYPASS (1<<1)
+#define SDRAMPLL_SRC_BYPASS (1<<2)
+#define PERPLL_BYPASS (1<<3)
+#define PERPLL_SRC_BYPASS (1<<4)
+
+#define SOCFPGA_PLL_BG_PWRDWN 0
+#define SOCFPGA_PLL_EXT_ENA 1
+#define SOCFPGA_PLL_PWR_DOWN 2
+#define SOCFPGA_PLL_DIVF_MASK 0x0000FFF8
+#define SOCFPGA_PLL_DIVF_SHIFT 3
+#define SOCFPGA_PLL_DIVQ_MASK 0x003F0000
+#define SOCFPGA_PLL_DIVQ_SHIFT 16
+
+extern void __iomem *clk_mgr_base_addr;
+
+struct socfpga_clk {
+ struct clk_gate hw;
+ char *parent_name;
+ char *clk_name;
+ u32 fixed_div;
+};
+#define to_socfpga_clk(p) container_of(p, struct socfpga_clk, hw.hw)
+
+static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
{
+ struct socfpga_clk *socfpgaclk = to_socfpga_clk(hwclk);
+ unsigned long divf, divq, vco_freq, reg;
+ unsigned long bypass;
+
+ reg = readl(socfpgaclk->hw.reg);
+ bypass = readl(clk_mgr_base_addr + CLKMGR_BYPASS);
+ if (bypass & MAINPLL_BYPASS)
+ return parent_rate;
+
+ divf = (reg & SOCFPGA_PLL_DIVF_MASK) >> SOCFPGA_PLL_DIVF_SHIFT;
+ divq = (reg & SOCFPGA_PLL_DIVQ_MASK) >> SOCFPGA_PLL_DIVQ_SHIFT;
+ vco_freq = parent_rate * (divf + 1);
+ return vco_freq / (1 + divq);
+}
+
+
+static struct clk_ops clk_pll_ops = {
+ .recalc_rate = clk_pll_recalc_rate,
+};
+
+static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct socfpga_clk *socfpgaclk = to_socfpga_clk(hwclk);
+ u32 div;
+
+ if (socfpgaclk->fixed_div)
+ div = socfpgaclk->fixed_div;
+ else
+ div = ((readl(socfpgaclk->hw.reg) & 0x1ff) + 1);
+
+ return parent_rate / div;
+}
+
+static const struct clk_ops periclk_ops = {
+ .recalc_rate = clk_periclk_recalc_rate,
+};
+
+static __init struct clk *socfpga_clk_init(struct device_node *node,
+ const struct clk_ops *ops)
+{
+ u32 reg;
struct clk *clk;
+ struct socfpga_clk *socfpga_clk;
+ const char *clk_name = node->name;
+ const char *parent_name;
+ struct clk_init_data init;
+ int rc;
+ u32 fixed_div;
+
+ rc = of_property_read_u32(node, "reg", &reg);
+ if (WARN_ON(rc))
+ return NULL;
+
+ socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
+ if (WARN_ON(!socfpga_clk))
+ return NULL;
+
+ socfpga_clk->hw.reg = clk_mgr_base_addr + reg;
+
+ rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
+ if (rc)
+ socfpga_clk->fixed_div = 0;
+ else
+ socfpga_clk->fixed_div = fixed_div;
+
+ of_property_read_string(node, "clock-output-names", &clk_name);
+
+ init.name = clk_name;
+ init.ops = ops;
+ init.flags = 0;
+ parent_name = of_clk_get_parent_name(node, 0);
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
- clk = clk_register_fixed_rate(NULL, "osc1_clk", NULL, CLK_IS_ROOT, SOCFPGA_OSC1_CLK);
- clk_register_clkdev(clk, "osc1_clk", NULL);
+ socfpga_clk->hw.hw.init = &init;
- clk = clk_register_fixed_rate(NULL, "mpu_clk", NULL, CLK_IS_ROOT, SOCFPGA_MPU_CLK);
- clk_register_clkdev(clk, "mpu_clk", NULL);
+ if (strcmp(clk_name, "main_pll") || strcmp(clk_name, "periph_pll") ||
+ strcmp(clk_name, "sdram_pll")) {
+ socfpga_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
+ clk_pll_ops.enable = clk_gate_ops.enable;
+ clk_pll_ops.disable = clk_gate_ops.disable;
+ }
- clk = clk_register_fixed_rate(NULL, "main_clk", NULL, CLK_IS_ROOT, SOCFPGA_MPU_CLK/2);
- clk_register_clkdev(clk, "main_clk", NULL);
+ clk = clk_register(NULL, &socfpga_clk->hw.hw);
+ if (WARN_ON(IS_ERR(clk))) {
+ kfree(socfpga_clk);
+ return NULL;
+ }
+ rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ return clk;
+}
- clk = clk_register_fixed_rate(NULL, "dbg_base_clk", NULL, CLK_IS_ROOT, SOCFPGA_MPU_CLK/2);
- clk_register_clkdev(clk, "dbg_base_clk", NULL);
+static void __init socfpga_pll_init(struct device_node *node)
+{
+ socfpga_clk_init(node, &clk_pll_ops);
+}
+CLK_OF_DECLARE(socfpga_pll, "altr,socfpga-pll-clock", socfpga_pll_init);
- clk = clk_register_fixed_rate(NULL, "main_qspi_clk", NULL, CLK_IS_ROOT, SOCFPGA_MAIN_QSPI_CLK);
- clk_register_clkdev(clk, "main_qspi_clk", NULL);
+static void __init socfpga_periph_init(struct device_node *node)
+{
+ socfpga_clk_init(node, &periclk_ops);
+}
+CLK_OF_DECLARE(socfpga_periph, "altr,socfpga-perip-clk", socfpga_periph_init);
- clk = clk_register_fixed_rate(NULL, "main_nand_sdmmc_clk", NULL, CLK_IS_ROOT, SOCFPGA_MAIN_NAND_SDMMC_CLK);
- clk_register_clkdev(clk, "main_nand_sdmmc_clk", NULL);
+void __init socfpga_init_clocks(void)
+{
+ struct clk *clk;
+ int ret;
- clk = clk_register_fixed_rate(NULL, "s2f_usr_clk", NULL, CLK_IS_ROOT, SOCFPGA_S2F_USR_CLK);
- clk_register_clkdev(clk, "s2f_usr_clk", NULL);
+ clk = clk_register_fixed_factor(NULL, "smp_twd", "mpuclk", 0, 1, 4);
+ ret = clk_register_clkdev(clk, NULL, "smp_twd");
+ if (ret)
+ pr_err("smp_twd alias not registered\n");
}
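As a worked example of the PLL rate math in clk_pll_recalc_rate() above (when the bypass bit is clear), with purely illustrative register values:

	/* illustrative values, not taken from a real SoCFPGA board */
	unsigned long parent_rate = 25000000;			/* 25 MHz reference */
	unsigned long divf = 31, divq = 0;			/* decoded from the PLL register */
	unsigned long vco_freq = parent_rate * (divf + 1);	/* 25 MHz * 32 = 800 MHz */
	unsigned long rate = vco_freq / (1 + divq);		/* 800 MHz / 1 = 800 MHz */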
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index ed9af42..aedbbe1 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -17,12 +17,10 @@
#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/spinlock_types.h>
-#include <mach/spear.h>
#include "clk.h"
-#define VA_SPEAR1310_RAS_BASE IOMEM(UL(0xFA400000))
/* PLL related registers and bit values */
-#define SPEAR1310_PLL_CFG (VA_MISC_BASE + 0x210)
+#define SPEAR1310_PLL_CFG (misc_base + 0x210)
/* PLL_CFG bit values */
#define SPEAR1310_CLCD_SYNT_CLK_MASK 1
#define SPEAR1310_CLCD_SYNT_CLK_SHIFT 31
@@ -35,15 +33,15 @@
#define SPEAR1310_PLL2_CLK_SHIFT 22
#define SPEAR1310_PLL1_CLK_SHIFT 20
-#define SPEAR1310_PLL1_CTR (VA_MISC_BASE + 0x214)
-#define SPEAR1310_PLL1_FRQ (VA_MISC_BASE + 0x218)
-#define SPEAR1310_PLL2_CTR (VA_MISC_BASE + 0x220)
-#define SPEAR1310_PLL2_FRQ (VA_MISC_BASE + 0x224)
-#define SPEAR1310_PLL3_CTR (VA_MISC_BASE + 0x22C)
-#define SPEAR1310_PLL3_FRQ (VA_MISC_BASE + 0x230)
-#define SPEAR1310_PLL4_CTR (VA_MISC_BASE + 0x238)
-#define SPEAR1310_PLL4_FRQ (VA_MISC_BASE + 0x23C)
-#define SPEAR1310_PERIP_CLK_CFG (VA_MISC_BASE + 0x244)
+#define SPEAR1310_PLL1_CTR (misc_base + 0x214)
+#define SPEAR1310_PLL1_FRQ (misc_base + 0x218)
+#define SPEAR1310_PLL2_CTR (misc_base + 0x220)
+#define SPEAR1310_PLL2_FRQ (misc_base + 0x224)
+#define SPEAR1310_PLL3_CTR (misc_base + 0x22C)
+#define SPEAR1310_PLL3_FRQ (misc_base + 0x230)
+#define SPEAR1310_PLL4_CTR (misc_base + 0x238)
+#define SPEAR1310_PLL4_FRQ (misc_base + 0x23C)
+#define SPEAR1310_PERIP_CLK_CFG (misc_base + 0x244)
/* PERIP_CLK_CFG bit values */
#define SPEAR1310_GPT_OSC24_VAL 0
#define SPEAR1310_GPT_APB_VAL 1
@@ -65,7 +63,7 @@
#define SPEAR1310_C3_CLK_MASK 1
#define SPEAR1310_C3_CLK_SHIFT 1
-#define SPEAR1310_GMAC_CLK_CFG (VA_MISC_BASE + 0x248)
+#define SPEAR1310_GMAC_CLK_CFG (misc_base + 0x248)
#define SPEAR1310_GMAC_PHY_IF_SEL_MASK 3
#define SPEAR1310_GMAC_PHY_IF_SEL_SHIFT 4
#define SPEAR1310_GMAC_PHY_CLK_MASK 1
@@ -73,7 +71,7 @@
#define SPEAR1310_GMAC_PHY_INPUT_CLK_MASK 2
#define SPEAR1310_GMAC_PHY_INPUT_CLK_SHIFT 1
-#define SPEAR1310_I2S_CLK_CFG (VA_MISC_BASE + 0x24C)
+#define SPEAR1310_I2S_CLK_CFG (misc_base + 0x24C)
/* I2S_CLK_CFG register mask */
#define SPEAR1310_I2S_SCLK_X_MASK 0x1F
#define SPEAR1310_I2S_SCLK_X_SHIFT 27
@@ -91,21 +89,21 @@
#define SPEAR1310_I2S_SRC_CLK_MASK 2
#define SPEAR1310_I2S_SRC_CLK_SHIFT 0
-#define SPEAR1310_C3_CLK_SYNT (VA_MISC_BASE + 0x250)
-#define SPEAR1310_UART_CLK_SYNT (VA_MISC_BASE + 0x254)
-#define SPEAR1310_GMAC_CLK_SYNT (VA_MISC_BASE + 0x258)
-#define SPEAR1310_SDHCI_CLK_SYNT (VA_MISC_BASE + 0x25C)
-#define SPEAR1310_CFXD_CLK_SYNT (VA_MISC_BASE + 0x260)
-#define SPEAR1310_ADC_CLK_SYNT (VA_MISC_BASE + 0x264)
-#define SPEAR1310_AMBA_CLK_SYNT (VA_MISC_BASE + 0x268)
-#define SPEAR1310_CLCD_CLK_SYNT (VA_MISC_BASE + 0x270)
-#define SPEAR1310_RAS_CLK_SYNT0 (VA_MISC_BASE + 0x280)
-#define SPEAR1310_RAS_CLK_SYNT1 (VA_MISC_BASE + 0x288)
-#define SPEAR1310_RAS_CLK_SYNT2 (VA_MISC_BASE + 0x290)
-#define SPEAR1310_RAS_CLK_SYNT3 (VA_MISC_BASE + 0x298)
+#define SPEAR1310_C3_CLK_SYNT (misc_base + 0x250)
+#define SPEAR1310_UART_CLK_SYNT (misc_base + 0x254)
+#define SPEAR1310_GMAC_CLK_SYNT (misc_base + 0x258)
+#define SPEAR1310_SDHCI_CLK_SYNT (misc_base + 0x25C)
+#define SPEAR1310_CFXD_CLK_SYNT (misc_base + 0x260)
+#define SPEAR1310_ADC_CLK_SYNT (misc_base + 0x264)
+#define SPEAR1310_AMBA_CLK_SYNT (misc_base + 0x268)
+#define SPEAR1310_CLCD_CLK_SYNT (misc_base + 0x270)
+#define SPEAR1310_RAS_CLK_SYNT0 (misc_base + 0x280)
+#define SPEAR1310_RAS_CLK_SYNT1 (misc_base + 0x288)
+#define SPEAR1310_RAS_CLK_SYNT2 (misc_base + 0x290)
+#define SPEAR1310_RAS_CLK_SYNT3 (misc_base + 0x298)
/* Check Fractional synthesizer reg masks */
-#define SPEAR1310_PERIP1_CLK_ENB (VA_MISC_BASE + 0x300)
+#define SPEAR1310_PERIP1_CLK_ENB (misc_base + 0x300)
/* PERIP1_CLK_ENB register masks */
#define SPEAR1310_RTC_CLK_ENB 31
#define SPEAR1310_ADC_CLK_ENB 30
@@ -138,7 +136,7 @@
#define SPEAR1310_SYSROM_CLK_ENB 1
#define SPEAR1310_BUS_CLK_ENB 0
-#define SPEAR1310_PERIP2_CLK_ENB (VA_MISC_BASE + 0x304)
+#define SPEAR1310_PERIP2_CLK_ENB (misc_base + 0x304)
/* PERIP2_CLK_ENB register masks */
#define SPEAR1310_THSENS_CLK_ENB 8
#define SPEAR1310_I2S_REF_PAD_CLK_ENB 7
@@ -150,7 +148,7 @@
#define SPEAR1310_DDR_CORE_CLK_ENB 1
#define SPEAR1310_DDR_CTRL_CLK_ENB 0
-#define SPEAR1310_RAS_CLK_ENB (VA_MISC_BASE + 0x310)
+#define SPEAR1310_RAS_CLK_ENB (misc_base + 0x310)
/* RAS_CLK_ENB register masks */
#define SPEAR1310_SYNT3_CLK_ENB 17
#define SPEAR1310_SYNT2_CLK_ENB 16
@@ -172,7 +170,7 @@
#define SPEAR1310_ACLK_CLK_ENB 0
/* RAS Area Control Register */
-#define SPEAR1310_RAS_CTRL_REG0 (VA_SPEAR1310_RAS_BASE + 0x000)
+#define SPEAR1310_RAS_CTRL_REG0 (ras_base + 0x000)
#define SPEAR1310_SSP1_CLK_MASK 3
#define SPEAR1310_SSP1_CLK_SHIFT 26
#define SPEAR1310_TDM_CLK_MASK 1
@@ -197,12 +195,12 @@
#define SPEAR1310_PCI_CLK_MASK 1
#define SPEAR1310_PCI_CLK_SHIFT 0
-#define SPEAR1310_RAS_CTRL_REG1 (VA_SPEAR1310_RAS_BASE + 0x004)
+#define SPEAR1310_RAS_CTRL_REG1 (ras_base + 0x004)
#define SPEAR1310_PHY_CLK_MASK 0x3
#define SPEAR1310_RMII_PHY_CLK_SHIFT 0
#define SPEAR1310_SMII_RGMII_PHY_CLK_SHIFT 2
-#define SPEAR1310_RAS_SW_CLK_CTRL (VA_SPEAR1310_RAS_BASE + 0x0148)
+#define SPEAR1310_RAS_SW_CLK_CTRL (ras_base + 0x0148)
#define SPEAR1310_CAN1_CLK_ENB 25
#define SPEAR1310_CAN0_CLK_ENB 24
#define SPEAR1310_GPT64_CLK_ENB 23
@@ -385,7 +383,7 @@ static const char *ssp1_parents[] = { "ras_apb_clk", "gen_syn1_clk",
static const char *pci_parents[] = { "ras_pll3_clk", "gen_syn2_clk", };
static const char *tdm_parents[] = { "ras_pll3_clk", "gen_syn1_clk", };
-void __init spear1310_clk_init(void)
+void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
{
struct clk *clk, *clk1;
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index 35e7e26..9d0b394 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -17,18 +17,17 @@
#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/spinlock_types.h>
-#include <mach/spear.h>
#include "clk.h"
/* Clock Configuration Registers */
-#define SPEAR1340_SYS_CLK_CTRL (VA_MISC_BASE + 0x200)
+#define SPEAR1340_SYS_CLK_CTRL (misc_base + 0x200)
#define SPEAR1340_HCLK_SRC_SEL_SHIFT 27
#define SPEAR1340_HCLK_SRC_SEL_MASK 1
#define SPEAR1340_SCLK_SRC_SEL_SHIFT 23
#define SPEAR1340_SCLK_SRC_SEL_MASK 3
/* PLL related registers and bit values */
-#define SPEAR1340_PLL_CFG (VA_MISC_BASE + 0x210)
+#define SPEAR1340_PLL_CFG (misc_base + 0x210)
/* PLL_CFG bit values */
#define SPEAR1340_CLCD_SYNT_CLK_MASK 1
#define SPEAR1340_CLCD_SYNT_CLK_SHIFT 31
@@ -40,15 +39,15 @@
#define SPEAR1340_PLL2_CLK_SHIFT 22
#define SPEAR1340_PLL1_CLK_SHIFT 20
-#define SPEAR1340_PLL1_CTR (VA_MISC_BASE + 0x214)
-#define SPEAR1340_PLL1_FRQ (VA_MISC_BASE + 0x218)
-#define SPEAR1340_PLL2_CTR (VA_MISC_BASE + 0x220)
-#define SPEAR1340_PLL2_FRQ (VA_MISC_BASE + 0x224)
-#define SPEAR1340_PLL3_CTR (VA_MISC_BASE + 0x22C)
-#define SPEAR1340_PLL3_FRQ (VA_MISC_BASE + 0x230)
-#define SPEAR1340_PLL4_CTR (VA_MISC_BASE + 0x238)
-#define SPEAR1340_PLL4_FRQ (VA_MISC_BASE + 0x23C)
-#define SPEAR1340_PERIP_CLK_CFG (VA_MISC_BASE + 0x244)
+#define SPEAR1340_PLL1_CTR (misc_base + 0x214)
+#define SPEAR1340_PLL1_FRQ (misc_base + 0x218)
+#define SPEAR1340_PLL2_CTR (misc_base + 0x220)
+#define SPEAR1340_PLL2_FRQ (misc_base + 0x224)
+#define SPEAR1340_PLL3_CTR (misc_base + 0x22C)
+#define SPEAR1340_PLL3_FRQ (misc_base + 0x230)
+#define SPEAR1340_PLL4_CTR (misc_base + 0x238)
+#define SPEAR1340_PLL4_FRQ (misc_base + 0x23C)
+#define SPEAR1340_PERIP_CLK_CFG (misc_base + 0x244)
/* PERIP_CLK_CFG bit values */
#define SPEAR1340_SPDIF_CLK_MASK 1
#define SPEAR1340_SPDIF_OUT_CLK_SHIFT 15
@@ -66,13 +65,13 @@
#define SPEAR1340_C3_CLK_MASK 1
#define SPEAR1340_C3_CLK_SHIFT 1
-#define SPEAR1340_GMAC_CLK_CFG (VA_MISC_BASE + 0x248)
+#define SPEAR1340_GMAC_CLK_CFG (misc_base + 0x248)
#define SPEAR1340_GMAC_PHY_CLK_MASK 1
#define SPEAR1340_GMAC_PHY_CLK_SHIFT 2
#define SPEAR1340_GMAC_PHY_INPUT_CLK_MASK 2
#define SPEAR1340_GMAC_PHY_INPUT_CLK_SHIFT 0
-#define SPEAR1340_I2S_CLK_CFG (VA_MISC_BASE + 0x24C)
+#define SPEAR1340_I2S_CLK_CFG (misc_base + 0x24C)
/* I2S_CLK_CFG register mask */
#define SPEAR1340_I2S_SCLK_X_MASK 0x1F
#define SPEAR1340_I2S_SCLK_X_SHIFT 27
@@ -90,21 +89,21 @@
#define SPEAR1340_I2S_SRC_CLK_MASK 2
#define SPEAR1340_I2S_SRC_CLK_SHIFT 0
-#define SPEAR1340_C3_CLK_SYNT (VA_MISC_BASE + 0x250)
-#define SPEAR1340_UART0_CLK_SYNT (VA_MISC_BASE + 0x254)
-#define SPEAR1340_UART1_CLK_SYNT (VA_MISC_BASE + 0x258)
-#define SPEAR1340_GMAC_CLK_SYNT (VA_MISC_BASE + 0x25C)
-#define SPEAR1340_SDHCI_CLK_SYNT (VA_MISC_BASE + 0x260)
-#define SPEAR1340_CFXD_CLK_SYNT (VA_MISC_BASE + 0x264)
-#define SPEAR1340_ADC_CLK_SYNT (VA_MISC_BASE + 0x270)
-#define SPEAR1340_AMBA_CLK_SYNT (VA_MISC_BASE + 0x274)
-#define SPEAR1340_CLCD_CLK_SYNT (VA_MISC_BASE + 0x27C)
-#define SPEAR1340_SYS_CLK_SYNT (VA_MISC_BASE + 0x284)
-#define SPEAR1340_GEN_CLK_SYNT0 (VA_MISC_BASE + 0x28C)
-#define SPEAR1340_GEN_CLK_SYNT1 (VA_MISC_BASE + 0x294)
-#define SPEAR1340_GEN_CLK_SYNT2 (VA_MISC_BASE + 0x29C)
-#define SPEAR1340_GEN_CLK_SYNT3 (VA_MISC_BASE + 0x304)
-#define SPEAR1340_PERIP1_CLK_ENB (VA_MISC_BASE + 0x30C)
+#define SPEAR1340_C3_CLK_SYNT (misc_base + 0x250)
+#define SPEAR1340_UART0_CLK_SYNT (misc_base + 0x254)
+#define SPEAR1340_UART1_CLK_SYNT (misc_base + 0x258)
+#define SPEAR1340_GMAC_CLK_SYNT (misc_base + 0x25C)
+#define SPEAR1340_SDHCI_CLK_SYNT (misc_base + 0x260)
+#define SPEAR1340_CFXD_CLK_SYNT (misc_base + 0x264)
+#define SPEAR1340_ADC_CLK_SYNT (misc_base + 0x270)
+#define SPEAR1340_AMBA_CLK_SYNT (misc_base + 0x274)
+#define SPEAR1340_CLCD_CLK_SYNT (misc_base + 0x27C)
+#define SPEAR1340_SYS_CLK_SYNT (misc_base + 0x284)
+#define SPEAR1340_GEN_CLK_SYNT0 (misc_base + 0x28C)
+#define SPEAR1340_GEN_CLK_SYNT1 (misc_base + 0x294)
+#define SPEAR1340_GEN_CLK_SYNT2 (misc_base + 0x29C)
+#define SPEAR1340_GEN_CLK_SYNT3 (misc_base + 0x304)
+#define SPEAR1340_PERIP1_CLK_ENB (misc_base + 0x30C)
#define SPEAR1340_RTC_CLK_ENB 31
#define SPEAR1340_ADC_CLK_ENB 30
#define SPEAR1340_C3_CLK_ENB 29
@@ -133,7 +132,7 @@
#define SPEAR1340_SYSROM_CLK_ENB 1
#define SPEAR1340_BUS_CLK_ENB 0
-#define SPEAR1340_PERIP2_CLK_ENB (VA_MISC_BASE + 0x310)
+#define SPEAR1340_PERIP2_CLK_ENB (misc_base + 0x310)
#define SPEAR1340_THSENS_CLK_ENB 8
#define SPEAR1340_I2S_REF_PAD_CLK_ENB 7
#define SPEAR1340_ACP_CLK_ENB 6
@@ -144,7 +143,7 @@
#define SPEAR1340_DDR_CORE_CLK_ENB 1
#define SPEAR1340_DDR_CTRL_CLK_ENB 0
-#define SPEAR1340_PERIP3_CLK_ENB (VA_MISC_BASE + 0x314)
+#define SPEAR1340_PERIP3_CLK_ENB (misc_base + 0x314)
#define SPEAR1340_PLGPIO_CLK_ENB 18
#define SPEAR1340_VIDEO_DEC_CLK_ENB 16
#define SPEAR1340_VIDEO_ENC_CLK_ENB 15
@@ -441,7 +440,7 @@ static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
static const char *gen_synth2_3_parents[] = { "vco1div4_clk", "vco2div2_clk",
"pll2_clk", };
-void __init spear1340_clk_init(void)
+void __init spear1340_clk_init(void __iomem *misc_base)
{
struct clk *clk, *clk1;
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index 33d3ac5..f9ec43f 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -15,21 +15,20 @@
#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/spinlock_types.h>
-#include <mach/misc_regs.h>
#include "clk.h"
static DEFINE_SPINLOCK(_lock);
-#define PLL1_CTR (MISC_BASE + 0x008)
-#define PLL1_FRQ (MISC_BASE + 0x00C)
-#define PLL2_CTR (MISC_BASE + 0x014)
-#define PLL2_FRQ (MISC_BASE + 0x018)
-#define PLL_CLK_CFG (MISC_BASE + 0x020)
+#define PLL1_CTR (misc_base + 0x008)
+#define PLL1_FRQ (misc_base + 0x00C)
+#define PLL2_CTR (misc_base + 0x014)
+#define PLL2_FRQ (misc_base + 0x018)
+#define PLL_CLK_CFG (misc_base + 0x020)
/* PLL_CLK_CFG register masks */
#define MCTR_CLK_SHIFT 28
#define MCTR_CLK_MASK 3
-#define CORE_CLK_CFG (MISC_BASE + 0x024)
+#define CORE_CLK_CFG (misc_base + 0x024)
/* CORE CLK CFG register masks */
#define GEN_SYNTH2_3_CLK_SHIFT 18
#define GEN_SYNTH2_3_CLK_MASK 1
@@ -39,7 +38,7 @@ static DEFINE_SPINLOCK(_lock);
#define PCLK_RATIO_SHIFT 8
#define PCLK_RATIO_MASK 2
-#define PERIP_CLK_CFG (MISC_BASE + 0x028)
+#define PERIP_CLK_CFG (misc_base + 0x028)
/* PERIP_CLK_CFG register masks */
#define UART_CLK_SHIFT 4
#define UART_CLK_MASK 1
@@ -50,7 +49,7 @@ static DEFINE_SPINLOCK(_lock);
#define GPT2_CLK_SHIFT 12
#define GPT_CLK_MASK 1
-#define PERIP1_CLK_ENB (MISC_BASE + 0x02C)
+#define PERIP1_CLK_ENB (misc_base + 0x02C)
/* PERIP1_CLK_ENB register masks */
#define UART_CLK_ENB 3
#define SSP_CLK_ENB 5
@@ -69,7 +68,7 @@ static DEFINE_SPINLOCK(_lock);
#define USBH_CLK_ENB 25
#define C3_CLK_ENB 31
-#define RAS_CLK_ENB (MISC_BASE + 0x034)
+#define RAS_CLK_ENB (misc_base + 0x034)
#define RAS_AHB_CLK_ENB 0
#define RAS_PLL1_CLK_ENB 1
#define RAS_APB_CLK_ENB 2
@@ -82,20 +81,20 @@ static DEFINE_SPINLOCK(_lock);
#define RAS_SYNT2_CLK_ENB 10
#define RAS_SYNT3_CLK_ENB 11
-#define PRSC0_CLK_CFG (MISC_BASE + 0x044)
-#define PRSC1_CLK_CFG (MISC_BASE + 0x048)
-#define PRSC2_CLK_CFG (MISC_BASE + 0x04C)
-#define AMEM_CLK_CFG (MISC_BASE + 0x050)
+#define PRSC0_CLK_CFG (misc_base + 0x044)
+#define PRSC1_CLK_CFG (misc_base + 0x048)
+#define PRSC2_CLK_CFG (misc_base + 0x04C)
+#define AMEM_CLK_CFG (misc_base + 0x050)
#define AMEM_CLK_ENB 0
-#define CLCD_CLK_SYNT (MISC_BASE + 0x05C)
-#define FIRDA_CLK_SYNT (MISC_BASE + 0x060)
-#define UART_CLK_SYNT (MISC_BASE + 0x064)
-#define GMAC_CLK_SYNT (MISC_BASE + 0x068)
-#define GEN0_CLK_SYNT (MISC_BASE + 0x06C)
-#define GEN1_CLK_SYNT (MISC_BASE + 0x070)
-#define GEN2_CLK_SYNT (MISC_BASE + 0x074)
-#define GEN3_CLK_SYNT (MISC_BASE + 0x078)
+#define CLCD_CLK_SYNT (misc_base + 0x05C)
+#define FIRDA_CLK_SYNT (misc_base + 0x060)
+#define UART_CLK_SYNT (misc_base + 0x064)
+#define GMAC_CLK_SYNT (misc_base + 0x068)
+#define GEN0_CLK_SYNT (misc_base + 0x06C)
+#define GEN1_CLK_SYNT (misc_base + 0x070)
+#define GEN2_CLK_SYNT (misc_base + 0x074)
+#define GEN3_CLK_SYNT (misc_base + 0x078)
/* pll rate configuration table, in ascending order of rates */
static struct pll_rate_tbl pll_rtbl[] = {
@@ -211,6 +210,17 @@ static inline void spear310_clk_init(void) { }
/* array of all spear 320 clock lookups */
#ifdef CONFIG_MACH_SPEAR320
+
+#define SPEAR320_CONTROL_REG (soc_config_base + 0x0000)
+#define SPEAR320_EXT_CTRL_REG (soc_config_base + 0x0018)
+
+ #define SPEAR320_UARTX_PCLK_MASK 0x1
+ #define SPEAR320_UART2_PCLK_SHIFT 8
+ #define SPEAR320_UART3_PCLK_SHIFT 9
+ #define SPEAR320_UART4_PCLK_SHIFT 10
+ #define SPEAR320_UART5_PCLK_SHIFT 11
+ #define SPEAR320_UART6_PCLK_SHIFT 12
+ #define SPEAR320_RS485_PCLK_SHIFT 13
#define SMII_PCLK_SHIFT 18
#define SMII_PCLK_MASK 2
#define SMII_PCLK_VAL_PAD 0x0
@@ -235,7 +245,7 @@ static const char *smii0_parents[] = { "smii_125m_pad", "ras_pll2_clk",
"ras_syn0_gclk", };
static const char *uartx_parents[] = { "ras_syn1_gclk", "ras_apb_clk", };
-static void __init spear320_clk_init(void)
+static void __init spear320_clk_init(void __iomem *soc_config_base)
{
struct clk *clk;
@@ -362,7 +372,7 @@ static void __init spear320_clk_init(void)
static inline void spear320_clk_init(void) { }
#endif
-void __init spear3xx_clk_init(void)
+void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_base)
{
struct clk *clk, *clk1;
@@ -634,5 +644,5 @@ void __init spear3xx_clk_init(void)
else if (of_machine_is_compatible("st,spear310"))
spear310_clk_init();
else if (of_machine_is_compatible("st,spear320"))
- spear320_clk_init();
+ spear320_clk_init(soc_config_base);
}
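All of the SPEAr files in this patch follow the same pattern: the compile-time VA_MISC_BASE/MISC_BASE style macros are dropped and the register bases are passed in by the caller instead. A minimal sketch of a caller under the new convention; the physical addresses and mapping size are placeholders, not real SPEAr values:

static void __init example_spear3xx_clk_setup(void)
{
	/* placeholder addresses and size; a real caller would use the SoC's own map */
	void __iomem *misc_base = ioremap(0xd0000000, SZ_4K);
	void __iomem *soc_config_base = ioremap(0xd1000000, SZ_4K);

	if (misc_base && soc_config_base)
		spear3xx_clk_init(misc_base, soc_config_base);
}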
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
index e862a33..9406f24 100644
--- a/drivers/clk/spear/spear6xx_clock.c
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -13,28 +13,27 @@
#include <linux/clkdev.h>
#include <linux/io.h>
#include <linux/spinlock_types.h>
-#include <mach/misc_regs.h>
#include "clk.h"
static DEFINE_SPINLOCK(_lock);
-#define PLL1_CTR (MISC_BASE + 0x008)
-#define PLL1_FRQ (MISC_BASE + 0x00C)
-#define PLL2_CTR (MISC_BASE + 0x014)
-#define PLL2_FRQ (MISC_BASE + 0x018)
-#define PLL_CLK_CFG (MISC_BASE + 0x020)
+#define PLL1_CTR (misc_base + 0x008)
+#define PLL1_FRQ (misc_base + 0x00C)
+#define PLL2_CTR (misc_base + 0x014)
+#define PLL2_FRQ (misc_base + 0x018)
+#define PLL_CLK_CFG (misc_base + 0x020)
/* PLL_CLK_CFG register masks */
#define MCTR_CLK_SHIFT 28
#define MCTR_CLK_MASK 3
-#define CORE_CLK_CFG (MISC_BASE + 0x024)
+#define CORE_CLK_CFG (misc_base + 0x024)
/* CORE CLK CFG register masks */
#define HCLK_RATIO_SHIFT 10
#define HCLK_RATIO_MASK 2
#define PCLK_RATIO_SHIFT 8
#define PCLK_RATIO_MASK 2
-#define PERIP_CLK_CFG (MISC_BASE + 0x028)
+#define PERIP_CLK_CFG (misc_base + 0x028)
/* PERIP_CLK_CFG register masks */
#define CLCD_CLK_SHIFT 2
#define CLCD_CLK_MASK 2
@@ -48,7 +47,7 @@ static DEFINE_SPINLOCK(_lock);
#define GPT3_CLK_SHIFT 12
#define GPT_CLK_MASK 1
-#define PERIP1_CLK_ENB (MISC_BASE + 0x02C)
+#define PERIP1_CLK_ENB (misc_base + 0x02C)
/* PERIP1_CLK_ENB register masks */
#define UART0_CLK_ENB 3
#define UART1_CLK_ENB 4
@@ -74,13 +73,13 @@ static DEFINE_SPINLOCK(_lock);
#define USBH0_CLK_ENB 25
#define USBH1_CLK_ENB 26
-#define PRSC0_CLK_CFG (MISC_BASE + 0x044)
-#define PRSC1_CLK_CFG (MISC_BASE + 0x048)
-#define PRSC2_CLK_CFG (MISC_BASE + 0x04C)
+#define PRSC0_CLK_CFG (misc_base + 0x044)
+#define PRSC1_CLK_CFG (misc_base + 0x048)
+#define PRSC2_CLK_CFG (misc_base + 0x04C)
-#define CLCD_CLK_SYNT (MISC_BASE + 0x05C)
-#define FIRDA_CLK_SYNT (MISC_BASE + 0x060)
-#define UART_CLK_SYNT (MISC_BASE + 0x064)
+#define CLCD_CLK_SYNT (misc_base + 0x05C)
+#define FIRDA_CLK_SYNT (misc_base + 0x060)
+#define UART_CLK_SYNT (misc_base + 0x064)
/* vco rate configuration table, in ascending order of rates */
static struct pll_rate_tbl pll_rtbl[] = {
@@ -115,7 +114,7 @@ static struct gpt_rate_tbl gpt_rtbl[] = {
{.mscale = 1, .nscale = 0}, /* 83 MHz */
};
-void __init spear6xx_clk_init(void)
+void __init spear6xx_clk_init(void __iomem *misc_base)
{
struct clk *clk, *clk1;
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index 2b41b0f..f49fac2 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -9,3 +9,4 @@ obj-y += clk-super.o
obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20.o
obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra30.o
+obj-$(CONFIG_ARCH_TEGRA_114_SOC) += clk-tegra114.o
diff --git a/drivers/clk/tegra/clk-periph-gate.c b/drivers/clk/tegra/clk-periph-gate.c
index 6dd5332..bafee98 100644
--- a/drivers/clk/tegra/clk-periph-gate.c
+++ b/drivers/clk/tegra/clk-periph-gate.c
@@ -41,7 +41,9 @@ static DEFINE_SPINLOCK(periph_ref_lock);
#define write_rst_clr(val, gate) \
writel_relaxed(val, gate->clk_base + (gate->regs->rst_clr_reg))
-#define periph_clk_to_bit(periph) (1 << (gate->clk_num % 32))
+#define periph_clk_to_bit(gate) (1 << (gate->clk_num % 32))
+
+#define LVL2_CLK_GATE_OVRE 0x554
/* Peripheral gate clock ops */
static int clk_periph_is_enabled(struct clk_hw *hw)
@@ -83,6 +85,13 @@ static int clk_periph_enable(struct clk_hw *hw)
}
}
+ if (gate->flags & TEGRA_PERIPH_WAR_1005168) {
+ writel_relaxed(0, gate->clk_base + LVL2_CLK_GATE_OVRE);
+ writel_relaxed(BIT(22), gate->clk_base + LVL2_CLK_GATE_OVRE);
+ udelay(1);
+ writel_relaxed(0, gate->clk_base + LVL2_CLK_GATE_OVRE);
+ }
+
spin_unlock_irqrestore(&periph_ref_lock, flags);
return 0;
diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
index 788486e..b2309d3 100644
--- a/drivers/clk/tegra/clk-periph.c
+++ b/drivers/clk/tegra/clk-periph.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/err.h>
@@ -128,6 +129,7 @@ void tegra_periph_reset_deassert(struct clk *c)
tegra_periph_reset(gate, 0);
}
+EXPORT_SYMBOL(tegra_periph_reset_deassert);
void tegra_periph_reset_assert(struct clk *c)
{
@@ -147,6 +149,7 @@ void tegra_periph_reset_assert(struct clk *c)
tegra_periph_reset(gate, 1);
}
+EXPORT_SYMBOL(tegra_periph_reset_assert);
const struct clk_ops tegra_clk_periph_ops = {
.get_parent = clk_periph_get_parent,
@@ -170,14 +173,15 @@ const struct clk_ops tegra_clk_periph_nodiv_ops = {
static struct clk *_tegra_clk_register_periph(const char *name,
const char **parent_names, int num_parents,
struct tegra_clk_periph *periph,
- void __iomem *clk_base, u32 offset, bool div)
+ void __iomem *clk_base, u32 offset, bool div,
+ unsigned long flags)
{
struct clk *clk;
struct clk_init_data init;
init.name = name;
init.ops = div ? &tegra_clk_periph_ops : &tegra_clk_periph_nodiv_ops;
- init.flags = div ? 0 : CLK_SET_RATE_PARENT;
+ init.flags = flags;
init.parent_names = parent_names;
init.num_parents = num_parents;
@@ -202,10 +206,10 @@ static struct clk *_tegra_clk_register_periph(const char *name,
struct clk *tegra_clk_register_periph(const char *name,
const char **parent_names, int num_parents,
struct tegra_clk_periph *periph, void __iomem *clk_base,
- u32 offset)
+ u32 offset, unsigned long flags)
{
return _tegra_clk_register_periph(name, parent_names, num_parents,
- periph, clk_base, offset, true);
+ periph, clk_base, offset, true, flags);
}
struct clk *tegra_clk_register_periph_nodiv(const char *name,
@@ -214,5 +218,5 @@ struct clk *tegra_clk_register_periph_nodiv(const char *name,
u32 offset)
{
return _tegra_clk_register_periph(name, parent_names, num_parents,
- periph, clk_base, offset, false);
+ periph, clk_base, offset, false, CLK_SET_RATE_PARENT);
}
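With the new flags argument above, callers of tegra_clk_register_periph() spell out the basic-clock flags themselves, while the nodiv variant keeps CLK_SET_RATE_PARENT. A hedged sketch of a call site; the clock name, parent list, periph descriptor and the 0x178 offset are illustrative, not taken from a Tegra clock table:

static const char *example_uart_parents[] = { "pll_p", "pll_c", "clk_m" };

static void __init example_register_uart_clk(void __iomem *clk_base,
					     struct tegra_clk_periph *periph)
{
	struct clk *clk;

	clk = tegra_clk_register_periph("uarta", example_uart_parents,
			ARRAY_SIZE(example_uart_parents), periph,
			clk_base, 0x178, CLK_SET_RATE_PARENT);
	if (IS_ERR(clk))
		pr_err("%s: failed to register example clock\n", __func__);
}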
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 165f247..17c2cc0 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2012, 2013, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -79,6 +79,48 @@
#define PLLE_SS_CTRL 0x68
#define PLLE_SS_DISABLE (7 << 10)
+#define PLLE_AUX_PLLP_SEL BIT(2)
+#define PLLE_AUX_ENABLE_SWCTL BIT(4)
+#define PLLE_AUX_SEQ_ENABLE BIT(24)
+#define PLLE_AUX_PLLRE_SEL BIT(28)
+
+#define PLLE_MISC_PLLE_PTS BIT(8)
+#define PLLE_MISC_IDDQ_SW_VALUE BIT(13)
+#define PLLE_MISC_IDDQ_SW_CTRL BIT(14)
+#define PLLE_MISC_VREG_BG_CTRL_SHIFT 4
+#define PLLE_MISC_VREG_BG_CTRL_MASK (3 << PLLE_MISC_VREG_BG_CTRL_SHIFT)
+#define PLLE_MISC_VREG_CTRL_SHIFT 2
+#define PLLE_MISC_VREG_CTRL_MASK (2 << PLLE_MISC_VREG_CTRL_SHIFT)
+
+#define PLLCX_MISC_STROBE BIT(31)
+#define PLLCX_MISC_RESET BIT(30)
+#define PLLCX_MISC_SDM_DIV_SHIFT 28
+#define PLLCX_MISC_SDM_DIV_MASK (0x3 << PLLCX_MISC_SDM_DIV_SHIFT)
+#define PLLCX_MISC_FILT_DIV_SHIFT 26
+#define PLLCX_MISC_FILT_DIV_MASK (0x3 << PLLCX_MISC_FILT_DIV_SHIFT)
+#define PLLCX_MISC_ALPHA_SHIFT 18
+#define PLLCX_MISC_DIV_LOW_RANGE \
+ ((0x1 << PLLCX_MISC_SDM_DIV_SHIFT) | \
+ (0x1 << PLLCX_MISC_FILT_DIV_SHIFT))
+#define PLLCX_MISC_DIV_HIGH_RANGE \
+ ((0x2 << PLLCX_MISC_SDM_DIV_SHIFT) | \
+ (0x2 << PLLCX_MISC_FILT_DIV_SHIFT))
+#define PLLCX_MISC_COEF_LOW_RANGE \
+ ((0x14 << PLLCX_MISC_KA_SHIFT) | (0x38 << PLLCX_MISC_KB_SHIFT))
+#define PLLCX_MISC_KA_SHIFT 2
+#define PLLCX_MISC_KB_SHIFT 9
+#define PLLCX_MISC_DEFAULT (PLLCX_MISC_COEF_LOW_RANGE | \
+ (0x19 << PLLCX_MISC_ALPHA_SHIFT) | \
+ PLLCX_MISC_DIV_LOW_RANGE | \
+ PLLCX_MISC_RESET)
+#define PLLCX_MISC1_DEFAULT 0x000d2308
+#define PLLCX_MISC2_DEFAULT 0x30211200
+#define PLLCX_MISC3_DEFAULT 0x200
+
+#define PMC_PLLM_WB0_OVERRIDE 0x1dc
+#define PMC_PLLM_WB0_OVERRIDE_2 0x2b0
+#define PMC_PLLM_WB0_OVERRIDE_2_DIVP_MASK BIT(27)
+
#define PMC_SATA_PWRGT 0x1ac
#define PMC_SATA_PWRGT_PLLE_IDDQ_VALUE BIT(5)
#define PMC_SATA_PWRGT_PLLE_IDDQ_SWCTL BIT(4)
@@ -101,6 +143,24 @@
#define divn_max(p) (divn_mask(p))
#define divp_max(p) (1 << (divp_mask(p)))
+
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+/* PLLXC has 4-bit PDIV, but entry 15 is not allowed in h/w */
+#define PLLXC_PDIV_MAX 14
+
+/* non-monotonic mapping below is not a typo */
+static u8 pllxc_p[PLLXC_PDIV_MAX + 1] = {
+ /* PDIV: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */
+ /* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32
+};
+
+#define PLLCX_PDIV_MAX 7
+static u8 pllcx_p[PLLCX_PDIV_MAX + 1] = {
+ /* PDIV: 0, 1, 2, 3, 4, 5, 6, 7 */
+ /* p: */ 1, 2, 3, 4, 6, 8, 12, 16
+};
+#endif
+
static void clk_pll_enable_lock(struct tegra_clk_pll *pll)
{
u32 val;
@@ -108,25 +168,36 @@ static void clk_pll_enable_lock(struct tegra_clk_pll *pll)
if (!(pll->flags & TEGRA_PLL_USE_LOCK))
return;
+ if (!(pll->flags & TEGRA_PLL_HAS_LOCK_ENABLE))
+ return;
+
val = pll_readl_misc(pll);
val |= BIT(pll->params->lock_enable_bit_idx);
pll_writel_misc(val, pll);
}
-static int clk_pll_wait_for_lock(struct tegra_clk_pll *pll,
- void __iomem *lock_addr, u32 lock_bit_idx)
+static int clk_pll_wait_for_lock(struct tegra_clk_pll *pll)
{
int i;
- u32 val;
+ u32 val, lock_mask;
+ void __iomem *lock_addr;
if (!(pll->flags & TEGRA_PLL_USE_LOCK)) {
udelay(pll->params->lock_delay);
return 0;
}
+ lock_addr = pll->clk_base;
+ if (pll->flags & TEGRA_PLL_LOCK_MISC)
+ lock_addr += pll->params->misc_reg;
+ else
+ lock_addr += pll->params->base_reg;
+
+ lock_mask = pll->params->lock_mask;
+
for (i = 0; i < pll->params->lock_delay; i++) {
val = readl_relaxed(lock_addr);
- if (val & BIT(lock_bit_idx)) {
+ if ((val & lock_mask) == lock_mask) {
udelay(PLL_POST_LOCK_DELAY);
return 0;
}
@@ -155,7 +226,7 @@ static int clk_pll_is_enabled(struct clk_hw *hw)
return val & PLL_BASE_ENABLE ? 1 : 0;
}
-static int _clk_pll_enable(struct clk_hw *hw)
+static void _clk_pll_enable(struct clk_hw *hw)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
u32 val;
@@ -163,7 +234,8 @@ static int _clk_pll_enable(struct clk_hw *hw)
clk_pll_enable_lock(pll);
val = pll_readl_base(pll);
- val &= ~PLL_BASE_BYPASS;
+ if (pll->flags & TEGRA_PLL_BYPASS)
+ val &= ~PLL_BASE_BYPASS;
val |= PLL_BASE_ENABLE;
pll_writel_base(val, pll);
@@ -172,11 +244,6 @@ static int _clk_pll_enable(struct clk_hw *hw)
val |= PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE;
writel_relaxed(val, pll->pmc + PMC_PLLP_WB0_OVERRIDE);
}
-
- clk_pll_wait_for_lock(pll, pll->clk_base + pll->params->base_reg,
- pll->params->lock_bit_idx);
-
- return 0;
}
static void _clk_pll_disable(struct clk_hw *hw)
@@ -185,7 +252,9 @@ static void _clk_pll_disable(struct clk_hw *hw)
u32 val;
val = pll_readl_base(pll);
- val &= ~(PLL_BASE_BYPASS | PLL_BASE_ENABLE);
+ if (pll->flags & TEGRA_PLL_BYPASS)
+ val &= ~PLL_BASE_BYPASS;
+ val &= ~PLL_BASE_ENABLE;
pll_writel_base(val, pll);
if (pll->flags & TEGRA_PLLM) {
@@ -204,7 +273,9 @@ static int clk_pll_enable(struct clk_hw *hw)
if (pll->lock)
spin_lock_irqsave(pll->lock, flags);
- ret = _clk_pll_enable(hw);
+ _clk_pll_enable(hw);
+
+ ret = clk_pll_wait_for_lock(pll);
if (pll->lock)
spin_unlock_irqrestore(pll->lock, flags);
@@ -241,8 +312,6 @@ static int _get_table_rate(struct clk_hw *hw,
if (sel->input_rate == 0)
return -EINVAL;
- BUG_ON(sel->p < 1);
-
cfg->input_rate = sel->input_rate;
cfg->output_rate = sel->output_rate;
cfg->m = sel->m;
@@ -257,6 +326,7 @@ static int _calc_rate(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg,
unsigned long rate, unsigned long parent_rate)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
+ struct pdiv_map *p_tohw = pll->params->pdiv_tohw;
unsigned long cfreq;
u32 p_div = 0;
@@ -290,88 +360,119 @@ static int _calc_rate(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg,
cfg->output_rate <<= 1)
p_div++;
- cfg->p = 1 << p_div;
cfg->m = parent_rate / cfreq;
cfg->n = cfg->output_rate / cfreq;
cfg->cpcon = OUT_OF_TABLE_CPCON;
if (cfg->m > divm_max(pll) || cfg->n > divn_max(pll) ||
- cfg->p > divp_max(pll) || cfg->output_rate > pll->params->vco_max) {
+ (1 << p_div) > divp_max(pll)
+ || cfg->output_rate > pll->params->vco_max) {
pr_err("%s: Failed to set %s rate %lu\n",
__func__, __clk_get_name(hw->clk), rate);
return -EINVAL;
}
+ if (p_tohw) {
+ p_div = 1 << p_div;
+ while (p_tohw->pdiv) {
+ if (p_div <= p_tohw->pdiv) {
+ cfg->p = p_tohw->hw_val;
+ break;
+ }
+ p_tohw++;
+ }
+ if (!p_tohw->pdiv)
+ return -EINVAL;
+ } else
+ cfg->p = p_div;
+
return 0;
}
-static int _program_pll(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg,
- unsigned long rate)
+static void _update_pll_mnp(struct tegra_clk_pll *pll,
+ struct tegra_clk_pll_freq_table *cfg)
{
- struct tegra_clk_pll *pll = to_clk_pll(hw);
- unsigned long flags = 0;
- u32 divp, val, old_base;
- int state;
-
- divp = __ffs(cfg->p);
-
- if (pll->flags & TEGRA_PLLU)
- divp ^= 1;
+ u32 val;
- if (pll->lock)
- spin_lock_irqsave(pll->lock, flags);
+ val = pll_readl_base(pll);
- old_base = val = pll_readl_base(pll);
val &= ~((divm_mask(pll) << pll->divm_shift) |
(divn_mask(pll) << pll->divn_shift) |
(divp_mask(pll) << pll->divp_shift));
val |= ((cfg->m << pll->divm_shift) |
(cfg->n << pll->divn_shift) |
- (divp << pll->divp_shift));
- if (val == old_base) {
- if (pll->lock)
- spin_unlock_irqrestore(pll->lock, flags);
- return 0;
+ (cfg->p << pll->divp_shift));
+
+ pll_writel_base(val, pll);
+}
+
+static void _get_pll_mnp(struct tegra_clk_pll *pll,
+ struct tegra_clk_pll_freq_table *cfg)
+{
+ u32 val;
+
+ val = pll_readl_base(pll);
+
+ cfg->m = (val >> pll->divm_shift) & (divm_mask(pll));
+ cfg->n = (val >> pll->divn_shift) & (divn_mask(pll));
+ cfg->p = (val >> pll->divp_shift) & (divp_mask(pll));
+}
+
+static void _update_pll_cpcon(struct tegra_clk_pll *pll,
+ struct tegra_clk_pll_freq_table *cfg,
+ unsigned long rate)
+{
+ u32 val;
+
+ val = pll_readl_misc(pll);
+
+ val &= ~(PLL_MISC_CPCON_MASK << PLL_MISC_CPCON_SHIFT);
+ val |= cfg->cpcon << PLL_MISC_CPCON_SHIFT;
+
+ if (pll->flags & TEGRA_PLL_SET_LFCON) {
+ val &= ~(PLL_MISC_LFCON_MASK << PLL_MISC_LFCON_SHIFT);
+ if (cfg->n >= PLLDU_LFCON_SET_DIVN)
+ val |= 1 << PLL_MISC_LFCON_SHIFT;
+ } else if (pll->flags & TEGRA_PLL_SET_DCCON) {
+ val &= ~(1 << PLL_MISC_DCCON_SHIFT);
+ if (rate >= (pll->params->vco_max >> 1))
+ val |= 1 << PLL_MISC_DCCON_SHIFT;
}
+ pll_writel_misc(val, pll);
+}
+
+static int _program_pll(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg,
+ unsigned long rate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ int state, ret = 0;
+
state = clk_pll_is_enabled(hw);
- if (state) {
+ if (state)
_clk_pll_disable(hw);
- val &= ~(PLL_BASE_BYPASS | PLL_BASE_ENABLE);
- }
- pll_writel_base(val, pll);
- if (pll->flags & TEGRA_PLL_HAS_CPCON) {
- val = pll_readl_misc(pll);
- val &= ~(PLL_MISC_CPCON_MASK << PLL_MISC_CPCON_SHIFT);
- val |= cfg->cpcon << PLL_MISC_CPCON_SHIFT;
- if (pll->flags & TEGRA_PLL_SET_LFCON) {
- val &= ~(PLL_MISC_LFCON_MASK << PLL_MISC_LFCON_SHIFT);
- if (cfg->n >= PLLDU_LFCON_SET_DIVN)
- val |= 0x1 << PLL_MISC_LFCON_SHIFT;
- } else if (pll->flags & TEGRA_PLL_SET_DCCON) {
- val &= ~(0x1 << PLL_MISC_DCCON_SHIFT);
- if (rate >= (pll->params->vco_max >> 1))
- val |= 0x1 << PLL_MISC_DCCON_SHIFT;
- }
- pll_writel_misc(val, pll);
- }
+ _update_pll_mnp(pll, cfg);
- if (pll->lock)
- spin_unlock_irqrestore(pll->lock, flags);
+ if (pll->flags & TEGRA_PLL_HAS_CPCON)
+ _update_pll_cpcon(pll, cfg, rate);
- if (state)
- clk_pll_enable(hw);
+ if (state) {
+ _clk_pll_enable(hw);
+ ret = clk_pll_wait_for_lock(pll);
+ }
- return 0;
+ return ret;
}
static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
- struct tegra_clk_pll_freq_table cfg;
+ struct tegra_clk_pll_freq_table cfg, old_cfg;
+ unsigned long flags = 0;
+ int ret = 0;
if (pll->flags & TEGRA_PLL_FIXED) {
if (rate != pll->fixed_rate) {
@@ -387,7 +488,18 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
_calc_rate(hw, &cfg, rate, parent_rate))
return -EINVAL;
- return _program_pll(hw, &cfg, rate);
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ _get_pll_mnp(pll, &old_cfg);
+
+ if (old_cfg.m != cfg.m || old_cfg.n != cfg.n || old_cfg.p != cfg.p)
+ ret = _program_pll(hw, &cfg, rate);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ return ret;
}
static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -409,7 +521,7 @@ static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
return -EINVAL;
output_rate *= cfg.n;
- do_div(output_rate, cfg.m * cfg.p);
+ do_div(output_rate, cfg.m * (1 << cfg.p));
return output_rate;
}
@@ -418,11 +530,15 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
- u32 val = pll_readl_base(pll);
- u32 divn = 0, divm = 0, divp = 0;
+ struct tegra_clk_pll_freq_table cfg;
+ struct pdiv_map *p_tohw = pll->params->pdiv_tohw;
+ u32 val;
u64 rate = parent_rate;
+ int pdiv;
+
+ val = pll_readl_base(pll);
- if (val & PLL_BASE_BYPASS)
+ if ((pll->flags & TEGRA_PLL_BYPASS) && (val & PLL_BASE_BYPASS))
return parent_rate;
if ((pll->flags & TEGRA_PLL_FIXED) && !(val & PLL_BASE_OVERRIDE)) {
@@ -435,16 +551,29 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
return pll->fixed_rate;
}
- divp = (val >> pll->divp_shift) & (divp_mask(pll));
- if (pll->flags & TEGRA_PLLU)
- divp ^= 1;
+ _get_pll_mnp(pll, &cfg);
- divn = (val >> pll->divn_shift) & (divn_mask(pll));
- divm = (val >> pll->divm_shift) & (divm_mask(pll));
- divm *= (1 << divp);
+ if (p_tohw) {
+ while (p_tohw->pdiv) {
+ if (cfg.p == p_tohw->hw_val) {
+ pdiv = p_tohw->pdiv;
+ break;
+ }
+ p_tohw++;
+ }
+
+ if (!p_tohw->pdiv) {
+ WARN_ON(1);
+ pdiv = 1;
+ }
+ } else
+ pdiv = 1 << cfg.p;
+
+ cfg.m *= pdiv;
+
+ rate *= cfg.n;
+ do_div(rate, cfg.m);
- rate *= divn;
- do_div(rate, divm);
return rate;
}
@@ -538,8 +667,8 @@ static int clk_plle_enable(struct clk_hw *hw)
val |= (PLL_BASE_BYPASS | PLL_BASE_ENABLE);
pll_writel_base(val, pll);
- clk_pll_wait_for_lock(pll, pll->clk_base + pll->params->misc_reg,
- pll->params->lock_bit_idx);
+ clk_pll_wait_for_lock(pll);
+
return 0;
}
@@ -577,28 +706,531 @@ const struct clk_ops tegra_clk_plle_ops = {
.enable = clk_plle_enable,
};
-static struct clk *_tegra_clk_register_pll(const char *name,
- const char *parent_name, void __iomem *clk_base,
- void __iomem *pmc, unsigned long flags,
- unsigned long fixed_rate,
- struct tegra_clk_pll_params *pll_params, u8 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock,
- const struct clk_ops *ops)
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+
+static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params,
+ unsigned long parent_rate)
+{
+ if (parent_rate > pll_params->cf_max)
+ return 2;
+ else
+ return 1;
+}
+
+static int clk_pll_iddq_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ unsigned long flags = 0;
+
+ u32 val;
+ int ret;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ val = pll_readl(pll->params->iddq_reg, pll);
+ val &= ~BIT(pll->params->iddq_bit_idx);
+ pll_writel(val, pll->params->iddq_reg, pll);
+ udelay(2);
+
+ _clk_pll_enable(hw);
+
+ ret = clk_pll_wait_for_lock(pll);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ return ret;
+}
+
+static void clk_pll_iddq_disable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ unsigned long flags = 0;
+ u32 val;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ _clk_pll_disable(hw);
+
+ val = pll_readl(pll->params->iddq_reg, pll);
+ val |= BIT(pll->params->iddq_bit_idx);
+ pll_writel(val, pll->params->iddq_reg, pll);
+ udelay(2);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+}
+
+static int _calc_dynamic_ramp_rate(struct clk_hw *hw,
+ struct tegra_clk_pll_freq_table *cfg,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ unsigned int p;
+
+ if (!rate)
+ return -EINVAL;
+
+ p = DIV_ROUND_UP(pll->params->vco_min, rate);
+ cfg->m = _pll_fixed_mdiv(pll->params, parent_rate);
+ cfg->p = p;
+ cfg->output_rate = rate * cfg->p;
+ cfg->n = cfg->output_rate * cfg->m / parent_rate;
+
+ if (cfg->n > divn_max(pll) || cfg->output_rate > pll->params->vco_max)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int _pll_ramp_calc_pll(struct clk_hw *hw,
+ struct tegra_clk_pll_freq_table *cfg,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ int err = 0;
+
+ err = _get_table_rate(hw, cfg, rate, parent_rate);
+ if (err < 0)
+ err = _calc_dynamic_ramp_rate(hw, cfg, rate, parent_rate);
+ else if (cfg->m != _pll_fixed_mdiv(pll->params, parent_rate)) {
+ WARN_ON(1);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (!cfg->p || (cfg->p > pll->params->max_p))
+ err = -EINVAL;
+
+out:
+ return err;
+}
+
+static int clk_pllxc_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ struct tegra_clk_pll_freq_table cfg, old_cfg;
+ unsigned long flags = 0;
+ int ret = 0;
+ u8 old_p;
+
+ ret = _pll_ramp_calc_pll(hw, &cfg, rate, parent_rate);
+ if (ret < 0)
+ return ret;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ _get_pll_mnp(pll, &old_cfg);
+
+ old_p = pllxc_p[old_cfg.p];
+ if (old_cfg.m != cfg.m || old_cfg.n != cfg.n || old_p != cfg.p) {
+ cfg.p -= 1;
+ ret = _program_pll(hw, &cfg, rate);
+ }
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ return ret;
+}
+
+static long clk_pll_ramp_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct tegra_clk_pll_freq_table cfg;
+ int ret = 0;
+ u64 output_rate = *prate;
+
+ ret = _pll_ramp_calc_pll(hw, &cfg, rate, *prate);
+ if (ret < 0)
+ return ret;
+
+ output_rate *= cfg.n;
+ do_div(output_rate, cfg.m * cfg.p);
+
+ return output_rate;
+}
+
+static int clk_pllm_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_pll_freq_table cfg;
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ unsigned long flags = 0;
+ int state, ret = 0;
+ u32 val;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ state = clk_pll_is_enabled(hw);
+ if (state) {
+ if (rate != clk_get_rate(hw->clk)) {
+ pr_err("%s: Cannot change active PLLM\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+ goto out;
+ }
+
+ ret = _pll_ramp_calc_pll(hw, &cfg, rate, parent_rate);
+ if (ret < 0)
+ goto out;
+
+ cfg.p -= 1;
+
+ val = readl_relaxed(pll->pmc + PMC_PLLM_WB0_OVERRIDE);
+ if (val & PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE) {
+ val = readl_relaxed(pll->pmc + PMC_PLLM_WB0_OVERRIDE_2);
+ val = cfg.p ? (val | PMC_PLLM_WB0_OVERRIDE_2_DIVP_MASK) :
+ (val & ~PMC_PLLM_WB0_OVERRIDE_2_DIVP_MASK);
+ writel_relaxed(val, pll->pmc + PMC_PLLM_WB0_OVERRIDE_2);
+
+ val = readl_relaxed(pll->pmc + PMC_PLLM_WB0_OVERRIDE);
+ val &= ~(divn_mask(pll) | divm_mask(pll));
+ val |= (cfg.m << pll->divm_shift) | (cfg.n << pll->divn_shift);
+ writel_relaxed(val, pll->pmc + PMC_PLLM_WB0_OVERRIDE);
+ } else
+ _update_pll_mnp(pll, &cfg);
+
+
+out:
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ return ret;
+}
+
+static void _pllcx_strobe(struct tegra_clk_pll *pll)
+{
+ u32 val;
+
+ val = pll_readl_misc(pll);
+ val |= PLLCX_MISC_STROBE;
+ pll_writel_misc(val, pll);
+ udelay(2);
+
+ val &= ~PLLCX_MISC_STROBE;
+ pll_writel_misc(val, pll);
+}
+
+static int clk_pllc_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ u32 val;
+ int ret = 0;
+ unsigned long flags = 0;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ _clk_pll_enable(hw);
+ udelay(2);
+
+ val = pll_readl_misc(pll);
+ val &= ~PLLCX_MISC_RESET;
+ pll_writel_misc(val, pll);
+ udelay(2);
+
+ _pllcx_strobe(pll);
+
+ ret = clk_pll_wait_for_lock(pll);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ return ret;
+}
+
+static void _clk_pllc_disable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ u32 val;
+
+ _clk_pll_disable(hw);
+
+ val = pll_readl_misc(pll);
+ val |= PLLCX_MISC_RESET;
+ pll_writel_misc(val, pll);
+ udelay(2);
+}
+
+static void clk_pllc_disable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ unsigned long flags = 0;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ _clk_pllc_disable(hw);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+}
+
+static int _pllcx_update_dynamic_coef(struct tegra_clk_pll *pll,
+ unsigned long input_rate, u32 n)
+{
+ u32 val, n_threshold;
+
+ switch (input_rate) {
+ case 12000000:
+ n_threshold = 70;
+ break;
+ case 13000000:
+ case 26000000:
+ n_threshold = 71;
+ break;
+ case 16800000:
+ n_threshold = 55;
+ break;
+ case 19200000:
+ n_threshold = 48;
+ break;
+ default:
+ pr_err("%s: Unexpected reference rate %lu\n",
+ __func__, input_rate);
+ return -EINVAL;
+ }
+
+ val = pll_readl_misc(pll);
+ val &= ~(PLLCX_MISC_SDM_DIV_MASK | PLLCX_MISC_FILT_DIV_MASK);
+ val |= n <= n_threshold ?
+ PLLCX_MISC_DIV_LOW_RANGE : PLLCX_MISC_DIV_HIGH_RANGE;
+ pll_writel_misc(val, pll);
+
+ return 0;
+}
+
+static int clk_pllc_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_pll_freq_table cfg;
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ unsigned long flags = 0;
+ int state, ret = 0;
+ u32 val;
+ u16 old_m, old_n;
+ u8 old_p;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ ret = _pll_ramp_calc_pll(hw, &cfg, rate, parent_rate);
+ if (ret < 0)
+ goto out;
+
+ val = pll_readl_base(pll);
+ old_m = (val >> pll->divm_shift) & (divm_mask(pll));
+ old_n = (val >> pll->divn_shift) & (divn_mask(pll));
+ old_p = pllcx_p[(val >> pll->divp_shift) & (divp_mask(pll))];
+
+ if (cfg.m != old_m) {
+ WARN_ON(1);
+ goto out;
+ }
+
+ if (old_n == cfg.n && old_p == cfg.p)
+ goto out;
+
+ cfg.p -= 1;
+
+ state = clk_pll_is_enabled(hw);
+ if (state)
+ _clk_pllc_disable(hw);
+
+ ret = _pllcx_update_dynamic_coef(pll, parent_rate, cfg.n);
+ if (ret < 0)
+ goto out;
+
+ _update_pll_mnp(pll, &cfg);
+
+ if (state)
+ ret = clk_pllc_enable(hw);
+
+out:
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ return ret;
+}
+
+static long _pllre_calc_rate(struct tegra_clk_pll *pll,
+ struct tegra_clk_pll_freq_table *cfg,
+ unsigned long rate, unsigned long parent_rate)
+{
+ u16 m, n;
+ u64 output_rate = parent_rate;
+
+ m = _pll_fixed_mdiv(pll->params, parent_rate);
+ n = rate * m / parent_rate;
+
+ output_rate *= n;
+ do_div(output_rate, m);
+
+ if (cfg) {
+ cfg->m = m;
+ cfg->n = n;
+ }
+
+ return output_rate;
+}
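+
+/*
+ * Worked example (assuming _pll_fixed_mdiv() picks m = 1 for a 12 MHz
+ * reference): requesting rate = 300000000 gives n = 300000000 * 1 / 12000000
+ * = 25, and the returned rate is 12000000 * 25 / 1 = 300000000.
+ */
+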
+static int clk_pllre_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_pll_freq_table cfg, old_cfg;
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ unsigned long flags = 0;
+ int state, ret = 0;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ _pllre_calc_rate(pll, &cfg, rate, parent_rate);
+ _get_pll_mnp(pll, &old_cfg);
+ cfg.p = old_cfg.p;
+
+ if (cfg.m != old_cfg.m || cfg.n != old_cfg.n) {
+ state = clk_pll_is_enabled(hw);
+ if (state)
+ _clk_pll_disable(hw);
+
+ _update_pll_mnp(pll, &cfg);
+
+ if (state) {
+ _clk_pll_enable(hw);
+ ret = clk_pll_wait_for_lock(pll);
+ }
+ }
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ return ret;
+}
+
+static unsigned long clk_pllre_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_pll_freq_table cfg;
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ u64 rate = parent_rate;
+
+ _get_pll_mnp(pll, &cfg);
+
+ rate *= cfg.n;
+ do_div(rate, cfg.m);
+
+ return rate;
+}
+
+static long clk_pllre_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+
+ return _pllre_calc_rate(pll, NULL, rate, *prate);
+}
+
+static int clk_plle_tegra114_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ struct tegra_clk_pll_freq_table sel;
+ u32 val;
+ int ret;
+ unsigned long flags = 0;
+ unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk));
+
+ if (_get_table_rate(hw, &sel, pll->fixed_rate, input_rate))
+ return -EINVAL;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ val = pll_readl_base(pll);
+ val &= ~BIT(29); /* Disable lock override */
+ pll_writel_base(val, pll);
+
+ val = pll_readl(pll->params->aux_reg, pll);
+ val |= PLLE_AUX_ENABLE_SWCTL;
+ val &= ~PLLE_AUX_SEQ_ENABLE;
+ pll_writel(val, pll->params->aux_reg, pll);
+ udelay(1);
+
+ val = pll_readl_misc(pll);
+ val |= PLLE_MISC_LOCK_ENABLE;
+ val |= PLLE_MISC_IDDQ_SW_CTRL;
+ val &= ~PLLE_MISC_IDDQ_SW_VALUE;
+ val |= PLLE_MISC_PLLE_PTS;
+ val |= PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK;
+ pll_writel_misc(val, pll);
+ udelay(5);
+
+ val = pll_readl(PLLE_SS_CTRL, pll);
+ val |= PLLE_SS_DISABLE;
+ pll_writel(val, PLLE_SS_CTRL, pll);
+
+ val = pll_readl_base(pll);
+ val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll));
+ val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT);
+ val |= sel.m << pll->divm_shift;
+ val |= sel.n << pll->divn_shift;
+ val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
+ pll_writel_base(val, pll);
+ udelay(1);
+
+ _clk_pll_enable(hw);
+ ret = clk_pll_wait_for_lock(pll);
+
+ if (ret < 0)
+ goto out;
+
+ /* TODO: enable hw control of xusb brick pll */
+
+out:
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ return ret;
+}
+
+static void clk_plle_tegra114_disable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ unsigned long flags = 0;
+ u32 val;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ _clk_pll_disable(hw);
+
+ val = pll_readl_misc(pll);
+ val |= PLLE_MISC_IDDQ_SW_CTRL | PLLE_MISC_IDDQ_SW_VALUE;
+ pll_writel_misc(val, pll);
+ udelay(1);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+}
+#endif
+
+static struct tegra_clk_pll *_tegra_init_pll(void __iomem *clk_base,
+ void __iomem *pmc, unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params, u32 pll_flags,
+ struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock)
{
struct tegra_clk_pll *pll;
- struct clk *clk;
- struct clk_init_data init;
pll = kzalloc(sizeof(*pll), GFP_KERNEL);
if (!pll)
return ERR_PTR(-ENOMEM);
- init.name = name;
- init.ops = ops;
- init.flags = flags;
- init.parent_names = (parent_name ? &parent_name : NULL);
- init.num_parents = (parent_name ? 1 : 0);
-
pll->clk_base = clk_base;
pll->pmc = pmc;
@@ -615,34 +1247,336 @@ static struct clk *_tegra_clk_register_pll(const char *name,
pll->divm_shift = PLL_BASE_DIVM_SHIFT;
pll->divm_width = PLL_BASE_DIVM_WIDTH;
+ return pll;
+}
+
+static struct clk *_tegra_clk_register_pll(struct tegra_clk_pll *pll,
+ const char *name, const char *parent_name, unsigned long flags,
+ const struct clk_ops *ops)
+{
+ struct clk_init_data init;
+
+ init.name = name;
+ init.ops = ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
/* Data in .init is copied by clk_register(), so stack variable OK */
pll->hw.init = &init;
- clk = clk_register(NULL, &pll->hw);
- if (IS_ERR(clk))
- kfree(pll);
-
- return clk;
+ return clk_register(NULL, &pll->hw);
}
struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
unsigned long flags, unsigned long fixed_rate,
- struct tegra_clk_pll_params *pll_params, u8 pll_flags,
+ struct tegra_clk_pll_params *pll_params, u32 pll_flags,
struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock)
{
- return _tegra_clk_register_pll(name, parent_name, clk_base, pmc,
- flags, fixed_rate, pll_params, pll_flags, freq_table,
- lock, &tegra_clk_pll_ops);
+ struct tegra_clk_pll *pll;
+ struct clk *clk;
+
+ pll_flags |= TEGRA_PLL_BYPASS;
+ pll_flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
+ pll = _tegra_init_pll(clk_base, pmc, fixed_rate, pll_params, pll_flags,
+ freq_table, lock);
+ if (IS_ERR(pll))
+ return ERR_CAST(pll);
+
+ clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
+ &tegra_clk_pll_ops);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
}
struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
unsigned long flags, unsigned long fixed_rate,
- struct tegra_clk_pll_params *pll_params, u8 pll_flags,
+ struct tegra_clk_pll_params *pll_params, u32 pll_flags,
struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock)
{
- return _tegra_clk_register_pll(name, parent_name, clk_base, pmc,
- flags, fixed_rate, pll_params, pll_flags, freq_table,
- lock, &tegra_clk_plle_ops);
+ struct tegra_clk_pll *pll;
+ struct clk *clk;
+
+ pll_flags |= TEGRA_PLL_LOCK_MISC | TEGRA_PLL_BYPASS;
+ pll_flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
+ pll = _tegra_init_pll(clk_base, pmc, fixed_rate, pll_params, pll_flags,
+ freq_table, lock);
+ if (IS_ERR(pll))
+ return ERR_CAST(pll);
+
+ clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
+ &tegra_clk_plle_ops);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
+
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+const struct clk_ops tegra_clk_pllxc_ops = {
+ .is_enabled = clk_pll_is_enabled,
+ .enable = clk_pll_iddq_enable,
+ .disable = clk_pll_iddq_disable,
+ .recalc_rate = clk_pll_recalc_rate,
+ .round_rate = clk_pll_ramp_round_rate,
+ .set_rate = clk_pllxc_set_rate,
+};
+
+const struct clk_ops tegra_clk_pllm_ops = {
+ .is_enabled = clk_pll_is_enabled,
+ .enable = clk_pll_iddq_enable,
+ .disable = clk_pll_iddq_disable,
+ .recalc_rate = clk_pll_recalc_rate,
+ .round_rate = clk_pll_ramp_round_rate,
+ .set_rate = clk_pllm_set_rate,
+};
+
+const struct clk_ops tegra_clk_pllc_ops = {
+ .is_enabled = clk_pll_is_enabled,
+ .enable = clk_pllc_enable,
+ .disable = clk_pllc_disable,
+ .recalc_rate = clk_pll_recalc_rate,
+ .round_rate = clk_pll_ramp_round_rate,
+ .set_rate = clk_pllc_set_rate,
+};
+
+const struct clk_ops tegra_clk_pllre_ops = {
+ .is_enabled = clk_pll_is_enabled,
+ .enable = clk_pll_iddq_enable,
+ .disable = clk_pll_iddq_disable,
+ .recalc_rate = clk_pllre_recalc_rate,
+ .round_rate = clk_pllre_round_rate,
+ .set_rate = clk_pllre_set_rate,
+};
+
+const struct clk_ops tegra_clk_plle_tegra114_ops = {
+ .is_enabled = clk_pll_is_enabled,
+ .enable = clk_plle_tegra114_enable,
+ .disable = clk_plle_tegra114_disable,
+ .recalc_rate = clk_pll_recalc_rate,
+};
+
+struct clk *tegra_clk_register_pllxc(const char *name, const char *parent_name,
+ void __iomem *clk_base, void __iomem *pmc,
+ unsigned long flags, unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params,
+ u32 pll_flags,
+ struct tegra_clk_pll_freq_table *freq_table,
+ spinlock_t *lock)
+{
+ struct tegra_clk_pll *pll;
+ struct clk *clk;
+
+ if (!pll_params->pdiv_tohw)
+ return ERR_PTR(-EINVAL);
+
+ pll_flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
+ pll = _tegra_init_pll(clk_base, pmc, fixed_rate, pll_params, pll_flags,
+ freq_table, lock);
+ if (IS_ERR(pll))
+ return ERR_CAST(pll);
+
+ clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
+ &tegra_clk_pllxc_ops);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
+
+struct clk *tegra_clk_register_pllre(const char *name, const char *parent_name,
+ void __iomem *clk_base, void __iomem *pmc,
+ unsigned long flags, unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params,
+ u32 pll_flags,
+ struct tegra_clk_pll_freq_table *freq_table,
+ spinlock_t *lock, unsigned long parent_rate)
+{
+ u32 val;
+ struct tegra_clk_pll *pll;
+ struct clk *clk;
+
+ pll_flags |= TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_LOCK_MISC;
+ pll = _tegra_init_pll(clk_base, pmc, fixed_rate, pll_params, pll_flags,
+ freq_table, lock);
+ if (IS_ERR(pll))
+ return ERR_CAST(pll);
+
+ /* program minimum rate by default */
+
+ val = pll_readl_base(pll);
+ if (val & PLL_BASE_ENABLE) {
+ WARN_ON(readl_relaxed(clk_base + pll_params->iddq_reg) &
+ BIT(pll_params->iddq_bit_idx));
+ } else {
+ int m;
+
+ m = _pll_fixed_mdiv(pll_params, parent_rate);
+ val = m << PLL_BASE_DIVM_SHIFT;
+ val |= (pll_params->vco_min / parent_rate)
+ << PLL_BASE_DIVN_SHIFT;
+ pll_writel_base(val, pll);
+ }
+
+ /* disable lock override */
+
+ val = pll_readl_misc(pll);
+ val &= ~BIT(29);
+ pll_writel_misc(val, pll);
+
+ clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
+ &tegra_clk_pllre_ops);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
+
+struct clk *tegra_clk_register_pllm(const char *name, const char *parent_name,
+ void __iomem *clk_base, void __iomem *pmc,
+ unsigned long flags, unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params,
+ u32 pll_flags,
+ struct tegra_clk_pll_freq_table *freq_table,
+ spinlock_t *lock)
+{
+ struct tegra_clk_pll *pll;
+ struct clk *clk;
+
+ if (!pll_params->pdiv_tohw)
+ return ERR_PTR(-EINVAL);
+
+ pll_flags |= TEGRA_PLL_BYPASS;
+ pll_flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
+ pll = _tegra_init_pll(clk_base, pmc, fixed_rate, pll_params, pll_flags,
+ freq_table, lock);
+ if (IS_ERR(pll))
+ return ERR_CAST(pll);
+
+ clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
+ &tegra_clk_pllm_ops);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
+
+struct clk *tegra_clk_register_pllc(const char *name, const char *parent_name,
+ void __iomem *clk_base, void __iomem *pmc,
+ unsigned long flags, unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params,
+ u32 pll_flags,
+ struct tegra_clk_pll_freq_table *freq_table,
+ spinlock_t *lock)
+{
+ struct clk *parent, *clk;
+ struct pdiv_map *p_tohw = pll_params->pdiv_tohw;
+ struct tegra_clk_pll *pll;
+ struct tegra_clk_pll_freq_table cfg;
+ unsigned long parent_rate;
+
+ if (!p_tohw)
+ return ERR_PTR(-EINVAL);
+
+ parent = __clk_lookup(parent_name);
+ if (!parent) {
+ WARN(1, "parent clk %s of %s must be registered first\n",
+ parent_name, name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ pll_flags |= TEGRA_PLL_BYPASS;
+ pll = _tegra_init_pll(clk_base, pmc, fixed_rate, pll_params, pll_flags,
+ freq_table, lock);
+ if (IS_ERR(pll))
+ return ERR_CAST(pll);
+
+ parent_rate = __clk_get_rate(parent);
+
+ /*
+ * Most PLLC register fields are shadowed and cannot be read directly
+ * from the PLL h/w, so the actual PLLC boot state is unknown.
+ * Initialize the PLL to a default state: disabled, in reset; shadow
+ * registers loaded with default parameters; dividers preset for half of
+ * the minimum VCO rate (the latter ensures that the shadowed divider
+ * settings stay within the supported range).
+ */
+
+ cfg.m = _pll_fixed_mdiv(pll_params, parent_rate);
+ cfg.n = cfg.m * pll_params->vco_min / parent_rate;
+
+ while (p_tohw->pdiv) {
+ if (p_tohw->pdiv == 2) {
+ cfg.p = p_tohw->hw_val;
+ break;
+ }
+ p_tohw++;
+ }
+
+ if (!p_tohw->pdiv) {
+ WARN_ON(1);
+ return ERR_PTR(-EINVAL);
+ }
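+
+ /*
+ * With n = m * vco_min / parent_rate and pdiv = 2, the default output
+ * comes out to roughly vco_min / 2 regardless of the fixed m value
+ * (about 300 MHz for the 600 MHz minimum VCO of PLLC2/PLLC3).
+ */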
+
+ pll_writel_base(0, pll);
+ _update_pll_mnp(pll, &cfg);
+
+ pll_writel_misc(PLLCX_MISC_DEFAULT, pll);
+ pll_writel(PLLCX_MISC1_DEFAULT, pll_params->ext_misc_reg[0], pll);
+ pll_writel(PLLCX_MISC2_DEFAULT, pll_params->ext_misc_reg[1], pll);
+ pll_writel(PLLCX_MISC3_DEFAULT, pll_params->ext_misc_reg[2], pll);
+
+ _pllcx_update_dynamic_coef(pll, parent_rate, cfg.n);
+
+ clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
+ &tegra_clk_pllc_ops);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
+
+struct clk *tegra_clk_register_plle_tegra114(const char *name,
+ const char *parent_name,
+ void __iomem *clk_base, unsigned long flags,
+ unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params,
+ struct tegra_clk_pll_freq_table *freq_table,
+ spinlock_t *lock)
+{
+ struct tegra_clk_pll *pll;
+ struct clk *clk;
+ u32 val, val_aux;
+
+ pll = _tegra_init_pll(clk_base, NULL, fixed_rate, pll_params,
+ TEGRA_PLL_HAS_LOCK_ENABLE, freq_table, lock);
+ if (IS_ERR(pll))
+ return ERR_CAST(pll);
+
+ /* ensure parent is set to pll_re_vco */
+
+ val = pll_readl_base(pll);
+ val_aux = pll_readl(pll_params->aux_reg, pll);
+
+ if (val & PLL_BASE_ENABLE) {
+ if (!(val_aux & PLLE_AUX_PLLRE_SEL))
+ WARN(1, "pll_e enabled with unsupported parent %s\n",
+ (val_aux & PLLE_AUX_PLLP_SEL) ? "pllp_out0" :
+ "pll_ref");
+ } else {
+ val_aux |= PLLE_AUX_PLLRE_SEL;
+ pll_writel(val_aux, pll_params->aux_reg, pll);
+ }
+
+ clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
+ &tegra_clk_plle_tegra114_ops);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
}
+#endif
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
new file mode 100644
index 0000000..d78e16e
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -0,0 +1,2085 @@
+/*
+ * Copyright (c) 2012, 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/clk/tegra.h>
+
+#include "clk.h"
+
+#define RST_DEVICES_L 0x004
+#define RST_DEVICES_H 0x008
+#define RST_DEVICES_U 0x00C
+#define RST_DEVICES_V 0x358
+#define RST_DEVICES_W 0x35C
+#define RST_DEVICES_X 0x28C
+#define RST_DEVICES_SET_L 0x300
+#define RST_DEVICES_CLR_L 0x304
+#define RST_DEVICES_SET_H 0x308
+#define RST_DEVICES_CLR_H 0x30c
+#define RST_DEVICES_SET_U 0x310
+#define RST_DEVICES_CLR_U 0x314
+#define RST_DEVICES_SET_V 0x430
+#define RST_DEVICES_CLR_V 0x434
+#define RST_DEVICES_SET_W 0x438
+#define RST_DEVICES_CLR_W 0x43c
+#define RST_DEVICES_NUM 5
+
+#define CLK_OUT_ENB_L 0x010
+#define CLK_OUT_ENB_H 0x014
+#define CLK_OUT_ENB_U 0x018
+#define CLK_OUT_ENB_V 0x360
+#define CLK_OUT_ENB_W 0x364
+#define CLK_OUT_ENB_X 0x280
+#define CLK_OUT_ENB_SET_L 0x320
+#define CLK_OUT_ENB_CLR_L 0x324
+#define CLK_OUT_ENB_SET_H 0x328
+#define CLK_OUT_ENB_CLR_H 0x32c
+#define CLK_OUT_ENB_SET_U 0x330
+#define CLK_OUT_ENB_CLR_U 0x334
+#define CLK_OUT_ENB_SET_V 0x440
+#define CLK_OUT_ENB_CLR_V 0x444
+#define CLK_OUT_ENB_SET_W 0x448
+#define CLK_OUT_ENB_CLR_W 0x44c
+#define CLK_OUT_ENB_SET_X 0x284
+#define CLK_OUT_ENB_CLR_X 0x288
+#define CLK_OUT_ENB_NUM 6
+
+#define PLLC_BASE 0x80
+#define PLLC_MISC2 0x88
+#define PLLC_MISC 0x8c
+#define PLLC2_BASE 0x4e8
+#define PLLC2_MISC 0x4ec
+#define PLLC3_BASE 0x4fc
+#define PLLC3_MISC 0x500
+#define PLLM_BASE 0x90
+#define PLLM_MISC 0x9c
+#define PLLP_BASE 0xa0
+#define PLLP_MISC 0xac
+#define PLLX_BASE 0xe0
+#define PLLX_MISC 0xe4
+#define PLLX_MISC2 0x514
+#define PLLX_MISC3 0x518
+#define PLLD_BASE 0xd0
+#define PLLD_MISC 0xdc
+#define PLLD2_BASE 0x4b8
+#define PLLD2_MISC 0x4bc
+#define PLLE_BASE 0xe8
+#define PLLE_MISC 0xec
+#define PLLA_BASE 0xb0
+#define PLLA_MISC 0xbc
+#define PLLU_BASE 0xc0
+#define PLLU_MISC 0xcc
+#define PLLRE_BASE 0x4c4
+#define PLLRE_MISC 0x4c8
+
+#define PLL_MISC_LOCK_ENABLE 18
+#define PLLC_MISC_LOCK_ENABLE 24
+#define PLLDU_MISC_LOCK_ENABLE 22
+#define PLLE_MISC_LOCK_ENABLE 9
+#define PLLRE_MISC_LOCK_ENABLE 30
+
+#define PLLC_IDDQ_BIT 26
+#define PLLX_IDDQ_BIT 3
+#define PLLRE_IDDQ_BIT 16
+
+#define PLL_BASE_LOCK BIT(27)
+#define PLLE_MISC_LOCK BIT(11)
+#define PLLRE_MISC_LOCK BIT(24)
+#define PLLCX_BASE_LOCK (BIT(26)|BIT(27))
+
+#define PLLE_AUX 0x48c
+#define PLLC_OUT 0x84
+#define PLLM_OUT 0x94
+#define PLLP_OUTA 0xa4
+#define PLLP_OUTB 0xa8
+#define PLLA_OUT 0xb4
+
+#define AUDIO_SYNC_CLK_I2S0 0x4a0
+#define AUDIO_SYNC_CLK_I2S1 0x4a4
+#define AUDIO_SYNC_CLK_I2S2 0x4a8
+#define AUDIO_SYNC_CLK_I2S3 0x4ac
+#define AUDIO_SYNC_CLK_I2S4 0x4b0
+#define AUDIO_SYNC_CLK_SPDIF 0x4b4
+
+#define AUDIO_SYNC_DOUBLER 0x49c
+
+#define PMC_CLK_OUT_CNTRL 0x1a8
+#define PMC_DPD_PADS_ORIDE 0x1c
+#define PMC_DPD_PADS_ORIDE_BLINK_ENB 20
+#define PMC_CTRL 0
+#define PMC_CTRL_BLINK_ENB 7
+
+#define OSC_CTRL 0x50
+#define OSC_CTRL_OSC_FREQ_SHIFT 28
+#define OSC_CTRL_PLL_REF_DIV_SHIFT 26
+
+#define PLLXC_SW_MAX_P 6
+
+#define CCLKG_BURST_POLICY 0x368
+#define CCLKLP_BURST_POLICY 0x370
+#define SCLK_BURST_POLICY 0x028
+#define SYSTEM_CLK_RATE 0x030
+
+#define UTMIP_PLL_CFG2 0x488
+#define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xffff) << 6)
+#define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN BIT(0)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN BIT(2)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN BIT(4)
+
+#define UTMIP_PLL_CFG1 0x484
+#define UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 6)
+#define UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0)
+#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP BIT(17)
+#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN BIT(16)
+#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP BIT(15)
+#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN BIT(14)
+#define UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN BIT(12)
+
+#define UTMIPLL_HW_PWRDN_CFG0 0x52c
+#define UTMIPLL_HW_PWRDN_CFG0_SEQ_START_STATE BIT(25)
+#define UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE BIT(24)
+#define UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET BIT(6)
+#define UTMIPLL_HW_PWRDN_CFG0_SEQ_RESET_INPUT_VALUE BIT(5)
+#define UTMIPLL_HW_PWRDN_CFG0_SEQ_IN_SWCTL BIT(4)
+#define UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL BIT(2)
+#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE BIT(1)
+#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_SWCTL BIT(0)
+
+#define CLK_SOURCE_I2S0 0x1d8
+#define CLK_SOURCE_I2S1 0x100
+#define CLK_SOURCE_I2S2 0x104
+#define CLK_SOURCE_NDFLASH 0x160
+#define CLK_SOURCE_I2S3 0x3bc
+#define CLK_SOURCE_I2S4 0x3c0
+#define CLK_SOURCE_SPDIF_OUT 0x108
+#define CLK_SOURCE_SPDIF_IN 0x10c
+#define CLK_SOURCE_PWM 0x110
+#define CLK_SOURCE_ADX 0x638
+#define CLK_SOURCE_AMX 0x63c
+#define CLK_SOURCE_HDA 0x428
+#define CLK_SOURCE_HDA2CODEC_2X 0x3e4
+#define CLK_SOURCE_SBC1 0x134
+#define CLK_SOURCE_SBC2 0x118
+#define CLK_SOURCE_SBC3 0x11c
+#define CLK_SOURCE_SBC4 0x1b4
+#define CLK_SOURCE_SBC5 0x3c8
+#define CLK_SOURCE_SBC6 0x3cc
+#define CLK_SOURCE_SATA_OOB 0x420
+#define CLK_SOURCE_SATA 0x424
+#define CLK_SOURCE_NDSPEED 0x3f8
+#define CLK_SOURCE_VFIR 0x168
+#define CLK_SOURCE_SDMMC1 0x150
+#define CLK_SOURCE_SDMMC2 0x154
+#define CLK_SOURCE_SDMMC3 0x1bc
+#define CLK_SOURCE_SDMMC4 0x164
+#define CLK_SOURCE_VDE 0x1c8
+#define CLK_SOURCE_CSITE 0x1d4
+#define CLK_SOURCE_LA 0x1f8
+#define CLK_SOURCE_TRACE 0x634
+#define CLK_SOURCE_OWR 0x1cc
+#define CLK_SOURCE_NOR 0x1d0
+#define CLK_SOURCE_MIPI 0x174
+#define CLK_SOURCE_I2C1 0x124
+#define CLK_SOURCE_I2C2 0x198
+#define CLK_SOURCE_I2C3 0x1b8
+#define CLK_SOURCE_I2C4 0x3c4
+#define CLK_SOURCE_I2C5 0x128
+#define CLK_SOURCE_UARTA 0x178
+#define CLK_SOURCE_UARTB 0x17c
+#define CLK_SOURCE_UARTC 0x1a0
+#define CLK_SOURCE_UARTD 0x1c0
+#define CLK_SOURCE_UARTE 0x1c4
+#define CLK_SOURCE_UARTA_DBG 0x178
+#define CLK_SOURCE_UARTB_DBG 0x17c
+#define CLK_SOURCE_UARTC_DBG 0x1a0
+#define CLK_SOURCE_UARTD_DBG 0x1c0
+#define CLK_SOURCE_UARTE_DBG 0x1c4
+#define CLK_SOURCE_3D 0x158
+#define CLK_SOURCE_2D 0x15c
+#define CLK_SOURCE_VI_SENSOR 0x1a8
+#define CLK_SOURCE_VI 0x148
+#define CLK_SOURCE_EPP 0x16c
+#define CLK_SOURCE_MSENC 0x1f0
+#define CLK_SOURCE_TSEC 0x1f4
+#define CLK_SOURCE_HOST1X 0x180
+#define CLK_SOURCE_HDMI 0x18c
+#define CLK_SOURCE_DISP1 0x138
+#define CLK_SOURCE_DISP2 0x13c
+#define CLK_SOURCE_CILAB 0x614
+#define CLK_SOURCE_CILCD 0x618
+#define CLK_SOURCE_CILE 0x61c
+#define CLK_SOURCE_DSIALP 0x620
+#define CLK_SOURCE_DSIBLP 0x624
+#define CLK_SOURCE_TSENSOR 0x3b8
+#define CLK_SOURCE_D_AUDIO 0x3d0
+#define CLK_SOURCE_DAM0 0x3d8
+#define CLK_SOURCE_DAM1 0x3dc
+#define CLK_SOURCE_DAM2 0x3e0
+#define CLK_SOURCE_ACTMON 0x3e8
+#define CLK_SOURCE_EXTERN1 0x3ec
+#define CLK_SOURCE_EXTERN2 0x3f0
+#define CLK_SOURCE_EXTERN3 0x3f4
+#define CLK_SOURCE_I2CSLOW 0x3fc
+#define CLK_SOURCE_SE 0x42c
+#define CLK_SOURCE_MSELECT 0x3b4
+#define CLK_SOURCE_SOC_THERM 0x644
+#define CLK_SOURCE_XUSB_HOST_SRC 0x600
+#define CLK_SOURCE_XUSB_FALCON_SRC 0x604
+#define CLK_SOURCE_XUSB_FS_SRC 0x608
+#define CLK_SOURCE_XUSB_SS_SRC 0x610
+#define CLK_SOURCE_XUSB_DEV_SRC 0x60c
+#define CLK_SOURCE_EMC 0x19c
+
+static int periph_clk_enb_refcnt[CLK_OUT_ENB_NUM * 32];
+
+static void __iomem *clk_base;
+static void __iomem *pmc_base;
+
+static DEFINE_SPINLOCK(pll_d_lock);
+static DEFINE_SPINLOCK(pll_d2_lock);
+static DEFINE_SPINLOCK(pll_u_lock);
+static DEFINE_SPINLOCK(pll_div_lock);
+static DEFINE_SPINLOCK(pll_re_lock);
+static DEFINE_SPINLOCK(clk_doubler_lock);
+static DEFINE_SPINLOCK(clk_out_lock);
+static DEFINE_SPINLOCK(sysrate_lock);
+
+static struct pdiv_map pllxc_p[] = {
+ { .pdiv = 1, .hw_val = 0 },
+ { .pdiv = 2, .hw_val = 1 },
+ { .pdiv = 3, .hw_val = 2 },
+ { .pdiv = 4, .hw_val = 3 },
+ { .pdiv = 5, .hw_val = 4 },
+ { .pdiv = 6, .hw_val = 5 },
+ { .pdiv = 8, .hw_val = 6 },
+ { .pdiv = 10, .hw_val = 7 },
+ { .pdiv = 12, .hw_val = 8 },
+ { .pdiv = 16, .hw_val = 9 },
+ { .pdiv = 12, .hw_val = 10 },
+ { .pdiv = 16, .hw_val = 11 },
+ { .pdiv = 20, .hw_val = 12 },
+ { .pdiv = 24, .hw_val = 13 },
+ { .pdiv = 32, .hw_val = 14 },
+ { .pdiv = 0, .hw_val = 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_c_freq_table[] = {
+ { 12000000, 624000000, 104, 0, 2},
+ { 12000000, 600000000, 100, 0, 2},
+ { 13000000, 600000000, 92, 0, 2}, /* actual: 598.0 MHz */
+ { 16800000, 600000000, 71, 0, 2}, /* actual: 596.4 MHz */
+ { 19200000, 600000000, 62, 0, 2}, /* actual: 595.2 MHz */
+ { 26000000, 600000000, 92, 1, 2}, /* actual: 598.0 MHz */
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_params pll_c_params = {
+ .input_min = 12000000,
+ .input_max = 800000000,
+ .cf_min = 12000000,
+ .cf_max = 19200000, /* s/w policy, h/w capability 50 MHz */
+ .vco_min = 600000000,
+ .vco_max = 1400000000,
+ .base_reg = PLLC_BASE,
+ .misc_reg = PLLC_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLC_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .iddq_reg = PLLC_MISC,
+ .iddq_bit_idx = PLLC_IDDQ_BIT,
+ .max_p = PLLXC_SW_MAX_P,
+ .dyn_ramp_reg = PLLC_MISC2,
+ .stepa_shift = 17,
+ .stepb_shift = 9,
+ .pdiv_tohw = pllxc_p,
+};
+
+static struct pdiv_map pllc_p[] = {
+ { .pdiv = 1, .hw_val = 0 },
+ { .pdiv = 2, .hw_val = 1 },
+ { .pdiv = 4, .hw_val = 3 },
+ { .pdiv = 8, .hw_val = 5 },
+ { .pdiv = 16, .hw_val = 7 },
+ { .pdiv = 0, .hw_val = 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_cx_freq_table[] = {
+ {12000000, 600000000, 100, 0, 2},
+ {13000000, 600000000, 92, 0, 2}, /* actual: 598.0 MHz */
+ {16800000, 600000000, 71, 0, 2}, /* actual: 596.4 MHz */
+ {19200000, 600000000, 62, 0, 2}, /* actual: 595.2 MHz */
+ {26000000, 600000000, 92, 1, 2}, /* actual: 598.0 MHz */
+ {0, 0, 0, 0, 0, 0},
+};
+
+static struct tegra_clk_pll_params pll_c2_params = {
+ .input_min = 12000000,
+ .input_max = 48000000,
+ .cf_min = 12000000,
+ .cf_max = 19200000,
+ .vco_min = 600000000,
+ .vco_max = 1200000000,
+ .base_reg = PLLC2_BASE,
+ .misc_reg = PLLC2_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .pdiv_tohw = pllc_p,
+ .ext_misc_reg[0] = 0x4f0,
+ .ext_misc_reg[1] = 0x4f4,
+ .ext_misc_reg[2] = 0x4f8,
+};
+
+static struct tegra_clk_pll_params pll_c3_params = {
+ .input_min = 12000000,
+ .input_max = 48000000,
+ .cf_min = 12000000,
+ .cf_max = 19200000,
+ .vco_min = 600000000,
+ .vco_max = 1200000000,
+ .base_reg = PLLC3_BASE,
+ .misc_reg = PLLC3_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .pdiv_tohw = pllc_p,
+ .ext_misc_reg[0] = 0x504,
+ .ext_misc_reg[1] = 0x508,
+ .ext_misc_reg[2] = 0x50c,
+};
+
+static struct pdiv_map pllm_p[] = {
+ { .pdiv = 1, .hw_val = 0 },
+ { .pdiv = 2, .hw_val = 1 },
+ { .pdiv = 0, .hw_val = 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_m_freq_table[] = {
+ {12000000, 800000000, 66, 0, 1}, /* actual: 792.0 MHz */
+ {13000000, 800000000, 61, 0, 1}, /* actual: 793.0 MHz */
+ {16800000, 800000000, 47, 0, 1}, /* actual: 789.6 MHz */
+ {19200000, 800000000, 41, 0, 1}, /* actual: 787.2 MHz */
+ {26000000, 800000000, 61, 1, 1}, /* actual: 793.0 MHz */
+ {0, 0, 0, 0, 0, 0},
+};
+
+static struct tegra_clk_pll_params pll_m_params = {
+ .input_min = 12000000,
+ .input_max = 500000000,
+ .cf_min = 12000000,
+ .cf_max = 19200000, /* s/w policy, h/w capability 50 MHz */
+ .vco_min = 400000000,
+ .vco_max = 1066000000,
+ .base_reg = PLLM_BASE,
+ .misc_reg = PLLM_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .max_p = 2,
+ .pdiv_tohw = pllm_p,
+};
+
+static struct tegra_clk_pll_freq_table pll_p_freq_table[] = {
+ {12000000, 216000000, 432, 12, 1, 8},
+ {13000000, 216000000, 432, 13, 1, 8},
+ {16800000, 216000000, 360, 14, 1, 8},
+ {19200000, 216000000, 360, 16, 1, 8},
+ {26000000, 216000000, 432, 26, 1, 8},
+ {0, 0, 0, 0, 0, 0},
+};
+
+static struct tegra_clk_pll_params pll_p_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 200000000,
+ .vco_max = 700000000,
+ .base_reg = PLLP_BASE,
+ .misc_reg = PLLP_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_freq_table pll_a_freq_table[] = {
+ {9600000, 282240000, 147, 5, 0, 4},
+ {9600000, 368640000, 192, 5, 0, 4},
+ {9600000, 240000000, 200, 8, 0, 8},
+
+ {28800000, 282240000, 245, 25, 0, 8},
+ {28800000, 368640000, 320, 25, 0, 8},
+ {28800000, 240000000, 200, 24, 0, 8},
+ {0, 0, 0, 0, 0, 0},
+};
+
+static struct tegra_clk_pll_params pll_a_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 200000000,
+ .vco_max = 700000000,
+ .base_reg = PLLA_BASE,
+ .misc_reg = PLLA_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_freq_table pll_d_freq_table[] = {
+ {12000000, 216000000, 864, 12, 2, 12},
+ {13000000, 216000000, 864, 13, 2, 12},
+ {16800000, 216000000, 720, 14, 2, 12},
+ {19200000, 216000000, 720, 16, 2, 12},
+ {26000000, 216000000, 864, 26, 2, 12},
+
+ {12000000, 594000000, 594, 12, 0, 12},
+ {13000000, 594000000, 594, 13, 0, 12},
+ {16800000, 594000000, 495, 14, 0, 12},
+ {19200000, 594000000, 495, 16, 0, 12},
+ {26000000, 594000000, 594, 26, 0, 12},
+
+ {12000000, 1000000000, 1000, 12, 0, 12},
+ {13000000, 1000000000, 1000, 13, 0, 12},
+ {19200000, 1000000000, 625, 12, 0, 12},
+ {26000000, 1000000000, 1000, 26, 0, 12},
+
+ {0, 0, 0, 0, 0, 0},
+};
+
+static struct tegra_clk_pll_params pll_d_params = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 500000000,
+ .vco_max = 1000000000,
+ .base_reg = PLLD_BASE,
+ .misc_reg = PLLD_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
+ .lock_delay = 1000,
+};
+
+static struct tegra_clk_pll_params pll_d2_params = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 500000000,
+ .vco_max = 1000000000,
+ .base_reg = PLLD2_BASE,
+ .misc_reg = PLLD2_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
+ .lock_delay = 1000,
+};
+
+static struct pdiv_map pllu_p[] = {
+ { .pdiv = 1, .hw_val = 1 },
+ { .pdiv = 2, .hw_val = 0 },
+ { .pdiv = 0, .hw_val = 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_u_freq_table[] = {
+ {12000000, 480000000, 960, 12, 0, 12},
+ {13000000, 480000000, 960, 13, 0, 12},
+ {16800000, 480000000, 400, 7, 0, 5},
+ {19200000, 480000000, 200, 4, 0, 3},
+ {26000000, 480000000, 960, 26, 0, 12},
+ {0, 0, 0, 0, 0, 0},
+};
+
+static struct tegra_clk_pll_params pll_u_params = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 480000000,
+ .vco_max = 960000000,
+ .base_reg = PLLU_BASE,
+ .misc_reg = PLLU_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
+ .lock_delay = 1000,
+ .pdiv_tohw = pllu_p,
+};
+
+static struct tegra_clk_pll_freq_table pll_x_freq_table[] = {
+ /* 1 GHz */
+ {12000000, 1000000000, 83, 0, 1}, /* actual: 996.0 MHz */
+ {13000000, 1000000000, 76, 0, 1}, /* actual: 988.0 MHz */
+ {16800000, 1000000000, 59, 0, 1}, /* actual: 991.2 MHz */
+ {19200000, 1000000000, 52, 0, 1}, /* actual: 998.4 MHz */
+ {26000000, 1000000000, 76, 1, 1}, /* actual: 988.0 MHz */
+
+ {0, 0, 0, 0, 0, 0},
+};
+
+static struct tegra_clk_pll_params pll_x_params = {
+ .input_min = 12000000,
+ .input_max = 800000000,
+ .cf_min = 12000000,
+ .cf_max = 19200000, /* s/w policy, h/w capability 50 MHz */
+ .vco_min = 700000000,
+ .vco_max = 2400000000U,
+ .base_reg = PLLX_BASE,
+ .misc_reg = PLLX_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .iddq_reg = PLLX_MISC3,
+ .iddq_bit_idx = PLLX_IDDQ_BIT,
+ .max_p = PLLXC_SW_MAX_P,
+ .dyn_ramp_reg = PLLX_MISC2,
+ .stepa_shift = 16,
+ .stepb_shift = 24,
+ .pdiv_tohw = pllxc_p,
+};
+
+static struct tegra_clk_pll_freq_table pll_e_freq_table[] = {
+ /* PLLE special case: use cpcon field to store cml divider value */
+ {336000000, 100000000, 100, 21, 16, 11},
+ {312000000, 100000000, 200, 26, 24, 13},
+ {0, 0, 0, 0, 0, 0},
+};
+
+static struct tegra_clk_pll_params pll_e_params = {
+ .input_min = 12000000,
+ .input_max = 1000000000,
+ .cf_min = 12000000,
+ .cf_max = 75000000,
+ .vco_min = 1600000000,
+ .vco_max = 2400000000U,
+ .base_reg = PLLE_BASE,
+ .misc_reg = PLLE_MISC,
+ .aux_reg = PLLE_AUX,
+ .lock_mask = PLLE_MISC_LOCK,
+ .lock_enable_bit_idx = PLLE_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_re_vco_params = {
+ .input_min = 12000000,
+ .input_max = 1000000000,
+ .cf_min = 12000000,
+ .cf_max = 19200000, /* s/w policy, h/w capability 38 MHz */
+ .vco_min = 300000000,
+ .vco_max = 600000000,
+ .base_reg = PLLRE_BASE,
+ .misc_reg = PLLRE_MISC,
+ .lock_mask = PLLRE_MISC_LOCK,
+ .lock_enable_bit_idx = PLLRE_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .iddq_reg = PLLRE_MISC,
+ .iddq_bit_idx = PLLRE_IDDQ_BIT,
+};
+
+/* Peripheral clock registers */
+
+static struct tegra_clk_periph_regs periph_l_regs = {
+ .enb_reg = CLK_OUT_ENB_L,
+ .enb_set_reg = CLK_OUT_ENB_SET_L,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_L,
+ .rst_reg = RST_DEVICES_L,
+ .rst_set_reg = RST_DEVICES_SET_L,
+ .rst_clr_reg = RST_DEVICES_CLR_L,
+};
+
+static struct tegra_clk_periph_regs periph_h_regs = {
+ .enb_reg = CLK_OUT_ENB_H,
+ .enb_set_reg = CLK_OUT_ENB_SET_H,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_H,
+ .rst_reg = RST_DEVICES_H,
+ .rst_set_reg = RST_DEVICES_SET_H,
+ .rst_clr_reg = RST_DEVICES_CLR_H,
+};
+
+static struct tegra_clk_periph_regs periph_u_regs = {
+ .enb_reg = CLK_OUT_ENB_U,
+ .enb_set_reg = CLK_OUT_ENB_SET_U,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_U,
+ .rst_reg = RST_DEVICES_U,
+ .rst_set_reg = RST_DEVICES_SET_U,
+ .rst_clr_reg = RST_DEVICES_CLR_U,
+};
+
+static struct tegra_clk_periph_regs periph_v_regs = {
+ .enb_reg = CLK_OUT_ENB_V,
+ .enb_set_reg = CLK_OUT_ENB_SET_V,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_V,
+ .rst_reg = RST_DEVICES_V,
+ .rst_set_reg = RST_DEVICES_SET_V,
+ .rst_clr_reg = RST_DEVICES_CLR_V,
+};
+
+static struct tegra_clk_periph_regs periph_w_regs = {
+ .enb_reg = CLK_OUT_ENB_W,
+ .enb_set_reg = CLK_OUT_ENB_SET_W,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_W,
+ .rst_reg = RST_DEVICES_W,
+ .rst_set_reg = RST_DEVICES_SET_W,
+ .rst_clr_reg = RST_DEVICES_CLR_W,
+};
+
+/* possible OSC frequencies in Hz */
+static unsigned long tegra114_input_freq[] = {
+ [0] = 13000000,
+ [1] = 16800000,
+ [4] = 19200000,
+ [5] = 38400000,
+ [8] = 12000000,
+ [9] = 48000000,
+ [12] = 26000000,
+};
+
+#define MASK(x) (BIT(x) - 1)
+
+#define TEGRA_INIT_DATA_MUX(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
+ 30, MASK(2), 0, 0, 8, 1, 0, _regs, _clk_num, \
+ periph_clk_enb_refcnt, _gate_flags, _clk_id, \
+ _parents##_idx, 0)
+
+#define TEGRA_INIT_DATA_MUX_FLAGS(_name, _con_id, _dev_id, _parents, _offset,\
+ _clk_num, _regs, _gate_flags, _clk_id, flags)\
+ TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
+ 30, MASK(2), 0, 0, 8, 1, 0, _regs, _clk_num, \
+ periph_clk_enb_refcnt, _gate_flags, _clk_id, \
+ _parents##_idx, flags)
+
+#define TEGRA_INIT_DATA_MUX8(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
+ 29, MASK(3), 0, 0, 8, 1, 0, _regs, _clk_num, \
+ periph_clk_enb_refcnt, _gate_flags, _clk_id, \
+ _parents##_idx, 0)
+
+#define TEGRA_INIT_DATA_INT(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
+ 30, MASK(2), 0, 0, 8, 1, TEGRA_DIVIDER_INT, _regs,\
+ _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+ _clk_id, _parents##_idx, 0)
+
+#define TEGRA_INIT_DATA_INT_FLAGS(_name, _con_id, _dev_id, _parents, _offset,\
+ _clk_num, _regs, _gate_flags, _clk_id, flags)\
+ TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
+ 30, MASK(2), 0, 0, 8, 1, TEGRA_DIVIDER_INT, _regs,\
+ _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+ _clk_id, _parents##_idx, flags)
+
+#define TEGRA_INIT_DATA_INT8(_name, _con_id, _dev_id, _parents, _offset,\
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
+ 29, MASK(3), 0, 0, 8, 1, TEGRA_DIVIDER_INT, _regs,\
+ _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+ _clk_id, _parents##_idx, 0)
+
+#define TEGRA_INIT_DATA_UART(_name, _con_id, _dev_id, _parents, _offset,\
+ _clk_num, _regs, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
+ 30, MASK(2), 0, 0, 16, 1, TEGRA_DIVIDER_UART, _regs,\
+ _clk_num, periph_clk_enb_refcnt, 0, _clk_id, \
+ _parents##_idx, 0)
+
+#define TEGRA_INIT_DATA_I2C(_name, _con_id, _dev_id, _parents, _offset,\
+ _clk_num, _regs, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
+ 30, MASK(2), 0, 0, 16, 0, 0, _regs, _clk_num, \
+ periph_clk_enb_refcnt, 0, _clk_id, _parents##_idx, 0)
+
+#define TEGRA_INIT_DATA_NODIV(_name, _con_id, _dev_id, _parents, _offset, \
+ _mux_shift, _mux_mask, _clk_num, _regs, \
+ _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
+ _mux_shift, _mux_mask, 0, 0, 0, 0, 0, _regs, \
+ _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+ _clk_id, _parents##_idx, 0)
+
+#define TEGRA_INIT_DATA_XUSB(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset, \
+ 29, MASK(3), 0, 0, 8, 1, TEGRA_DIVIDER_INT, _regs, \
+ _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+ _clk_id, _parents##_idx, 0)
+
+#define TEGRA_INIT_DATA_AUDIO(_name, _con_id, _dev_id, _offset, _clk_num,\
+ _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, mux_d_audio_clk, \
+ _offset, 16, 0xE01F, 0, 0, 8, 1, 0, _regs, _clk_num, \
+ periph_clk_enb_refcnt, _gate_flags, _clk_id, \
+ mux_d_audio_clk_idx, 0)
+
+enum tegra114_clk {
+ rtc = 4, timer = 5, uarta = 6, sdmmc2 = 9, i2s1 = 11, i2c1 = 12,
+ ndflash = 13, sdmmc1 = 14, sdmmc4 = 15, pwm = 17, i2s2 = 18, epp = 19,
+ gr_2d = 21, usbd = 22, isp = 23, gr_3d = 24, disp2 = 26, disp1 = 27,
+ host1x = 28, vcp = 29, i2s0 = 30, apbdma = 34, kbc = 36, kfuse = 40,
+ sbc1 = 41, nor = 42, sbc2 = 44, sbc3 = 46, i2c5 = 47, dsia = 48,
+ mipi = 50, hdmi = 51, csi = 52, i2c2 = 54, uartc = 55, mipi_cal = 56,
+ emc, usb2, usb3, vde = 61, bsea = 62, bsev = 63, uartd = 65,
+ i2c3 = 67, sbc4 = 68, sdmmc3 = 69, owr = 71, csite = 73,
+ la = 76, trace = 77, soc_therm = 78, dtv = 79, ndspeed = 80,
+ i2cslow = 81, dsib = 82, tsec = 83, xusb_host = 89, msenc = 91,
+ csus = 92, mselect = 99, tsensor = 100, i2s3 = 101, i2s4 = 102,
+ i2c4 = 103, sbc5 = 104, sbc6 = 105, d_audio, apbif = 107, dam0, dam1,
+ dam2, hda2codec_2x = 111, audio0_2x = 113, audio1_2x, audio2_2x,
+ audio3_2x, audio4_2x, spdif_2x, actmon = 119, extern1 = 120,
+ extern2 = 121, extern3 = 122, hda = 125, se = 127, hda2hdmi = 128,
+ cilab = 144, cilcd = 145, cile = 146, dsialp = 147, dsiblp = 148,
+ dds = 150, dp2 = 152, amx = 153, adx = 154, xusb_ss = 156, uartb = 192,
+ vfir, spdif_in, spdif_out, vi, vi_sensor, fuse, fuse_burn, clk_32k,
+ clk_m, clk_m_div2, clk_m_div4, pll_ref, pll_c, pll_c_out1, pll_c2,
+ pll_c3, pll_m, pll_m_out1, pll_p, pll_p_out1, pll_p_out2, pll_p_out3,
+ pll_p_out4, pll_a, pll_a_out0, pll_d, pll_d_out0, pll_d2, pll_d2_out0,
+ pll_u, pll_u_480M, pll_u_60M, pll_u_48M, pll_u_12M, pll_x, pll_x_out0,
+ pll_re_vco, pll_re_out, pll_e_out0, spdif_in_sync, i2s0_sync,
+ i2s1_sync, i2s2_sync, i2s3_sync, i2s4_sync, vimclk_sync, audio0,
+ audio1, audio2, audio3, audio4, spdif, clk_out_1, clk_out_2, clk_out_3,
+ blink, xusb_host_src = 252, xusb_falcon_src, xusb_fs_src, xusb_ss_src,
+ xusb_dev_src, xusb_dev, xusb_hs_src, sclk, hclk, pclk, cclk_g, cclk_lp,
+
+ /* Mux clocks */
+
+ audio0_mux = 300, audio1_mux, audio2_mux, audio3_mux, audio4_mux,
+ spdif_mux, clk_out_1_mux, clk_out_2_mux, clk_out_3_mux, dsia_mux,
+ dsib_mux, clk_max,
+};
+
+struct utmi_clk_param {
+ /* Oscillator Frequency in Hz */
+ u32 osc_frequency;
+ /* UTMIP PLL Enable Delay Count */
+ u8 enable_delay_count;
+ /* UTMIP PLL Stable count */
+ u8 stable_count;
+ /* UTMIP PLL Active delay count */
+ u8 active_delay_count;
+ /* UTMIP PLL Xtal frequency count */
+ u8 xtal_freq_count;
+};
+
+static const struct utmi_clk_param utmi_parameters[] = {
+ {.osc_frequency = 13000000, .enable_delay_count = 0x02,
+ .stable_count = 0x33, .active_delay_count = 0x05,
+ .xtal_freq_count = 0x7F},
+ {.osc_frequency = 19200000, .enable_delay_count = 0x03,
+ .stable_count = 0x4B, .active_delay_count = 0x06,
+ .xtal_freq_count = 0xBB},
+ {.osc_frequency = 12000000, .enable_delay_count = 0x02,
+ .stable_count = 0x2F, .active_delay_count = 0x04,
+ .xtal_freq_count = 0x76},
+ {.osc_frequency = 26000000, .enable_delay_count = 0x04,
+ .stable_count = 0x66, .active_delay_count = 0x09,
+ .xtal_freq_count = 0xFE},
+ {.osc_frequency = 16800000, .enable_delay_count = 0x03,
+ .stable_count = 0x41, .active_delay_count = 0x0A,
+ .xtal_freq_count = 0xA4},
+};
+
+/* peripheral mux definitions */
+
+#define MUX_I2S_SPDIF(_id) \
+static const char *mux_pllaout0_##_id##_2x_pllp_clkm[] = { "pll_a_out0", \
+ #_id, "pll_p",\
+ "clk_m"};
+MUX_I2S_SPDIF(audio0)
+MUX_I2S_SPDIF(audio1)
+MUX_I2S_SPDIF(audio2)
+MUX_I2S_SPDIF(audio3)
+MUX_I2S_SPDIF(audio4)
+MUX_I2S_SPDIF(audio)
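+
+/*
+ * Each MUX_I2S_SPDIF() invocation expands to a parent list such as:
+ * static const char *mux_pllaout0_audio0_2x_pllp_clkm[] =
+ * { "pll_a_out0", "audio0", "pll_p", "clk_m" };
+ */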
+
+#define mux_pllaout0_audio0_2x_pllp_clkm_idx NULL
+#define mux_pllaout0_audio1_2x_pllp_clkm_idx NULL
+#define mux_pllaout0_audio2_2x_pllp_clkm_idx NULL
+#define mux_pllaout0_audio3_2x_pllp_clkm_idx NULL
+#define mux_pllaout0_audio4_2x_pllp_clkm_idx NULL
+#define mux_pllaout0_audio_2x_pllp_clkm_idx NULL
+
+static const char *mux_pllp_pllc_pllm_clkm[] = {
+ "pll_p", "pll_c", "pll_m", "clk_m"
+};
+#define mux_pllp_pllc_pllm_clkm_idx NULL
+
+static const char *mux_pllp_pllc_pllm[] = { "pll_p", "pll_c", "pll_m" };
+#define mux_pllp_pllc_pllm_idx NULL
+
+static const char *mux_pllp_pllc_clk32_clkm[] = {
+ "pll_p", "pll_c", "clk_32k", "clk_m"
+};
+#define mux_pllp_pllc_clk32_clkm_idx NULL
+
+static const char *mux_plla_pllc_pllp_clkm[] = {
+ "pll_a_out0", "pll_c", "pll_p", "clk_m"
+};
+#define mux_plla_pllc_pllp_clkm_idx mux_pllp_pllc_pllm_clkm_idx
+
+static const char *mux_pllp_pllc2_c_c3_pllm_clkm[] = {
+ "pll_p", "pll_c2", "pll_c", "pll_c3", "pll_m", "clk_m"
+};
+static u32 mux_pllp_pllc2_c_c3_pllm_clkm_idx[] = {
+ [0] = 0, [1] = 1, [2] = 2, [3] = 3, [4] = 4, [5] = 6,
+};
+
+static const char *mux_pllp_clkm[] = {
+ "pll_p", "clk_m"
+};
+static u32 mux_pllp_clkm_idx[] = {
+ [0] = 0, [1] = 3,
+};
+
+static const char *mux_pllm_pllc2_c_c3_pllp_plla[] = {
+ "pll_m", "pll_c2", "pll_c", "pll_c3", "pll_p", "pll_a_out0"
+};
+#define mux_pllm_pllc2_c_c3_pllp_plla_idx mux_pllp_pllc2_c_c3_pllm_clkm_idx
+
+static const char *mux_pllp_pllm_plld_plla_pllc_plld2_clkm[] = {
+ "pll_p", "pll_m", "pll_d_out0", "pll_a_out0", "pll_c",
+ "pll_d2_out0", "clk_m"
+};
+#define mux_pllp_pllm_plld_plla_pllc_plld2_clkm_idx NULL
+
+static const char *mux_pllm_pllc_pllp_plla[] = {
+ "pll_m", "pll_c", "pll_p", "pll_a_out0"
+};
+#define mux_pllm_pllc_pllp_plla_idx mux_pllp_pllc_pllm_clkm_idx
+
+static const char *mux_pllp_pllc_clkm[] = {
+ "pll_p", "pll_c", "pll_m"
+};
+static u32 mux_pllp_pllc_clkm_idx[] = {
+ [0] = 0, [1] = 1, [2] = 3,
+};
+
+static const char *mux_pllp_pllc_clkm_clk32[] = {
+ "pll_p", "pll_c", "clk_m", "clk_32k"
+};
+#define mux_pllp_pllc_clkm_clk32_idx NULL
+
+static const char *mux_plla_clk32_pllp_clkm_plle[] = {
+ "pll_a_out0", "clk_32k", "pll_p", "clk_m", "pll_e_out0"
+};
+#define mux_plla_clk32_pllp_clkm_plle_idx NULL
+
+static const char *mux_clkm_pllp_pllc_pllre[] = {
+ "clk_m", "pll_p", "pll_c", "pll_re_out"
+};
+static u32 mux_clkm_pllp_pllc_pllre_idx[] = {
+ [0] = 0, [1] = 1, [2] = 3, [3] = 5,
+};
+
+static const char *mux_clkm_48M_pllp_480M[] = {
+ "clk_m", "pll_u_48M", "pll_p", "pll_u_480M"
+};
+#define mux_clkm_48M_pllp_480M_idx NULL
+
+static const char *mux_clkm_pllre_clk32_480M_pllc_ref[] = {
+ "clk_m", "pll_re_out", "clk_32k", "pll_u_480M", "pll_c", "pll_ref"
+};
+static u32 mux_clkm_pllre_clk32_480M_pllc_ref_idx[] = {
+ [0] = 0, [1] = 1, [2] = 3, [3] = 3, [4] = 4, [5] = 7,
+};
+
+static const char *mux_plld_out0_plld2_out0[] = {
+ "pll_d_out0", "pll_d2_out0",
+};
+#define mux_plld_out0_plld2_out0_idx NULL
+
+static const char *mux_d_audio_clk[] = {
+ "pll_a_out0", "pll_p", "clk_m", "spdif_in_sync", "i2s0_sync",
+ "i2s1_sync", "i2s2_sync", "i2s3_sync", "i2s4_sync", "vimclk_sync",
+};
+static u32 mux_d_audio_clk_idx[] = {
+ [0] = 0, [1] = 0x8000, [2] = 0xc000, [3] = 0xE000, [4] = 0xE001,
+ [5] = 0xE002, [6] = 0xE003, [7] = 0xE004, [8] = 0xE005, [9] = 0xE007,
+};
+
+static const char *mux_pllmcp_clkm[] = {
+ "pll_m_out0", "pll_c_out0", "pll_p_out0", "clk_m", "pll_m_ud",
+};
+
+static const struct clk_div_table pll_re_div_table[] = {
+ { .val = 0, .div = 1 },
+ { .val = 1, .div = 2 },
+ { .val = 2, .div = 3 },
+ { .val = 3, .div = 4 },
+ { .val = 4, .div = 5 },
+ { .val = 5, .div = 6 },
+ { .val = 0, .div = 0 },
+};
+
+static struct clk *clks[clk_max];
+static struct clk_onecell_data clk_data;
+
+static unsigned long osc_freq;
+static unsigned long pll_ref_freq;
+
+static int __init tegra114_osc_clk_init(void __iomem *clk_base)
+{
+ struct clk *clk;
+ u32 val, pll_ref_div;
+
+ val = readl_relaxed(clk_base + OSC_CTRL);
+
+ osc_freq = tegra114_input_freq[val >> OSC_CTRL_OSC_FREQ_SHIFT];
+ if (!osc_freq) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ /* clk_m */
+ clk = clk_register_fixed_rate(NULL, "clk_m", NULL, CLK_IS_ROOT,
+ osc_freq);
+ clk_register_clkdev(clk, "clk_m", NULL);
+ clks[clk_m] = clk;
+
+ /* pll_ref */
+ val = (val >> OSC_CTRL_PLL_REF_DIV_SHIFT) & 3;
+ pll_ref_div = 1 << val;
+ clk = clk_register_fixed_factor(NULL, "pll_ref", "clk_m",
+ CLK_SET_RATE_PARENT, 1, pll_ref_div);
+ clk_register_clkdev(clk, "pll_ref", NULL);
+ clks[pll_ref] = clk;
+
+ pll_ref_freq = osc_freq / pll_ref_div;
+
+ return 0;
+}
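+
+/*
+ * Example: an OSC_CTRL value with OSC_FREQ = 8 and PLL_REF_DIV = 1 yields a
+ * 12 MHz clk_m and a pll_ref of 12 MHz / 2 = 6 MHz.
+ */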
+
+static void __init tegra114_fixed_clk_init(void __iomem *clk_base)
+{
+ struct clk *clk;
+
+ /* clk_32k */
+ clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, CLK_IS_ROOT,
+ 32768);
+ clk_register_clkdev(clk, "clk_32k", NULL);
+ clks[clk_32k] = clk;
+
+ /* clk_m_div2 */
+ clk = clk_register_fixed_factor(NULL, "clk_m_div2", "clk_m",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "clk_m_div2", NULL);
+ clks[clk_m_div2] = clk;
+
+ /* clk_m_div4 */
+ clk = clk_register_fixed_factor(NULL, "clk_m_div4", "clk_m",
+ CLK_SET_RATE_PARENT, 1, 4);
+ clk_register_clkdev(clk, "clk_m_div4", NULL);
+ clks[clk_m_div4] = clk;
+}
+
+static __init void tegra114_utmi_param_configure(void __iomem *clk_base)
+{
+ u32 reg;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) {
+ if (osc_freq == utmi_parameters[i].osc_frequency)
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(utmi_parameters)) {
+ pr_err("%s: Unexpected oscillator freq %lu\n", __func__,
+ osc_freq);
+ return;
+ }
+
+ reg = readl_relaxed(clk_base + UTMIP_PLL_CFG2);
+
+ /* Program UTMIP PLL stable and active counts */
+ /* [FIXME] arclk_rst.h says WRONG! This should be 1ms -> 0x50 Check! */
+ reg &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0);
+ reg |= UTMIP_PLL_CFG2_STABLE_COUNT(utmi_parameters[i].stable_count);
+
+ reg &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0);
+
+ reg |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(utmi_parameters[i].
+ active_delay_count);
+
+ /* Remove power downs from UTMIP PLL control bits */
+ reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN;
+
+ writel_relaxed(reg, clk_base + UTMIP_PLL_CFG2);
+
+ /* Program UTMIP PLL delay and oscillator frequency counts */
+ reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1);
+ reg &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0);
+
+ reg |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(utmi_parameters[i].
+ enable_delay_count);
+
+ reg &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0);
+ reg |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(utmi_parameters[i].
+ xtal_freq_count);
+
+ /* Remove power downs from UTMIP PLL control bits */
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP;
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN;
+ writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1);
+
+ /* Setup HW control of UTMIPLL */
+ reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0);
+ reg |= UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET;
+ reg &= ~UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL;
+ reg |= UTMIPLL_HW_PWRDN_CFG0_SEQ_START_STATE;
+ writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0);
+
+ reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1);
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP;
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
+ writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1);
+
+ udelay(1);
+
+ /*
+ * Set up SW override of UTMIPLL, assuming USB2.0 ports are
+ * assigned to USB2.
+ */
+ reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0);
+ reg |= UTMIPLL_HW_PWRDN_CFG0_IDDQ_SWCTL;
+ reg &= ~UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE;
+ writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0);
+
+ udelay(1);
+
+ /* Enable HW control of UTMIPLL */
+ reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0);
+ reg |= UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE;
+ writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0);
+}
+
+static void __init _clip_vco_min(struct tegra_clk_pll_params *pll_params)
+{
+ pll_params->vco_min =
+ DIV_ROUND_UP(pll_params->vco_min, pll_ref_freq) * pll_ref_freq;
+}
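+
+/*
+ * Example: with a 19.2 MHz pll_ref, a nominal vco_min of 600 MHz is rounded
+ * up to DIV_ROUND_UP(600000000, 19200000) * 19200000 = 614.4 MHz.
+ */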
+
+static int __init _setup_dynamic_ramp(struct tegra_clk_pll_params *pll_params,
+ void __iomem *clk_base)
+{
+ u32 val;
+ u32 step_a, step_b;
+
+ switch (pll_ref_freq) {
+ case 12000000:
+ case 13000000:
+ case 26000000:
+ step_a = 0x2B;
+ step_b = 0x0B;
+ break;
+ case 16800000:
+ step_a = 0x1A;
+ step_b = 0x09;
+ break;
+ case 19200000:
+ step_a = 0x12;
+ step_b = 0x08;
+ break;
+ default:
+ pr_err("%s: Unexpected reference rate %lu\n",
+ __func__, pll_ref_freq);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ val = step_a << pll_params->stepa_shift;
+ val |= step_b << pll_params->stepb_shift;
+ writel_relaxed(val, clk_base + pll_params->dyn_ramp_reg);
+
+ return 0;
+}
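+
+/*
+ * Example: with a 19.2 MHz reference this writes step_a = 0x12 and
+ * step_b = 0x08 into the PLL's dyn_ramp_reg (PLLC_MISC2 for PLLC,
+ * PLLX_MISC2 for PLLX) at the stepa_shift/stepb_shift bit positions.
+ */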
+
+static void __init _init_iddq(struct tegra_clk_pll_params *pll_params,
+ void __iomem *clk_base)
+{
+ u32 val, val_iddq;
+
+ val = readl_relaxed(clk_base + pll_params->base_reg);
+ val_iddq = readl_relaxed(clk_base + pll_params->iddq_reg);
+
+ if (val & BIT(30))
+ WARN_ON(val_iddq & BIT(pll_params->iddq_bit_idx));
+ else {
+ val_iddq |= BIT(pll_params->iddq_bit_idx);
+ writel_relaxed(val_iddq, clk_base + pll_params->iddq_reg);
+ }
+}
+
+static void __init tegra114_pll_init(void __iomem *clk_base,
+ void __iomem *pmc)
+{
+ u32 val;
+ struct clk *clk;
+
+ /* PLLC */
+ _clip_vco_min(&pll_c_params);
+ if (_setup_dynamic_ramp(&pll_c_params, clk_base) >= 0) {
+ _init_iddq(&pll_c_params, clk_base);
+ clk = tegra_clk_register_pllxc("pll_c", "pll_ref", clk_base,
+ pmc, 0, 0, &pll_c_params, TEGRA_PLL_USE_LOCK,
+ pll_c_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_c", NULL);
+ clks[pll_c] = clk;
+
+ /* PLLC_OUT1 */
+ clk = tegra_clk_register_divider("pll_c_out1_div", "pll_c",
+ clk_base + PLLC_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_c_out1", "pll_c_out1_div",
+ clk_base + PLLC_OUT, 1, 0,
+ CLK_SET_RATE_PARENT, 0, NULL);
+ clk_register_clkdev(clk, "pll_c_out1", NULL);
+ clks[pll_c_out1] = clk;
+ }
+
+ /* PLLC2 */
+ _clip_vco_min(&pll_c2_params);
+ clk = tegra_clk_register_pllc("pll_c2", "pll_ref", clk_base, pmc, 0, 0,
+ &pll_c2_params, TEGRA_PLL_USE_LOCK,
+ pll_cx_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_c2", NULL);
+ clks[pll_c2] = clk;
+
+ /* PLLC3 */
+ _clip_vco_min(&pll_c3_params);
+ clk = tegra_clk_register_pllc("pll_c3", "pll_ref", clk_base, pmc, 0, 0,
+ &pll_c3_params, TEGRA_PLL_USE_LOCK,
+ pll_cx_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_c3", NULL);
+ clks[pll_c3] = clk;
+
+ /* PLLP */
+ clk = tegra_clk_register_pll("pll_p", "pll_ref", clk_base, pmc, 0,
+ 408000000, &pll_p_params,
+ TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK,
+ pll_p_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_p", NULL);
+ clks[pll_p] = clk;
+
+ /* PLLP_OUT1 */
+ clk = tegra_clk_register_divider("pll_p_out1_div", "pll_p",
+ clk_base + PLLP_OUTA, 0, TEGRA_DIVIDER_FIXED |
+ TEGRA_DIVIDER_ROUND_UP, 8, 8, 1, &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out1", "pll_p_out1_div",
+ clk_base + PLLP_OUTA, 1, 0,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out1", NULL);
+ clks[pll_p_out1] = clk;
+
+ /* PLLP_OUT2 */
+ clk = tegra_clk_register_divider("pll_p_out2_div", "pll_p",
+ clk_base + PLLP_OUTA, 0, TEGRA_DIVIDER_FIXED |
+ TEGRA_DIVIDER_ROUND_UP, 24, 8, 1,
+ &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out2", "pll_p_out2_div",
+ clk_base + PLLP_OUTA, 17, 16,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out2", NULL);
+ clks[pll_p_out2] = clk;
+
+ /* PLLP_OUT3 */
+ clk = tegra_clk_register_divider("pll_p_out3_div", "pll_p",
+ clk_base + PLLP_OUTB, 0, TEGRA_DIVIDER_FIXED |
+ TEGRA_DIVIDER_ROUND_UP, 8, 8, 1, &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out3", "pll_p_out3_div",
+ clk_base + PLLP_OUTB, 1, 0,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out3", NULL);
+ clks[pll_p_out3] = clk;
+
+ /* PLLP_OUT4 */
+ clk = tegra_clk_register_divider("pll_p_out4_div", "pll_p",
+ clk_base + PLLP_OUTB, 0, TEGRA_DIVIDER_FIXED |
+ TEGRA_DIVIDER_ROUND_UP, 24, 8, 1,
+ &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out4", "pll_p_out4_div",
+ clk_base + PLLP_OUTB, 17, 16,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out4", NULL);
+ clks[pll_p_out4] = clk;
+
+ /* PLLM */
+ _clip_vco_min(&pll_m_params);
+ clk = tegra_clk_register_pllm("pll_m", "pll_ref", clk_base, pmc,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, 0,
+ &pll_m_params, TEGRA_PLL_USE_LOCK,
+ pll_m_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_m", NULL);
+ clks[pll_m] = clk;
+
+ /* PLLM_OUT1 */
+ clk = tegra_clk_register_divider("pll_m_out1_div", "pll_m",
+ clk_base + PLLM_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
+ clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
+ CLK_SET_RATE_PARENT, 0, NULL);
+ clk_register_clkdev(clk, "pll_m_out1", NULL);
+ clks[pll_m_out1] = clk;
+
+ /* PLLM_UD */
+ clk = clk_register_fixed_factor(NULL, "pll_m_ud", "pll_m",
+ CLK_SET_RATE_PARENT, 1, 1);
+
+ /* PLLX */
+ _clip_vco_min(&pll_x_params);
+ if (_setup_dynamic_ramp(&pll_x_params, clk_base) >= 0) {
+ _init_iddq(&pll_x_params, clk_base);
+ clk = tegra_clk_register_pllxc("pll_x", "pll_ref", clk_base,
+ pmc, CLK_IGNORE_UNUSED, 0, &pll_x_params,
+ TEGRA_PLL_USE_LOCK, pll_x_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_x", NULL);
+ clks[pll_x] = clk;
+ }
+
+ /* PLLX_OUT0 */
+ clk = clk_register_fixed_factor(NULL, "pll_x_out0", "pll_x",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "pll_x_out0", NULL);
+ clks[pll_x_out0] = clk;
+
+ /* PLLU */
+ val = readl(clk_base + pll_u_params.base_reg);
+ val &= ~BIT(24); /* disable PLLU_OVERRIDE */
+ writel(val, clk_base + pll_u_params.base_reg);
+
+ clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, pmc, 0,
+ 0, &pll_u_params, TEGRA_PLLU |
+ TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
+ TEGRA_PLL_USE_LOCK, pll_u_freq_table, &pll_u_lock);
+ clk_register_clkdev(clk, "pll_u", NULL);
+ clks[pll_u] = clk;
+
+ tegra114_utmi_param_configure(clk_base);
+
+ /* PLLU_480M */
+ clk = clk_register_gate(NULL, "pll_u_480M", "pll_u",
+ CLK_SET_RATE_PARENT, clk_base + PLLU_BASE,
+ 22, 0, &pll_u_lock);
+ clk_register_clkdev(clk, "pll_u_480M", NULL);
+ clks[pll_u_480M] = clk;
+
+ /* PLLU_60M */
+ clk = clk_register_fixed_factor(NULL, "pll_u_60M", "pll_u",
+ CLK_SET_RATE_PARENT, 1, 8);
+ clk_register_clkdev(clk, "pll_u_60M", NULL);
+ clks[pll_u_60M] = clk;
+
+ /* PLLU_48M */
+ clk = clk_register_fixed_factor(NULL, "pll_u_48M", "pll_u",
+ CLK_SET_RATE_PARENT, 1, 10);
+ clk_register_clkdev(clk, "pll_u_48M", NULL);
+ clks[pll_u_48M] = clk;
+
+ /* PLLU_12M */
+ clk = clk_register_fixed_factor(NULL, "pll_u_12M", "pll_u",
+ CLK_SET_RATE_PARENT, 1, 40);
+ clk_register_clkdev(clk, "pll_u_12M", NULL);
+ clks[pll_u_12M] = clk;
+
+ /* PLLD */
+ clk = tegra_clk_register_pll("pll_d", "pll_ref", clk_base, pmc, 0,
+ 0, &pll_d_params,
+ TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
+ TEGRA_PLL_USE_LOCK, pll_d_freq_table, &pll_d_lock);
+ clk_register_clkdev(clk, "pll_d", NULL);
+ clks[pll_d] = clk;
+
+ /* PLLD_OUT0 */
+ clk = clk_register_fixed_factor(NULL, "pll_d_out0", "pll_d",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "pll_d_out0", NULL);
+ clks[pll_d_out0] = clk;
+
+ /* PLLD2 */
+ clk = tegra_clk_register_pll("pll_d2", "pll_ref", clk_base, pmc, 0,
+ 0, &pll_d2_params,
+ TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
+ TEGRA_PLL_USE_LOCK, pll_d_freq_table, &pll_d2_lock);
+ clk_register_clkdev(clk, "pll_d2", NULL);
+ clks[pll_d2] = clk;
+
+ /* PLLD2_OUT0 */
+ clk = clk_register_fixed_factor(NULL, "pll_d2_out0", "pll_d2",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "pll_d2_out0", NULL);
+ clks[pll_d2_out0] = clk;
+
+ /* PLLA */
+ clk = tegra_clk_register_pll("pll_a", "pll_p_out1", clk_base, pmc, 0,
+ 0, &pll_a_params, TEGRA_PLL_HAS_CPCON |
+ TEGRA_PLL_USE_LOCK, pll_a_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_a", NULL);
+ clks[pll_a] = clk;
+
+ /* PLLA_OUT0 */
+ clk = tegra_clk_register_divider("pll_a_out0_div", "pll_a",
+ clk_base + PLLA_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_a_out0", "pll_a_out0_div",
+ clk_base + PLLA_OUT, 1, 0, CLK_IGNORE_UNUSED |
+ CLK_SET_RATE_PARENT, 0, NULL);
+ clk_register_clkdev(clk, "pll_a_out0", NULL);
+ clks[pll_a_out0] = clk;
+
+ /* PLLRE */
+ _clip_vco_min(&pll_re_vco_params);
+ clk = tegra_clk_register_pllre("pll_re_vco", "pll_ref", clk_base, pmc,
+ 0, 0, &pll_re_vco_params, TEGRA_PLL_USE_LOCK,
+ NULL, &pll_re_lock, pll_ref_freq);
+ clk_register_clkdev(clk, "pll_re_vco", NULL);
+ clks[pll_re_vco] = clk;
+
+ clk = clk_register_divider_table(NULL, "pll_re_out", "pll_re_vco", 0,
+ clk_base + PLLRE_BASE, 16, 4, 0,
+ pll_re_div_table, &pll_re_lock);
+ clk_register_clkdev(clk, "pll_re_out", NULL);
+ clks[pll_re_out] = clk;
+
+ /* PLLE */
+ clk = tegra_clk_register_plle_tegra114("pll_e_out0", "pll_re_vco",
+ clk_base, 0, 100000000, &pll_e_params,
+ pll_e_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_e_out0", NULL);
+ clks[pll_e_out0] = clk;
+}
+
+static const char *mux_audio_sync_clk[] = { "spdif_in_sync", "i2s0_sync",
+ "i2s1_sync", "i2s2_sync", "i2s3_sync", "i2s4_sync", "vimclk_sync",
+};
+
+static const char *clk_out1_parents[] = { "clk_m", "clk_m_div2",
+ "clk_m_div4", "extern1",
+};
+
+static const char *clk_out2_parents[] = { "clk_m", "clk_m_div2",
+ "clk_m_div4", "extern2",
+};
+
+static const char *clk_out3_parents[] = { "clk_m", "clk_m_div2",
+ "clk_m_div4", "extern3",
+};
+
+static void __init tegra114_audio_clk_init(void __iomem *clk_base)
+{
+ struct clk *clk;
+
+ /* spdif_in_sync */
+ clk = tegra_clk_register_sync_source("spdif_in_sync", 24000000,
+ 24000000);
+ clk_register_clkdev(clk, "spdif_in_sync", NULL);
+ clks[spdif_in_sync] = clk;
+
+ /* i2s0_sync */
+ clk = tegra_clk_register_sync_source("i2s0_sync", 24000000, 24000000);
+ clk_register_clkdev(clk, "i2s0_sync", NULL);
+ clks[i2s0_sync] = clk;
+
+ /* i2s1_sync */
+ clk = tegra_clk_register_sync_source("i2s1_sync", 24000000, 24000000);
+ clk_register_clkdev(clk, "i2s1_sync", NULL);
+ clks[i2s1_sync] = clk;
+
+ /* i2s2_sync */
+ clk = tegra_clk_register_sync_source("i2s2_sync", 24000000, 24000000);
+ clk_register_clkdev(clk, "i2s2_sync", NULL);
+ clks[i2s2_sync] = clk;
+
+ /* i2s3_sync */
+ clk = tegra_clk_register_sync_source("i2s3_sync", 24000000, 24000000);
+ clk_register_clkdev(clk, "i2s3_sync", NULL);
+ clks[i2s3_sync] = clk;
+
+ /* i2s4_sync */
+ clk = tegra_clk_register_sync_source("i2s4_sync", 24000000, 24000000);
+ clk_register_clkdev(clk, "i2s4_sync", NULL);
+ clks[i2s4_sync] = clk;
+
+ /* vimclk_sync */
+ clk = tegra_clk_register_sync_source("vimclk_sync", 24000000, 24000000);
+ clk_register_clkdev(clk, "vimclk_sync", NULL);
+ clks[vimclk_sync] = clk;
+
+ /* audio0 */
+ clk = clk_register_mux(NULL, "audio0_mux", mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk), 0,
+ clk_base + AUDIO_SYNC_CLK_I2S0, 0, 3, 0,
+ NULL);
+ clks[audio0_mux] = clk;
+ clk = clk_register_gate(NULL, "audio0", "audio0_mux", 0,
+ clk_base + AUDIO_SYNC_CLK_I2S0, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "audio0", NULL);
+ clks[audio0] = clk;
+
+ /* audio1 */
+ clk = clk_register_mux(NULL, "audio1_mux", mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk), 0,
+ clk_base + AUDIO_SYNC_CLK_I2S1, 0, 3, 0,
+ NULL);
+ clks[audio1_mux] = clk;
+ clk = clk_register_gate(NULL, "audio1", "audio1_mux", 0,
+ clk_base + AUDIO_SYNC_CLK_I2S1, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "audio1", NULL);
+ clks[audio1] = clk;
+
+ /* audio2 */
+ clk = clk_register_mux(NULL, "audio2_mux", mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk), 0,
+ clk_base + AUDIO_SYNC_CLK_I2S2, 0, 3, 0,
+ NULL);
+ clks[audio2_mux] = clk;
+ clk = clk_register_gate(NULL, "audio2", "audio2_mux", 0,
+ clk_base + AUDIO_SYNC_CLK_I2S2, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "audio2", NULL);
+ clks[audio2] = clk;
+
+ /* audio3 */
+ clk = clk_register_mux(NULL, "audio3_mux", mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk), 0,
+ clk_base + AUDIO_SYNC_CLK_I2S3, 0, 3, 0,
+ NULL);
+ clks[audio3_mux] = clk;
+ clk = clk_register_gate(NULL, "audio3", "audio3_mux", 0,
+ clk_base + AUDIO_SYNC_CLK_I2S3, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "audio3", NULL);
+ clks[audio3] = clk;
+
+ /* audio4 */
+ clk = clk_register_mux(NULL, "audio4_mux", mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk), 0,
+ clk_base + AUDIO_SYNC_CLK_I2S4, 0, 3, 0,
+ NULL);
+ clks[audio4_mux] = clk;
+ clk = clk_register_gate(NULL, "audio4", "audio4_mux", 0,
+ clk_base + AUDIO_SYNC_CLK_I2S4, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "audio4", NULL);
+ clks[audio4] = clk;
+
+ /* spdif */
+ clk = clk_register_mux(NULL, "spdif_mux", mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk), 0,
+ clk_base + AUDIO_SYNC_CLK_SPDIF, 0, 3, 0,
+ NULL);
+ clks[spdif_mux] = clk;
+ clk = clk_register_gate(NULL, "spdif", "spdif_mux", 0,
+ clk_base + AUDIO_SYNC_CLK_SPDIF, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "spdif", NULL);
+ clks[spdif] = clk;
+
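+ /*
+ * Each *_2x clock below follows the same pattern: a 2:1 fixed-factor
+ * doubler, a one-bit divider in AUDIO_SYNC_DOUBLER (bits 24..29) that
+ * can divide the rate back down, and a peripheral gate on the matching
+ * enable bit.
+ */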
+ /* audio0_2x */
+ clk = clk_register_fixed_factor(NULL, "audio0_doubler", "audio0",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider("audio0_div", "audio0_doubler",
+ clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 24, 1,
+ 0, &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate("audio0_2x", "audio0_div",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 113, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "audio0_2x", NULL);
+ clks[audio0_2x] = clk;
+
+ /* audio1_2x */
+ clk = clk_register_fixed_factor(NULL, "audio1_doubler", "audio1",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider("audio1_div", "audio1_doubler",
+ clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 25, 1,
+ 0, &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate("audio1_2x", "audio1_div",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 114, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "audio1_2x", NULL);
+ clks[audio1_2x] = clk;
+
+ /* audio2_2x */
+ clk = clk_register_fixed_factor(NULL, "audio2_doubler", "audio2",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider("audio2_div", "audio2_doubler",
+ clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 26, 1,
+ 0, &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate("audio2_2x", "audio2_div",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 115, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "audio2_2x", NULL);
+ clks[audio2_2x] = clk;
+
+ /* audio3_2x */
+ clk = clk_register_fixed_factor(NULL, "audio3_doubler", "audio3",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider("audio3_div", "audio3_doubler",
+ clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 27, 1,
+ 0, &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate("audio3_2x", "audio3_div",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 116, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "audio3_2x", NULL);
+ clks[audio3_2x] = clk;
+
+ /* audio4_2x */
+ clk = clk_register_fixed_factor(NULL, "audio4_doubler", "audio4",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider("audio4_div", "audio4_doubler",
+ clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 28, 1,
+ 0, &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate("audio4_2x", "audio4_div",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 117, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "audio4_2x", NULL);
+ clks[audio4_2x] = clk;
+
+ /* spdif_2x */
+ clk = clk_register_fixed_factor(NULL, "spdif_doubler", "spdif",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider("spdif_div", "spdif_doubler",
+ clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 29, 1,
+ 0, &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate("spdif_2x", "spdif_div",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 118,
+ &periph_v_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "spdif_2x", NULL);
+ clks[spdif_2x] = clk;
+}
+
+static void __init tegra114_pmc_clk_init(void __iomem *pmc_base)
+{
+ struct clk *clk;
+
+ /* clk_out_1 */
+ clk = clk_register_mux(NULL, "clk_out_1_mux", clk_out1_parents,
+ ARRAY_SIZE(clk_out1_parents), 0,
+ pmc_base + PMC_CLK_OUT_CNTRL, 6, 3, 0,
+ &clk_out_lock);
+ clks[clk_out_1_mux] = clk;
+ clk = clk_register_gate(NULL, "clk_out_1", "clk_out_1_mux", 0,
+ pmc_base + PMC_CLK_OUT_CNTRL, 2, 0,
+ &clk_out_lock);
+ clk_register_clkdev(clk, "extern1", "clk_out_1");
+ clks[clk_out_1] = clk;
+
+ /* clk_out_2 */
+ clk = clk_register_mux(NULL, "clk_out_2_mux", clk_out2_parents,
+ ARRAY_SIZE(clk_out2_parents), 0,
+ pmc_base + PMC_CLK_OUT_CNTRL, 14, 3, 0,
+ &clk_out_lock);
+ clks[clk_out_2_mux] = clk;
+ clk = clk_register_gate(NULL, "clk_out_2", "clk_out_2_mux", 0,
+ pmc_base + PMC_CLK_OUT_CNTRL, 10, 0,
+ &clk_out_lock);
+ clk_register_clkdev(clk, "extern2", "clk_out_2");
+ clks[clk_out_2] = clk;
+
+ /* clk_out_3 */
+ clk = clk_register_mux(NULL, "clk_out_3_mux", clk_out3_parents,
+ ARRAY_SIZE(clk_out3_parents), 0,
+ pmc_base + PMC_CLK_OUT_CNTRL, 22, 3, 0,
+ &clk_out_lock);
+ clks[clk_out_3_mux] = clk;
+ clk = clk_register_gate(NULL, "clk_out_3", "clk_out_3_mux", 0,
+ pmc_base + PMC_CLK_OUT_CNTRL, 18, 0,
+ &clk_out_lock);
+ clk_register_clkdev(clk, "extern3", "clk_out_3");
+ clks[clk_out_3] = clk;
+
+ /* blink */
+ clk = clk_register_gate(NULL, "blink_override", "clk_32k", 0,
+ pmc_base + PMC_DPD_PADS_ORIDE,
+ PMC_DPD_PADS_ORIDE_BLINK_ENB, 0, NULL);
+ clk = clk_register_gate(NULL, "blink", "blink_override", 0,
+ pmc_base + PMC_CTRL,
+ PMC_CTRL_BLINK_ENB, 0, NULL);
+ clk_register_clkdev(clk, "blink", NULL);
+ clks[blink] = clk;
+}
+
+static const char *sclk_parents[] = { "clk_m", "pll_c_out1", "pll_p_out4",
+ "pll_p_out3", "pll_p_out2", "unused",
+ "clk_32k", "pll_m_out1" };
+
+static const char *cclk_g_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
+ "pll_p", "pll_p_out4", "unused",
+ "unused", "pll_x" };
+
+static const char *cclk_lp_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
+ "pll_p", "pll_p_out4", "unused",
+ "unused", "pll_x", "pll_x_out0" };
+
+static void __init tegra114_super_clk_init(void __iomem *clk_base)
+{
+ struct clk *clk;
+
+ /* CCLKG */
+ clk = tegra_clk_register_super_mux("cclk_g", cclk_g_parents,
+ ARRAY_SIZE(cclk_g_parents),
+ CLK_SET_RATE_PARENT,
+ clk_base + CCLKG_BURST_POLICY,
+ 0, 4, 0, 0, NULL);
+ clk_register_clkdev(clk, "cclk_g", NULL);
+ clks[cclk_g] = clk;
+
+ /* CCLKLP */
+ clk = tegra_clk_register_super_mux("cclk_lp", cclk_lp_parents,
+ ARRAY_SIZE(cclk_lp_parents),
+ CLK_SET_RATE_PARENT,
+ clk_base + CCLKLP_BURST_POLICY,
+ 0, 4, 8, 9, NULL);
+ clk_register_clkdev(clk, "cclk_lp", NULL);
+ clks[cclk_lp] = clk;
+
+ /* SCLK */
+ clk = tegra_clk_register_super_mux("sclk", sclk_parents,
+ ARRAY_SIZE(sclk_parents),
+ CLK_SET_RATE_PARENT,
+ clk_base + SCLK_BURST_POLICY,
+ 0, 4, 0, 0, NULL);
+ clk_register_clkdev(clk, "sclk", NULL);
+ clks[sclk] = clk;
+
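+ /*
+ * hclk (AHB) and pclk (APB) below are derived from sclk via the small
+ * dividers in SYSTEM_CLK_RATE (bits 5:4 and 1:0 respectively), each
+ * followed by a disable bit modelled here as a gate.
+ */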
+ /* HCLK */
+ clk = clk_register_divider(NULL, "hclk_div", "sclk", 0,
+ clk_base + SYSTEM_CLK_RATE, 4, 2, 0,
+ &sysrate_lock);
+ clk = clk_register_gate(NULL, "hclk", "hclk_div", CLK_SET_RATE_PARENT |
+ CLK_IGNORE_UNUSED, clk_base + SYSTEM_CLK_RATE,
+ 7, CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
+ clk_register_clkdev(clk, "hclk", NULL);
+ clks[hclk] = clk;
+
+ /* PCLK */
+ clk = clk_register_divider(NULL, "pclk_div", "hclk", 0,
+ clk_base + SYSTEM_CLK_RATE, 0, 2, 0,
+ &sysrate_lock);
+ clk = clk_register_gate(NULL, "pclk", "pclk_div", CLK_SET_RATE_PARENT |
+ CLK_IGNORE_UNUSED, clk_base + SYSTEM_CLK_RATE,
+ 3, CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
+ clk_register_clkdev(clk, "pclk", NULL);
+ clks[pclk] = clk;
+}
+
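+/*
+ * Peripheral clocks with a source mux and divider.  Reading each
+ * initializer left to right: clock name, con_id, dev_id, parent mux,
+ * source register, enable bit, register bank, flags and the clk id used
+ * to index clks[] (an interpretation of the TEGRA_INIT_DATA_* macros,
+ * whose definitions are not shown here).
+ */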
+static struct tegra_periph_init_data tegra_periph_clk_list[] = {
+ TEGRA_INIT_DATA_MUX("i2s0", NULL, "tegra30-i2s.0", mux_pllaout0_audio0_2x_pllp_clkm, CLK_SOURCE_I2S0, 30, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s0),
+ TEGRA_INIT_DATA_MUX("i2s1", NULL, "tegra30-i2s.1", mux_pllaout0_audio1_2x_pllp_clkm, CLK_SOURCE_I2S1, 11, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s1),
+ TEGRA_INIT_DATA_MUX("i2s2", NULL, "tegra30-i2s.2", mux_pllaout0_audio2_2x_pllp_clkm, CLK_SOURCE_I2S2, 18, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s2),
+ TEGRA_INIT_DATA_MUX("i2s3", NULL, "tegra30-i2s.3", mux_pllaout0_audio3_2x_pllp_clkm, CLK_SOURCE_I2S3, 101, &periph_v_regs, TEGRA_PERIPH_ON_APB, i2s3),
+ TEGRA_INIT_DATA_MUX("i2s4", NULL, "tegra30-i2s.4", mux_pllaout0_audio4_2x_pllp_clkm, CLK_SOURCE_I2S4, 102, &periph_v_regs, TEGRA_PERIPH_ON_APB, i2s4),
+ TEGRA_INIT_DATA_MUX("spdif_out", "spdif_out", "tegra30-spdif", mux_pllaout0_audio_2x_pllp_clkm, CLK_SOURCE_SPDIF_OUT, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_out),
+ TEGRA_INIT_DATA_MUX("spdif_in", "spdif_in", "tegra30-spdif", mux_pllp_pllc_pllm, CLK_SOURCE_SPDIF_IN, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_in),
+ TEGRA_INIT_DATA_MUX("pwm", NULL, "pwm", mux_pllp_pllc_clk32_clkm, CLK_SOURCE_PWM, 17, &periph_l_regs, TEGRA_PERIPH_ON_APB, pwm),
+ TEGRA_INIT_DATA_MUX("adx", NULL, "adx", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX, 154, &periph_w_regs, TEGRA_PERIPH_ON_APB, adx),
+ TEGRA_INIT_DATA_MUX("amx", NULL, "amx", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX, 153, &periph_w_regs, TEGRA_PERIPH_ON_APB, amx),
+ TEGRA_INIT_DATA_MUX("hda", "hda", "tegra30-hda", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA, 125, &periph_v_regs, TEGRA_PERIPH_ON_APB, hda),
+ TEGRA_INIT_DATA_MUX("hda2codec_2x", "hda2codec", "tegra30-hda", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA2CODEC_2X, 111, &periph_v_regs, TEGRA_PERIPH_ON_APB, hda2codec_2x),
+ TEGRA_INIT_DATA_MUX("sbc1", NULL, "tegra11-spi.0", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC1, 41, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc1),
+ TEGRA_INIT_DATA_MUX("sbc2", NULL, "tegra11-spi.1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC2, 44, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc2),
+ TEGRA_INIT_DATA_MUX("sbc3", NULL, "tegra11-spi.2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC3, 46, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc3),
+ TEGRA_INIT_DATA_MUX("sbc4", NULL, "tegra11-spi.3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC4, 68, &periph_u_regs, TEGRA_PERIPH_ON_APB, sbc4),
+ TEGRA_INIT_DATA_MUX("sbc5", NULL, "tegra11-spi.4", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC5, 104, &periph_v_regs, TEGRA_PERIPH_ON_APB, sbc5),
+ TEGRA_INIT_DATA_MUX("sbc6", NULL, "tegra11-spi.5", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC6, 105, &periph_v_regs, TEGRA_PERIPH_ON_APB, sbc6),
+ TEGRA_INIT_DATA_MUX8("ndflash", NULL, "tegra_nand", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_NDFLASH, 13, &periph_u_regs, TEGRA_PERIPH_ON_APB, ndflash),
+ TEGRA_INIT_DATA_MUX8("ndspeed", NULL, "tegra_nand_speed", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_NDSPEED, 80, &periph_u_regs, TEGRA_PERIPH_ON_APB, ndspeed),
+ TEGRA_INIT_DATA_MUX("vfir", NULL, "vfir", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_VFIR, 7, &periph_l_regs, TEGRA_PERIPH_ON_APB, vfir),
+ TEGRA_INIT_DATA_MUX("sdmmc1", NULL, "sdhci-tegra.0", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC1, 14, &periph_l_regs, 0, sdmmc1),
+ TEGRA_INIT_DATA_MUX("sdmmc2", NULL, "sdhci-tegra.1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC2, 9, &periph_l_regs, 0, sdmmc2),
+ TEGRA_INIT_DATA_MUX("sdmmc3", NULL, "sdhci-tegra.2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC3, 69, &periph_u_regs, 0, sdmmc3),
+ TEGRA_INIT_DATA_MUX("sdmmc4", NULL, "sdhci-tegra.3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC4, 15, &periph_l_regs, 0, sdmmc4),
+ TEGRA_INIT_DATA_INT("vde", NULL, "vde", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_VDE, 61, &periph_h_regs, 0, vde),
+ TEGRA_INIT_DATA_MUX_FLAGS("csite", NULL, "csite", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_CSITE, 73, &periph_u_regs, TEGRA_PERIPH_ON_APB, csite, CLK_IGNORE_UNUSED),
+ TEGRA_INIT_DATA_MUX("la", NULL, "la", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_LA, 76, &periph_u_regs, TEGRA_PERIPH_ON_APB, la),
+ TEGRA_INIT_DATA_MUX("trace", NULL, "trace", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_TRACE, 77, &periph_u_regs, TEGRA_PERIPH_ON_APB, trace),
+ TEGRA_INIT_DATA_MUX("owr", NULL, "tegra_w1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_OWR, 71, &periph_u_regs, TEGRA_PERIPH_ON_APB, owr),
+ TEGRA_INIT_DATA_MUX("nor", NULL, "tegra-nor", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_NOR, 42, &periph_h_regs, 0, nor),
+ TEGRA_INIT_DATA_MUX("mipi", NULL, "mipi", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_MIPI, 50, &periph_h_regs, TEGRA_PERIPH_ON_APB, mipi),
+ TEGRA_INIT_DATA_I2C("i2c1", "div-clk", "tegra11-i2c.0", mux_pllp_clkm, CLK_SOURCE_I2C1, 12, &periph_l_regs, i2c1),
+ TEGRA_INIT_DATA_I2C("i2c2", "div-clk", "tegra11-i2c.1", mux_pllp_clkm, CLK_SOURCE_I2C2, 54, &periph_h_regs, i2c2),
+ TEGRA_INIT_DATA_I2C("i2c3", "div-clk", "tegra11-i2c.2", mux_pllp_clkm, CLK_SOURCE_I2C3, 67, &periph_u_regs, i2c3),
+ TEGRA_INIT_DATA_I2C("i2c4", "div-clk", "tegra11-i2c.3", mux_pllp_clkm, CLK_SOURCE_I2C4, 103, &periph_v_regs, i2c4),
+ TEGRA_INIT_DATA_I2C("i2c5", "div-clk", "tegra11-i2c.4", mux_pllp_clkm, CLK_SOURCE_I2C5, 47, &periph_h_regs, i2c5),
+ TEGRA_INIT_DATA_UART("uarta", NULL, "tegra_uart.0", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTA, 6, &periph_l_regs, uarta),
+ TEGRA_INIT_DATA_UART("uartb", NULL, "tegra_uart.1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTB, 7, &periph_l_regs, uartb),
+ TEGRA_INIT_DATA_UART("uartc", NULL, "tegra_uart.2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTC, 55, &periph_h_regs, uartc),
+ TEGRA_INIT_DATA_UART("uartd", NULL, "tegra_uart.3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTD, 65, &periph_u_regs, uartd),
+ TEGRA_INIT_DATA_INT("3d", NULL, "3d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_3D, 24, &periph_l_regs, 0, gr_3d),
+ TEGRA_INIT_DATA_INT("2d", NULL, "2d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_2D, 21, &periph_l_regs, 0, gr_2d),
+ TEGRA_INIT_DATA_MUX("vi_sensor", "vi_sensor", "tegra_camera", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR, 20, &periph_l_regs, TEGRA_PERIPH_NO_RESET, vi_sensor),
+ TEGRA_INIT_DATA_INT8("vi", "vi", "tegra_camera", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI, 20, &periph_l_regs, 0, vi),
+ TEGRA_INIT_DATA_INT8("epp", NULL, "epp", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_EPP, 19, &periph_l_regs, 0, epp),
+ TEGRA_INIT_DATA_INT8("msenc", NULL, "msenc", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_MSENC, 91, &periph_h_regs, TEGRA_PERIPH_WAR_1005168, msenc),
+ TEGRA_INIT_DATA_INT8("tsec", NULL, "tsec", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_TSEC, 83, &periph_u_regs, 0, tsec),
+ TEGRA_INIT_DATA_INT8("host1x", NULL, "host1x", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_HOST1X, 28, &periph_l_regs, 0, host1x),
+ TEGRA_INIT_DATA_MUX8("hdmi", NULL, "hdmi", mux_pllp_pllm_plld_plla_pllc_plld2_clkm, CLK_SOURCE_HDMI, 51, &periph_h_regs, 0, hdmi),
+ TEGRA_INIT_DATA_MUX("cilab", "cilab", "tegra_camera", mux_pllp_pllc_clkm, CLK_SOURCE_CILAB, 144, &periph_w_regs, 0, cilab),
+ TEGRA_INIT_DATA_MUX("cilcd", "cilcd", "tegra_camera", mux_pllp_pllc_clkm, CLK_SOURCE_CILCD, 145, &periph_w_regs, 0, cilcd),
+ TEGRA_INIT_DATA_MUX("cile", "cile", "tegra_camera", mux_pllp_pllc_clkm, CLK_SOURCE_CILE, 146, &periph_w_regs, 0, cile),
+ TEGRA_INIT_DATA_MUX("dsialp", "dsialp", "tegradc.0", mux_pllp_pllc_clkm, CLK_SOURCE_DSIALP, 147, &periph_w_regs, 0, dsialp),
+ TEGRA_INIT_DATA_MUX("dsiblp", "dsiblp", "tegradc.1", mux_pllp_pllc_clkm, CLK_SOURCE_DSIBLP, 148, &periph_w_regs, 0, dsiblp),
+ TEGRA_INIT_DATA_MUX("tsensor", NULL, "tegra-tsensor", mux_pllp_pllc_clkm_clk32, CLK_SOURCE_TSENSOR, 100, &periph_v_regs, TEGRA_PERIPH_ON_APB, tsensor),
+ TEGRA_INIT_DATA_MUX("actmon", NULL, "actmon", mux_pllp_pllc_clk32_clkm, CLK_SOURCE_ACTMON, 119, &periph_v_regs, 0, actmon),
+ TEGRA_INIT_DATA_MUX8("extern1", NULL, "extern1", mux_plla_clk32_pllp_clkm_plle, CLK_SOURCE_EXTERN1, 120, &periph_v_regs, 0, extern1),
+ TEGRA_INIT_DATA_MUX8("extern2", NULL, "extern2", mux_plla_clk32_pllp_clkm_plle, CLK_SOURCE_EXTERN2, 121, &periph_v_regs, 0, extern2),
+ TEGRA_INIT_DATA_MUX8("extern3", NULL, "extern3", mux_plla_clk32_pllp_clkm_plle, CLK_SOURCE_EXTERN3, 122, &periph_v_regs, 0, extern3),
+ TEGRA_INIT_DATA_MUX("i2cslow", NULL, "i2cslow", mux_pllp_pllc_clk32_clkm, CLK_SOURCE_I2CSLOW, 81, &periph_u_regs, TEGRA_PERIPH_ON_APB, i2cslow),
+ TEGRA_INIT_DATA_INT8("se", NULL, "se", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SE, 127, &periph_v_regs, TEGRA_PERIPH_ON_APB, se),
+ TEGRA_INIT_DATA_INT_FLAGS("mselect", NULL, "mselect", mux_pllp_clkm, CLK_SOURCE_MSELECT, 99, &periph_v_regs, 0, mselect, CLK_IGNORE_UNUSED),
+ TEGRA_INIT_DATA_MUX8("soc_therm", NULL, "soc_therm", mux_pllm_pllc_pllp_plla, CLK_SOURCE_SOC_THERM, 78, &periph_u_regs, TEGRA_PERIPH_ON_APB, soc_therm),
+ TEGRA_INIT_DATA_XUSB("xusb_host_src", "host_src", "tegra_xhci", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_HOST_SRC, 143, &periph_w_regs, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, xusb_host_src),
+ TEGRA_INIT_DATA_XUSB("xusb_falcon_src", "falcon_src", "tegra_xhci", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_FALCON_SRC, 143, &periph_w_regs, TEGRA_PERIPH_NO_RESET, xusb_falcon_src),
+ TEGRA_INIT_DATA_XUSB("xusb_fs_src", "fs_src", "tegra_xhci", mux_clkm_48M_pllp_480M, CLK_SOURCE_XUSB_FS_SRC, 143, &periph_w_regs, TEGRA_PERIPH_NO_RESET, xusb_fs_src),
+ TEGRA_INIT_DATA_XUSB("xusb_ss_src", "ss_src", "tegra_xhci", mux_clkm_pllre_clk32_480M_pllc_ref, CLK_SOURCE_XUSB_SS_SRC, 143, &periph_w_regs, TEGRA_PERIPH_NO_RESET, xusb_ss_src),
+ TEGRA_INIT_DATA_XUSB("xusb_dev_src", "dev_src", "tegra_xhci", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, &periph_u_regs, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, xusb_dev_src),
+ TEGRA_INIT_DATA_AUDIO("d_audio", "d_audio", "tegra30-ahub", CLK_SOURCE_D_AUDIO, 106, &periph_v_regs, TEGRA_PERIPH_ON_APB, d_audio),
+ TEGRA_INIT_DATA_AUDIO("dam0", NULL, "tegra30-dam.0", CLK_SOURCE_DAM0, 108, &periph_v_regs, TEGRA_PERIPH_ON_APB, dam0),
+ TEGRA_INIT_DATA_AUDIO("dam1", NULL, "tegra30-dam.1", CLK_SOURCE_DAM1, 109, &periph_v_regs, TEGRA_PERIPH_ON_APB, dam1),
+ TEGRA_INIT_DATA_AUDIO("dam2", NULL, "tegra30-dam.2", CLK_SOURCE_DAM2, 110, &periph_v_regs, TEGRA_PERIPH_ON_APB, dam2),
+};
+
+static struct tegra_periph_init_data tegra_periph_nodiv_clk_list[] = {
+ TEGRA_INIT_DATA_NODIV("disp1", NULL, "tegradc.0", mux_pllp_pllm_plld_plla_pllc_plld2_clkm, CLK_SOURCE_DISP1, 29, 7, 27, &periph_l_regs, 0, disp1),
+ TEGRA_INIT_DATA_NODIV("disp2", NULL, "tegradc.1", mux_pllp_pllm_plld_plla_pllc_plld2_clkm, CLK_SOURCE_DISP2, 29, 7, 26, &periph_l_regs, 0, disp2),
+};
+
+static __init void tegra114_periph_clk_init(void __iomem *clk_base)
+{
+ struct tegra_periph_init_data *data;
+ struct clk *clk;
+ int i;
+ u32 val;
+
+ /* apbdma */
+ clk = tegra_clk_register_periph_gate("apbdma", "clk_m", 0, clk_base,
+ 0, 34, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clks[apbdma] = clk;
+
+ /* rtc */
+ clk = tegra_clk_register_periph_gate("rtc", "clk_32k",
+ TEGRA_PERIPH_ON_APB |
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ 0, 4, &periph_l_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "rtc-tegra");
+ clks[rtc] = clk;
+
+ /* kbc */
+ clk = tegra_clk_register_periph_gate("kbc", "clk_32k",
+ TEGRA_PERIPH_ON_APB |
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ 0, 36, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clks[kbc] = clk;
+
+ /* timer */
+ clk = tegra_clk_register_periph_gate("timer", "clk_m", 0, clk_base,
+ 0, 5, &periph_l_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "timer");
+ clks[timer] = clk;
+
+ /* kfuse */
+ clk = tegra_clk_register_periph_gate("kfuse", "clk_m",
+ TEGRA_PERIPH_ON_APB, clk_base, 0, 40,
+ &periph_h_regs, periph_clk_enb_refcnt);
+ clks[kfuse] = clk;
+
+ /* fuse */
+ clk = tegra_clk_register_periph_gate("fuse", "clk_m",
+ TEGRA_PERIPH_ON_APB, clk_base, 0, 39,
+ &periph_h_regs, periph_clk_enb_refcnt);
+ clks[fuse] = clk;
+
+ /* fuse_burn */
+ clk = tegra_clk_register_periph_gate("fuse_burn", "clk_m",
+ TEGRA_PERIPH_ON_APB, clk_base, 0, 39,
+ &periph_h_regs, periph_clk_enb_refcnt);
+ clks[fuse_burn] = clk;
+
+ /* apbif */
+ clk = tegra_clk_register_periph_gate("apbif", "clk_m",
+ TEGRA_PERIPH_ON_APB, clk_base, 0, 107,
+ &periph_v_regs, periph_clk_enb_refcnt);
+ clks[apbif] = clk;
+
+ /* hda2hdmi */
+ clk = tegra_clk_register_periph_gate("hda2hdmi", "clk_m",
+ TEGRA_PERIPH_ON_APB, clk_base, 0, 128,
+ &periph_w_regs, periph_clk_enb_refcnt);
+ clks[hda2hdmi] = clk;
+
+ /* vcp */
+ clk = tegra_clk_register_periph_gate("vcp", "clk_m", 0, clk_base, 0,
+ 29, &periph_l_regs,
+ periph_clk_enb_refcnt);
+ clks[vcp] = clk;
+
+ /* bsea */
+ clk = tegra_clk_register_periph_gate("bsea", "clk_m", 0, clk_base,
+ 0, 62, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clks[bsea] = clk;
+
+ /* bsev */
+ clk = tegra_clk_register_periph_gate("bsev", "clk_m", 0, clk_base,
+ 0, 63, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clks[bsev] = clk;
+
+ /* mipi-cal */
+ clk = tegra_clk_register_periph_gate("mipi-cal", "clk_m", 0, clk_base,
+ 0, 56, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clks[mipi_cal] = clk;
+
+ /* usbd */
+ clk = tegra_clk_register_periph_gate("usbd", "clk_m", 0, clk_base,
+ 0, 22, &periph_l_regs,
+ periph_clk_enb_refcnt);
+ clks[usbd] = clk;
+
+ /* usb2 */
+ clk = tegra_clk_register_periph_gate("usb2", "clk_m", 0, clk_base,
+ 0, 58, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clks[usb2] = clk;
+
+ /* usb3 */
+ clk = tegra_clk_register_periph_gate("usb3", "clk_m", 0, clk_base,
+ 0, 59, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clks[usb3] = clk;
+
+ /* csi */
+ clk = tegra_clk_register_periph_gate("csi", "pll_p_out3", 0, clk_base,
+ 0, 52, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clks[csi] = clk;
+
+ /* isp */
+ clk = tegra_clk_register_periph_gate("isp", "clk_m", 0, clk_base, 0,
+ 23, &periph_l_regs,
+ periph_clk_enb_refcnt);
+ clks[isp] = clk;
+
+ /* csus */
+ clk = tegra_clk_register_periph_gate("csus", "clk_m",
+ TEGRA_PERIPH_NO_RESET, clk_base, 0, 92,
+ &periph_u_regs, periph_clk_enb_refcnt);
+ clks[csus] = clk;
+
+ /* dds */
+ clk = tegra_clk_register_periph_gate("dds", "clk_m",
+ TEGRA_PERIPH_ON_APB, clk_base, 0, 150,
+ &periph_w_regs, periph_clk_enb_refcnt);
+ clks[dds] = clk;
+
+ /* dp2 */
+ clk = tegra_clk_register_periph_gate("dp2", "clk_m",
+ TEGRA_PERIPH_ON_APB, clk_base, 0, 152,
+ &periph_w_regs, periph_clk_enb_refcnt);
+ clks[dp2] = clk;
+
+ /* dtv */
+ clk = tegra_clk_register_periph_gate("dtv", "clk_m",
+ TEGRA_PERIPH_ON_APB, clk_base, 0, 79,
+ &periph_u_regs, periph_clk_enb_refcnt);
+ clks[dtv] = clk;
+
+ /* dsia */
+ clk = clk_register_mux(NULL, "dsia_mux", mux_plld_out0_plld2_out0,
+ ARRAY_SIZE(mux_plld_out0_plld2_out0), 0,
+ clk_base + PLLD_BASE, 25, 1, 0, &pll_d_lock);
+ clks[dsia_mux] = clk;
+ clk = tegra_clk_register_periph_gate("dsia", "dsia_mux", 0, clk_base,
+ 0, 48, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clks[dsia] = clk;
+
+ /* dsib */
+ clk = clk_register_mux(NULL, "dsib_mux", mux_plld_out0_plld2_out0,
+ ARRAY_SIZE(mux_plld_out0_plld2_out0), 0,
+ clk_base + PLLD2_BASE, 25, 1, 0, &pll_d2_lock);
+ clks[dsib_mux] = clk;
+ clk = tegra_clk_register_periph_gate("dsib", "dsib_mux", 0, clk_base,
+ 0, 82, &periph_u_regs,
+ periph_clk_enb_refcnt);
+ clks[dsib] = clk;
+
+ /* xusb_hs_src */
+ val = readl(clk_base + CLK_SOURCE_XUSB_SS_SRC);
+ val |= BIT(25); /* always select PLLU_60M */
+ writel(val, clk_base + CLK_SOURCE_XUSB_SS_SRC);
+
+ clk = clk_register_fixed_factor(NULL, "xusb_hs_src", "pll_u_60M", 0,
+ 1, 1);
+ clks[xusb_hs_src] = clk;
+
+ /* xusb_host */
+ clk = tegra_clk_register_periph_gate("xusb_host", "xusb_host_src", 0,
+ clk_base, 0, 89, &periph_u_regs,
+ periph_clk_enb_refcnt);
+ clks[xusb_host] = clk;
+
+ /* xusb_ss */
+ clk = tegra_clk_register_periph_gate("xusb_ss", "xusb_ss_src", 0,
+ clk_base, 0, 156, &periph_w_regs,
+ periph_clk_enb_refcnt);
+ clks[xusb_ss] = clk;
+
+ /* xusb_dev */
+ clk = tegra_clk_register_periph_gate("xusb_dev", "xusb_dev_src", 0,
+ clk_base, 0, 95, &periph_u_regs,
+ periph_clk_enb_refcnt);
+ clks[xusb_dev] = clk;
+
+ /* emc */
+ clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
+ ARRAY_SIZE(mux_pllmcp_clkm), 0,
+ clk_base + CLK_SOURCE_EMC,
+ 29, 3, 0, NULL);
+ clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base,
+ CLK_IGNORE_UNUSED, 57, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clks[emc] = clk;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
+ data = &tegra_periph_clk_list[i];
+ clk = tegra_clk_register_periph(data->name, data->parent_names,
+ data->num_parents, &data->periph,
+ clk_base, data->offset, data->flags);
+ clks[data->clk_id] = clk;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tegra_periph_nodiv_clk_list); i++) {
+ data = &tegra_periph_nodiv_clk_list[i];
+ clk = tegra_clk_register_periph_nodiv(data->name,
+ data->parent_names, data->num_parents,
+ &data->periph, clk_base, data->offset);
+ clks[data->clk_id] = clk;
+ }
+}
+
+static struct tegra_cpu_car_ops tegra114_cpu_car_ops;
+
+static const struct of_device_id pmc_match[] __initconst = {
+ { .compatible = "nvidia,tegra114-pmc" },
+ {},
+};
+
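+/*
+ * Boot-time clock configuration; each entry appears to be
+ * { clock, parent, rate, enable } as consumed by tegra_init_from_table().
+ */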
+static __initdata struct tegra_clk_init_table init_table[] = {
+ {uarta, pll_p, 408000000, 0},
+ {uartb, pll_p, 408000000, 0},
+ {uartc, pll_p, 408000000, 0},
+ {uartd, pll_p, 408000000, 0},
+ {pll_a, clk_max, 564480000, 1},
+ {pll_a_out0, clk_max, 11289600, 1},
+ {extern1, pll_a_out0, 0, 1},
+ {clk_out_1_mux, extern1, 0, 1},
+ {clk_out_1, clk_max, 0, 1},
+ {i2s0, pll_a_out0, 11289600, 0},
+ {i2s1, pll_a_out0, 11289600, 0},
+ {i2s2, pll_a_out0, 11289600, 0},
+ {i2s3, pll_a_out0, 11289600, 0},
+ {i2s4, pll_a_out0, 11289600, 0},
+ {clk_max, clk_max, 0, 0}, /* This MUST be the last entry. */
+};
+
+static void __init tegra114_clock_apply_init_table(void)
+{
+ tegra_init_from_table(init_table, clks, clk_max);
+}
+
+void __init tegra114_clock_init(struct device_node *np)
+{
+ struct device_node *node;
+ int i;
+
+ clk_base = of_iomap(np, 0);
+ if (!clk_base) {
+ pr_err("ioremap tegra114 CAR failed\n");
+ return;
+ }
+
+ node = of_find_matching_node(NULL, pmc_match);
+ if (!node) {
+ pr_err("Failed to find pmc node\n");
+ WARN_ON(1);
+ return;
+ }
+
+ pmc_base = of_iomap(node, 0);
+ if (!pmc_base) {
+ pr_err("Can't map pmc registers\n");
+ WARN_ON(1);
+ return;
+ }
+
+ if (tegra114_osc_clk_init(clk_base) < 0)
+ return;
+
+ tegra114_fixed_clk_init(clk_base);
+ tegra114_pll_init(clk_base, pmc_base);
+ tegra114_periph_clk_init(clk_base);
+ tegra114_audio_clk_init(clk_base);
+ tegra114_pmc_clk_init(pmc_base);
+ tegra114_super_clk_init(clk_base);
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++) {
+ if (IS_ERR(clks[i])) {
+ pr_err("Tegra114 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clks[i]));
+ }
+ if (!clks[i])
+ clks[i] = ERR_PTR(-EINVAL);
+ }
+
+ clk_data.clks = clks;
+ clk_data.clk_num = ARRAY_SIZE(clks);
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+ tegra_clk_apply_init_table = tegra114_clock_apply_init_table;
+
+ tegra_cpu_car_ops = &tegra114_cpu_car_ops;
+}
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index f873dce..8292a00 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -86,8 +86,8 @@
#define PLLE_BASE 0xe8
#define PLLE_MISC 0xec
-#define PLL_BASE_LOCK 27
-#define PLLE_MISC_LOCK 11
+#define PLL_BASE_LOCK BIT(27)
+#define PLLE_MISC_LOCK BIT(11)
#define PLL_MISC_LOCK_ENABLE 18
#define PLLDU_MISC_LOCK_ENABLE 22
@@ -236,7 +236,7 @@ enum tegra20_clk {
dvc, dsi, mipi = 50, hdmi, csi, tvdac, i2c2, uartc, emc = 57, usb2,
usb3, mpe, vde, bsea, bsev, speedo, uartd, uarte, i2c3, sbc4, sdmmc3,
pex, owr, afi, csite, pcie_xclk, avpucq = 75, la, irama = 84, iramb,
- iramc, iramd, cram2, audio_2x, clk_d, csus = 92, cdev1, cdev2,
+ iramc, iramd, cram2, audio_2x, clk_d, csus = 92, cdev2, cdev1,
uartb = 96, vfir, spdif_in, spdif_out, vi, vi_sensor, tvo, cve,
osc, clk_32k, clk_m, sclk, cclk, hclk, pclk, blink, pll_a, pll_a_out0,
pll_c, pll_c_out1, pll_d, pll_d_out0, pll_e, pll_m, pll_m_out1,
@@ -248,125 +248,125 @@ static struct clk *clks[clk_max];
static struct clk_onecell_data clk_data;
static struct tegra_clk_pll_freq_table pll_c_freq_table[] = {
- { 12000000, 600000000, 600, 12, 1, 8 },
- { 13000000, 600000000, 600, 13, 1, 8 },
- { 19200000, 600000000, 500, 16, 1, 6 },
- { 26000000, 600000000, 600, 26, 1, 8 },
+ { 12000000, 600000000, 600, 12, 0, 8 },
+ { 13000000, 600000000, 600, 13, 0, 8 },
+ { 19200000, 600000000, 500, 16, 0, 6 },
+ { 26000000, 600000000, 600, 26, 0, 8 },
{ 0, 0, 0, 0, 0, 0 },
};
static struct tegra_clk_pll_freq_table pll_m_freq_table[] = {
- { 12000000, 666000000, 666, 12, 1, 8},
- { 13000000, 666000000, 666, 13, 1, 8},
- { 19200000, 666000000, 555, 16, 1, 8},
- { 26000000, 666000000, 666, 26, 1, 8},
- { 12000000, 600000000, 600, 12, 1, 8},
- { 13000000, 600000000, 600, 13, 1, 8},
- { 19200000, 600000000, 375, 12, 1, 6},
- { 26000000, 600000000, 600, 26, 1, 8},
+ { 12000000, 666000000, 666, 12, 0, 8},
+ { 13000000, 666000000, 666, 13, 0, 8},
+ { 19200000, 666000000, 555, 16, 0, 8},
+ { 26000000, 666000000, 666, 26, 0, 8},
+ { 12000000, 600000000, 600, 12, 0, 8},
+ { 13000000, 600000000, 600, 13, 0, 8},
+ { 19200000, 600000000, 375, 12, 0, 6},
+ { 26000000, 600000000, 600, 26, 0, 8},
{ 0, 0, 0, 0, 0, 0 },
};
static struct tegra_clk_pll_freq_table pll_p_freq_table[] = {
- { 12000000, 216000000, 432, 12, 2, 8},
- { 13000000, 216000000, 432, 13, 2, 8},
- { 19200000, 216000000, 90, 4, 2, 1},
- { 26000000, 216000000, 432, 26, 2, 8},
- { 12000000, 432000000, 432, 12, 1, 8},
- { 13000000, 432000000, 432, 13, 1, 8},
- { 19200000, 432000000, 90, 4, 1, 1},
- { 26000000, 432000000, 432, 26, 1, 8},
+ { 12000000, 216000000, 432, 12, 1, 8},
+ { 13000000, 216000000, 432, 13, 1, 8},
+ { 19200000, 216000000, 90, 4, 1, 1},
+ { 26000000, 216000000, 432, 26, 1, 8},
+ { 12000000, 432000000, 432, 12, 0, 8},
+ { 13000000, 432000000, 432, 13, 0, 8},
+ { 19200000, 432000000, 90, 4, 0, 1},
+ { 26000000, 432000000, 432, 26, 0, 8},
{ 0, 0, 0, 0, 0, 0 },
};
static struct tegra_clk_pll_freq_table pll_a_freq_table[] = {
- { 28800000, 56448000, 49, 25, 1, 1},
- { 28800000, 73728000, 64, 25, 1, 1},
- { 28800000, 24000000, 5, 6, 1, 1},
+ { 28800000, 56448000, 49, 25, 0, 1},
+ { 28800000, 73728000, 64, 25, 0, 1},
+ { 28800000, 24000000, 5, 6, 0, 1},
{ 0, 0, 0, 0, 0, 0 },
};
static struct tegra_clk_pll_freq_table pll_d_freq_table[] = {
- { 12000000, 216000000, 216, 12, 1, 4},
- { 13000000, 216000000, 216, 13, 1, 4},
- { 19200000, 216000000, 135, 12, 1, 3},
- { 26000000, 216000000, 216, 26, 1, 4},
+ { 12000000, 216000000, 216, 12, 0, 4},
+ { 13000000, 216000000, 216, 13, 0, 4},
+ { 19200000, 216000000, 135, 12, 0, 3},
+ { 26000000, 216000000, 216, 26, 0, 4},
- { 12000000, 594000000, 594, 12, 1, 8},
- { 13000000, 594000000, 594, 13, 1, 8},
- { 19200000, 594000000, 495, 16, 1, 8},
- { 26000000, 594000000, 594, 26, 1, 8},
+ { 12000000, 594000000, 594, 12, 0, 8},
+ { 13000000, 594000000, 594, 13, 0, 8},
+ { 19200000, 594000000, 495, 16, 0, 8},
+ { 26000000, 594000000, 594, 26, 0, 8},
- { 12000000, 1000000000, 1000, 12, 1, 12},
- { 13000000, 1000000000, 1000, 13, 1, 12},
- { 19200000, 1000000000, 625, 12, 1, 8},
- { 26000000, 1000000000, 1000, 26, 1, 12},
+ { 12000000, 1000000000, 1000, 12, 0, 12},
+ { 13000000, 1000000000, 1000, 13, 0, 12},
+ { 19200000, 1000000000, 625, 12, 0, 8},
+ { 26000000, 1000000000, 1000, 26, 0, 12},
{ 0, 0, 0, 0, 0, 0 },
};
static struct tegra_clk_pll_freq_table pll_u_freq_table[] = {
- { 12000000, 480000000, 960, 12, 2, 0},
- { 13000000, 480000000, 960, 13, 2, 0},
- { 19200000, 480000000, 200, 4, 2, 0},
- { 26000000, 480000000, 960, 26, 2, 0},
+ { 12000000, 480000000, 960, 12, 0, 0},
+ { 13000000, 480000000, 960, 13, 0, 0},
+ { 19200000, 480000000, 200, 4, 0, 0},
+ { 26000000, 480000000, 960, 26, 0, 0},
{ 0, 0, 0, 0, 0, 0 },
};
static struct tegra_clk_pll_freq_table pll_x_freq_table[] = {
/* 1 GHz */
- { 12000000, 1000000000, 1000, 12, 1, 12},
- { 13000000, 1000000000, 1000, 13, 1, 12},
- { 19200000, 1000000000, 625, 12, 1, 8},
- { 26000000, 1000000000, 1000, 26, 1, 12},
+ { 12000000, 1000000000, 1000, 12, 0, 12},
+ { 13000000, 1000000000, 1000, 13, 0, 12},
+ { 19200000, 1000000000, 625, 12, 0, 8},
+ { 26000000, 1000000000, 1000, 26, 0, 12},
/* 912 MHz */
- { 12000000, 912000000, 912, 12, 1, 12},
- { 13000000, 912000000, 912, 13, 1, 12},
- { 19200000, 912000000, 760, 16, 1, 8},
- { 26000000, 912000000, 912, 26, 1, 12},
+ { 12000000, 912000000, 912, 12, 0, 12},
+ { 13000000, 912000000, 912, 13, 0, 12},
+ { 19200000, 912000000, 760, 16, 0, 8},
+ { 26000000, 912000000, 912, 26, 0, 12},
/* 816 MHz */
- { 12000000, 816000000, 816, 12, 1, 12},
- { 13000000, 816000000, 816, 13, 1, 12},
- { 19200000, 816000000, 680, 16, 1, 8},
- { 26000000, 816000000, 816, 26, 1, 12},
+ { 12000000, 816000000, 816, 12, 0, 12},
+ { 13000000, 816000000, 816, 13, 0, 12},
+ { 19200000, 816000000, 680, 16, 0, 8},
+ { 26000000, 816000000, 816, 26, 0, 12},
/* 760 MHz */
- { 12000000, 760000000, 760, 12, 1, 12},
- { 13000000, 760000000, 760, 13, 1, 12},
- { 19200000, 760000000, 950, 24, 1, 8},
- { 26000000, 760000000, 760, 26, 1, 12},
+ { 12000000, 760000000, 760, 12, 0, 12},
+ { 13000000, 760000000, 760, 13, 0, 12},
+ { 19200000, 760000000, 950, 24, 0, 8},
+ { 26000000, 760000000, 760, 26, 0, 12},
/* 750 MHz */
- { 12000000, 750000000, 750, 12, 1, 12},
- { 13000000, 750000000, 750, 13, 1, 12},
- { 19200000, 750000000, 625, 16, 1, 8},
- { 26000000, 750000000, 750, 26, 1, 12},
+ { 12000000, 750000000, 750, 12, 0, 12},
+ { 13000000, 750000000, 750, 13, 0, 12},
+ { 19200000, 750000000, 625, 16, 0, 8},
+ { 26000000, 750000000, 750, 26, 0, 12},
/* 608 MHz */
- { 12000000, 608000000, 608, 12, 1, 12},
- { 13000000, 608000000, 608, 13, 1, 12},
- { 19200000, 608000000, 380, 12, 1, 8},
- { 26000000, 608000000, 608, 26, 1, 12},
+ { 12000000, 608000000, 608, 12, 0, 12},
+ { 13000000, 608000000, 608, 13, 0, 12},
+ { 19200000, 608000000, 380, 12, 0, 8},
+ { 26000000, 608000000, 608, 26, 0, 12},
/* 456 MHz */
- { 12000000, 456000000, 456, 12, 1, 12},
- { 13000000, 456000000, 456, 13, 1, 12},
- { 19200000, 456000000, 380, 16, 1, 8},
- { 26000000, 456000000, 456, 26, 1, 12},
+ { 12000000, 456000000, 456, 12, 0, 12},
+ { 13000000, 456000000, 456, 13, 0, 12},
+ { 19200000, 456000000, 380, 16, 0, 8},
+ { 26000000, 456000000, 456, 26, 0, 12},
/* 312 MHz */
- { 12000000, 312000000, 312, 12, 1, 12},
- { 13000000, 312000000, 312, 13, 1, 12},
- { 19200000, 312000000, 260, 16, 1, 8},
- { 26000000, 312000000, 312, 26, 1, 12},
+ { 12000000, 312000000, 312, 12, 0, 12},
+ { 13000000, 312000000, 312, 13, 0, 12},
+ { 19200000, 312000000, 260, 16, 0, 8},
+ { 26000000, 312000000, 312, 26, 0, 12},
{ 0, 0, 0, 0, 0, 0 },
};
static struct tegra_clk_pll_freq_table pll_e_freq_table[] = {
- { 12000000, 100000000, 200, 24, 1, 0 },
+ { 12000000, 100000000, 200, 24, 0, 0 },
{ 0, 0, 0, 0, 0, 0 },
};
@@ -380,7 +380,7 @@ static struct tegra_clk_pll_params pll_c_params = {
.vco_max = 1400000000,
.base_reg = PLLC_BASE,
.misc_reg = PLLC_MISC,
- .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
};
@@ -394,7 +394,7 @@ static struct tegra_clk_pll_params pll_m_params = {
.vco_max = 1200000000,
.base_reg = PLLM_BASE,
.misc_reg = PLLM_MISC,
- .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
};
@@ -408,7 +408,7 @@ static struct tegra_clk_pll_params pll_p_params = {
.vco_max = 1400000000,
.base_reg = PLLP_BASE,
.misc_reg = PLLP_MISC,
- .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
};
@@ -422,7 +422,7 @@ static struct tegra_clk_pll_params pll_a_params = {
.vco_max = 1400000000,
.base_reg = PLLA_BASE,
.misc_reg = PLLA_MISC,
- .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
};
@@ -436,11 +436,17 @@ static struct tegra_clk_pll_params pll_d_params = {
.vco_max = 1000000000,
.base_reg = PLLD_BASE,
.misc_reg = PLLD_MISC,
- .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
.lock_delay = 1000,
};
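+
+/*
+ * PLLU's post divider is a single bit whose encoding differs from the
+ * generic 2^n scheme, so the p column of the frequency tables (now the
+ * raw register value, see the table changes above) is translated
+ * through this map: hw_val 1 -> /1, hw_val 0 -> /2.
+ */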
+static struct pdiv_map pllu_p[] = {
+ { .pdiv = 1, .hw_val = 1 },
+ { .pdiv = 2, .hw_val = 0 },
+ { .pdiv = 0, .hw_val = 0 },
+};
+
static struct tegra_clk_pll_params pll_u_params = {
.input_min = 2000000,
.input_max = 40000000,
@@ -450,9 +456,10 @@ static struct tegra_clk_pll_params pll_u_params = {
.vco_max = 960000000,
.base_reg = PLLU_BASE,
.misc_reg = PLLU_MISC,
- .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
.lock_delay = 1000,
+ .pdiv_tohw = pllu_p,
};
static struct tegra_clk_pll_params pll_x_params = {
@@ -464,7 +471,7 @@ static struct tegra_clk_pll_params pll_x_params = {
.vco_max = 1200000000,
.base_reg = PLLX_BASE,
.misc_reg = PLLX_MISC,
- .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
};
@@ -478,7 +485,7 @@ static struct tegra_clk_pll_params pll_e_params = {
.vco_max = 0,
.base_reg = PLLE_BASE,
.misc_reg = PLLE_MISC,
- .lock_bit_idx = PLLE_MISC_LOCK,
+ .lock_mask = PLLE_MISC_LOCK,
.lock_enable_bit_idx = PLLE_MISC_LOCK_ENABLE,
.lock_delay = 0,
};
@@ -711,8 +718,8 @@ static void tegra20_pll_init(void)
}
static const char *cclk_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
- "pll_p_cclk", "pll_p_out4_cclk",
- "pll_p_out3_cclk", "clk_d", "pll_x" };
+ "pll_p", "pll_p_out4",
+ "pll_p_out3", "clk_d", "pll_x" };
static const char *sclk_parents[] = { "clk_m", "pll_c_out1", "pll_p_out4",
"pll_p_out3", "pll_p_out2", "clk_d",
"clk_32k", "pll_m_out1" };
@@ -721,38 +728,6 @@ static void tegra20_super_clk_init(void)
{
struct clk *clk;
- /*
- * DIV_U71 dividers for CCLK, these dividers are used only
- * if parent clock is fixed rate.
- */
-
- /*
- * Clock input to cclk divided from pll_p using
- * U71 divider of cclk.
- */
- clk = tegra_clk_register_divider("pll_p_cclk", "pll_p",
- clk_base + SUPER_CCLK_DIVIDER, 0,
- TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
- clk_register_clkdev(clk, "pll_p_cclk", NULL);
-
- /*
- * Clock input to cclk divided from pll_p_out3 using
- * U71 divider of cclk.
- */
- clk = tegra_clk_register_divider("pll_p_out3_cclk", "pll_p_out3",
- clk_base + SUPER_CCLK_DIVIDER, 0,
- TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
- clk_register_clkdev(clk, "pll_p_out3_cclk", NULL);
-
- /*
- * Clock input to cclk divided from pll_p_out4 using
- * U71 divider of cclk.
- */
- clk = tegra_clk_register_divider("pll_p_out4_cclk", "pll_p_out4",
- clk_base + SUPER_CCLK_DIVIDER, 0,
- TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
- clk_register_clkdev(clk, "pll_p_out4_cclk", NULL);
-
/* CCLK */
clk = tegra_clk_register_super_mux("cclk", cclk_parents,
ARRAY_SIZE(cclk_parents), CLK_SET_RATE_PARENT,
@@ -1044,7 +1019,7 @@ static void __init tegra20_periph_clk_init(void)
data = &tegra_periph_clk_list[i];
clk = tegra_clk_register_periph(data->name, data->parent_names,
data->num_parents, &data->periph,
- clk_base, data->offset);
+ clk_base, data->offset, data->flags);
clk_register_clkdev(clk, data->con_id, data->dev_id);
clks[data->clk_id] = clk;
}
@@ -1279,9 +1254,16 @@ static __initdata struct tegra_clk_init_table init_table[] = {
{host1x, pll_c, 150000000, 0},
{disp1, pll_p, 600000000, 0},
{disp2, pll_p, 600000000, 0},
+ {gr2d, pll_c, 300000000, 0},
+ {gr3d, pll_c, 300000000, 0},
{clk_max, clk_max, 0, 0}, /* This MUST be the last entry */
};
+static void __init tegra20_clock_apply_init_table(void)
+{
+ tegra_init_from_table(init_table, clks, clk_max);
+}
+
/*
* Some clocks may be used by different drivers depending on the board
* configuration. List those here to register them twice in the clock lookup
@@ -1348,7 +1330,7 @@ void __init tegra20_clock_init(struct device_node *np)
clk_data.clk_num = ARRAY_SIZE(clks);
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
- tegra_init_from_table(init_table, clks, clk_max);
+ tegra_clk_apply_init_table = tegra20_clock_apply_init_table;
tegra_cpu_car_ops = &tegra20_cpu_car_ops;
}
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index ba6f51b..c6921f5 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -22,8 +22,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/tegra.h>
-
-#include <mach/powergate.h>
+#include <linux/tegra-powergate.h>
#include "clk.h"
@@ -116,8 +115,8 @@
#define PLLDU_MISC_LOCK_ENABLE 22
#define PLLE_MISC_LOCK_ENABLE 9
-#define PLL_BASE_LOCK 27
-#define PLLE_MISC_LOCK 11
+#define PLL_BASE_LOCK BIT(27)
+#define PLLE_MISC_LOCK BIT(11)
#define PLLE_AUX 0x48c
#define PLLC_OUT 0x84
@@ -330,7 +329,7 @@ enum tegra30_clk {
usb3, mpe, vde, bsea, bsev, speedo, uartd, uarte, i2c3, sbc4, sdmmc3,
pcie, owr, afi, csite, pciex, avpucq, la, dtv = 79, ndspeed, i2cslow,
dsib, irama = 84, iramb, iramc, iramd, cram2, audio_2x = 90, csus = 92,
- cdev1, cdev2, cpu_g = 96, cpu_lp, gr3d2, mselect, tsensor, i2s3, i2s4,
+ cdev2, cdev1, cpu_g = 96, cpu_lp, gr3d2, mselect, tsensor, i2s3, i2s4,
i2c4, sbc5, sbc6, d_audio, apbif, dam0, dam1, dam2, hda2codec_2x,
atomics, audio0_2x, audio1_2x, audio2_2x, audio3_2x, audio4_2x,
spdif_2x, actmon, extern1, extern2, extern3, sata_oob, sata, hda,
@@ -374,164 +373,170 @@ static const struct utmi_clk_param utmi_parameters[] = {
};
static struct tegra_clk_pll_freq_table pll_c_freq_table[] = {
- { 12000000, 1040000000, 520, 6, 1, 8},
- { 13000000, 1040000000, 480, 6, 1, 8},
- { 16800000, 1040000000, 495, 8, 1, 8}, /* actual: 1039.5 MHz */
- { 19200000, 1040000000, 325, 6, 1, 6},
- { 26000000, 1040000000, 520, 13, 1, 8},
-
- { 12000000, 832000000, 416, 6, 1, 8},
- { 13000000, 832000000, 832, 13, 1, 8},
- { 16800000, 832000000, 396, 8, 1, 8}, /* actual: 831.6 MHz */
- { 19200000, 832000000, 260, 6, 1, 8},
- { 26000000, 832000000, 416, 13, 1, 8},
-
- { 12000000, 624000000, 624, 12, 1, 8},
- { 13000000, 624000000, 624, 13, 1, 8},
- { 16800000, 600000000, 520, 14, 1, 8},
- { 19200000, 624000000, 520, 16, 1, 8},
- { 26000000, 624000000, 624, 26, 1, 8},
-
- { 12000000, 600000000, 600, 12, 1, 8},
- { 13000000, 600000000, 600, 13, 1, 8},
- { 16800000, 600000000, 500, 14, 1, 8},
- { 19200000, 600000000, 375, 12, 1, 6},
- { 26000000, 600000000, 600, 26, 1, 8},
-
- { 12000000, 520000000, 520, 12, 1, 8},
- { 13000000, 520000000, 520, 13, 1, 8},
- { 16800000, 520000000, 495, 16, 1, 8}, /* actual: 519.75 MHz */
- { 19200000, 520000000, 325, 12, 1, 6},
- { 26000000, 520000000, 520, 26, 1, 8},
-
- { 12000000, 416000000, 416, 12, 1, 8},
- { 13000000, 416000000, 416, 13, 1, 8},
- { 16800000, 416000000, 396, 16, 1, 8}, /* actual: 415.8 MHz */
- { 19200000, 416000000, 260, 12, 1, 6},
- { 26000000, 416000000, 416, 26, 1, 8},
+ { 12000000, 1040000000, 520, 6, 0, 8},
+ { 13000000, 1040000000, 480, 6, 0, 8},
+ { 16800000, 1040000000, 495, 8, 0, 8}, /* actual: 1039.5 MHz */
+ { 19200000, 1040000000, 325, 6, 0, 6},
+ { 26000000, 1040000000, 520, 13, 0, 8},
+
+ { 12000000, 832000000, 416, 6, 0, 8},
+ { 13000000, 832000000, 832, 13, 0, 8},
+ { 16800000, 832000000, 396, 8, 0, 8}, /* actual: 831.6 MHz */
+ { 19200000, 832000000, 260, 6, 0, 8},
+ { 26000000, 832000000, 416, 13, 0, 8},
+
+ { 12000000, 624000000, 624, 12, 0, 8},
+ { 13000000, 624000000, 624, 13, 0, 8},
+ { 16800000, 600000000, 520, 14, 0, 8},
+ { 19200000, 624000000, 520, 16, 0, 8},
+ { 26000000, 624000000, 624, 26, 0, 8},
+
+ { 12000000, 600000000, 600, 12, 0, 8},
+ { 13000000, 600000000, 600, 13, 0, 8},
+ { 16800000, 600000000, 500, 14, 0, 8},
+ { 19200000, 600000000, 375, 12, 0, 6},
+ { 26000000, 600000000, 600, 26, 0, 8},
+
+ { 12000000, 520000000, 520, 12, 0, 8},
+ { 13000000, 520000000, 520, 13, 0, 8},
+ { 16800000, 520000000, 495, 16, 0, 8}, /* actual: 519.75 MHz */
+ { 19200000, 520000000, 325, 12, 0, 6},
+ { 26000000, 520000000, 520, 26, 0, 8},
+
+ { 12000000, 416000000, 416, 12, 0, 8},
+ { 13000000, 416000000, 416, 13, 0, 8},
+ { 16800000, 416000000, 396, 16, 0, 8}, /* actual: 415.8 MHz */
+ { 19200000, 416000000, 260, 12, 0, 6},
+ { 26000000, 416000000, 416, 26, 0, 8},
{ 0, 0, 0, 0, 0, 0 },
};
static struct tegra_clk_pll_freq_table pll_m_freq_table[] = {
- { 12000000, 666000000, 666, 12, 1, 8},
- { 13000000, 666000000, 666, 13, 1, 8},
- { 16800000, 666000000, 555, 14, 1, 8},
- { 19200000, 666000000, 555, 16, 1, 8},
- { 26000000, 666000000, 666, 26, 1, 8},
- { 12000000, 600000000, 600, 12, 1, 8},
- { 13000000, 600000000, 600, 13, 1, 8},
- { 16800000, 600000000, 500, 14, 1, 8},
- { 19200000, 600000000, 375, 12, 1, 6},
- { 26000000, 600000000, 600, 26, 1, 8},
+ { 12000000, 666000000, 666, 12, 0, 8},
+ { 13000000, 666000000, 666, 13, 0, 8},
+ { 16800000, 666000000, 555, 14, 0, 8},
+ { 19200000, 666000000, 555, 16, 0, 8},
+ { 26000000, 666000000, 666, 26, 0, 8},
+ { 12000000, 600000000, 600, 12, 0, 8},
+ { 13000000, 600000000, 600, 13, 0, 8},
+ { 16800000, 600000000, 500, 14, 0, 8},
+ { 19200000, 600000000, 375, 12, 0, 6},
+ { 26000000, 600000000, 600, 26, 0, 8},
{ 0, 0, 0, 0, 0, 0 },
};
static struct tegra_clk_pll_freq_table pll_p_freq_table[] = {
- { 12000000, 216000000, 432, 12, 2, 8},
- { 13000000, 216000000, 432, 13, 2, 8},
- { 16800000, 216000000, 360, 14, 2, 8},
- { 19200000, 216000000, 360, 16, 2, 8},
- { 26000000, 216000000, 432, 26, 2, 8},
+ { 12000000, 216000000, 432, 12, 1, 8},
+ { 13000000, 216000000, 432, 13, 1, 8},
+ { 16800000, 216000000, 360, 14, 1, 8},
+ { 19200000, 216000000, 360, 16, 1, 8},
+ { 26000000, 216000000, 432, 26, 1, 8},
{ 0, 0, 0, 0, 0, 0 },
};
static struct tegra_clk_pll_freq_table pll_a_freq_table[] = {
- { 9600000, 564480000, 294, 5, 1, 4},
- { 9600000, 552960000, 288, 5, 1, 4},
- { 9600000, 24000000, 5, 2, 1, 1},
+ { 9600000, 564480000, 294, 5, 0, 4},
+ { 9600000, 552960000, 288, 5, 0, 4},
+ { 9600000, 24000000, 5, 2, 0, 1},
- { 28800000, 56448000, 49, 25, 1, 1},
- { 28800000, 73728000, 64, 25, 1, 1},
- { 28800000, 24000000, 5, 6, 1, 1},
+ { 28800000, 56448000, 49, 25, 0, 1},
+ { 28800000, 73728000, 64, 25, 0, 1},
+ { 28800000, 24000000, 5, 6, 0, 1},
{ 0, 0, 0, 0, 0, 0 },
};
static struct tegra_clk_pll_freq_table pll_d_freq_table[] = {
- { 12000000, 216000000, 216, 12, 1, 4},
- { 13000000, 216000000, 216, 13, 1, 4},
- { 16800000, 216000000, 180, 14, 1, 4},
- { 19200000, 216000000, 180, 16, 1, 4},
- { 26000000, 216000000, 216, 26, 1, 4},
-
- { 12000000, 594000000, 594, 12, 1, 8},
- { 13000000, 594000000, 594, 13, 1, 8},
- { 16800000, 594000000, 495, 14, 1, 8},
- { 19200000, 594000000, 495, 16, 1, 8},
- { 26000000, 594000000, 594, 26, 1, 8},
-
- { 12000000, 1000000000, 1000, 12, 1, 12},
- { 13000000, 1000000000, 1000, 13, 1, 12},
- { 19200000, 1000000000, 625, 12, 1, 8},
- { 26000000, 1000000000, 1000, 26, 1, 12},
+ { 12000000, 216000000, 216, 12, 0, 4},
+ { 13000000, 216000000, 216, 13, 0, 4},
+ { 16800000, 216000000, 180, 14, 0, 4},
+ { 19200000, 216000000, 180, 16, 0, 4},
+ { 26000000, 216000000, 216, 26, 0, 4},
+
+ { 12000000, 594000000, 594, 12, 0, 8},
+ { 13000000, 594000000, 594, 13, 0, 8},
+ { 16800000, 594000000, 495, 14, 0, 8},
+ { 19200000, 594000000, 495, 16, 0, 8},
+ { 26000000, 594000000, 594, 26, 0, 8},
+
+ { 12000000, 1000000000, 1000, 12, 0, 12},
+ { 13000000, 1000000000, 1000, 13, 0, 12},
+ { 19200000, 1000000000, 625, 12, 0, 8},
+ { 26000000, 1000000000, 1000, 26, 0, 12},
{ 0, 0, 0, 0, 0, 0 },
};
+static struct pdiv_map pllu_p[] = {
+ { .pdiv = 1, .hw_val = 1 },
+ { .pdiv = 2, .hw_val = 0 },
+ { .pdiv = 0, .hw_val = 0 },
+};
+
static struct tegra_clk_pll_freq_table pll_u_freq_table[] = {
- { 12000000, 480000000, 960, 12, 2, 12},
- { 13000000, 480000000, 960, 13, 2, 12},
- { 16800000, 480000000, 400, 7, 2, 5},
- { 19200000, 480000000, 200, 4, 2, 3},
- { 26000000, 480000000, 960, 26, 2, 12},
+ { 12000000, 480000000, 960, 12, 0, 12},
+ { 13000000, 480000000, 960, 13, 0, 12},
+ { 16800000, 480000000, 400, 7, 0, 5},
+ { 19200000, 480000000, 200, 4, 0, 3},
+ { 26000000, 480000000, 960, 26, 0, 12},
{ 0, 0, 0, 0, 0, 0 },
};
static struct tegra_clk_pll_freq_table pll_x_freq_table[] = {
/* 1.7 GHz */
- { 12000000, 1700000000, 850, 6, 1, 8},
- { 13000000, 1700000000, 915, 7, 1, 8}, /* actual: 1699.2 MHz */
- { 16800000, 1700000000, 708, 7, 1, 8}, /* actual: 1699.2 MHz */
- { 19200000, 1700000000, 885, 10, 1, 8}, /* actual: 1699.2 MHz */
- { 26000000, 1700000000, 850, 13, 1, 8},
+ { 12000000, 1700000000, 850, 6, 0, 8},
+ { 13000000, 1700000000, 915, 7, 0, 8}, /* actual: 1699.2 MHz */
+ { 16800000, 1700000000, 708, 7, 0, 8}, /* actual: 1699.2 MHz */
+ { 19200000, 1700000000, 885, 10, 0, 8}, /* actual: 1699.2 MHz */
+ { 26000000, 1700000000, 850, 13, 0, 8},
/* 1.6 GHz */
- { 12000000, 1600000000, 800, 6, 1, 8},
- { 13000000, 1600000000, 738, 6, 1, 8}, /* actual: 1599.0 MHz */
- { 16800000, 1600000000, 857, 9, 1, 8}, /* actual: 1599.7 MHz */
- { 19200000, 1600000000, 500, 6, 1, 8},
- { 26000000, 1600000000, 800, 13, 1, 8},
+ { 12000000, 1600000000, 800, 6, 0, 8},
+ { 13000000, 1600000000, 738, 6, 0, 8}, /* actual: 1599.0 MHz */
+ { 16800000, 1600000000, 857, 9, 0, 8}, /* actual: 1599.7 MHz */
+ { 19200000, 1600000000, 500, 6, 0, 8},
+ { 26000000, 1600000000, 800, 13, 0, 8},
/* 1.5 GHz */
- { 12000000, 1500000000, 750, 6, 1, 8},
- { 13000000, 1500000000, 923, 8, 1, 8}, /* actual: 1499.8 MHz */
- { 16800000, 1500000000, 625, 7, 1, 8},
- { 19200000, 1500000000, 625, 8, 1, 8},
- { 26000000, 1500000000, 750, 13, 1, 8},
+ { 12000000, 1500000000, 750, 6, 0, 8},
+ { 13000000, 1500000000, 923, 8, 0, 8}, /* actual: 1499.8 MHz */
+ { 16800000, 1500000000, 625, 7, 0, 8},
+ { 19200000, 1500000000, 625, 8, 0, 8},
+ { 26000000, 1500000000, 750, 13, 0, 8},
/* 1.4 GHz */
- { 12000000, 1400000000, 700, 6, 1, 8},
- { 13000000, 1400000000, 969, 9, 1, 8}, /* actual: 1399.7 MHz */
- { 16800000, 1400000000, 1000, 12, 1, 8},
- { 19200000, 1400000000, 875, 12, 1, 8},
- { 26000000, 1400000000, 700, 13, 1, 8},
+ { 12000000, 1400000000, 700, 6, 0, 8},
+ { 13000000, 1400000000, 969, 9, 0, 8}, /* actual: 1399.7 MHz */
+ { 16800000, 1400000000, 1000, 12, 0, 8},
+ { 19200000, 1400000000, 875, 12, 0, 8},
+ { 26000000, 1400000000, 700, 13, 0, 8},
/* 1.3 GHz */
- { 12000000, 1300000000, 975, 9, 1, 8},
- { 13000000, 1300000000, 1000, 10, 1, 8},
- { 16800000, 1300000000, 928, 12, 1, 8}, /* actual: 1299.2 MHz */
- { 19200000, 1300000000, 812, 12, 1, 8}, /* actual: 1299.2 MHz */
- { 26000000, 1300000000, 650, 13, 1, 8},
+ { 12000000, 1300000000, 975, 9, 0, 8},
+ { 13000000, 1300000000, 1000, 10, 0, 8},
+ { 16800000, 1300000000, 928, 12, 0, 8}, /* actual: 1299.2 MHz */
+ { 19200000, 1300000000, 812, 12, 0, 8}, /* actual: 1299.2 MHz */
+ { 26000000, 1300000000, 650, 13, 0, 8},
/* 1.2 GHz */
- { 12000000, 1200000000, 1000, 10, 1, 8},
- { 13000000, 1200000000, 923, 10, 1, 8}, /* actual: 1199.9 MHz */
- { 16800000, 1200000000, 1000, 14, 1, 8},
- { 19200000, 1200000000, 1000, 16, 1, 8},
- { 26000000, 1200000000, 600, 13, 1, 8},
+ { 12000000, 1200000000, 1000, 10, 0, 8},
+ { 13000000, 1200000000, 923, 10, 0, 8}, /* actual: 1199.9 MHz */
+ { 16800000, 1200000000, 1000, 14, 0, 8},
+ { 19200000, 1200000000, 1000, 16, 0, 8},
+ { 26000000, 1200000000, 600, 13, 0, 8},
/* 1.1 GHz */
- { 12000000, 1100000000, 825, 9, 1, 8},
- { 13000000, 1100000000, 846, 10, 1, 8}, /* actual: 1099.8 MHz */
- { 16800000, 1100000000, 982, 15, 1, 8}, /* actual: 1099.8 MHz */
- { 19200000, 1100000000, 859, 15, 1, 8}, /* actual: 1099.5 MHz */
- { 26000000, 1100000000, 550, 13, 1, 8},
+ { 12000000, 1100000000, 825, 9, 0, 8},
+ { 13000000, 1100000000, 846, 10, 0, 8}, /* actual: 1099.8 MHz */
+ { 16800000, 1100000000, 982, 15, 0, 8}, /* actual: 1099.8 MHz */
+ { 19200000, 1100000000, 859, 15, 0, 8}, /* actual: 1099.5 MHz */
+ { 26000000, 1100000000, 550, 13, 0, 8},
/* 1 GHz */
- { 12000000, 1000000000, 1000, 12, 1, 8},
- { 13000000, 1000000000, 1000, 13, 1, 8},
- { 16800000, 1000000000, 833, 14, 1, 8}, /* actual: 999.6 MHz */
- { 19200000, 1000000000, 625, 12, 1, 8},
- { 26000000, 1000000000, 1000, 26, 1, 8},
+ { 12000000, 1000000000, 1000, 12, 0, 8},
+ { 13000000, 1000000000, 1000, 13, 0, 8},
+ { 16800000, 1000000000, 833, 14, 0, 8}, /* actual: 999.6 MHz */
+ { 19200000, 1000000000, 625, 12, 0, 8},
+ { 26000000, 1000000000, 1000, 26, 0, 8},
{ 0, 0, 0, 0, 0, 0 },
};
@@ -553,7 +558,7 @@ static struct tegra_clk_pll_params pll_c_params = {
.vco_max = 1400000000,
.base_reg = PLLC_BASE,
.misc_reg = PLLC_MISC,
- .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
};
@@ -567,7 +572,7 @@ static struct tegra_clk_pll_params pll_m_params = {
.vco_max = 1200000000,
.base_reg = PLLM_BASE,
.misc_reg = PLLM_MISC,
- .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
};
@@ -581,7 +586,7 @@ static struct tegra_clk_pll_params pll_p_params = {
.vco_max = 1400000000,
.base_reg = PLLP_BASE,
.misc_reg = PLLP_MISC,
- .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
};
@@ -595,7 +600,7 @@ static struct tegra_clk_pll_params pll_a_params = {
.vco_max = 1400000000,
.base_reg = PLLA_BASE,
.misc_reg = PLLA_MISC,
- .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
};
@@ -609,7 +614,7 @@ static struct tegra_clk_pll_params pll_d_params = {
.vco_max = 1000000000,
.base_reg = PLLD_BASE,
.misc_reg = PLLD_MISC,
- .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
.lock_delay = 1000,
};
@@ -623,7 +628,7 @@ static struct tegra_clk_pll_params pll_d2_params = {
.vco_max = 1000000000,
.base_reg = PLLD2_BASE,
.misc_reg = PLLD2_MISC,
- .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
.lock_delay = 1000,
};
@@ -637,9 +642,10 @@ static struct tegra_clk_pll_params pll_u_params = {
.vco_max = 960000000,
.base_reg = PLLU_BASE,
.misc_reg = PLLU_MISC,
- .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
.lock_delay = 1000,
+ .pdiv_tohw = pllu_p,
};
static struct tegra_clk_pll_params pll_x_params = {
@@ -651,7 +657,7 @@ static struct tegra_clk_pll_params pll_x_params = {
.vco_max = 1700000000,
.base_reg = PLLX_BASE,
.misc_reg = PLLX_MISC,
- .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
};
@@ -665,7 +671,7 @@ static struct tegra_clk_pll_params pll_e_params = {
.vco_max = 2400000000U,
.base_reg = PLLE_BASE,
.misc_reg = PLLE_MISC,
- .lock_bit_idx = PLLE_MISC_LOCK,
+ .lock_mask = PLLE_MISC_LOCK,
.lock_enable_bit_idx = PLLE_MISC_LOCK_ENABLE,
.lock_delay = 300,
};
@@ -1661,7 +1667,7 @@ static void __init tegra30_periph_clk_init(void)
data = &tegra_periph_clk_list[i];
clk = tegra_clk_register_periph(data->name, data->parent_names,
data->num_parents, &data->periph,
- clk_base, data->offset);
+ clk_base, data->offset, data->flags);
clk_register_clkdev(clk, data->con_id, data->dev_id);
clks[data->clk_id] = clk;
}
@@ -1911,9 +1917,16 @@ static __initdata struct tegra_clk_init_table init_table[] = {
{disp1, pll_p, 600000000, 0},
{disp2, pll_p, 600000000, 0},
{twd, clk_max, 0, 1},
+ {gr2d, pll_c, 300000000, 0},
+ {gr3d, pll_c, 300000000, 0},
{clk_max, clk_max, 0, 0}, /* This MUST be the last entry. */
};
+static void __init tegra30_clock_apply_init_table(void)
+{
+ tegra_init_from_table(init_table, clks, clk_max);
+}
+
/*
* Some clocks may be used by different drivers depending on the board
* configuration. List those here to register them twice in the clock lookup
@@ -1987,7 +2000,7 @@ void __init tegra30_clock_init(struct device_node *np)
clk_data.clk_num = ARRAY_SIZE(clks);
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
- tegra_init_from_table(init_table, clks, clk_max);
+ tegra_clk_apply_init_table = tegra30_clock_apply_init_table;
tegra_cpu_car_ops = &tegra30_cpu_car_ops;
}
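
The init_table hunk above pairs each clock with a parent, a target rate, and an enable state, terminated by the clk_max sentinel entry. A minimal sketch of how such a table is typically walked — the struct layout, helper name, and the exact parent/rate/enable semantics are assumptions inferred from the entries shown, not the driver's own tegra_init_from_table():

    #include <linux/clk.h>

    struct example_init_entry {
            unsigned int clk_id;            /* index into the clks[] array */
            unsigned int parent_id;         /* clk_max: leave the parent alone */
            unsigned long rate;             /* 0: leave the rate alone */
            int state;                      /* non-zero: enable the clock */
    };

    static void example_init_from_table(const struct example_init_entry *tbl,
                                        struct clk *clks[], unsigned int clk_max)
    {
            /* the {clk_max, clk_max, 0, 0} sentinel ends the walk */
            for (; tbl->clk_id < clk_max; tbl++) {
                    struct clk *c = clks[tbl->clk_id];

                    if (tbl->parent_id < clk_max)
                            clk_set_parent(c, clks[tbl->parent_id]);
                    if (tbl->rate)
                            clk_set_rate(c, tbl->rate);
                    if (tbl->state)
                            clk_prepare_enable(c);
            }
    }
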
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index a603b9a..923ca7e 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -22,7 +22,8 @@
#include "clk.h"
/* Global data of Tegra CPU CAR ops */
-struct tegra_cpu_car_ops *tegra_cpu_car_ops;
+static struct tegra_cpu_car_ops dummy_car_ops;
+struct tegra_cpu_car_ops *tegra_cpu_car_ops = &dummy_car_ops;
void __init tegra_init_dup_clks(struct tegra_clk_duplicate *dup_list,
struct clk *clks[], int clk_max)
@@ -76,6 +77,7 @@ void __init tegra_init_from_table(struct tegra_clk_init_table *tbl,
static const struct of_device_id tegra_dt_clk_match[] = {
{ .compatible = "nvidia,tegra20-car", .data = tegra20_clock_init },
{ .compatible = "nvidia,tegra30-car", .data = tegra30_clock_init },
+ { .compatible = "nvidia,tegra114-car", .data = tegra114_clock_init },
{ }
};
@@ -83,3 +85,13 @@ void __init tegra_clocks_init(void)
{
of_clk_init(tegra_dt_clk_match);
}
+
+tegra_clk_apply_init_table_func tegra_clk_apply_init_table;
+
+void __init tegra_clocks_apply_init_table(void)
+{
+ if (!tegra_clk_apply_init_table)
+ return;
+
+ tegra_clk_apply_init_table();
+}
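
tegra_clocks_apply_init_table() above only runs whatever handler a SoC driver has installed in tegra_clk_apply_init_table, so applying the init table can be deferred until every clock provider is registered. A minimal sketch of a late call site — the hook name, its placement, and the header providing the declaration are assumptions, not part of this patch:

    #include <linux/init.h>

    /*
     * Hypothetical late-init hook in the Tegra machine code; only the call
     * to tegra_clocks_apply_init_table() is taken from this patch.
     */
    static void __init example_tegra_init_late(void)
    {
            /* apply parents/rates/enables once every clk provider exists */
            tegra_clocks_apply_init_table();
    }
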
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index a09d7dc..e056562 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -1,4 +1,4 @@
-/*
+ /*
* Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
@@ -117,6 +117,17 @@ struct tegra_clk_pll_freq_table {
};
/**
+ * struct pdiv_map - map post divider to hw value
+ *
+ * @pdiv: post divider
+ * @hw_val: value to be written to the PLL hw
+ */
+struct pdiv_map {
+ u8 pdiv;
+ u8 hw_val;
+};
+
+/**
* struct clk_pll_params - PLL parameters
*
* @input_min: Minimum input frequency
@@ -143,9 +154,18 @@ struct tegra_clk_pll_params {
u32 base_reg;
u32 misc_reg;
u32 lock_reg;
- u32 lock_bit_idx;
+ u32 lock_mask;
u32 lock_enable_bit_idx;
+ u32 iddq_reg;
+ u32 iddq_bit_idx;
+ u32 aux_reg;
+ u32 dyn_ramp_reg;
+ u32 ext_misc_reg[3];
+ int stepa_shift;
+ int stepb_shift;
int lock_delay;
+ int max_p;
+ struct pdiv_map *pdiv_tohw;
};
/**
@@ -182,12 +202,16 @@ struct tegra_clk_pll_params {
* TEGRA_PLL_FIXED - We are not supposed to change output frequency
* of some plls.
* TEGRA_PLLE_CONFIGURE - Configure PLLE when enabling.
+ * TEGRA_PLL_LOCK_MISC - Lock bit is in the misc register instead of the
+ * base register.
+ * TEGRA_PLL_BYPASS - PLL has bypass bit
+ * TEGRA_PLL_HAS_LOCK_ENABLE - PLL has bit to enable lock monitoring
*/
struct tegra_clk_pll {
struct clk_hw hw;
void __iomem *clk_base;
void __iomem *pmc;
- u8 flags;
+ u32 flags;
unsigned long fixed_rate;
spinlock_t *lock;
u8 divn_shift;
@@ -210,20 +234,64 @@ struct tegra_clk_pll {
#define TEGRA_PLLM BIT(5)
#define TEGRA_PLL_FIXED BIT(6)
#define TEGRA_PLLE_CONFIGURE BIT(7)
+#define TEGRA_PLL_LOCK_MISC BIT(8)
+#define TEGRA_PLL_BYPASS BIT(9)
+#define TEGRA_PLL_HAS_LOCK_ENABLE BIT(10)
extern const struct clk_ops tegra_clk_pll_ops;
extern const struct clk_ops tegra_clk_plle_ops;
struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
unsigned long flags, unsigned long fixed_rate,
- struct tegra_clk_pll_params *pll_params, u8 pll_flags,
+ struct tegra_clk_pll_params *pll_params, u32 pll_flags,
struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock);
+
struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
unsigned long flags, unsigned long fixed_rate,
- struct tegra_clk_pll_params *pll_params, u8 pll_flags,
+ struct tegra_clk_pll_params *pll_params, u32 pll_flags,
struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock);
+struct clk *tegra_clk_register_pllxc(const char *name, const char *parent_name,
+ void __iomem *clk_base, void __iomem *pmc,
+ unsigned long flags, unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params,
+ u32 pll_flags,
+ struct tegra_clk_pll_freq_table *freq_table,
+ spinlock_t *lock);
+
+struct clk *tegra_clk_register_pllm(const char *name, const char *parent_name,
+ void __iomem *clk_base, void __iomem *pmc,
+ unsigned long flags, unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params,
+ u32 pll_flags,
+ struct tegra_clk_pll_freq_table *freq_table,
+ spinlock_t *lock);
+
+struct clk *tegra_clk_register_pllc(const char *name, const char *parent_name,
+ void __iomem *clk_base, void __iomem *pmc,
+ unsigned long flags, unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params,
+ u32 pll_flags,
+ struct tegra_clk_pll_freq_table *freq_table,
+ spinlock_t *lock);
+
+struct clk *tegra_clk_register_pllre(const char *name, const char *parent_name,
+ void __iomem *clk_base, void __iomem *pmc,
+ unsigned long flags, unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params,
+ u32 pll_flags,
+ struct tegra_clk_pll_freq_table *freq_table,
+ spinlock_t *lock, unsigned long parent_rate);
+
+struct clk *tegra_clk_register_plle_tegra114(const char *name,
+ const char *parent_name,
+ void __iomem *clk_base, unsigned long flags,
+ unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params,
+ struct tegra_clk_pll_freq_table *freq_table,
+ spinlock_t *lock);
+
/**
* struct tegra_clk_pll_out - PLL divider down clock
*
@@ -290,6 +358,7 @@ struct tegra_clk_periph_regs {
* TEGRA_PERIPH_ON_APB - If peripheral is in the APB bus then read the
* bus to flush the write operation in apb bus. This flag indicates
* that this peripheral is in apb bus.
+ * TEGRA_PERIPH_WAR_1005168 - Apply workaround for Tegra114 MSENC bug
*/
struct tegra_clk_periph_gate {
u32 magic;
@@ -309,6 +378,7 @@ struct tegra_clk_periph_gate {
#define TEGRA_PERIPH_NO_RESET BIT(0)
#define TEGRA_PERIPH_MANUAL_RESET BIT(1)
#define TEGRA_PERIPH_ON_APB BIT(2)
+#define TEGRA_PERIPH_WAR_1005168 BIT(3)
void tegra_periph_reset(struct tegra_clk_periph_gate *gate, bool assert);
extern const struct clk_ops tegra_clk_periph_gate_ops;
@@ -349,7 +419,7 @@ extern const struct clk_ops tegra_clk_periph_ops;
struct clk *tegra_clk_register_periph(const char *name,
const char **parent_names, int num_parents,
struct tegra_clk_periph *periph, void __iomem *clk_base,
- u32 offset);
+ u32 offset, unsigned long flags);
struct clk *tegra_clk_register_periph_nodiv(const char *name,
const char **parent_names, int num_parents,
struct tegra_clk_periph *periph, void __iomem *clk_base,
@@ -392,12 +462,14 @@ struct tegra_periph_init_data {
u32 offset;
const char *con_id;
const char *dev_id;
+ unsigned long flags;
};
#define TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parent_names, _offset,\
_mux_shift, _mux_mask, _mux_flags, _div_shift, \
_div_width, _div_frac_width, _div_flags, _regs, \
- _clk_num, _enb_refcnt, _gate_flags, _clk_id, _table) \
+ _clk_num, _enb_refcnt, _gate_flags, _clk_id, _table,\
+ _flags) \
{ \
.name = _name, \
.clk_id = _clk_id, \
@@ -412,6 +484,7 @@ struct tegra_periph_init_data {
.offset = _offset, \
.con_id = _con_id, \
.dev_id = _dev_id, \
+ .flags = _flags \
}
#define TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parent_names, _offset,\
@@ -422,7 +495,7 @@ struct tegra_periph_init_data {
_mux_shift, BIT(_mux_width) - 1, _mux_flags, \
_div_shift, _div_width, _div_frac_width, _div_flags, \
_regs, _clk_num, _enb_refcnt, _gate_flags, _clk_id,\
- NULL)
+ NULL, 0)
/**
* struct clk_super_mux - super clock
@@ -510,4 +583,13 @@ void tegra30_clock_init(struct device_node *np);
static inline void tegra30_clock_init(struct device_node *np) {}
#endif /* CONFIG_ARCH_TEGRA_3x_SOC */
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+void tegra114_clock_init(struct device_node *np);
+#else
+static inline void tegra114_clock_init(struct device_node *np) {}
+#endif /* CONFIG_ARCH_TEGRA_114_SOC */
+
+typedef void (*tegra_clk_apply_init_table_func)(void);
+extern tegra_clk_apply_init_table_func tegra_clk_apply_init_table;
+
#endif /* TEGRA_CLK_H */
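
The new struct pdiv_map and the pdiv_tohw field in tegra_clk_pll_params let a PLL describe a non-linear post-divider encoding instead of assuming the register stores the divider directly. A minimal sketch of a table and lookup — the divider/encoding pairs and the zero-terminated table convention are illustrative assumptions, not values from this patch:

    #include <linux/errno.h>
    #include "clk.h"        /* struct pdiv_map, added by the hunk above */

    /* illustrative table; real PLLs provide their own pdiv_tohw arrays */
    static struct pdiv_map example_pllu_p[] = {
            { .pdiv = 1, .hw_val = 1 },
            { .pdiv = 2, .hw_val = 0 },
            { .pdiv = 0, .hw_val = 0 },     /* assumption: pdiv == 0 ends the table */
    };

    /* translate a desired post divider into the hardware encoding */
    static int example_pdiv_to_hw(const struct pdiv_map *map, u8 pdiv)
    {
            for (; map->pdiv; map++)
                    if (map->pdiv == pdiv)
                            return map->hw_val;
            return -EINVAL;                 /* divider not supported by this PLL */
    }
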
diff --git a/drivers/clk/ux500/clk-prcc.c b/drivers/clk/ux500/clk-prcc.c
index 7eee7f7..bd4769a 100644
--- a/drivers/clk/ux500/clk-prcc.c
+++ b/drivers/clk/ux500/clk-prcc.c
@@ -13,7 +13,6 @@
#include <linux/io.h>
#include <linux/err.h>
#include <linux/types.h>
-#include <mach/hardware.h>
#include "clk.h"
diff --git a/drivers/clk/ux500/u8500_clk.c b/drivers/clk/ux500/u8500_clk.c
index 6b889a0..0b4f35a 100644
--- a/drivers/clk/ux500/u8500_clk.c
+++ b/drivers/clk/ux500/u8500_clk.c
@@ -12,10 +12,10 @@
#include <linux/clk-provider.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <linux/platform_data/clk-ux500.h>
-#include <mach/db8500-regs.h>
#include "clk.h"
-void u8500_clk_init(void)
+void u8500_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
+ u32 clkrst5_base, u32 clkrst6_base)
{
struct prcmu_fw_version *fw_version;
const char *sgaclk_parent = NULL;
@@ -215,147 +215,148 @@ void u8500_clk_init(void)
*/
/* PRCC P-clocks */
- clk = clk_reg_prcc_pclk("p1_pclk0", "per1clk", U8500_CLKRST1_BASE,
+ clk = clk_reg_prcc_pclk("p1_pclk0", "per1clk", clkrst1_base,
BIT(0), 0);
clk_register_clkdev(clk, "apb_pclk", "uart0");
- clk = clk_reg_prcc_pclk("p1_pclk1", "per1clk", U8500_CLKRST1_BASE,
+ clk = clk_reg_prcc_pclk("p1_pclk1", "per1clk", clkrst1_base,
BIT(1), 0);
clk_register_clkdev(clk, "apb_pclk", "uart1");
- clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", U8500_CLKRST1_BASE,
+ clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", clkrst1_base,
BIT(2), 0);
clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.1");
- clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", U8500_CLKRST1_BASE,
+ clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", clkrst1_base,
BIT(3), 0);
clk_register_clkdev(clk, "apb_pclk", "msp0");
clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.0");
- clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", U8500_CLKRST1_BASE,
+ clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", clkrst1_base,
BIT(4), 0);
clk_register_clkdev(clk, "apb_pclk", "msp1");
clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.1");
- clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", U8500_CLKRST1_BASE,
+ clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", clkrst1_base,
BIT(5), 0);
clk_register_clkdev(clk, "apb_pclk", "sdi0");
- clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", U8500_CLKRST1_BASE,
+ clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", clkrst1_base,
BIT(6), 0);
clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.2");
- clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", U8500_CLKRST1_BASE,
+ clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", clkrst1_base,
BIT(7), 0);
clk_register_clkdev(clk, NULL, "spi3");
- clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", U8500_CLKRST1_BASE,
+ clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", clkrst1_base,
BIT(8), 0);
clk_register_clkdev(clk, "apb_pclk", "slimbus0");
- clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", U8500_CLKRST1_BASE,
+ clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", clkrst1_base,
BIT(9), 0);
clk_register_clkdev(clk, NULL, "gpio.0");
clk_register_clkdev(clk, NULL, "gpio.1");
clk_register_clkdev(clk, NULL, "gpioblock0");
- clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", U8500_CLKRST1_BASE,
+ clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", clkrst1_base,
BIT(10), 0);
clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.4");
- clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", U8500_CLKRST1_BASE,
+ clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", clkrst1_base,
BIT(11), 0);
clk_register_clkdev(clk, "apb_pclk", "msp3");
clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.3");
- clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", U8500_CLKRST2_BASE,
+ clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", clkrst2_base,
BIT(0), 0);
clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.3");
- clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", U8500_CLKRST2_BASE,
+ clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", clkrst2_base,
BIT(1), 0);
clk_register_clkdev(clk, NULL, "spi2");
- clk = clk_reg_prcc_pclk("p2_pclk2", "per2clk", U8500_CLKRST2_BASE,
+ clk = clk_reg_prcc_pclk("p2_pclk2", "per2clk", clkrst2_base,
BIT(2), 0);
clk_register_clkdev(clk, NULL, "spi1");
- clk = clk_reg_prcc_pclk("p2_pclk3", "per2clk", U8500_CLKRST2_BASE,
+ clk = clk_reg_prcc_pclk("p2_pclk3", "per2clk", clkrst2_base,
BIT(3), 0);
clk_register_clkdev(clk, NULL, "pwl");
- clk = clk_reg_prcc_pclk("p2_pclk4", "per2clk", U8500_CLKRST2_BASE,
+ clk = clk_reg_prcc_pclk("p2_pclk4", "per2clk", clkrst2_base,
BIT(4), 0);
clk_register_clkdev(clk, "apb_pclk", "sdi4");
- clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", U8500_CLKRST2_BASE,
+ clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", clkrst2_base,
BIT(5), 0);
clk_register_clkdev(clk, "apb_pclk", "msp2");
clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.2");
- clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", U8500_CLKRST2_BASE,
+ clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", clkrst2_base,
BIT(6), 0);
clk_register_clkdev(clk, "apb_pclk", "sdi1");
- clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", U8500_CLKRST2_BASE,
+ clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", clkrst2_base,
BIT(7), 0);
clk_register_clkdev(clk, "apb_pclk", "sdi3");
- clk = clk_reg_prcc_pclk("p2_pclk8", "per2clk", U8500_CLKRST2_BASE,
+ clk = clk_reg_prcc_pclk("p2_pclk8", "per2clk", clkrst2_base,
BIT(8), 0);
clk_register_clkdev(clk, NULL, "spi0");
- clk = clk_reg_prcc_pclk("p2_pclk9", "per2clk", U8500_CLKRST2_BASE,
+ clk = clk_reg_prcc_pclk("p2_pclk9", "per2clk", clkrst2_base,
BIT(9), 0);
clk_register_clkdev(clk, "hsir_hclk", "ste_hsi.0");
- clk = clk_reg_prcc_pclk("p2_pclk10", "per2clk", U8500_CLKRST2_BASE,
+ clk = clk_reg_prcc_pclk("p2_pclk10", "per2clk", clkrst2_base,
BIT(10), 0);
clk_register_clkdev(clk, "hsit_hclk", "ste_hsi.0");
- clk = clk_reg_prcc_pclk("p2_pclk11", "per2clk", U8500_CLKRST2_BASE,
+ clk = clk_reg_prcc_pclk("p2_pclk11", "per2clk", clkrst2_base,
BIT(11), 0);
clk_register_clkdev(clk, NULL, "gpio.6");
clk_register_clkdev(clk, NULL, "gpio.7");
clk_register_clkdev(clk, NULL, "gpioblock1");
- clk = clk_reg_prcc_pclk("p2_pclk12", "per2clk", U8500_CLKRST2_BASE,
+ clk = clk_reg_prcc_pclk("p2_pclk12", "per2clk", clkrst2_base,
BIT(12), 0);
- clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", U8500_CLKRST3_BASE,
+ clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", clkrst3_base,
BIT(0), 0);
- clk_register_clkdev(clk, NULL, "fsmc");
+ clk_register_clkdev(clk, "fsmc", NULL);
+ clk_register_clkdev(clk, NULL, "smsc911x");
- clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", U8500_CLKRST3_BASE,
+ clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", clkrst3_base,
BIT(1), 0);
clk_register_clkdev(clk, "apb_pclk", "ssp0");
- clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", U8500_CLKRST3_BASE,
+ clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", clkrst3_base,
BIT(2), 0);
clk_register_clkdev(clk, "apb_pclk", "ssp1");
- clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", U8500_CLKRST3_BASE,
+ clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", clkrst3_base,
BIT(3), 0);
clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.0");
- clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", U8500_CLKRST3_BASE,
+ clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", clkrst3_base,
BIT(4), 0);
clk_register_clkdev(clk, "apb_pclk", "sdi2");
- clk = clk_reg_prcc_pclk("p3_pclk5", "per3clk", U8500_CLKRST3_BASE,
+ clk = clk_reg_prcc_pclk("p3_pclk5", "per3clk", clkrst3_base,
BIT(5), 0);
clk_register_clkdev(clk, "apb_pclk", "ske");
clk_register_clkdev(clk, "apb_pclk", "nmk-ske-keypad");
- clk = clk_reg_prcc_pclk("p3_pclk6", "per3clk", U8500_CLKRST3_BASE,
+ clk = clk_reg_prcc_pclk("p3_pclk6", "per3clk", clkrst3_base,
BIT(6), 0);
clk_register_clkdev(clk, "apb_pclk", "uart2");
- clk = clk_reg_prcc_pclk("p3_pclk7", "per3clk", U8500_CLKRST3_BASE,
+ clk = clk_reg_prcc_pclk("p3_pclk7", "per3clk", clkrst3_base,
BIT(7), 0);
clk_register_clkdev(clk, "apb_pclk", "sdi5");
- clk = clk_reg_prcc_pclk("p3_pclk8", "per3clk", U8500_CLKRST3_BASE,
+ clk = clk_reg_prcc_pclk("p3_pclk8", "per3clk", clkrst3_base,
BIT(8), 0);
clk_register_clkdev(clk, NULL, "gpio.2");
clk_register_clkdev(clk, NULL, "gpio.3");
@@ -363,45 +364,45 @@ void u8500_clk_init(void)
clk_register_clkdev(clk, NULL, "gpio.5");
clk_register_clkdev(clk, NULL, "gpioblock2");
- clk = clk_reg_prcc_pclk("p5_pclk0", "per5clk", U8500_CLKRST5_BASE,
+ clk = clk_reg_prcc_pclk("p5_pclk0", "per5clk", clkrst5_base,
BIT(0), 0);
clk_register_clkdev(clk, "usb", "musb-ux500.0");
- clk = clk_reg_prcc_pclk("p5_pclk1", "per5clk", U8500_CLKRST5_BASE,
+ clk = clk_reg_prcc_pclk("p5_pclk1", "per5clk", clkrst5_base,
BIT(1), 0);
clk_register_clkdev(clk, NULL, "gpio.8");
clk_register_clkdev(clk, NULL, "gpioblock3");
- clk = clk_reg_prcc_pclk("p6_pclk0", "per6clk", U8500_CLKRST6_BASE,
+ clk = clk_reg_prcc_pclk("p6_pclk0", "per6clk", clkrst6_base,
BIT(0), 0);
clk_register_clkdev(clk, "apb_pclk", "rng");
- clk = clk_reg_prcc_pclk("p6_pclk1", "per6clk", U8500_CLKRST6_BASE,
+ clk = clk_reg_prcc_pclk("p6_pclk1", "per6clk", clkrst6_base,
BIT(1), 0);
clk_register_clkdev(clk, NULL, "cryp0");
clk_register_clkdev(clk, NULL, "cryp1");
- clk = clk_reg_prcc_pclk("p6_pclk2", "per6clk", U8500_CLKRST6_BASE,
+ clk = clk_reg_prcc_pclk("p6_pclk2", "per6clk", clkrst6_base,
BIT(2), 0);
clk_register_clkdev(clk, NULL, "hash0");
- clk = clk_reg_prcc_pclk("p6_pclk3", "per6clk", U8500_CLKRST6_BASE,
+ clk = clk_reg_prcc_pclk("p6_pclk3", "per6clk", clkrst6_base,
BIT(3), 0);
clk_register_clkdev(clk, NULL, "pka");
- clk = clk_reg_prcc_pclk("p6_pclk4", "per6clk", U8500_CLKRST6_BASE,
+ clk = clk_reg_prcc_pclk("p6_pclk4", "per6clk", clkrst6_base,
BIT(4), 0);
clk_register_clkdev(clk, NULL, "hash1");
- clk = clk_reg_prcc_pclk("p6_pclk5", "per6clk", U8500_CLKRST6_BASE,
+ clk = clk_reg_prcc_pclk("p6_pclk5", "per6clk", clkrst6_base,
BIT(5), 0);
clk_register_clkdev(clk, NULL, "cfgreg");
- clk = clk_reg_prcc_pclk("p6_pclk6", "per6clk", U8500_CLKRST6_BASE,
+ clk = clk_reg_prcc_pclk("p6_pclk6", "per6clk", clkrst6_base,
BIT(6), 0);
clk_register_clkdev(clk, "apb_pclk", "mtu0");
- clk = clk_reg_prcc_pclk("p6_pclk7", "per6clk", U8500_CLKRST6_BASE,
+ clk = clk_reg_prcc_pclk("p6_pclk7", "per6clk", clkrst6_base,
BIT(7), 0);
clk_register_clkdev(clk, "apb_pclk", "mtu1");
@@ -415,110 +416,110 @@ void u8500_clk_init(void)
/* Periph1 */
clk = clk_reg_prcc_kclk("p1_uart0_kclk", "uartclk",
- U8500_CLKRST1_BASE, BIT(0), CLK_SET_RATE_GATE);
+ clkrst1_base, BIT(0), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "uart0");
clk = clk_reg_prcc_kclk("p1_uart1_kclk", "uartclk",
- U8500_CLKRST1_BASE, BIT(1), CLK_SET_RATE_GATE);
+ clkrst1_base, BIT(1), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "uart1");
clk = clk_reg_prcc_kclk("p1_i2c1_kclk", "i2cclk",
- U8500_CLKRST1_BASE, BIT(2), CLK_SET_RATE_GATE);
+ clkrst1_base, BIT(2), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "nmk-i2c.1");
clk = clk_reg_prcc_kclk("p1_msp0_kclk", "msp02clk",
- U8500_CLKRST1_BASE, BIT(3), CLK_SET_RATE_GATE);
+ clkrst1_base, BIT(3), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "msp0");
clk_register_clkdev(clk, NULL, "ux500-msp-i2s.0");
clk = clk_reg_prcc_kclk("p1_msp1_kclk", "msp1clk",
- U8500_CLKRST1_BASE, BIT(4), CLK_SET_RATE_GATE);
+ clkrst1_base, BIT(4), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "msp1");
clk_register_clkdev(clk, NULL, "ux500-msp-i2s.1");
clk = clk_reg_prcc_kclk("p1_sdi0_kclk", "sdmmcclk",
- U8500_CLKRST1_BASE, BIT(5), CLK_SET_RATE_GATE);
+ clkrst1_base, BIT(5), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "sdi0");
clk = clk_reg_prcc_kclk("p1_i2c2_kclk", "i2cclk",
- U8500_CLKRST1_BASE, BIT(6), CLK_SET_RATE_GATE);
+ clkrst1_base, BIT(6), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "nmk-i2c.2");
clk = clk_reg_prcc_kclk("p1_slimbus0_kclk", "slimclk",
- U8500_CLKRST1_BASE, BIT(8), CLK_SET_RATE_GATE);
+ clkrst1_base, BIT(8), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "slimbus0");
clk = clk_reg_prcc_kclk("p1_i2c4_kclk", "i2cclk",
- U8500_CLKRST1_BASE, BIT(9), CLK_SET_RATE_GATE);
+ clkrst1_base, BIT(9), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "nmk-i2c.4");
clk = clk_reg_prcc_kclk("p1_msp3_kclk", "msp1clk",
- U8500_CLKRST1_BASE, BIT(10), CLK_SET_RATE_GATE);
+ clkrst1_base, BIT(10), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "msp3");
clk_register_clkdev(clk, NULL, "ux500-msp-i2s.3");
/* Periph2 */
clk = clk_reg_prcc_kclk("p2_i2c3_kclk", "i2cclk",
- U8500_CLKRST2_BASE, BIT(0), CLK_SET_RATE_GATE);
+ clkrst2_base, BIT(0), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "nmk-i2c.3");
clk = clk_reg_prcc_kclk("p2_sdi4_kclk", "sdmmcclk",
- U8500_CLKRST2_BASE, BIT(2), CLK_SET_RATE_GATE);
+ clkrst2_base, BIT(2), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "sdi4");
clk = clk_reg_prcc_kclk("p2_msp2_kclk", "msp02clk",
- U8500_CLKRST2_BASE, BIT(3), CLK_SET_RATE_GATE);
+ clkrst2_base, BIT(3), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "msp2");
clk_register_clkdev(clk, NULL, "ux500-msp-i2s.2");
clk = clk_reg_prcc_kclk("p2_sdi1_kclk", "sdmmcclk",
- U8500_CLKRST2_BASE, BIT(4), CLK_SET_RATE_GATE);
+ clkrst2_base, BIT(4), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "sdi1");
clk = clk_reg_prcc_kclk("p2_sdi3_kclk", "sdmmcclk",
- U8500_CLKRST2_BASE, BIT(5), CLK_SET_RATE_GATE);
+ clkrst2_base, BIT(5), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "sdi3");
/* Note that rate is received from parent. */
clk = clk_reg_prcc_kclk("p2_ssirx_kclk", "hsirxclk",
- U8500_CLKRST2_BASE, BIT(6),
+ clkrst2_base, BIT(6),
CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT);
clk = clk_reg_prcc_kclk("p2_ssitx_kclk", "hsitxclk",
- U8500_CLKRST2_BASE, BIT(7),
+ clkrst2_base, BIT(7),
CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT);
/* Periph3 */
clk = clk_reg_prcc_kclk("p3_ssp0_kclk", "sspclk",
- U8500_CLKRST3_BASE, BIT(1), CLK_SET_RATE_GATE);
+ clkrst3_base, BIT(1), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "ssp0");
clk = clk_reg_prcc_kclk("p3_ssp1_kclk", "sspclk",
- U8500_CLKRST3_BASE, BIT(2), CLK_SET_RATE_GATE);
+ clkrst3_base, BIT(2), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "ssp1");
clk = clk_reg_prcc_kclk("p3_i2c0_kclk", "i2cclk",
- U8500_CLKRST3_BASE, BIT(3), CLK_SET_RATE_GATE);
+ clkrst3_base, BIT(3), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "nmk-i2c.0");
clk = clk_reg_prcc_kclk("p3_sdi2_kclk", "sdmmcclk",
- U8500_CLKRST3_BASE, BIT(4), CLK_SET_RATE_GATE);
+ clkrst3_base, BIT(4), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "sdi2");
clk = clk_reg_prcc_kclk("p3_ske_kclk", "rtc32k",
- U8500_CLKRST3_BASE, BIT(5), CLK_SET_RATE_GATE);
+ clkrst3_base, BIT(5), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "ske");
clk_register_clkdev(clk, NULL, "nmk-ske-keypad");
clk = clk_reg_prcc_kclk("p3_uart2_kclk", "uartclk",
- U8500_CLKRST3_BASE, BIT(6), CLK_SET_RATE_GATE);
+ clkrst3_base, BIT(6), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "uart2");
clk = clk_reg_prcc_kclk("p3_sdi5_kclk", "sdmmcclk",
- U8500_CLKRST3_BASE, BIT(7), CLK_SET_RATE_GATE);
+ clkrst3_base, BIT(7), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "sdi5");
/* Periph6 */
clk = clk_reg_prcc_kclk("p3_rng_kclk", "rngclk",
- U8500_CLKRST6_BASE, BIT(0), CLK_SET_RATE_GATE);
+ clkrst6_base, BIT(0), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "rng");
}
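
u8500_clk_init() now receives the PRCC clock/reset base addresses from its caller instead of reading them from the removed <mach/db8500-regs.h>. A hypothetical call site, assuming the u8500_clk_init() prototype in <linux/platform_data/clk-ux500.h> was updated to match; the EXAMPLE_* addresses are placeholders, not values taken from this patch:

    #include <linux/init.h>
    #include <linux/platform_data/clk-ux500.h>

    /* placeholder base addresses for the five PRCC clock/reset blocks */
    #define EXAMPLE_CLKRST1_BASE    0x80120000
    #define EXAMPLE_CLKRST2_BASE    0x80110000
    #define EXAMPLE_CLKRST3_BASE    0x80000000
    #define EXAMPLE_CLKRST5_BASE    0xa0300000
    #define EXAMPLE_CLKRST6_BASE    0xa0310000

    static void __init example_db8500_clk_setup(void)
    {
            u8500_clk_init(EXAMPLE_CLKRST1_BASE, EXAMPLE_CLKRST2_BASE,
                           EXAMPLE_CLKRST3_BASE, EXAMPLE_CLKRST5_BASE,
                           EXAMPLE_CLKRST6_BASE);
    }
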
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index e507ab7..f151c6c 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -25,12 +25,15 @@ config DW_APB_TIMER_OF
config ARMADA_370_XP_TIMER
bool
-config SUNXI_TIMER
+config SUN4I_TIMER
bool
config VT8500_TIMER
bool
+config CADENCE_TTC_TIMER
+ bool
+
config CLKSRC_NOMADIK_MTU
bool
depends on (ARCH_NOMADIK || ARCH_U8500)
@@ -62,8 +65,23 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
config ARM_ARCH_TIMER
bool
+ select CLKSRC_OF if OF
config CLKSRC_METAG_GENERIC
def_bool y if METAG
help
This option enables support for the Meta per-thread timers.
+
+config CLKSRC_EXYNOS_MCT
+ def_bool y if ARCH_EXYNOS
+ help
+ Support for Multi Core Timer controller on Exynos SoCs.
+
+config CLKSRC_SAMSUNG_PWM
+ bool
+ select CLKSRC_MMIO
+ help
+ This is a new clocksource driver for the PWM timer found in
+ Samsung S3C, S5P and Exynos SoCs, replacing an earlier driver
+ for all devicetree enabled platforms. This driver will be
+ needed only on systems that do not have the Exynos MCT available.
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 96e2531..8d979c7 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -16,10 +16,16 @@ obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o
obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o
obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o
-obj-$(CONFIG_SUNXI_TIMER) += sunxi_timer.o
+obj-$(CONFIG_ARCH_MARCO) += timer-marco.o
+obj-$(CONFIG_ARCH_MXS) += mxs_timer.o
+obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o
+obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o
obj-$(CONFIG_ARCH_TEGRA) += tegra20_timer.o
obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o
obj-$(CONFIG_ARCH_BCM) += bcm_kona_timer.o
+obj-$(CONFIG_CADENCE_TTC_TIMER) += cadence_ttc_timer.o
+obj-$(CONFIG_CLKSRC_EXYNOS_MCT) += exynos_mct.o
+obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index d7ad425..a2b2541 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -248,14 +248,16 @@ static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
- struct clock_event_device *evt = this_cpu_ptr(arch_timer_evt);
-
+ /*
+ * Grab the per-cpu pointer inside each case to avoid spurious
+ * preemptible-context warnings
+ */
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_STARTING:
- arch_timer_setup(evt);
+ arch_timer_setup(this_cpu_ptr(arch_timer_evt));
break;
case CPU_DYING:
- arch_timer_stop(evt);
+ arch_timer_stop(this_cpu_ptr(arch_timer_evt));
break;
}
@@ -337,22 +339,14 @@ out:
return err;
}
-static const struct of_device_id arch_timer_of_match[] __initconst = {
- { .compatible = "arm,armv7-timer", },
- { .compatible = "arm,armv8-timer", },
- {},
-};
-
-int __init arch_timer_init(void)
+static void __init arch_timer_init(struct device_node *np)
{
- struct device_node *np;
u32 freq;
int i;
- np = of_find_matching_node(NULL, arch_timer_of_match);
- if (!np) {
- pr_err("arch_timer: can't find DT node\n");
- return -ENODEV;
+ if (arch_timer_get_rate()) {
+ pr_warn("arch_timer: multiple nodes in dt, skipping\n");
+ return;
}
/* Try to determine the frequency from the device tree or CNTFRQ */
@@ -378,7 +372,7 @@ int __init arch_timer_init(void)
if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
!arch_timer_ppi[PHYS_NONSECURE_PPI]) {
pr_warn("arch_timer: No interrupt available, giving up\n");
- return -EINVAL;
+ return;
}
}
@@ -387,5 +381,8 @@ int __init arch_timer_init(void)
else
arch_timer_read_counter = arch_counter_get_cntpct;
- return arch_timer_register();
+ arch_timer_register();
+ arch_timer_arch_init();
}
+CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_init);
+CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_init);
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index 50c68fe..766611d 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -95,23 +95,13 @@ static irqreturn_t bcm2835_time_interrupt(int irq, void *dev_id)
}
}
-static struct of_device_id bcm2835_time_match[] __initconst = {
- { .compatible = "brcm,bcm2835-system-timer" },
- {}
-};
-
-static void __init bcm2835_timer_init(void)
+static void __init bcm2835_timer_init(struct device_node *node)
{
- struct device_node *node;
void __iomem *base;
u32 freq;
int irq;
struct bcm2835_timer *timer;
- node = of_find_matching_node(NULL, bcm2835_time_match);
- if (!node)
- panic("No bcm2835 timer node");
-
base = of_iomap(node, 0);
if (!base)
panic("Can't remap registers");
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
new file mode 100644
index 0000000..685bc60
--- /dev/null
+++ b/drivers/clocksource/cadence_ttc_timer.c
@@ -0,0 +1,436 @@
+/*
+ * This file contains the driver for the Cadence Triple Timer Counter Rev 06
+ *
+ * Copyright (C) 2011-2013 Xilinx
+ *
+ * based on arch/mips/kernel/time.c timer driver
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+
+/*
+ * This driver configures the 2 16-bit count-up timers as follows:
+ *
+ * T1: Timer 1, clocksource for generic timekeeping
+ * T2: Timer 2, clockevent source for hrtimers
+ * T3: Timer 3, <unused>
+ *
+ * The input frequency to the timer module for emulation is 2.5MHz which is
+ * common to all the timer channels (T1, T2, and T3). With a pre-scaler of 32,
+ * the timers are clocked at 78.125KHz (12.8 us resolution).
+ *
+ * The input frequency to the timer module in silicon is configurable and
+ * obtained from device tree. The pre-scaler of 32 is used.
+ */
+
+/*
+ * Timer Register Offset Definitions of Timer 1, Increment base address by 4
+ * and use same offsets for Timer 2
+ */
+#define TTC_CLK_CNTRL_OFFSET 0x00 /* Clock Control Reg, RW */
+#define TTC_CNT_CNTRL_OFFSET 0x0C /* Counter Control Reg, RW */
+#define TTC_COUNT_VAL_OFFSET 0x18 /* Counter Value Reg, RO */
+#define TTC_INTR_VAL_OFFSET 0x24 /* Interval Count Reg, RW */
+#define TTC_ISR_OFFSET 0x54 /* Interrupt Status Reg, RO */
+#define TTC_IER_OFFSET 0x60 /* Interrupt Enable Reg, RW */
+
+#define TTC_CNT_CNTRL_DISABLE_MASK 0x1
+
+/*
+ * Setup the timers to use pre-scaling, using a fixed value for now that will
+ * work across most input frequency, but it may need to be more dynamic
+ */
+#define PRESCALE_EXPONENT 11 /* 2 ^ PRESCALE_EXPONENT = PRESCALE */
+#define PRESCALE 2048 /* The exponent must match this */
+#define CLK_CNTRL_PRESCALE ((PRESCALE_EXPONENT - 1) << 1)
+#define CLK_CNTRL_PRESCALE_EN 1
+#define CNT_CNTRL_RESET (1 << 4)
+
+/**
+ * struct ttc_timer - Local timer structure
+ *
+ * @base_addr: Base address of timer
+ * @clk: Associated clock source
+ * @clk_rate_change_nb:	Notifier block for clock rate changes
+ */
+struct ttc_timer {
+ void __iomem *base_addr;
+ struct clk *clk;
+ struct notifier_block clk_rate_change_nb;
+};
+
+#define to_ttc_timer(x) \
+ container_of(x, struct ttc_timer, clk_rate_change_nb)
+
+struct ttc_timer_clocksource {
+ struct ttc_timer ttc;
+ struct clocksource cs;
+};
+
+#define to_ttc_timer_clksrc(x) \
+ container_of(x, struct ttc_timer_clocksource, cs)
+
+struct ttc_timer_clockevent {
+ struct ttc_timer ttc;
+ struct clock_event_device ce;
+};
+
+#define to_ttc_timer_clkevent(x) \
+ container_of(x, struct ttc_timer_clockevent, ce)
+
+/**
+ * ttc_set_interval - Set the timer interval value
+ *
+ * @timer: Pointer to the timer instance
+ * @cycles: Timer interval ticks
+ **/
+static void ttc_set_interval(struct ttc_timer *timer,
+ unsigned long cycles)
+{
+ u32 ctrl_reg;
+
+ /* Disable the counter, set the counter value and re-enable counter */
+ ctrl_reg = __raw_readl(timer->base_addr + TTC_CNT_CNTRL_OFFSET);
+ ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK;
+ __raw_writel(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);
+
+ __raw_writel(cycles, timer->base_addr + TTC_INTR_VAL_OFFSET);
+
+ /*
+ * Reset the counter (0x10) so that it starts from 0, one-shot
+ * mode makes this needed for timing to be right.
+ */
+ ctrl_reg |= CNT_CNTRL_RESET;
+ ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK;
+ __raw_writel(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);
+}
+
+/**
+ * ttc_clock_event_interrupt - Clock event timer interrupt handler
+ *
+ * @irq: IRQ number of the Timer
+ * @dev_id: void pointer to the ttc_timer instance
+ *
+ * returns: Always IRQ_HANDLED - success
+ **/
+static irqreturn_t ttc_clock_event_interrupt(int irq, void *dev_id)
+{
+ struct ttc_timer_clockevent *ttce = dev_id;
+ struct ttc_timer *timer = &ttce->ttc;
+
+ /* Acknowledge the interrupt and call event handler */
+ __raw_readl(timer->base_addr + TTC_ISR_OFFSET);
+
+ ttce->ce.event_handler(&ttce->ce);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * __ttc_clocksource_read - Reads the timer counter register
+ *
+ * returns: Current timer counter register value
+ **/
+static cycle_t __ttc_clocksource_read(struct clocksource *cs)
+{
+ struct ttc_timer *timer = &to_ttc_timer_clksrc(cs)->ttc;
+
+ return (cycle_t)__raw_readl(timer->base_addr +
+ TTC_COUNT_VAL_OFFSET);
+}
+
+/**
+ * ttc_set_next_event - Sets the time interval for next event
+ *
+ * @cycles: Timer interval ticks
+ * @evt: Address of clock event instance
+ *
+ * returns: Always 0 - success
+ **/
+static int ttc_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
+ struct ttc_timer *timer = &ttce->ttc;
+
+ ttc_set_interval(timer, cycles);
+ return 0;
+}
+
+/**
+ * ttc_set_mode - Sets the mode of timer
+ *
+ * @mode: Mode to be set
+ * @evt: Address of clock event instance
+ **/
+static void ttc_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
+ struct ttc_timer *timer = &ttce->ttc;
+ u32 ctrl_reg;
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ ttc_set_interval(timer,
+ DIV_ROUND_CLOSEST(clk_get_rate(ttce->ttc.clk),
+ PRESCALE * HZ));
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ ctrl_reg = __raw_readl(timer->base_addr +
+ TTC_CNT_CNTRL_OFFSET);
+ ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK;
+ __raw_writel(ctrl_reg,
+ timer->base_addr + TTC_CNT_CNTRL_OFFSET);
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ ctrl_reg = __raw_readl(timer->base_addr +
+ TTC_CNT_CNTRL_OFFSET);
+ ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK;
+ __raw_writel(ctrl_reg,
+ timer->base_addr + TTC_CNT_CNTRL_OFFSET);
+ break;
+ }
+}
+
+static int ttc_rate_change_clocksource_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct clk_notifier_data *ndata = data;
+ struct ttc_timer *ttc = to_ttc_timer(nb);
+ struct ttc_timer_clocksource *ttccs = container_of(ttc,
+ struct ttc_timer_clocksource, ttc);
+
+ switch (event) {
+ case POST_RATE_CHANGE:
+ /*
+ * Do whatever is necessary to maintain a proper time base
+ *
+ * I cannot find a way to adjust the currently used clocksource
+ * to the new frequency. __clocksource_updatefreq_hz() sounds
+ * good, but does not work. It is not clear what is missing.
+ *
+ * This approach works, but triggers two clocksource switches:
+ * the first to the jiffies clocksource after the unregister, and
+ * a second to the newly registered timer after the re-register.
+ *
+ * Alternatively we could 'waste' another HW timer to ping pong
+ * between clock sources. That would also use one register and
+ * one unregister call, but only trigger one clocksource switch
+ * for the cost of another HW timer used by the OS.
+ */
+ clocksource_unregister(&ttccs->cs);
+ clocksource_register_hz(&ttccs->cs,
+ ndata->new_rate / PRESCALE);
+ /* fall through */
+ case PRE_RATE_CHANGE:
+ case ABORT_RATE_CHANGE:
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base)
+{
+ struct ttc_timer_clocksource *ttccs;
+ int err;
+
+ ttccs = kzalloc(sizeof(*ttccs), GFP_KERNEL);
+ if (WARN_ON(!ttccs))
+ return;
+
+ ttccs->ttc.clk = clk;
+
+ err = clk_prepare_enable(ttccs->ttc.clk);
+ if (WARN_ON(err)) {
+ kfree(ttccs);
+ return;
+ }
+
+ ttccs->ttc.clk_rate_change_nb.notifier_call =
+ ttc_rate_change_clocksource_cb;
+ ttccs->ttc.clk_rate_change_nb.next = NULL;
+ if (clk_notifier_register(ttccs->ttc.clk,
+ &ttccs->ttc.clk_rate_change_nb))
+ pr_warn("Unable to register clock notifier.\n");
+
+ ttccs->ttc.base_addr = base;
+ ttccs->cs.name = "ttc_clocksource";
+ ttccs->cs.rating = 200;
+ ttccs->cs.read = __ttc_clocksource_read;
+ ttccs->cs.mask = CLOCKSOURCE_MASK(16);
+ ttccs->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ /*
+ * Setup the clock source counter to be an incrementing counter
+ * with no interrupt and it rolls over at 0xFFFF. Pre-scale
+ * it by 32 also. Let it start running now.
+ */
+ __raw_writel(0x0, ttccs->ttc.base_addr + TTC_IER_OFFSET);
+ __raw_writel(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN,
+ ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
+ __raw_writel(CNT_CNTRL_RESET,
+ ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
+
+ err = clocksource_register_hz(&ttccs->cs,
+ clk_get_rate(ttccs->ttc.clk) / PRESCALE);
+ if (WARN_ON(err)) {
+ kfree(ttccs);
+ return;
+ }
+}
+
+static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct clk_notifier_data *ndata = data;
+ struct ttc_timer *ttc = to_ttc_timer(nb);
+ struct ttc_timer_clockevent *ttcce = container_of(ttc,
+ struct ttc_timer_clockevent, ttc);
+
+ switch (event) {
+ case POST_RATE_CHANGE:
+ {
+ unsigned long flags;
+
+ /*
+ * clockevents_update_freq should be called with IRQ disabled on
+ * the CPU the timer provides events for. The timer we use is
+ * common to both CPUs, so it is unclear whether this needs to
+ * run on both cores.
+ */
+ local_irq_save(flags);
+ clockevents_update_freq(&ttcce->ce,
+ ndata->new_rate / PRESCALE);
+ local_irq_restore(flags);
+
+ /* fall through */
+ }
+ case PRE_RATE_CHANGE:
+ case ABORT_RATE_CHANGE:
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static void __init ttc_setup_clockevent(struct clk *clk,
+ void __iomem *base, u32 irq)
+{
+ struct ttc_timer_clockevent *ttcce;
+ int err;
+
+ ttcce = kzalloc(sizeof(*ttcce), GFP_KERNEL);
+ if (WARN_ON(!ttcce))
+ return;
+
+ ttcce->ttc.clk = clk;
+
+ err = clk_prepare_enable(ttcce->ttc.clk);
+ if (WARN_ON(err)) {
+ kfree(ttcce);
+ return;
+ }
+
+ ttcce->ttc.clk_rate_change_nb.notifier_call =
+ ttc_rate_change_clockevent_cb;
+ ttcce->ttc.clk_rate_change_nb.next = NULL;
+ if (clk_notifier_register(ttcce->ttc.clk,
+ &ttcce->ttc.clk_rate_change_nb))
+ pr_warn("Unable to register clock notifier.\n");
+
+ ttcce->ttc.base_addr = base;
+ ttcce->ce.name = "ttc_clockevent";
+ ttcce->ce.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ ttcce->ce.set_next_event = ttc_set_next_event;
+ ttcce->ce.set_mode = ttc_set_mode;
+ ttcce->ce.rating = 200;
+ ttcce->ce.irq = irq;
+ ttcce->ce.cpumask = cpu_possible_mask;
+
+ /*
+ * Setup the clock event timer to be an interval timer which
+ * is prescaled by 32 using the interval interrupt. Leave it
+ * disabled for now.
+ */
+ __raw_writel(0x23, ttcce->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
+ __raw_writel(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN,
+ ttcce->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
+ __raw_writel(0x1, ttcce->ttc.base_addr + TTC_IER_OFFSET);
+
+ err = request_irq(irq, ttc_clock_event_interrupt,
+ IRQF_DISABLED | IRQF_TIMER,
+ ttcce->ce.name, ttcce);
+ if (WARN_ON(err)) {
+ kfree(ttcce);
+ return;
+ }
+
+ clockevents_config_and_register(&ttcce->ce,
+ clk_get_rate(ttcce->ttc.clk) / PRESCALE, 1, 0xfffe);
+}
+
+/**
+ * ttc_timer_init - Initialize the timer
+ *
+ * Initializes the timer hardware and registers the clock source and clock
+ * event timers with the Linux kernel timer framework.
+ */
+static void __init ttc_timer_init(struct device_node *timer)
+{
+ unsigned int irq;
+ void __iomem *timer_baseaddr;
+ struct clk *clk;
+ static int initialized;
+
+ if (initialized)
+ return;
+
+ initialized = 1;
+
+ /*
+ * Get the 1st Triple Timer Counter (TTC) block from the device tree
+ * and use it. Note that the clock event timer uses the interrupt of the
+ * second timer in the block, hence irq_of_parse_and_map(timer, 1).
+ */
+ timer_baseaddr = of_iomap(timer, 0);
+ if (!timer_baseaddr) {
+ pr_err("ERROR: invalid timer base address\n");
+ BUG();
+ }
+
+ irq = irq_of_parse_and_map(timer, 1);
+ if (irq <= 0) {
+ pr_err("ERROR: invalid interrupt number\n");
+ BUG();
+ }
+
+ clk = of_clk_get_by_name(timer, "cpu_1x");
+ if (IS_ERR(clk)) {
+ pr_err("ERROR: timer input clock not found\n");
+ BUG();
+ }
+
+ ttc_setup_clocksource(clk, timer_baseaddr);
+ ttc_setup_clockevent(clk, timer_baseaddr + 4, irq);
+
+ pr_info("%s #0 at %p, irq=%d\n", timer->name, timer_baseaddr, irq);
+}
+
+CLOCKSOURCE_OF_DECLARE(ttc, "cdns,ttc", ttc_timer_init);
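
As a quick check of the prescaler constants in the TTC driver above: it writes CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN to the clock control register and registers both the clocksource and the clockevent at clk_get_rate()/PRESCALE. A small standalone sketch of that arithmetic, assuming an illustrative 133 MHz cpu_1x input clock (the rate is an assumption, not from this patch):

    #include <stdio.h>

    #define PRESCALE_EXPONENT       11      /* 2 ^ PRESCALE_EXPONENT = PRESCALE */
    #define PRESCALE                2048
    #define CLK_CNTRL_PRESCALE      ((PRESCALE_EXPONENT - 1) << 1)
    #define CLK_CNTRL_PRESCALE_EN   1

    int main(void)
    {
            unsigned long clk_rate = 133000000UL;   /* illustrative cpu_1x rate */

            /* value written to TTC_CLK_CNTRL_OFFSET: 0x15 */
            printf("clock control write: 0x%02x\n",
                   CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN);
            /* rate handed to clocksource_register_hz(): ~64.9 kHz here */
            printf("timer tick rate: %lu Hz\n", clk_rate / PRESCALE);
            return 0;
    }
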
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
index c26c369..54f3d11 100644
--- a/drivers/clocksource/clksrc-dbx500-prcmu.c
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -17,9 +17,6 @@
#include <asm/sched_clock.h>
-#include <mach/setup.h>
-#include <mach/hardware.h>
-
#define RATE_32K 32768
#define TIMER_MODE_CONTINOUS 0x1
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c
index bdabdaa..37f5325 100644
--- a/drivers/clocksource/clksrc-of.c
+++ b/drivers/clocksource/clksrc-of.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/clocksource.h>
extern struct of_device_id __clksrc_of_table[];
@@ -26,10 +27,10 @@ void __init clocksource_of_init(void)
{
struct device_node *np;
const struct of_device_id *match;
- void (*init_func)(void);
+ clocksource_of_init_fn init_func;
for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
init_func = match->data;
- init_func();
+ init_func(np);
}
}
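
clocksource_of_init() now passes the matched device_node to each init function, and drivers hook in through CLOCKSOURCE_OF_DECLARE(), as the arch timer, bcm2835 and TTC changes above do. A minimal sketch of a driver stub using the new per-node signature — the compatible string and the body are illustrative:

    #include <linux/clocksource.h>
    #include <linux/kernel.h>
    #include <linux/of.h>
    #include <linux/of_address.h>
    #include <linux/of_irq.h>

    static void __init example_timer_init(struct device_node *np)
    {
            void __iomem *base = of_iomap(np, 0);   /* registers described in DT */
            int irq = irq_of_parse_and_map(np, 0);

            if (!base || irq <= 0)
                    panic("example-timer: missing DT resources");

            /* ... program the hardware, register clocksource/clockevent ... */
    }
    CLOCKSOURCE_OF_DECLARE(example_timer, "vendor,example-timer",
                           example_timer_init);
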
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index e6a553c..4329a29 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -399,7 +399,18 @@ static struct platform_driver em_sti_device_driver = {
}
};
-module_platform_driver(em_sti_device_driver);
+static int __init em_sti_init(void)
+{
+ return platform_driver_register(&em_sti_device_driver);
+}
+
+static void __exit em_sti_exit(void)
+{
+ platform_driver_unregister(&em_sti_device_driver);
+}
+
+subsys_initcall(em_sti_init);
+module_exit(em_sti_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Renesas Emma Mobile STI Timer Driver");
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
new file mode 100644
index 0000000..662fcc0
--- /dev/null
+++ b/drivers/clocksource/exynos_mct.c
@@ -0,0 +1,558 @@
+/* linux/arch/arm/mach-exynos4/mct.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * EXYNOS4 MCT(Multi-Core Timer) support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/percpu.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/clocksource.h>
+
+#include <asm/localtimer.h>
+#include <asm/mach/time.h>
+
+#define EXYNOS4_MCTREG(x) (x)
+#define EXYNOS4_MCT_G_CNT_L EXYNOS4_MCTREG(0x100)
+#define EXYNOS4_MCT_G_CNT_U EXYNOS4_MCTREG(0x104)
+#define EXYNOS4_MCT_G_CNT_WSTAT EXYNOS4_MCTREG(0x110)
+#define EXYNOS4_MCT_G_COMP0_L EXYNOS4_MCTREG(0x200)
+#define EXYNOS4_MCT_G_COMP0_U EXYNOS4_MCTREG(0x204)
+#define EXYNOS4_MCT_G_COMP0_ADD_INCR EXYNOS4_MCTREG(0x208)
+#define EXYNOS4_MCT_G_TCON EXYNOS4_MCTREG(0x240)
+#define EXYNOS4_MCT_G_INT_CSTAT EXYNOS4_MCTREG(0x244)
+#define EXYNOS4_MCT_G_INT_ENB EXYNOS4_MCTREG(0x248)
+#define EXYNOS4_MCT_G_WSTAT EXYNOS4_MCTREG(0x24C)
+#define _EXYNOS4_MCT_L_BASE EXYNOS4_MCTREG(0x300)
+#define EXYNOS4_MCT_L_BASE(x) (_EXYNOS4_MCT_L_BASE + (0x100 * x))
+#define EXYNOS4_MCT_L_MASK (0xffffff00)
+
+#define MCT_L_TCNTB_OFFSET (0x00)
+#define MCT_L_ICNTB_OFFSET (0x08)
+#define MCT_L_TCON_OFFSET (0x20)
+#define MCT_L_INT_CSTAT_OFFSET (0x30)
+#define MCT_L_INT_ENB_OFFSET (0x34)
+#define MCT_L_WSTAT_OFFSET (0x40)
+#define MCT_G_TCON_START (1 << 8)
+#define MCT_G_TCON_COMP0_AUTO_INC (1 << 1)
+#define MCT_G_TCON_COMP0_ENABLE (1 << 0)
+#define MCT_L_TCON_INTERVAL_MODE (1 << 2)
+#define MCT_L_TCON_INT_START (1 << 1)
+#define MCT_L_TCON_TIMER_START (1 << 0)
+
+#define TICK_BASE_CNT 1
+
+enum {
+ MCT_INT_SPI,
+ MCT_INT_PPI
+};
+
+enum {
+ MCT_G0_IRQ,
+ MCT_G1_IRQ,
+ MCT_G2_IRQ,
+ MCT_G3_IRQ,
+ MCT_L0_IRQ,
+ MCT_L1_IRQ,
+ MCT_L2_IRQ,
+ MCT_L3_IRQ,
+ MCT_NR_IRQS,
+};
+
+static void __iomem *reg_base;
+static unsigned long clk_rate;
+static unsigned int mct_int_type;
+static int mct_irqs[MCT_NR_IRQS];
+
+struct mct_clock_event_device {
+ struct clock_event_device *evt;
+ unsigned long base;
+ char name[10];
+};
+
+static void exynos4_mct_write(unsigned int value, unsigned long offset)
+{
+ unsigned long stat_addr;
+ u32 mask;
+ u32 i;
+
+ __raw_writel(value, reg_base + offset);
+
+ if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
+ stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
+ switch (offset & EXYNOS4_MCT_L_MASK) {
+ case MCT_L_TCON_OFFSET:
+ mask = 1 << 3; /* L_TCON write status */
+ break;
+ case MCT_L_ICNTB_OFFSET:
+ mask = 1 << 1; /* L_ICNTB write status */
+ break;
+ case MCT_L_TCNTB_OFFSET:
+ mask = 1 << 0; /* L_TCNTB write status */
+ break;
+ default:
+ return;
+ }
+ } else {
+ switch (offset) {
+ case EXYNOS4_MCT_G_TCON:
+ stat_addr = EXYNOS4_MCT_G_WSTAT;
+ mask = 1 << 16; /* G_TCON write status */
+ break;
+ case EXYNOS4_MCT_G_COMP0_L:
+ stat_addr = EXYNOS4_MCT_G_WSTAT;
+ mask = 1 << 0; /* G_COMP0_L write status */
+ break;
+ case EXYNOS4_MCT_G_COMP0_U:
+ stat_addr = EXYNOS4_MCT_G_WSTAT;
+ mask = 1 << 1; /* G_COMP0_U write status */
+ break;
+ case EXYNOS4_MCT_G_COMP0_ADD_INCR:
+ stat_addr = EXYNOS4_MCT_G_WSTAT;
+ mask = 1 << 2; /* G_COMP0_ADD_INCR w status */
+ break;
+ case EXYNOS4_MCT_G_CNT_L:
+ stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
+ mask = 1 << 0; /* G_CNT_L write status */
+ break;
+ case EXYNOS4_MCT_G_CNT_U:
+ stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
+ mask = 1 << 1; /* G_CNT_U write status */
+ break;
+ default:
+ return;
+ }
+ }
+
+ /* Wait maximum 1 ms until written values are applied */
+ for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
+ if (__raw_readl(reg_base + stat_addr) & mask) {
+ __raw_writel(mask, reg_base + stat_addr);
+ return;
+ }
+
+ panic("MCT hangs after writing %d (offset:0x%lx)\n", value, offset);
+}
+
+/* Clocksource handling */
+static void exynos4_mct_frc_start(u32 hi, u32 lo)
+{
+ u32 reg;
+
+ exynos4_mct_write(lo, EXYNOS4_MCT_G_CNT_L);
+ exynos4_mct_write(hi, EXYNOS4_MCT_G_CNT_U);
+
+ reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
+ reg |= MCT_G_TCON_START;
+ exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
+}
+
+static cycle_t exynos4_frc_read(struct clocksource *cs)
+{
+ unsigned int lo, hi;
+ u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);
+
+ do {
+ hi = hi2;
+ lo = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_L);
+ hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);
+ } while (hi != hi2);
+
+ return ((cycle_t)hi << 32) | lo;
+}
+
+static void exynos4_frc_resume(struct clocksource *cs)
+{
+ exynos4_mct_frc_start(0, 0);
+}
+
+struct clocksource mct_frc = {
+ .name = "mct-frc",
+ .rating = 400,
+ .read = exynos4_frc_read,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .resume = exynos4_frc_resume,
+};
+
+static void __init exynos4_clocksource_init(void)
+{
+ exynos4_mct_frc_start(0, 0);
+
+ if (clocksource_register_hz(&mct_frc, clk_rate))
+ panic("%s: can't register clocksource\n", mct_frc.name);
+}
+
+static void exynos4_mct_comp0_stop(void)
+{
+ unsigned int tcon;
+
+ tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
+ tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);
+
+ exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
+ exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB);
+}
+
+static void exynos4_mct_comp0_start(enum clock_event_mode mode,
+ unsigned long cycles)
+{
+ unsigned int tcon;
+ cycle_t comp_cycle;
+
+ tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
+
+ if (mode == CLOCK_EVT_MODE_PERIODIC) {
+ tcon |= MCT_G_TCON_COMP0_AUTO_INC;
+ exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
+ }
+
+ comp_cycle = exynos4_frc_read(&mct_frc) + cycles;
+ exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
+ exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);
+
+ exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB);
+
+ tcon |= MCT_G_TCON_COMP0_ENABLE;
+ exynos4_mct_write(tcon , EXYNOS4_MCT_G_TCON);
+}
+
+static int exynos4_comp_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ exynos4_mct_comp0_start(evt->mode, cycles);
+
+ return 0;
+}
+
+static void exynos4_comp_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ unsigned long cycles_per_jiffy;
+ exynos4_mct_comp0_stop();
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ cycles_per_jiffy =
+ (((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
+ exynos4_mct_comp0_start(mode, cycles_per_jiffy);
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ }
+}
+
+static struct clock_event_device mct_comp_device = {
+ .name = "mct-comp",
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 250,
+ .set_next_event = exynos4_comp_set_next_event,
+ .set_mode = exynos4_comp_set_mode,
+};
+
+static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT);
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction mct_comp_event_irq = {
+ .name = "mct_comp_irq",
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = exynos4_mct_comp_isr,
+ .dev_id = &mct_comp_device,
+};
+
+static void exynos4_clockevent_init(void)
+{
+ mct_comp_device.cpumask = cpumask_of(0);
+ clockevents_config_and_register(&mct_comp_device, clk_rate,
+ 0xf, 0xffffffff);
+ setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq);
+}
+
+#ifdef CONFIG_LOCAL_TIMERS
+
+static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
+
+/* Clock event handling */
+static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
+{
+ unsigned long tmp;
+ unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
+ unsigned long offset = mevt->base + MCT_L_TCON_OFFSET;
+
+ tmp = __raw_readl(reg_base + offset);
+ if (tmp & mask) {
+ tmp &= ~mask;
+ exynos4_mct_write(tmp, offset);
+ }
+}
+
+static void exynos4_mct_tick_start(unsigned long cycles,
+ struct mct_clock_event_device *mevt)
+{
+ unsigned long tmp;
+
+ exynos4_mct_tick_stop(mevt);
+
+ tmp = (1 << 31) | cycles; /* MCT_L_UPDATE_ICNTB */
+
+ /* update interrupt count buffer */
+ exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET);
+
+ /* enable MCT tick interrupt */
+ exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);
+
+ tmp = __raw_readl(reg_base + mevt->base + MCT_L_TCON_OFFSET);
+ tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
+ MCT_L_TCON_INTERVAL_MODE;
+ exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
+}
+
+static int exynos4_tick_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
+
+ exynos4_mct_tick_start(cycles, mevt);
+
+ return 0;
+}
+
+static inline void exynos4_tick_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
+ unsigned long cycles_per_jiffy;
+
+ exynos4_mct_tick_stop(mevt);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ cycles_per_jiffy =
+ (((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
+ exynos4_mct_tick_start(cycles_per_jiffy, mevt);
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ }
+}
+
+static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
+{
+ struct clock_event_device *evt = mevt->evt;
+
+ /*
+ * This is for supporting oneshot mode.
+ * Mct would generate interrupt periodically
+ * without explicit stopping.
+ */
+ if (evt->mode != CLOCK_EVT_MODE_PERIODIC)
+ exynos4_mct_tick_stop(mevt);
+
+ /* Clear the MCT tick interrupt */
+ if (__raw_readl(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) {
+ exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
+{
+ struct mct_clock_event_device *mevt = dev_id;
+ struct clock_event_device *evt = mevt->evt;
+
+ exynos4_mct_tick_clear(mevt);
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction mct_tick0_event_irq = {
+ .name = "mct_tick0_irq",
+ .flags = IRQF_TIMER | IRQF_NOBALANCING,
+ .handler = exynos4_mct_tick_isr,
+};
+
+static struct irqaction mct_tick1_event_irq = {
+ .name = "mct_tick1_irq",
+ .flags = IRQF_TIMER | IRQF_NOBALANCING,
+ .handler = exynos4_mct_tick_isr,
+};
+
+static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt)
+{
+ struct mct_clock_event_device *mevt;
+ unsigned int cpu = smp_processor_id();
+
+ mevt = this_cpu_ptr(&percpu_mct_tick);
+ mevt->evt = evt;
+
+ mevt->base = EXYNOS4_MCT_L_BASE(cpu);
+ sprintf(mevt->name, "mct_tick%d", cpu);
+
+ evt->name = mevt->name;
+ evt->cpumask = cpumask_of(cpu);
+ evt->set_next_event = exynos4_tick_set_next_event;
+ evt->set_mode = exynos4_tick_set_mode;
+ evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ evt->rating = 450;
+ clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
+ 0xf, 0x7fffffff);
+
+ exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
+
+ if (mct_int_type == MCT_INT_SPI) {
+ if (cpu == 0) {
+ mct_tick0_event_irq.dev_id = mevt;
+ evt->irq = mct_irqs[MCT_L0_IRQ];
+ setup_irq(evt->irq, &mct_tick0_event_irq);
+ } else {
+ mct_tick1_event_irq.dev_id = mevt;
+ evt->irq = mct_irqs[MCT_L1_IRQ];
+ setup_irq(evt->irq, &mct_tick1_event_irq);
+ irq_set_affinity(evt->irq, cpumask_of(1));
+ }
+ } else {
+ enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
+ }
+
+ return 0;
+}
+
+static void exynos4_local_timer_stop(struct clock_event_device *evt)
+{
+ unsigned int cpu = smp_processor_id();
+ evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+ if (mct_int_type == MCT_INT_SPI) {
+ if (cpu == 0)
+ remove_irq(evt->irq, &mct_tick0_event_irq);
+ else
+ remove_irq(evt->irq, &mct_tick1_event_irq);
+ } else {
+ disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
+ }
+}
+
+static struct local_timer_ops exynos4_mct_tick_ops __cpuinitdata = {
+ .setup = exynos4_local_timer_setup,
+ .stop = exynos4_local_timer_stop,
+};
+#endif /* CONFIG_LOCAL_TIMERS */
+
+static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
+{
+ struct clk *mct_clk, *tick_clk;
+
+ tick_clk = np ? of_clk_get_by_name(np, "fin_pll") :
+ clk_get(NULL, "fin_pll");
+ if (IS_ERR(tick_clk))
+ panic("%s: unable to determine tick clock rate\n", __func__);
+ clk_rate = clk_get_rate(tick_clk);
+
+ mct_clk = np ? of_clk_get_by_name(np, "mct") : clk_get(NULL, "mct");
+ if (IS_ERR(mct_clk))
+ panic("%s: unable to retrieve mct clock instance\n", __func__);
+ clk_prepare_enable(mct_clk);
+
+ reg_base = base;
+ if (!reg_base)
+ panic("%s: unable to ioremap mct address space\n", __func__);
+
+#ifdef CONFIG_LOCAL_TIMERS
+ if (mct_int_type == MCT_INT_PPI) {
+ int err;
+
+ err = request_percpu_irq(mct_irqs[MCT_L0_IRQ],
+ exynos4_mct_tick_isr, "MCT",
+ &percpu_mct_tick);
+ WARN(err, "MCT: can't request IRQ %d (%d)\n",
+ mct_irqs[MCT_L0_IRQ], err);
+ }
+
+ local_timer_register(&exynos4_mct_tick_ops);
+#endif /* CONFIG_LOCAL_TIMERS */
+}
+
+void __init mct_init(void __iomem *base, int irq_g0, int irq_l0, int irq_l1)
+{
+ mct_irqs[MCT_G0_IRQ] = irq_g0;
+ mct_irqs[MCT_L0_IRQ] = irq_l0;
+ mct_irqs[MCT_L1_IRQ] = irq_l1;
+ mct_int_type = MCT_INT_SPI;
+
+ exynos4_timer_resources(NULL, base);
+ exynos4_clocksource_init();
+ exynos4_clockevent_init();
+}
+
+static void __init mct_init_dt(struct device_node *np, unsigned int int_type)
+{
+ u32 nr_irqs, i;
+
+ mct_int_type = int_type;
+
+ /* This driver uses only one global timer interrupt */
+ mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ);
+
+ /*
+ * Find out the number of local irqs specified. The local
+ * timer irqs are listed after the four global timer irqs
+ * in the device tree.
+ */
+#ifdef CONFIG_OF
+ nr_irqs = of_irq_count(np);
+#else
+ nr_irqs = 0;
+#endif
+ for (i = MCT_L0_IRQ; i < nr_irqs; i++)
+ mct_irqs[i] = irq_of_parse_and_map(np, i);
+
+ exynos4_timer_resources(np, of_iomap(np, 0));
+ exynos4_clocksource_init();
+ exynos4_clockevent_init();
+}
+
+static void __init mct_init_spi(struct device_node *np)
+{
+ return mct_init_dt(np, MCT_INT_SPI);
+}
+
+static void __init mct_init_ppi(struct device_node *np)
+{
+ return mct_init_dt(np, MCT_INT_PPI);
+}
+CLOCKSOURCE_OF_DECLARE(exynos4210, "samsung,exynos4210-mct", mct_init_spi);
+CLOCKSOURCE_OF_DECLARE(exynos4412, "samsung,exynos4412-mct", mct_init_ppi);
diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c
new file mode 100644
index 0000000..02af420
--- /dev/null
+++ b/drivers/clocksource/mxs_timer.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2000-2001 Deep Blue Solutions
+ * Copyright (C) 2002 Shane Nay (shane@minirl.com)
+ * Copyright (C) 2006-2007 Pavel Pisa (ppisa@pikron.com)
+ * Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de)
+ * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/clockchips.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/stmp_device.h>
+
+#include <asm/mach/time.h>
+#include <asm/sched_clock.h>
+
+/*
+ * There are 2 versions of the timrot on Freescale MXS-based SoCs.
+ * The v1 on MX23 has only a 16-bit counter, while the v2 on MX28
+ * extends the counter to 32 bits.
+ *
+ * The implementation uses two timers, one for clock_event and
+ * another for clocksource. MX28 uses timrot 0 and 1, while MX23
+ * uses 0 and 2.
+ */
+
+#define MX23_TIMROT_VERSION_OFFSET 0x0a0
+#define MX28_TIMROT_VERSION_OFFSET 0x120
+#define BP_TIMROT_MAJOR_VERSION 24
+#define BV_TIMROT_VERSION_1 0x01
+#define BV_TIMROT_VERSION_2 0x02
+#define timrot_is_v1() (timrot_major_version == BV_TIMROT_VERSION_1)
+
+/*
+ * There are 4 registers for each timrotv2 instance, but only 2 for
+ * each timrotv1 instance. So the address step of 0x40 in the macros
+ * below strides over one instance of timrotv2 but over two instances
+ * of timrotv1.
+ *
+ * As a result, HW_TIMROT_XXXn(1) gives the address of timrot1 on
+ * MX28, but of timrot2 on MX23.
+ */
+/* common between v1 and v2 */
+#define HW_TIMROT_ROTCTRL 0x00
+#define HW_TIMROT_TIMCTRLn(n) (0x20 + (n) * 0x40)
+/* v1 only */
+#define HW_TIMROT_TIMCOUNTn(n) (0x30 + (n) * 0x40)
+/* v2 only */
+#define HW_TIMROT_RUNNING_COUNTn(n) (0x30 + (n) * 0x40)
+#define HW_TIMROT_FIXED_COUNTn(n) (0x40 + (n) * 0x40)
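+
+/*
+ * Illustration, derived purely from the macros above: HW_TIMROT_TIMCTRLn(1)
+ * expands to 0x20 + 1 * 0x40 = 0x60. On a v2 block (MX28) that is the
+ * control register of timrot1; on a v1 block (MX23), where each instance
+ * spans only half the stride, the same offset belongs to timrot2.
+ */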
+
+#define BM_TIMROT_TIMCTRLn_RELOAD (1 << 6)
+#define BM_TIMROT_TIMCTRLn_UPDATE (1 << 7)
+#define BM_TIMROT_TIMCTRLn_IRQ_EN (1 << 14)
+#define BM_TIMROT_TIMCTRLn_IRQ (1 << 15)
+#define BP_TIMROT_TIMCTRLn_SELECT 0
+#define BV_TIMROTv1_TIMCTRLn_SELECT__32KHZ_XTAL 0x8
+#define BV_TIMROTv2_TIMCTRLn_SELECT__32KHZ_XTAL 0xb
+#define BV_TIMROTv2_TIMCTRLn_SELECT__TICK_ALWAYS 0xf
+
+static struct clock_event_device mxs_clockevent_device;
+static enum clock_event_mode mxs_clockevent_mode = CLOCK_EVT_MODE_UNUSED;
+
+static void __iomem *mxs_timrot_base;
+static u32 timrot_major_version;
+
+static inline void timrot_irq_disable(void)
+{
+ __raw_writel(BM_TIMROT_TIMCTRLn_IRQ_EN, mxs_timrot_base +
+ HW_TIMROT_TIMCTRLn(0) + STMP_OFFSET_REG_CLR);
+}
+
+static inline void timrot_irq_enable(void)
+{
+ __raw_writel(BM_TIMROT_TIMCTRLn_IRQ_EN, mxs_timrot_base +
+ HW_TIMROT_TIMCTRLn(0) + STMP_OFFSET_REG_SET);
+}
+
+static void timrot_irq_acknowledge(void)
+{
+ __raw_writel(BM_TIMROT_TIMCTRLn_IRQ, mxs_timrot_base +
+ HW_TIMROT_TIMCTRLn(0) + STMP_OFFSET_REG_CLR);
+}
+
+static cycle_t timrotv1_get_cycles(struct clocksource *cs)
+{
+ return ~((__raw_readl(mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1))
+ & 0xffff0000) >> 16);
+}
+
+static int timrotv1_set_next_event(unsigned long evt,
+ struct clock_event_device *dev)
+{
+ /* timrot decrements the count */
+ __raw_writel(evt, mxs_timrot_base + HW_TIMROT_TIMCOUNTn(0));
+
+ return 0;
+}
+
+static int timrotv2_set_next_event(unsigned long evt,
+ struct clock_event_device *dev)
+{
+ /* timrot decrements the count */
+ __raw_writel(evt, mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(0));
+
+ return 0;
+}
+
+static irqreturn_t mxs_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ timrot_irq_acknowledge();
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction mxs_timer_irq = {
+ .name = "MXS Timer Tick",
+ .dev_id = &mxs_clockevent_device,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = mxs_timer_interrupt,
+};
+
+#ifdef DEBUG
+static const char *clock_event_mode_label[] = {
+ [CLOCK_EVT_MODE_PERIODIC] = "CLOCK_EVT_MODE_PERIODIC",
+ [CLOCK_EVT_MODE_ONESHOT] = "CLOCK_EVT_MODE_ONESHOT",
+ [CLOCK_EVT_MODE_SHUTDOWN] = "CLOCK_EVT_MODE_SHUTDOWN",
+ [CLOCK_EVT_MODE_UNUSED] = "CLOCK_EVT_MODE_UNUSED",
+ [CLOCK_EVT_MODE_RESUME] = "CLOCK_EVT_MODE_RESUME"
+};
+#endif /* DEBUG */
+
+static void mxs_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ /* Disable interrupt in timer module */
+ timrot_irq_disable();
+
+ if (mode != mxs_clockevent_mode) {
+ /* Set event time into the furthest future */
+ if (timrot_is_v1())
+ __raw_writel(0xffff,
+ mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1));
+ else
+ __raw_writel(0xffffffff,
+ mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1));
+
+ /* Clear pending interrupt */
+ timrot_irq_acknowledge();
+ }
+
+#ifdef DEBUG
+ pr_info("%s: changing mode from %s to %s\n", __func__,
+ clock_event_mode_label[mxs_clockevent_mode],
+ clock_event_mode_label[mode]);
+#endif /* DEBUG */
+
+ /* Remember timer mode */
+ mxs_clockevent_mode = mode;
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ pr_err("%s: Periodic mode is not implemented\n", __func__);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ timrot_irq_enable();
+ break;
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_RESUME:
+ /* Leave event sources disabled; no more interrupts will appear */
+ break;
+ }
+}
+
+static struct clock_event_device mxs_clockevent_device = {
+ .name = "mxs_timrot",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .set_mode = mxs_set_mode,
+ .set_next_event = timrotv2_set_next_event,
+ .rating = 200,
+};
+
+static int __init mxs_clockevent_init(struct clk *timer_clk)
+{
+ if (timrot_is_v1())
+ mxs_clockevent_device.set_next_event = timrotv1_set_next_event;
+ mxs_clockevent_device.cpumask = cpumask_of(0);
+ clockevents_config_and_register(&mxs_clockevent_device,
+ clk_get_rate(timer_clk),
+ timrot_is_v1() ? 0xf : 0x2,
+ timrot_is_v1() ? 0xfffe : 0xfffffffe);
+
+ return 0;
+}
+
+static struct clocksource clocksource_mxs = {
+ .name = "mxs_timer",
+ .rating = 200,
+ .read = timrotv1_get_cycles,
+ .mask = CLOCKSOURCE_MASK(16),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static u32 notrace mxs_read_sched_clock_v2(void)
+{
+ return ~readl_relaxed(mxs_timrot_base + HW_TIMROT_RUNNING_COUNTn(1));
+}
+
+static int __init mxs_clocksource_init(struct clk *timer_clk)
+{
+ unsigned int c = clk_get_rate(timer_clk);
+
+ if (timrot_is_v1())
+ clocksource_register_hz(&clocksource_mxs, c);
+ else {
+ clocksource_mmio_init(mxs_timrot_base + HW_TIMROT_RUNNING_COUNTn(1),
+ "mxs_timer", c, 200, 32, clocksource_mmio_readl_down);
+ setup_sched_clock(mxs_read_sched_clock_v2, 32, c);
+ }
+
+ return 0;
+}
+
+static void __init mxs_timer_init(struct device_node *np)
+{
+ struct clk *timer_clk;
+ int irq;
+
+ mxs_timrot_base = of_iomap(np, 0);
+ WARN_ON(!mxs_timrot_base);
+
+ timer_clk = of_clk_get(np, 0);
+ if (IS_ERR(timer_clk)) {
+ pr_err("%s: failed to get clk\n", __func__);
+ return;
+ }
+
+ clk_prepare_enable(timer_clk);
+
+ /*
+ * Initialize timers to a known state
+ */
+ stmp_reset_block(mxs_timrot_base + HW_TIMROT_ROTCTRL);
+
+ /* get timrot version */
+ timrot_major_version = __raw_readl(mxs_timrot_base +
+ (of_device_is_compatible(np, "fsl,imx23-timrot") ?
+ MX23_TIMROT_VERSION_OFFSET :
+ MX28_TIMROT_VERSION_OFFSET));
+ timrot_major_version >>= BP_TIMROT_MAJOR_VERSION;
+
+ /* one for clock_event */
+ __raw_writel((timrot_is_v1() ?
+ BV_TIMROTv1_TIMCTRLn_SELECT__32KHZ_XTAL :
+ BV_TIMROTv2_TIMCTRLn_SELECT__TICK_ALWAYS) |
+ BM_TIMROT_TIMCTRLn_UPDATE |
+ BM_TIMROT_TIMCTRLn_IRQ_EN,
+ mxs_timrot_base + HW_TIMROT_TIMCTRLn(0));
+
+ /* another for clocksource */
+ __raw_writel((timrot_is_v1() ?
+ BV_TIMROTv1_TIMCTRLn_SELECT__32KHZ_XTAL :
+ BV_TIMROTv2_TIMCTRLn_SELECT__TICK_ALWAYS) |
+ BM_TIMROT_TIMCTRLn_RELOAD,
+ mxs_timrot_base + HW_TIMROT_TIMCTRLn(1));
+
+ /* set clocksource timer fixed count to the maximum */
+ if (timrot_is_v1())
+ __raw_writel(0xffff,
+ mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1));
+ else
+ __raw_writel(0xffffffff,
+ mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1));
+
+ /* init and register the timer to the framework */
+ mxs_clocksource_init(timer_clk);
+ mxs_clockevent_init(timer_clk);
+
+ /* Make irqs happen */
+ irq = irq_of_parse_and_map(np, 0);
+ setup_irq(irq, &mxs_timer_irq);
+}
+CLOCKSOURCE_OF_DECLARE(mxs, "fsl,timrot", mxs_timer_init);
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index 071f6ea..e405531 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -67,7 +67,7 @@ static u32 clk_prescale;
static u32 nmdk_cycle; /* write-once */
static struct delay_timer mtu_delay_timer;
-#ifdef CONFIG_NOMADIK_MTU_SCHED_CLOCK
+#ifdef CONFIG_CLKSRC_NOMADIK_MTU_SCHED_CLOCK
/*
* Override the global weak sched_clock symbol with this
* local implementation which uses the clocksource to get some
@@ -233,7 +233,7 @@ void __init nmdk_timer_init(void __iomem *base, int irq)
pr_err("timer: failed to initialize clock source %s\n",
"mtu_0");
-#ifdef CONFIG_NOMADIK_MTU_SCHED_CLOCK
+#ifdef CONFIG_CLKSRC_NOMADIK_MTU_SCHED_CLOCK
setup_sched_clock(nomadik_read_sched_clock, 32, rate);
#endif
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
new file mode 100644
index 0000000..0234c8d
--- /dev/null
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -0,0 +1,494 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * samsung - Common hr-timer support (s3c and s5p)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <clocksource/samsung_pwm.h>
+
+#include <asm/sched_clock.h>
+
+/*
+ * Clocksource driver
+ */
+
+#define REG_TCFG0 0x00
+#define REG_TCFG1 0x04
+#define REG_TCON 0x08
+#define REG_TINT_CSTAT 0x44
+
+#define REG_TCNTB(chan) (0x0c + 12 * (chan))
+#define REG_TCMPB(chan) (0x10 + 12 * (chan))
+
+#define TCFG0_PRESCALER_MASK 0xff
+#define TCFG0_PRESCALER1_SHIFT 8
+
+#define TCFG1_SHIFT(x) ((x) * 4)
+#define TCFG1_MUX_MASK 0xf
+
+#define TCON_START(chan) (1 << (4 * (chan) + 0))
+#define TCON_MANUALUPDATE(chan) (1 << (4 * (chan) + 1))
+#define TCON_INVERT(chan) (1 << (4 * (chan) + 2))
+#define TCON_AUTORELOAD(chan) (1 << (4 * (chan) + 3))
+
+DEFINE_SPINLOCK(samsung_pwm_lock);
+EXPORT_SYMBOL(samsung_pwm_lock);
+
+struct samsung_pwm_clocksource {
+ void __iomem *base;
+ unsigned int irq[SAMSUNG_PWM_NUM];
+ struct samsung_pwm_variant variant;
+
+ struct clk *timerclk;
+
+ unsigned int event_id;
+ unsigned int source_id;
+ unsigned int tcnt_max;
+ unsigned int tscaler_div;
+ unsigned int tdiv;
+
+ unsigned long clock_count_per_tick;
+};
+
+static struct samsung_pwm_clocksource pwm;
+
+static void samsung_timer_set_prescale(unsigned int channel, u16 prescale)
+{
+ unsigned long flags;
+ u8 shift = 0;
+ u32 reg;
+
+ if (channel >= 2)
+ shift = TCFG0_PRESCALER1_SHIFT;
+
+ spin_lock_irqsave(&samsung_pwm_lock, flags);
+
+ reg = readl(pwm.base + REG_TCFG0);
+ reg &= ~(TCFG0_PRESCALER_MASK << shift);
+ reg |= (prescale - 1) << shift;
+ writel(reg, pwm.base + REG_TCFG0);
+
+ spin_unlock_irqrestore(&samsung_pwm_lock, flags);
+}
+
+static void samsung_timer_set_divisor(unsigned int channel, u8 divisor)
+{
+ u8 shift = TCFG1_SHIFT(channel);
+ unsigned long flags;
+ u32 reg;
+ u8 bits;
+
+ bits = (fls(divisor) - 1) - pwm.variant.div_base;
+
+ spin_lock_irqsave(&samsung_pwm_lock, flags);
+
+ reg = readl(pwm.base + REG_TCFG1);
+ reg &= ~(TCFG1_MUX_MASK << shift);
+ reg |= bits << shift;
+ writel(reg, pwm.base + REG_TCFG1);
+
+ spin_unlock_irqrestore(&samsung_pwm_lock, flags);
+}
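+
+/*
+ * Worked example (illustrative): with the s3c64xx-style variants defined
+ * below (div_base == 0), a divisor of 1 gives bits = fls(1) - 1 - 0 = 0
+ * and a divisor of 2 gives bits = 1; with the s3c24xx variant
+ * (div_base == 1) the divide-by-2 setting is encoded as bits = 0 instead.
+ */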
+
+static void samsung_time_stop(unsigned int channel)
+{
+ unsigned long tcon;
+ unsigned long flags;
+
+ if (channel > 0)
+ ++channel;
+
+ spin_lock_irqsave(&samsung_pwm_lock, flags);
+
+ tcon = __raw_readl(pwm.base + REG_TCON);
+ tcon &= ~TCON_START(channel);
+ __raw_writel(tcon, pwm.base + REG_TCON);
+
+ spin_unlock_irqrestore(&samsung_pwm_lock, flags);
+}
+
+static void samsung_time_setup(unsigned int channel, unsigned long tcnt)
+{
+ unsigned long tcon;
+ unsigned long flags;
+ unsigned int tcon_chan = channel;
+
+ if (tcon_chan > 0)
+ ++tcon_chan;
+
+ spin_lock_irqsave(&samsung_pwm_lock, flags);
+
+ tcon = __raw_readl(pwm.base + REG_TCON);
+
+ tcon &= ~(TCON_START(tcon_chan) | TCON_AUTORELOAD(tcon_chan));
+ tcon |= TCON_MANUALUPDATE(tcon_chan);
+
+ __raw_writel(tcnt, pwm.base + REG_TCNTB(channel));
+ __raw_writel(tcnt, pwm.base + REG_TCMPB(channel));
+ __raw_writel(tcon, pwm.base + REG_TCON);
+
+ spin_unlock_irqrestore(&samsung_pwm_lock, flags);
+}
+
+static void samsung_time_start(unsigned int channel, bool periodic)
+{
+ unsigned long tcon;
+ unsigned long flags;
+
+ if (channel > 0)
+ ++channel;
+
+ spin_lock_irqsave(&samsung_pwm_lock, flags);
+
+ tcon = __raw_readl(pwm.base + REG_TCON);
+
+ tcon &= ~TCON_MANUALUPDATE(channel);
+ tcon |= TCON_START(channel);
+
+ if (periodic)
+ tcon |= TCON_AUTORELOAD(channel);
+ else
+ tcon &= ~TCON_AUTORELOAD(channel);
+
+ __raw_writel(tcon, pwm.base + REG_TCON);
+
+ spin_unlock_irqrestore(&samsung_pwm_lock, flags);
+}
+
+static int samsung_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ /*
+ * This check is needed to account for internal rounding
+ * errors inside the clockevents core, which might result in
+ * passing cycles = 0, which in turn would not generate any
+ * timer interrupt and hang the system.
+ *
+ * Another solution would be to set up the clockevent device
+ * with min_delta = 2, but this would unnecessarily increase
+ * the minimum sleep period.
+ */
+ if (!cycles)
+ cycles = 1;
+
+ samsung_time_setup(pwm.event_id, cycles);
+ samsung_time_start(pwm.event_id, false);
+
+ return 0;
+}
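+
+/*
+ * Rough numbers for the check above (pclk assumed, for illustration only):
+ * on a 16-bit variant the event timer runs at pclk / (25 * 2); with a
+ * 66 MHz pclk that is about 1.3 MHz, i.e. roughly 0.76 us per cycle, so
+ * any request shorter than one cycle reaches this function as cycles == 0
+ * and must be clamped to 1.
+ */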
+
+static void samsung_timer_resume(void)
+{
+ /* event timer restart */
+ samsung_time_setup(pwm.event_id, pwm.clock_count_per_tick - 1);
+ samsung_time_start(pwm.event_id, true);
+
+ /* source timer restart */
+ samsung_time_setup(pwm.source_id, pwm.tcnt_max);
+ samsung_time_start(pwm.source_id, true);
+}
+
+static void samsung_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ samsung_time_stop(pwm.event_id);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ samsung_time_setup(pwm.event_id, pwm.clock_count_per_tick - 1);
+ samsung_time_start(pwm.event_id, true);
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ break;
+
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ break;
+
+ case CLOCK_EVT_MODE_RESUME:
+ samsung_timer_resume();
+ break;
+ }
+}
+
+static struct clock_event_device time_event_device = {
+ .name = "samsung_event_timer",
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 200,
+ .set_next_event = samsung_set_next_event,
+ .set_mode = samsung_set_mode,
+};
+
+static irqreturn_t samsung_clock_event_isr(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ if (pwm.variant.has_tint_cstat) {
+ u32 mask = (1 << pwm.event_id);
+ writel(mask | (mask << 5), pwm.base + REG_TINT_CSTAT);
+ }
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction samsung_clock_event_irq = {
+ .name = "samsung_time_irq",
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = samsung_clock_event_isr,
+ .dev_id = &time_event_device,
+};
+
+static void __init samsung_clockevent_init(void)
+{
+ unsigned long pclk;
+ unsigned long clock_rate;
+ unsigned int irq_number;
+
+ pclk = clk_get_rate(pwm.timerclk);
+
+ samsung_timer_set_prescale(pwm.event_id, pwm.tscaler_div);
+ samsung_timer_set_divisor(pwm.event_id, pwm.tdiv);
+
+ clock_rate = pclk / (pwm.tscaler_div * pwm.tdiv);
+ pwm.clock_count_per_tick = clock_rate / HZ;
+
+ time_event_device.cpumask = cpumask_of(0);
+ clockevents_config_and_register(&time_event_device,
+ clock_rate, 1, pwm.tcnt_max);
+
+ irq_number = pwm.irq[pwm.event_id];
+ setup_irq(irq_number, &samsung_clock_event_irq);
+
+ if (pwm.variant.has_tint_cstat) {
+ u32 mask = (1 << pwm.event_id);
+ writel(mask | (mask << 5), pwm.base + REG_TINT_CSTAT);
+ }
+}
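+
+/*
+ * Example rates (pclk and HZ values assumed, for illustration): a 32-bit
+ * variant uses tscaler_div = 2 and tdiv = 1, so a 66 MHz pclk gives
+ * clock_rate = 33 MHz and, at HZ = 200, clock_count_per_tick = 165000.
+ */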
+
+static void __iomem *samsung_timer_reg(void)
+{
+ switch (pwm.source_id) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ return pwm.base + pwm.source_id * 0x0c + 0x14;
+
+ case 4:
+ return pwm.base + 0x40;
+
+ default:
+ BUG();
+ }
+}
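+
+/*
+ * The offsets above are assumed to correspond to the TCNTOn count
+ * observation registers: channels 0-3 sit at 0x14 + 0x0c * n, while
+ * channel 4 has no compare buffer and its count register lives at 0x40.
+ */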
+
+/*
+ * Override the global weak sched_clock symbol with this
+ * local implementation which uses the clocksource to get some
+ * better resolution when scheduling the kernel. We accept that
+ * this wraps around for now, since it is just a relative time
+ * stamp. (Inspired by U300 implementation.)
+ */
+static u32 notrace samsung_read_sched_clock(void)
+{
+ void __iomem *reg = samsung_timer_reg();
+
+ if (!reg)
+ return 0;
+
+ return ~__raw_readl(reg);
+}
+
+static void __init samsung_clocksource_init(void)
+{
+ void __iomem *reg = samsung_timer_reg();
+ unsigned long pclk;
+ unsigned long clock_rate;
+ int ret;
+
+ pclk = clk_get_rate(pwm.timerclk);
+
+ samsung_timer_set_prescale(pwm.source_id, pwm.tscaler_div);
+ samsung_timer_set_divisor(pwm.source_id, pwm.tdiv);
+
+ clock_rate = pclk / (pwm.tscaler_div * pwm.tdiv);
+
+ samsung_time_setup(pwm.source_id, pwm.tcnt_max);
+ samsung_time_start(pwm.source_id, true);
+
+ setup_sched_clock(samsung_read_sched_clock,
+ pwm.variant.bits, clock_rate);
+
+ ret = clocksource_mmio_init(reg, "samsung_clocksource_timer",
+ clock_rate, 250, pwm.variant.bits,
+ clocksource_mmio_readl_down);
+ if (ret)
+ panic("samsung_clocksource_timer: can't register clocksource\n");
+}
+
+static void __init samsung_timer_resources(void)
+{
+ pwm.timerclk = clk_get(NULL, "timers");
+ if (IS_ERR(pwm.timerclk))
+ panic("failed to get timers clock for timer");
+
+ clk_prepare_enable(pwm.timerclk);
+
+ pwm.tcnt_max = (1UL << pwm.variant.bits) - 1;
+ if (pwm.variant.bits == 16) {
+ pwm.tscaler_div = 25;
+ pwm.tdiv = 2;
+ } else {
+ pwm.tscaler_div = 2;
+ pwm.tdiv = 1;
+ }
+}
+
+/*
+ * PWM master driver
+ */
+static void __init _samsung_pwm_clocksource_init(void)
+{
+ u8 mask;
+ int channel;
+
+ mask = ~pwm.variant.output_mask & ((1 << SAMSUNG_PWM_NUM) - 1);
+ channel = fls(mask) - 1;
+ if (channel < 0)
+ panic("failed to find PWM channel for clocksource");
+ pwm.source_id = channel;
+
+ mask &= ~(1 << channel);
+ channel = fls(mask) - 1;
+ if (channel < 0)
+ panic("failed to find PWM channel for clock event");
+ pwm.event_id = channel;
+
+ samsung_timer_resources();
+ samsung_clockevent_init();
+ samsung_clocksource_init();
+}
+
+void __init samsung_pwm_clocksource_init(void __iomem *base,
+ unsigned int *irqs, struct samsung_pwm_variant *variant)
+{
+ pwm.base = base;
+ memcpy(&pwm.variant, variant, sizeof(pwm.variant));
+ memcpy(pwm.irq, irqs, SAMSUNG_PWM_NUM * sizeof(*irqs));
+
+ _samsung_pwm_clocksource_init();
+}
+
+#ifdef CONFIG_CLKSRC_OF
+static void __init samsung_pwm_alloc(struct device_node *np,
+ const struct samsung_pwm_variant *variant)
+{
+ struct resource res;
+ struct property *prop;
+ const __be32 *cur;
+ u32 val;
+ int i;
+
+ memcpy(&pwm.variant, variant, sizeof(pwm.variant));
+ for (i = 0; i < SAMSUNG_PWM_NUM; ++i)
+ pwm.irq[i] = irq_of_parse_and_map(np, i);
+
+ of_property_for_each_u32(np, "samsung,pwm-outputs", prop, cur, val) {
+ if (val >= SAMSUNG_PWM_NUM) {
+ pr_warning("%s: invalid channel index in samsung,pwm-outputs property\n",
+ __func__);
+ continue;
+ }
+ pwm.variant.output_mask |= 1 << val;
+ }
+
+ of_address_to_resource(np, 0, &res);
+ if (!request_mem_region(res.start,
+ resource_size(&res), "samsung-pwm")) {
+ pr_err("%s: failed to request IO mem region\n", __func__);
+ return;
+ }
+
+ pwm.base = ioremap(res.start, resource_size(&res));
+ if (!pwm.base) {
+ pr_err("%s: failed to map PWM registers\n", __func__);
+ release_mem_region(res.start, resource_size(&res));
+ return;
+ }
+
+ _samsung_pwm_clocksource_init();
+}
+
+static const struct samsung_pwm_variant s3c24xx_variant = {
+ .bits = 16,
+ .div_base = 1,
+ .has_tint_cstat = false,
+ .tclk_mask = (1 << 4),
+};
+
+static void __init s3c2410_pwm_clocksource_init(struct device_node *np)
+{
+ samsung_pwm_alloc(np, &s3c24xx_variant);
+}
+CLOCKSOURCE_OF_DECLARE(s3c2410_pwm, "samsung,s3c2410-pwm", s3c2410_pwm_clocksource_init);
+
+static const struct samsung_pwm_variant s3c64xx_variant = {
+ .bits = 32,
+ .div_base = 0,
+ .has_tint_cstat = true,
+ .tclk_mask = (1 << 7) | (1 << 6) | (1 << 5),
+};
+
+static void __init s3c64xx_pwm_clocksource_init(struct device_node *np)
+{
+ samsung_pwm_alloc(np, &s3c64xx_variant);
+}
+CLOCKSOURCE_OF_DECLARE(s3c6400_pwm, "samsung,s3c6400-pwm", s3c64xx_pwm_clocksource_init);
+
+static const struct samsung_pwm_variant s5p64x0_variant = {
+ .bits = 32,
+ .div_base = 0,
+ .has_tint_cstat = true,
+ .tclk_mask = 0,
+};
+
+static void __init s5p64x0_pwm_clocksource_init(struct device_node *np)
+{
+ samsung_pwm_alloc(np, &s5p64x0_variant);
+}
+CLOCKSOURCE_OF_DECLARE(s5p6440_pwm, "samsung,s5p6440-pwm", s5p64x0_pwm_clocksource_init);
+
+static const struct samsung_pwm_variant s5p_variant = {
+ .bits = 32,
+ .div_base = 0,
+ .has_tint_cstat = true,
+ .tclk_mask = (1 << 5),
+};
+
+static void __init s5p_pwm_clocksource_init(struct device_node *np)
+{
+ samsung_pwm_alloc(np, &s5p_variant);
+}
+CLOCKSOURCE_OF_DECLARE(s5pc100_pwm, "samsung,s5pc100-pwm", s5p_pwm_clocksource_init);
+#endif
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 488c14c..08d0c41 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -54,62 +54,100 @@ struct sh_cmt_priv {
struct clocksource cs;
unsigned long total_cycles;
bool cs_enabled;
+
+ /* callbacks for CMSTR and CMCSR access */
+ unsigned long (*read_control)(void __iomem *base, unsigned long offs);
+ void (*write_control)(void __iomem *base, unsigned long offs,
+ unsigned long value);
+
+ /* callbacks for CMCNT and CMCOR access */
+ unsigned long (*read_count)(void __iomem *base, unsigned long offs);
+ void (*write_count)(void __iomem *base, unsigned long offs,
+ unsigned long value);
};
-static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
+/* Examples of supported CMT timer register layouts and I/O access widths:
+ *
+ * "16-bit counter and 16-bit control" as found on sh7263:
+ * CMSTR 0xfffec000 16-bit
+ * CMCSR 0xfffec002 16-bit
+ * CMCNT 0xfffec004 16-bit
+ * CMCOR 0xfffec006 16-bit
+ *
+ * "32-bit counter and 16-bit control" as found on sh7372, sh73a0, r8a7740:
+ * CMSTR 0xffca0000 16-bit
+ * CMCSR 0xffca0060 16-bit
+ * CMCNT 0xffca0064 32-bit
+ * CMCOR 0xffca0068 32-bit
+ */
+
+static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs)
+{
+ return ioread16(base + (offs << 1));
+}
+
+static unsigned long sh_cmt_read32(void __iomem *base, unsigned long offs)
+{
+ return ioread32(base + (offs << 2));
+}
+
+static void sh_cmt_write16(void __iomem *base, unsigned long offs,
+ unsigned long value)
+{
+ iowrite16(value, base + (offs << 1));
+}
+
+static void sh_cmt_write32(void __iomem *base, unsigned long offs,
+ unsigned long value)
+{
+ iowrite32(value, base + (offs << 2));
+}
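+
+/*
+ * Worked example (illustration, addresses taken from the layout comment
+ * above): on the sh7372 layout p->mapbase points at CMCSR (0xffca0060),
+ * so sh_cmt_read32(p->mapbase, CMCNT) reads ioread32(mapbase + (1 << 2))
+ * = 0xffca0064, while the 16-bit control accessors keep using
+ * mapbase + (offs << 1).
+ */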
-#define CMSTR -1 /* shared register */
#define CMCSR 0 /* channel register */
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */
-static inline unsigned long sh_cmt_read(struct sh_cmt_priv *p, int reg_nr)
+static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_priv *p)
{
struct sh_timer_config *cfg = p->pdev->dev.platform_data;
- void __iomem *base = p->mapbase;
- unsigned long offs;
-
- if (reg_nr == CMSTR) {
- offs = 0;
- base -= cfg->channel_offset;
- } else
- offs = reg_nr;
-
- if (p->width == 16)
- offs <<= 1;
- else {
- offs <<= 2;
- if ((reg_nr == CMCNT) || (reg_nr == CMCOR))
- return ioread32(base + offs);
- }
- return ioread16(base + offs);
+ return p->read_control(p->mapbase - cfg->channel_offset, 0);
}
-static inline void sh_cmt_write(struct sh_cmt_priv *p, int reg_nr,
- unsigned long value)
+static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_priv *p)
+{
+ return p->read_control(p->mapbase, CMCSR);
+}
+
+static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_priv *p)
+{
+ return p->read_count(p->mapbase, CMCNT);
+}
+
+static inline void sh_cmt_write_cmstr(struct sh_cmt_priv *p,
+ unsigned long value)
{
struct sh_timer_config *cfg = p->pdev->dev.platform_data;
- void __iomem *base = p->mapbase;
- unsigned long offs;
-
- if (reg_nr == CMSTR) {
- offs = 0;
- base -= cfg->channel_offset;
- } else
- offs = reg_nr;
-
- if (p->width == 16)
- offs <<= 1;
- else {
- offs <<= 2;
- if ((reg_nr == CMCNT) || (reg_nr == CMCOR)) {
- iowrite32(value, base + offs);
- return;
- }
- }
- iowrite16(value, base + offs);
+ p->write_control(p->mapbase - cfg->channel_offset, 0, value);
+}
+
+static inline void sh_cmt_write_cmcsr(struct sh_cmt_priv *p,
+ unsigned long value)
+{
+ p->write_control(p->mapbase, CMCSR, value);
+}
+
+static inline void sh_cmt_write_cmcnt(struct sh_cmt_priv *p,
+ unsigned long value)
+{
+ p->write_count(p->mapbase, CMCNT, value);
+}
+
+static inline void sh_cmt_write_cmcor(struct sh_cmt_priv *p,
+ unsigned long value)
+{
+ p->write_count(p->mapbase, CMCOR, value);
}
static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
@@ -118,15 +156,15 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
unsigned long v1, v2, v3;
int o1, o2;
- o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit;
+ o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit;
/* Make sure the timer value is stable. Stolen from acpi_pm.c */
do {
o2 = o1;
- v1 = sh_cmt_read(p, CMCNT);
- v2 = sh_cmt_read(p, CMCNT);
- v3 = sh_cmt_read(p, CMCNT);
- o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit;
+ v1 = sh_cmt_read_cmcnt(p);
+ v2 = sh_cmt_read_cmcnt(p);
+ v3 = sh_cmt_read_cmcnt(p);
+ o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit;
} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
|| (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));
@@ -134,6 +172,7 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
return v2;
}
+static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
{
@@ -142,14 +181,14 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
/* start stop register shared by multiple timer channels */
raw_spin_lock_irqsave(&sh_cmt_lock, flags);
- value = sh_cmt_read(p, CMSTR);
+ value = sh_cmt_read_cmstr(p);
if (start)
value |= 1 << cfg->timer_bit;
else
value &= ~(1 << cfg->timer_bit);
- sh_cmt_write(p, CMSTR, value);
+ sh_cmt_write_cmstr(p, value);
raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
}
@@ -173,14 +212,14 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
/* configure channel, periodic mode and maximum timeout */
if (p->width == 16) {
*rate = clk_get_rate(p->clk) / 512;
- sh_cmt_write(p, CMCSR, 0x43);
+ sh_cmt_write_cmcsr(p, 0x43);
} else {
*rate = clk_get_rate(p->clk) / 8;
- sh_cmt_write(p, CMCSR, 0x01a4);
+ sh_cmt_write_cmcsr(p, 0x01a4);
}
- sh_cmt_write(p, CMCOR, 0xffffffff);
- sh_cmt_write(p, CMCNT, 0);
+ sh_cmt_write_cmcor(p, 0xffffffff);
+ sh_cmt_write_cmcnt(p, 0);
/*
* According to the sh73a0 user's manual, as CMCNT can be operated
@@ -194,12 +233,12 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
* take RCLKx2 at maximum.
*/
for (k = 0; k < 100; k++) {
- if (!sh_cmt_read(p, CMCNT))
+ if (!sh_cmt_read_cmcnt(p))
break;
udelay(1);
}
- if (sh_cmt_read(p, CMCNT)) {
+ if (sh_cmt_read_cmcnt(p)) {
dev_err(&p->pdev->dev, "cannot clear CMCNT\n");
ret = -ETIMEDOUT;
goto err1;
@@ -222,7 +261,7 @@ static void sh_cmt_disable(struct sh_cmt_priv *p)
sh_cmt_start_stop_ch(p, 0);
/* disable interrupts in CMT block */
- sh_cmt_write(p, CMCSR, 0);
+ sh_cmt_write_cmcsr(p, 0);
/* stop clock */
clk_disable(p->clk);
@@ -270,7 +309,7 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
if (new_match > p->max_match_value)
new_match = p->max_match_value;
- sh_cmt_write(p, CMCOR, new_match);
+ sh_cmt_write_cmcor(p, new_match);
now = sh_cmt_get_counter(p, &has_wrapped);
if (has_wrapped && (new_match > p->match_value)) {
@@ -346,7 +385,7 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
struct sh_cmt_priv *p = dev_id;
/* clear flags */
- sh_cmt_write(p, CMCSR, sh_cmt_read(p, CMCSR) & p->clear_bits);
+ sh_cmt_write_cmcsr(p, sh_cmt_read_cmcsr(p) & p->clear_bits);
/* update clock source counter to begin with if enabled
* the wrap flag should be cleared by the timer specific
@@ -625,14 +664,6 @@ static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
unsigned long clockevent_rating,
unsigned long clocksource_rating)
{
- if (p->width == (sizeof(p->max_match_value) * 8))
- p->max_match_value = ~0;
- else
- p->max_match_value = (1 << p->width) - 1;
-
- p->match_value = p->max_match_value;
- raw_spin_lock_init(&p->lock);
-
if (clockevent_rating)
sh_cmt_register_clockevent(p, name, clockevent_rating);
@@ -657,8 +688,6 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
goto err0;
}
- platform_set_drvdata(pdev, p);
-
res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&p->pdev->dev, "failed to get I/O memory\n");
@@ -693,32 +722,51 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
goto err1;
}
+ p->read_control = sh_cmt_read16;
+ p->write_control = sh_cmt_write16;
+
if (resource_size(res) == 6) {
p->width = 16;
+ p->read_count = sh_cmt_read16;
+ p->write_count = sh_cmt_write16;
p->overflow_bit = 0x80;
p->clear_bits = ~0x80;
} else {
p->width = 32;
+ p->read_count = sh_cmt_read32;
+ p->write_count = sh_cmt_write32;
p->overflow_bit = 0x8000;
p->clear_bits = ~0xc000;
}
+ if (p->width == (sizeof(p->max_match_value) * 8))
+ p->max_match_value = ~0;
+ else
+ p->max_match_value = (1 << p->width) - 1;
+
+ p->match_value = p->max_match_value;
+ raw_spin_lock_init(&p->lock);
+
ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev),
cfg->clockevent_rating,
cfg->clocksource_rating);
if (ret) {
dev_err(&p->pdev->dev, "registration failed\n");
- goto err1;
+ goto err2;
}
p->cs_enabled = false;
ret = setup_irq(irq, &p->irqaction);
if (ret) {
dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
- goto err1;
+ goto err2;
}
+ platform_set_drvdata(pdev, p);
+
return 0;
+err2:
+ clk_put(p->clk);
err1:
iounmap(p->mapbase);
@@ -751,7 +799,6 @@ static int sh_cmt_probe(struct platform_device *pdev)
ret = sh_cmt_setup(p, pdev);
if (ret) {
kfree(p);
- platform_set_drvdata(pdev, NULL);
pm_runtime_idle(&pdev->dev);
return ret;
}
@@ -791,7 +838,7 @@ static void __exit sh_cmt_exit(void)
}
early_platform_init("earlytimer", &sh_cmt_device_driver);
-module_init(sh_cmt_init);
+subsys_initcall(sh_cmt_init);
module_exit(sh_cmt_exit);
MODULE_AUTHOR("Magnus Damm");
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 83943e2..4aac9ee 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -386,7 +386,7 @@ static void __exit sh_mtu2_exit(void)
}
early_platform_init("earlytimer", &sh_mtu2_device_driver);
-module_init(sh_mtu2_init);
+subsys_initcall(sh_mtu2_init);
module_exit(sh_mtu2_exit);
MODULE_AUTHOR("Magnus Damm");
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index b4502edc..78b8dae 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -549,7 +549,7 @@ static void __exit sh_tmu_exit(void)
}
early_platform_init("earlytimer", &sh_tmu_device_driver);
-module_init(sh_tmu_init);
+subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);
MODULE_AUTHOR("Magnus Damm");
diff --git a/drivers/clocksource/sunxi_timer.c b/drivers/clocksource/sun4i_timer.c
index 0ce85e2..d4674e7 100644
--- a/drivers/clocksource/sunxi_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -22,66 +22,64 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/sunxi_timer.h>
-#include <linux/clk/sunxi.h>
-#define TIMER_CTL_REG 0x00
-#define TIMER_CTL_ENABLE (1 << 0)
+#define TIMER_IRQ_EN_REG 0x00
+#define TIMER_IRQ_EN(val) (1 << (val))
#define TIMER_IRQ_ST_REG 0x04
-#define TIMER0_CTL_REG 0x10
-#define TIMER0_CTL_ENABLE (1 << 0)
-#define TIMER0_CTL_AUTORELOAD (1 << 1)
-#define TIMER0_CTL_ONESHOT (1 << 7)
-#define TIMER0_INTVAL_REG 0x14
-#define TIMER0_CNTVAL_REG 0x18
+#define TIMER_CTL_REG(val) (0x10 * (val) + 0x10)
+#define TIMER_CTL_ENABLE (1 << 0)
+#define TIMER_CTL_AUTORELOAD (1 << 1)
+#define TIMER_CTL_ONESHOT (1 << 7)
+#define TIMER_INTVAL_REG(val) (0x10 * (val) + 0x14)
+#define TIMER_CNTVAL_REG(val) (0x10 * (val) + 0x18)
#define TIMER_SCAL 16
static void __iomem *timer_base;
-static void sunxi_clkevt_mode(enum clock_event_mode mode,
+static void sun4i_clkevt_mode(enum clock_event_mode mode,
struct clock_event_device *clk)
{
- u32 u = readl(timer_base + TIMER0_CTL_REG);
+ u32 u = readl(timer_base + TIMER_CTL_REG(0));
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- u &= ~(TIMER0_CTL_ONESHOT);
- writel(u | TIMER0_CTL_ENABLE, timer_base + TIMER0_CTL_REG);
+ u &= ~(TIMER_CTL_ONESHOT);
+ writel(u | TIMER_CTL_ENABLE, timer_base + TIMER_CTL_REG(0));
break;
case CLOCK_EVT_MODE_ONESHOT:
- writel(u | TIMER0_CTL_ONESHOT, timer_base + TIMER0_CTL_REG);
+ writel(u | TIMER_CTL_ONESHOT, timer_base + TIMER_CTL_REG(0));
break;
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
default:
- writel(u & ~(TIMER0_CTL_ENABLE), timer_base + TIMER0_CTL_REG);
+ writel(u & ~(TIMER_CTL_ENABLE), timer_base + TIMER_CTL_REG(0));
break;
}
}
-static int sunxi_clkevt_next_event(unsigned long evt,
+static int sun4i_clkevt_next_event(unsigned long evt,
struct clock_event_device *unused)
{
- u32 u = readl(timer_base + TIMER0_CTL_REG);
- writel(evt, timer_base + TIMER0_CNTVAL_REG);
- writel(u | TIMER0_CTL_ENABLE | TIMER0_CTL_AUTORELOAD,
- timer_base + TIMER0_CTL_REG);
+ u32 u = readl(timer_base + TIMER_CTL_REG(0));
+ writel(evt, timer_base + TIMER_CNTVAL_REG(0));
+ writel(u | TIMER_CTL_ENABLE | TIMER_CTL_AUTORELOAD,
+ timer_base + TIMER_CTL_REG(0));
return 0;
}
-static struct clock_event_device sunxi_clockevent = {
- .name = "sunxi_tick",
+static struct clock_event_device sun4i_clockevent = {
+ .name = "sun4i_tick",
.rating = 300,
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = sunxi_clkevt_mode,
- .set_next_event = sunxi_clkevt_next_event,
+ .set_mode = sun4i_clkevt_mode,
+ .set_next_event = sun4i_clkevt_next_event,
};
-static irqreturn_t sunxi_timer_interrupt(int irq, void *dev_id)
+static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = (struct clock_event_device *)dev_id;
@@ -91,30 +89,20 @@ static irqreturn_t sunxi_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction sunxi_timer_irq = {
- .name = "sunxi_timer0",
+static struct irqaction sun4i_timer_irq = {
+ .name = "sun4i_timer0",
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
- .handler = sunxi_timer_interrupt,
- .dev_id = &sunxi_clockevent,
-};
-
-static struct of_device_id sunxi_timer_dt_ids[] = {
- { .compatible = "allwinner,sunxi-timer" },
- { }
+ .handler = sun4i_timer_interrupt,
+ .dev_id = &sun4i_clockevent,
};
-void __init sunxi_timer_init(void)
+static void __init sun4i_timer_init(struct device_node *node)
{
- struct device_node *node;
unsigned long rate = 0;
struct clk *clk;
int ret, irq;
u32 val;
- node = of_find_matching_node(NULL, sunxi_timer_dt_ids);
- if (!node)
- panic("No sunxi timer node");
-
timer_base = of_iomap(node, 0);
if (!timer_base)
panic("Can't map registers");
@@ -123,8 +111,6 @@ void __init sunxi_timer_init(void)
if (irq <= 0)
panic("Can't parse IRQ");
- sunxi_init_clocks();
-
clk = of_clk_get(node, 0);
if (IS_ERR(clk))
panic("Can't get timer clock");
@@ -132,29 +118,31 @@ void __init sunxi_timer_init(void)
rate = clk_get_rate(clk);
writel(rate / (TIMER_SCAL * HZ),
- timer_base + TIMER0_INTVAL_REG);
+ timer_base + TIMER_INTVAL_REG(0));
/* set clock source to HOSC, 16 pre-division */
- val = readl(timer_base + TIMER0_CTL_REG);
+ val = readl(timer_base + TIMER_CTL_REG(0));
val &= ~(0x07 << 4);
val &= ~(0x03 << 2);
val |= (4 << 4) | (1 << 2);
- writel(val, timer_base + TIMER0_CTL_REG);
+ writel(val, timer_base + TIMER_CTL_REG(0));
/* set mode to auto reload */
- val = readl(timer_base + TIMER0_CTL_REG);
- writel(val | TIMER0_CTL_AUTORELOAD, timer_base + TIMER0_CTL_REG);
+ val = readl(timer_base + TIMER_CTL_REG(0));
+ writel(val | TIMER_CTL_AUTORELOAD, timer_base + TIMER_CTL_REG(0));
- ret = setup_irq(irq, &sunxi_timer_irq);
+ ret = setup_irq(irq, &sun4i_timer_irq);
if (ret)
pr_warn("failed to setup irq %d\n", irq);
/* Enable timer0 interrupt */
- val = readl(timer_base + TIMER_CTL_REG);
- writel(val | TIMER_CTL_ENABLE, timer_base + TIMER_CTL_REG);
+ val = readl(timer_base + TIMER_IRQ_EN_REG);
+ writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
- sunxi_clockevent.cpumask = cpumask_of(0);
+ sun4i_clockevent.cpumask = cpumask_of(0);
- clockevents_config_and_register(&sunxi_clockevent, rate / TIMER_SCAL,
+ clockevents_config_and_register(&sun4i_clockevent, rate / TIMER_SCAL,
0x1, 0xff);
}
+CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-timer",
+ sun4i_timer_init);
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
index 0bde03f..ae877b0 100644
--- a/drivers/clocksource/tegra20_timer.c
+++ b/drivers/clocksource/tegra20_timer.c
@@ -154,29 +154,12 @@ static struct irqaction tegra_timer_irq = {
.dev_id = &tegra_clockevent,
};
-static const struct of_device_id timer_match[] __initconst = {
- { .compatible = "nvidia,tegra20-timer" },
- {}
-};
-
-static const struct of_device_id rtc_match[] __initconst = {
- { .compatible = "nvidia,tegra20-rtc" },
- {}
-};
-
-static void __init tegra20_init_timer(void)
+static void __init tegra20_init_timer(struct device_node *np)
{
- struct device_node *np;
struct clk *clk;
unsigned long rate;
int ret;
- np = of_find_matching_node(NULL, timer_match);
- if (!np) {
- pr_err("Failed to find timer DT node\n");
- BUG();
- }
-
timer_reg_base = of_iomap(np, 0);
if (!timer_reg_base) {
pr_err("Can't map timer registers\n");
@@ -189,7 +172,7 @@ static void __init tegra20_init_timer(void)
BUG();
}
- clk = clk_get_sys("timer", NULL);
+ clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_warn("Unable to get timer clock. Assuming 12Mhz input clock.\n");
rate = 12000000;
@@ -200,30 +183,6 @@ static void __init tegra20_init_timer(void)
of_node_put(np);
- np = of_find_matching_node(NULL, rtc_match);
- if (!np) {
- pr_err("Failed to find RTC DT node\n");
- BUG();
- }
-
- rtc_base = of_iomap(np, 0);
- if (!rtc_base) {
- pr_err("Can't map RTC registers");
- BUG();
- }
-
- /*
- * rtc registers are used by read_persistent_clock, keep the rtc clock
- * enabled
- */
- clk = clk_get_sys("rtc-tegra", NULL);
- if (IS_ERR(clk))
- pr_warn("Unable to get rtc-tegra clock\n");
- else
- clk_prepare_enable(clk);
-
- of_node_put(np);
-
switch (rate) {
case 12000000:
timer_writel(0x000b, TIMERUS_USEC_CFG);
@@ -259,12 +218,34 @@ static void __init tegra20_init_timer(void)
tegra_clockevent.irq = tegra_timer_irq.irq;
clockevents_config_and_register(&tegra_clockevent, 1000000,
0x1, 0x1fffffff);
-#ifdef CONFIG_HAVE_ARM_TWD
- twd_local_timer_of_register();
-#endif
+}
+CLOCKSOURCE_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra20_init_timer);
+
+static void __init tegra20_init_rtc(struct device_node *np)
+{
+ struct clk *clk;
+
+ rtc_base = of_iomap(np, 0);
+ if (!rtc_base) {
+ pr_err("Can't map RTC registers");
+ BUG();
+ }
+
+ /*
+ * rtc registers are used by read_persistent_clock, keep the rtc clock
+ * enabled
+ */
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk))
+ pr_warn("Unable to get rtc-tegra clock\n");
+ else
+ clk_prepare_enable(clk);
+
+ of_node_put(np);
+
register_persistent_clock(NULL, tegra_read_persistent_clock);
}
-CLOCKSOURCE_OF_DECLARE(tegra20, "nvidia,tegra20-timer", tegra20_init_timer);
+CLOCKSOURCE_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
#ifdef CONFIG_PM
static u32 usec_config;
diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c
new file mode 100644
index 0000000..97738db
--- /dev/null
+++ b/drivers/clocksource/timer-marco.c
@@ -0,0 +1,299 @@
+/*
+ * System timer for CSR SiRFprimaII
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <asm/sched_clock.h>
+#include <asm/localtimer.h>
+#include <asm/mach/time.h>
+
+#define SIRFSOC_TIMER_32COUNTER_0_CTRL 0x0000
+#define SIRFSOC_TIMER_32COUNTER_1_CTRL 0x0004
+#define SIRFSOC_TIMER_MATCH_0 0x0018
+#define SIRFSOC_TIMER_MATCH_1 0x001c
+#define SIRFSOC_TIMER_COUNTER_0 0x0048
+#define SIRFSOC_TIMER_COUNTER_1 0x004c
+#define SIRFSOC_TIMER_INTR_STATUS 0x0060
+#define SIRFSOC_TIMER_WATCHDOG_EN 0x0064
+#define SIRFSOC_TIMER_64COUNTER_CTRL 0x0068
+#define SIRFSOC_TIMER_64COUNTER_LO 0x006c
+#define SIRFSOC_TIMER_64COUNTER_HI 0x0070
+#define SIRFSOC_TIMER_64COUNTER_LOAD_LO 0x0074
+#define SIRFSOC_TIMER_64COUNTER_LOAD_HI 0x0078
+#define SIRFSOC_TIMER_64COUNTER_RLATCHED_LO 0x007c
+#define SIRFSOC_TIMER_64COUNTER_RLATCHED_HI 0x0080
+
+#define SIRFSOC_TIMER_REG_CNT 6
+
+static const u32 sirfsoc_timer_reg_list[SIRFSOC_TIMER_REG_CNT] = {
+ SIRFSOC_TIMER_WATCHDOG_EN,
+ SIRFSOC_TIMER_32COUNTER_0_CTRL,
+ SIRFSOC_TIMER_32COUNTER_1_CTRL,
+ SIRFSOC_TIMER_64COUNTER_CTRL,
+ SIRFSOC_TIMER_64COUNTER_RLATCHED_LO,
+ SIRFSOC_TIMER_64COUNTER_RLATCHED_HI,
+};
+
+static u32 sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT];
+
+static void __iomem *sirfsoc_timer_base;
+
+/* disable count and interrupt */
+static inline void sirfsoc_timer_count_disable(int idx)
+{
+ writel_relaxed(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL + 4 * idx) & ~0x7,
+ sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL + 4 * idx);
+}
+
+/* enable count and interrupt */
+static inline void sirfsoc_timer_count_enable(int idx)
+{
+ writel_relaxed(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL + 4 * idx) | 0x7,
+ sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL + 4 * idx);
+}
+
+/* timer interrupt handler */
+static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *ce = dev_id;
+ int cpu = smp_processor_id();
+
+ /* clear timer interrupt */
+ writel_relaxed(BIT(cpu), sirfsoc_timer_base + SIRFSOC_TIMER_INTR_STATUS);
+
+ if (ce->mode == CLOCK_EVT_MODE_ONESHOT)
+ sirfsoc_timer_count_disable(cpu);
+
+ ce->event_handler(ce);
+
+ return IRQ_HANDLED;
+}
+
+/* read 64-bit timer counter */
+static cycle_t sirfsoc_timer_read(struct clocksource *cs)
+{
+ u64 cycles;
+
+ writel_relaxed((readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL) |
+ BIT(0)) & ~BIT(1), sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL);
+
+ cycles = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_RLATCHED_HI);
+ cycles = (cycles << 32) | readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_RLATCHED_LO);
+
+ return cycles;
+}
+
+static int sirfsoc_timer_set_next_event(unsigned long delta,
+ struct clock_event_device *ce)
+{
+ int cpu = smp_processor_id();
+
+ writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_0 +
+ 4 * cpu);
+ writel_relaxed(delta, sirfsoc_timer_base + SIRFSOC_TIMER_MATCH_0 +
+ 4 * cpu);
+
+ /* enable the tick */
+ sirfsoc_timer_count_enable(cpu);
+
+ return 0;
+}
+
+static void sirfsoc_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *ce)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_ONESHOT:
+ /* enable in set_next_event */
+ break;
+ default:
+ break;
+ }
+
+ sirfsoc_timer_count_disable(smp_processor_id());
+}
+
+static void sirfsoc_clocksource_suspend(struct clocksource *cs)
+{
+ int i;
+
+ for (i = 0; i < SIRFSOC_TIMER_REG_CNT; i++)
+ sirfsoc_timer_reg_val[i] = readl_relaxed(sirfsoc_timer_base + sirfsoc_timer_reg_list[i]);
+}
+
+static void sirfsoc_clocksource_resume(struct clocksource *cs)
+{
+ int i;
+
+ for (i = 0; i < SIRFSOC_TIMER_REG_CNT - 2; i++)
+ writel_relaxed(sirfsoc_timer_reg_val[i], sirfsoc_timer_base + sirfsoc_timer_reg_list[i]);
+
+ writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 2],
+ sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_LOAD_LO);
+ writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 1],
+ sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_LOAD_HI);
+
+ writel_relaxed(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL) |
+ BIT(1) | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL);
+}
+
+static struct clock_event_device sirfsoc_clockevent = {
+ .name = "sirfsoc_clockevent",
+ .rating = 200,
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .set_mode = sirfsoc_timer_set_mode,
+ .set_next_event = sirfsoc_timer_set_next_event,
+};
+
+static struct clocksource sirfsoc_clocksource = {
+ .name = "sirfsoc_clocksource",
+ .rating = 200,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .read = sirfsoc_timer_read,
+ .suspend = sirfsoc_clocksource_suspend,
+ .resume = sirfsoc_clocksource_resume,
+};
+
+static struct irqaction sirfsoc_timer_irq = {
+ .name = "sirfsoc_timer0",
+ .flags = IRQF_TIMER | IRQF_NOBALANCING,
+ .handler = sirfsoc_timer_interrupt,
+ .dev_id = &sirfsoc_clockevent,
+};
+
+#ifdef CONFIG_LOCAL_TIMERS
+
+static struct irqaction sirfsoc_timer1_irq = {
+ .name = "sirfsoc_timer1",
+ .flags = IRQF_TIMER | IRQF_NOBALANCING,
+ .handler = sirfsoc_timer_interrupt,
+};
+
+static int __cpuinit sirfsoc_local_timer_setup(struct clock_event_device *ce)
+{
+ /* Use existing clock_event for cpu 0 */
+ if (!smp_processor_id())
+ return 0;
+
+ ce->irq = sirfsoc_timer1_irq.irq;
+ ce->name = "local_timer";
+ ce->features = sirfsoc_clockevent.features;
+ ce->rating = sirfsoc_clockevent.rating;
+ ce->set_mode = sirfsoc_timer_set_mode;
+ ce->set_next_event = sirfsoc_timer_set_next_event;
+ ce->shift = sirfsoc_clockevent.shift;
+ ce->mult = sirfsoc_clockevent.mult;
+ ce->max_delta_ns = sirfsoc_clockevent.max_delta_ns;
+ ce->min_delta_ns = sirfsoc_clockevent.min_delta_ns;
+
+ sirfsoc_timer1_irq.dev_id = ce;
+ BUG_ON(setup_irq(ce->irq, &sirfsoc_timer1_irq));
+ irq_set_affinity(sirfsoc_timer1_irq.irq, cpumask_of(1));
+
+ clockevents_register_device(ce);
+ return 0;
+}
+
+static void sirfsoc_local_timer_stop(struct clock_event_device *ce)
+{
+ sirfsoc_timer_count_disable(1);
+
+ remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq);
+}
+
+static struct local_timer_ops sirfsoc_local_timer_ops __cpuinitdata = {
+ .setup = sirfsoc_local_timer_setup,
+ .stop = sirfsoc_local_timer_stop,
+};
+#endif /* CONFIG_LOCAL_TIMERS */
+
+static void __init sirfsoc_clockevent_init(void)
+{
+ clockevents_calc_mult_shift(&sirfsoc_clockevent, CLOCK_TICK_RATE, 60);
+
+ sirfsoc_clockevent.max_delta_ns =
+ clockevent_delta2ns(-2, &sirfsoc_clockevent);
+ sirfsoc_clockevent.min_delta_ns =
+ clockevent_delta2ns(2, &sirfsoc_clockevent);
+
+ sirfsoc_clockevent.cpumask = cpumask_of(0);
+ clockevents_register_device(&sirfsoc_clockevent);
+#ifdef CONFIG_LOCAL_TIMERS
+ local_timer_register(&sirfsoc_local_timer_ops);
+#endif
+}
+
+/* initialize the kernel jiffy timer source */
+static void __init sirfsoc_marco_timer_init(void)
+{
+ unsigned long rate;
+ u32 timer_div;
+ struct clk *clk;
+
+ /* timer's input clock is io clock */
+ clk = clk_get_sys("io", NULL);
+
+ BUG_ON(IS_ERR(clk));
+ rate = clk_get_rate(clk);
+
+ BUG_ON(rate < CLOCK_TICK_RATE);
+ BUG_ON(rate % CLOCK_TICK_RATE);
+
+ /* Initialize the timer dividers */
+ timer_div = rate / CLOCK_TICK_RATE - 1;
+ writel_relaxed(timer_div << 16, sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL);
+ writel_relaxed(timer_div << 16, sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL);
+ writel_relaxed(timer_div << 16, sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_1_CTRL);
+
+ /* Initialize timer counters to 0 */
+ writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_LOAD_LO);
+ writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_LOAD_HI);
+ writel_relaxed(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL) |
+ BIT(1) | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL);
+ writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_0);
+ writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_1);
+
+ /* Clear all interrupts */
+ writel_relaxed(0xFFFF, sirfsoc_timer_base + SIRFSOC_TIMER_INTR_STATUS);
+
+ BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, CLOCK_TICK_RATE));
+
+ BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq));
+
+ sirfsoc_clockevent_init();
+}
+
+static void __init sirfsoc_of_timer_init(struct device_node *np)
+{
+ sirfsoc_timer_base = of_iomap(np, 0);
+ if (!sirfsoc_timer_base)
+ panic("unable to map timer cpu registers\n");
+
+ sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0);
+ if (!sirfsoc_timer_irq.irq)
+ panic("No irq passed for timer0 via DT\n");
+
+#ifdef CONFIG_LOCAL_TIMERS
+ sirfsoc_timer1_irq.irq = irq_of_parse_and_map(np, 1);
+ if (!sirfsoc_timer1_irq.irq)
+ panic("No irq passed for timer1 via DT\n");
+#endif
+
+ sirfsoc_marco_timer_init();
+}
CLOCKSOURCE_OF_DECLARE(sirfsoc_marco_timer, "sirf,marco-tick", sirfsoc_of_timer_init);
diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c
new file mode 100644
index 0000000..7608826
--- /dev/null
+++ b/drivers/clocksource/timer-prima2.c
@@ -0,0 +1,215 @@
+/*
+ * System timer for CSR SiRFprimaII
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <asm/sched_clock.h>
+#include <asm/mach/time.h>
+
+#define SIRFSOC_TIMER_COUNTER_LO 0x0000
+#define SIRFSOC_TIMER_COUNTER_HI 0x0004
+#define SIRFSOC_TIMER_MATCH_0 0x0008
+#define SIRFSOC_TIMER_MATCH_1 0x000C
+#define SIRFSOC_TIMER_MATCH_2 0x0010
+#define SIRFSOC_TIMER_MATCH_3 0x0014
+#define SIRFSOC_TIMER_MATCH_4 0x0018
+#define SIRFSOC_TIMER_MATCH_5 0x001C
+#define SIRFSOC_TIMER_STATUS 0x0020
+#define SIRFSOC_TIMER_INT_EN 0x0024
+#define SIRFSOC_TIMER_WATCHDOG_EN 0x0028
+#define SIRFSOC_TIMER_DIV 0x002C
+#define SIRFSOC_TIMER_LATCH 0x0030
+#define SIRFSOC_TIMER_LATCHED_LO 0x0034
+#define SIRFSOC_TIMER_LATCHED_HI 0x0038
+
+#define SIRFSOC_TIMER_WDT_INDEX 5
+
+#define SIRFSOC_TIMER_LATCH_BIT BIT(0)
+
+#define SIRFSOC_TIMER_REG_CNT 11
+
+static const u32 sirfsoc_timer_reg_list[SIRFSOC_TIMER_REG_CNT] = {
+ SIRFSOC_TIMER_MATCH_0, SIRFSOC_TIMER_MATCH_1, SIRFSOC_TIMER_MATCH_2,
+ SIRFSOC_TIMER_MATCH_3, SIRFSOC_TIMER_MATCH_4, SIRFSOC_TIMER_MATCH_5,
+ SIRFSOC_TIMER_INT_EN, SIRFSOC_TIMER_WATCHDOG_EN, SIRFSOC_TIMER_DIV,
+ SIRFSOC_TIMER_LATCHED_LO, SIRFSOC_TIMER_LATCHED_HI,
+};
+
+static u32 sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT];
+
+static void __iomem *sirfsoc_timer_base;
+
+/* timer0 interrupt handler */
+static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *ce = dev_id;
+
+ WARN_ON(!(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_STATUS) & BIT(0)));
+
+ /* clear timer0 interrupt */
+ writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS);
+
+ ce->event_handler(ce);
+
+ return IRQ_HANDLED;
+}
+
+/* read 64-bit timer counter */
+static cycle_t sirfsoc_timer_read(struct clocksource *cs)
+{
+ u64 cycles;
+
+ /* latch the 64-bit timer counter */
+ writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);
+ cycles = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_HI);
+ cycles = (cycles << 32) | readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO);
+
+ return cycles;
+}
+
+static int sirfsoc_timer_set_next_event(unsigned long delta,
+ struct clock_event_device *ce)
+{
+ unsigned long now, next;
+
+ writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);
+ now = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO);
+ next = now + delta;
+ writel_relaxed(next, sirfsoc_timer_base + SIRFSOC_TIMER_MATCH_0);
+ writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);
+ now = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO);
+
+ return next - now > delta ? -ETIME : 0;
+}
+
+static void sirfsoc_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *ce)
+{
+ u32 val = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ WARN_ON(1);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ writel_relaxed(val | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
+ break;
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ writel_relaxed(val & ~BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ }
+}
+
+static void sirfsoc_clocksource_suspend(struct clocksource *cs)
+{
+ int i;
+
+ writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);
+
+ for (i = 0; i < SIRFSOC_TIMER_REG_CNT; i++)
+ sirfsoc_timer_reg_val[i] = readl_relaxed(sirfsoc_timer_base + sirfsoc_timer_reg_list[i]);
+}
+
+static void sirfsoc_clocksource_resume(struct clocksource *cs)
+{
+ int i;
+
+ for (i = 0; i < SIRFSOC_TIMER_REG_CNT - 2; i++)
+ writel_relaxed(sirfsoc_timer_reg_val[i], sirfsoc_timer_base + sirfsoc_timer_reg_list[i]);
+
+ writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 2], sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO);
+ writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 1], sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
+}
+
+static struct clock_event_device sirfsoc_clockevent = {
+ .name = "sirfsoc_clockevent",
+ .rating = 200,
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .set_mode = sirfsoc_timer_set_mode,
+ .set_next_event = sirfsoc_timer_set_next_event,
+};
+
+static struct clocksource sirfsoc_clocksource = {
+ .name = "sirfsoc_clocksource",
+ .rating = 200,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .read = sirfsoc_timer_read,
+ .suspend = sirfsoc_clocksource_suspend,
+ .resume = sirfsoc_clocksource_resume,
+};
+
+static struct irqaction sirfsoc_timer_irq = {
+ .name = "sirfsoc_timer0",
+ .flags = IRQF_TIMER,
+ .irq = 0,
+ .handler = sirfsoc_timer_interrupt,
+ .dev_id = &sirfsoc_clockevent,
+};
+
+/* Overwrite weak default sched_clock with more precise one */
+static u32 notrace sirfsoc_read_sched_clock(void)
+{
+ return (u32)(sirfsoc_timer_read(NULL) & 0xffffffff);
+}
+
+static void __init sirfsoc_clockevent_init(void)
+{
+ sirfsoc_clockevent.cpumask = cpumask_of(0);
+ clockevents_config_and_register(&sirfsoc_clockevent, CLOCK_TICK_RATE,
+ 2, -2);
+}
+
+/* initialize the kernel jiffy timer source */
+static void __init sirfsoc_prima2_timer_init(struct device_node *np)
+{
+ unsigned long rate;
+ struct clk *clk;
+
+ /* timer's input clock is io clock */
+ clk = clk_get_sys("io", NULL);
+
+ BUG_ON(IS_ERR(clk));
+
+ rate = clk_get_rate(clk);
+
+ BUG_ON(rate < CLOCK_TICK_RATE);
+ BUG_ON(rate % CLOCK_TICK_RATE);
+
+ sirfsoc_timer_base = of_iomap(np, 0);
+ if (!sirfsoc_timer_base)
+ panic("unable to map timer cpu registers\n");
+
+ sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0);
+
+ writel_relaxed(rate / CLOCK_TICK_RATE / 2 - 1, sirfsoc_timer_base + SIRFSOC_TIMER_DIV);
+ writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO);
+ writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
+ writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS);
+
+ BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, CLOCK_TICK_RATE));
+
+ setup_sched_clock(sirfsoc_read_sched_clock, 32, CLOCK_TICK_RATE);
+
+ BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq));
+
+ sirfsoc_clockevent_init();
+}
+CLOCKSOURCE_OF_DECLARE(sirfsoc_prima2_timer, "sirf,prima2-tick", sirfsoc_prima2_timer_init);
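
As a side note on the prima2/marco timer code above: the clocksource read path writes the latch bit and then assembles the snapshotted high and low halves into one 64-bit value. The fragment below is a minimal standalone sketch of that latch-then-read pattern, assuming a plain array in place of the memory-mapped timer registers; it is illustrative only and not part of the patch.

/*
 * Standalone sketch (not part of the patch) of the latch-then-read pattern
 * used by sirfsoc_timer_read() above. Register access is stubbed with a
 * plain array so the example compiles and runs in userspace.
 */
#include <stdint.h>
#include <stdio.h>

#define REG_LATCH      0
#define REG_LATCHED_LO 1
#define REG_LATCHED_HI 2

static uint32_t regs[3];	/* stand-in for the memory-mapped timer */

static void stub_latch(uint64_t counter)
{
	/* hardware would snapshot the running counter when the latch bit is set */
	regs[REG_LATCHED_LO] = (uint32_t)counter;
	regs[REG_LATCHED_HI] = (uint32_t)(counter >> 32);
}

static uint64_t timer_read64(void)
{
	uint64_t cycles;

	regs[REG_LATCH] = 1;			/* writel_relaxed(LATCH_BIT, ...) */
	cycles = regs[REG_LATCHED_HI];		/* readl_relaxed(LATCHED_HI) */
	cycles = (cycles << 32) | regs[REG_LATCHED_LO];
	return cycles;
}

int main(void)
{
	stub_latch(0x123456789abcULL);
	printf("latched counter = 0x%llx\n", (unsigned long long)timer_read64());
	return 0;
}
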
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
index 8efc86b..64f553f 100644
--- a/drivers/clocksource/vt8500_timer.c
+++ b/drivers/clocksource/vt8500_timer.c
@@ -129,22 +129,10 @@ static struct irqaction irq = {
.dev_id = &clockevent,
};
-static struct of_device_id vt8500_timer_ids[] = {
- { .compatible = "via,vt8500-timer" },
- { }
-};
-
-static void __init vt8500_timer_init(void)
+static void __init vt8500_timer_init(struct device_node *np)
{
- struct device_node *np;
int timer_irq;
- np = of_find_matching_node(NULL, vt8500_timer_ids);
- if (!np) {
- pr_err("%s: Timer description missing from Device Tree\n",
- __func__);
- return;
- }
regbase = of_iomap(np, 0);
if (!regbase) {
pr_err("%s: Missing iobase description in Device Tree\n",
@@ -177,4 +165,4 @@ static void __init vt8500_timer_init(void)
4, 0xf0000000);
}
-CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init)
+CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 87ec4d0..dffb855 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -276,6 +276,16 @@ config CRYPTO_DEV_PICOXCELL
Saying m here will build a module named picoxcell_crypto.
+config CRYPTO_DEV_SAHARA
+ tristate "Support for SAHARA crypto accelerator"
+ depends on ARCH_MXC && EXPERIMENTAL && OF
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AES
+ select CRYPTO_ECB
+ help
+ This option enables support for the SAHARA HW crypto accelerator
+ found in some Freescale i.MX chips.
+
config CRYPTO_DEV_S5P
tristate "Support for Samsung S5PV210 crypto accelerator"
depends on ARCH_S5PV210
@@ -361,15 +371,17 @@ config CRYPTO_DEV_ATMEL_TDES
will be called atmel-tdes.
config CRYPTO_DEV_ATMEL_SHA
- tristate "Support for Atmel SHA1/SHA256 hw accelerator"
+ tristate "Support for Atmel SHA hw accelerator"
depends on ARCH_AT91
select CRYPTO_SHA1
select CRYPTO_SHA256
+ select CRYPTO_SHA512
select CRYPTO_ALGAPI
help
- Some Atmel processors have SHA1/SHA256 hw accelerator.
+ Some Atmel processors have SHA1/SHA224/SHA256/SHA384/SHA512
+ hw accelerator.
Select this if you want to use the Atmel module for
- SHA1/SHA256 algorithms.
+ SHA1/SHA224/SHA256/SHA384/SHA512 algorithms.
To compile this driver as a module, choose M here: the module
will be called atmel-sha.
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 880a47b..38ce13d 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
+obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 6f22ba5..c1efd91 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -38,7 +38,7 @@
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
-#include <linux/platform_data/atmel-aes.h>
+#include <linux/platform_data/crypto-atmel.h>
#include "atmel-aes-regs.h"
#define CFB8_BLOCK_SIZE 1
@@ -47,7 +47,7 @@
#define CFB64_BLOCK_SIZE 8
/* AES flags */
-#define AES_FLAGS_MODE_MASK 0x01ff
+#define AES_FLAGS_MODE_MASK 0x03ff
#define AES_FLAGS_ENCRYPT BIT(0)
#define AES_FLAGS_CBC BIT(1)
#define AES_FLAGS_CFB BIT(2)
@@ -55,21 +55,26 @@
#define AES_FLAGS_CFB16 BIT(4)
#define AES_FLAGS_CFB32 BIT(5)
#define AES_FLAGS_CFB64 BIT(6)
-#define AES_FLAGS_OFB BIT(7)
-#define AES_FLAGS_CTR BIT(8)
+#define AES_FLAGS_CFB128 BIT(7)
+#define AES_FLAGS_OFB BIT(8)
+#define AES_FLAGS_CTR BIT(9)
#define AES_FLAGS_INIT BIT(16)
#define AES_FLAGS_DMA BIT(17)
#define AES_FLAGS_BUSY BIT(18)
+#define AES_FLAGS_FAST BIT(19)
-#define AES_FLAGS_DUALBUFF BIT(24)
-
-#define ATMEL_AES_QUEUE_LENGTH 1
-#define ATMEL_AES_CACHE_SIZE 0
+#define ATMEL_AES_QUEUE_LENGTH 50
#define ATMEL_AES_DMA_THRESHOLD 16
+struct atmel_aes_caps {
+ bool has_dualbuff;
+ bool has_cfb64;
+ u32 max_burst_size;
+};
+
struct atmel_aes_dev;
struct atmel_aes_ctx {
@@ -77,6 +82,8 @@ struct atmel_aes_ctx {
int keylen;
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
+
+ u16 block_size;
};
struct atmel_aes_reqctx {
@@ -112,20 +119,27 @@ struct atmel_aes_dev {
struct scatterlist *in_sg;
unsigned int nb_in_sg;
-
+ size_t in_offset;
struct scatterlist *out_sg;
unsigned int nb_out_sg;
+ size_t out_offset;
size_t bufcnt;
+ size_t buflen;
+ size_t dma_size;
- u8 buf_in[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32));
- int dma_in;
+ void *buf_in;
+ int dma_in;
+ dma_addr_t dma_addr_in;
struct atmel_aes_dma dma_lch_in;
- u8 buf_out[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32));
- int dma_out;
+ void *buf_out;
+ int dma_out;
+ dma_addr_t dma_addr_out;
struct atmel_aes_dma dma_lch_out;
+ struct atmel_aes_caps caps;
+
u32 hw_version;
};
@@ -165,6 +179,37 @@ static int atmel_aes_sg_length(struct ablkcipher_request *req,
return sg_nb;
}
+static int atmel_aes_sg_copy(struct scatterlist **sg, size_t *offset,
+ void *buf, size_t buflen, size_t total, int out)
+{
+ unsigned int count, off = 0;
+
+ while (buflen && total) {
+ count = min((*sg)->length - *offset, total);
+ count = min(count, buflen);
+
+ if (!count)
+ return off;
+
+ scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);
+
+ off += count;
+ buflen -= count;
+ *offset += count;
+ total -= count;
+
+ if (*offset == (*sg)->length) {
+ *sg = sg_next(*sg);
+ if (*sg)
+ *offset = 0;
+ else
+ total = 0;
+ }
+ }
+
+ return off;
+}
+
static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
{
return readl_relaxed(dd->io_base + offset);
@@ -190,14 +235,6 @@ static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
atmel_aes_write(dd, offset, *value);
}
-static void atmel_aes_dualbuff_test(struct atmel_aes_dev *dd)
-{
- atmel_aes_write(dd, AES_MR, AES_MR_DUALBUFF);
-
- if (atmel_aes_read(dd, AES_MR) & AES_MR_DUALBUFF)
- dd->flags |= AES_FLAGS_DUALBUFF;
-}
-
static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
{
struct atmel_aes_dev *aes_dd = NULL;
@@ -225,7 +262,7 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
if (!(dd->flags & AES_FLAGS_INIT)) {
atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
- atmel_aes_dualbuff_test(dd);
+ atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
dd->flags |= AES_FLAGS_INIT;
dd->err = 0;
}
@@ -233,11 +270,19 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
return 0;
}
+static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
+{
+ return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
+}
+
static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
{
atmel_aes_hw_init(dd);
- dd->hw_version = atmel_aes_read(dd, AES_HW_VERSION);
+ dd->hw_version = atmel_aes_get_version(dd);
+
+ dev_info(dd->dev,
+ "version: 0x%x\n", dd->hw_version);
clk_disable_unprepare(dd->iclk);
}
@@ -260,50 +305,77 @@ static void atmel_aes_dma_callback(void *data)
tasklet_schedule(&dd->done_task);
}
-static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd)
+static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd,
+ dma_addr_t dma_addr_in, dma_addr_t dma_addr_out, int length)
{
+ struct scatterlist sg[2];
struct dma_async_tx_descriptor *in_desc, *out_desc;
- int nb_dma_sg_in, nb_dma_sg_out;
- dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
- if (!dd->nb_in_sg)
- goto exit_err;
+ dd->dma_size = length;
- nb_dma_sg_in = dma_map_sg(dd->dev, dd->in_sg, dd->nb_in_sg,
- DMA_TO_DEVICE);
- if (!nb_dma_sg_in)
- goto exit_err;
+ if (!(dd->flags & AES_FLAGS_FAST)) {
+ dma_sync_single_for_device(dd->dev, dma_addr_in, length,
+ DMA_TO_DEVICE);
+ }
- in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, dd->in_sg,
- nb_dma_sg_in, DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (dd->flags & AES_FLAGS_CFB8) {
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_1_BYTE;
+ } else if (dd->flags & AES_FLAGS_CFB16) {
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_2_BYTES;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_2_BYTES;
+ } else {
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ }
- if (!in_desc)
- goto unmap_in;
+ if (dd->flags & (AES_FLAGS_CFB8 | AES_FLAGS_CFB16 |
+ AES_FLAGS_CFB32 | AES_FLAGS_CFB64)) {
+ dd->dma_lch_in.dma_conf.src_maxburst = 1;
+ dd->dma_lch_in.dma_conf.dst_maxburst = 1;
+ dd->dma_lch_out.dma_conf.src_maxburst = 1;
+ dd->dma_lch_out.dma_conf.dst_maxburst = 1;
+ } else {
+ dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
+ }
- /* callback not needed */
+ dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
+ dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
- dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
- if (!dd->nb_out_sg)
- goto unmap_in;
+ dd->flags |= AES_FLAGS_DMA;
- nb_dma_sg_out = dma_map_sg(dd->dev, dd->out_sg, dd->nb_out_sg,
- DMA_FROM_DEVICE);
- if (!nb_dma_sg_out)
- goto unmap_out;
+ sg_init_table(&sg[0], 1);
+ sg_dma_address(&sg[0]) = dma_addr_in;
+ sg_dma_len(&sg[0]) = length;
- out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, dd->out_sg,
- nb_dma_sg_out, DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ sg_init_table(&sg[1], 1);
+ sg_dma_address(&sg[1]) = dma_addr_out;
+ sg_dma_len(&sg[1]) = length;
+
+ in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
+ 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!in_desc)
+ return -EINVAL;
+ out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
+ 1, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!out_desc)
- goto unmap_out;
+ return -EINVAL;
out_desc->callback = atmel_aes_dma_callback;
out_desc->callback_param = dd;
- dd->total -= dd->req->nbytes;
-
dmaengine_submit(out_desc);
dma_async_issue_pending(dd->dma_lch_out.chan);
@@ -311,15 +383,6 @@ static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd)
dma_async_issue_pending(dd->dma_lch_in.chan);
return 0;
-
-unmap_out:
- dma_unmap_sg(dd->dev, dd->out_sg, dd->nb_out_sg,
- DMA_FROM_DEVICE);
-unmap_in:
- dma_unmap_sg(dd->dev, dd->in_sg, dd->nb_in_sg,
- DMA_TO_DEVICE);
-exit_err:
- return -EINVAL;
}
static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
@@ -352,30 +415,66 @@ static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
{
- int err;
+ int err, fast = 0, in, out;
+ size_t count;
+ dma_addr_t addr_in, addr_out;
+
+ if ((!dd->in_offset) && (!dd->out_offset)) {
+ /* check for alignment */
+ in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
+ IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
+ out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
+ IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
+ fast = in && out;
+
+ if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
+ fast = 0;
+ }
+
+
+ if (fast) {
+ count = min(dd->total, sg_dma_len(dd->in_sg));
+ count = min(count, sg_dma_len(dd->out_sg));
+
+ err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+ if (!err) {
+ dev_err(dd->dev, "dma_map_sg() error\n");
+ return -EINVAL;
+ }
+
+ err = dma_map_sg(dd->dev, dd->out_sg, 1,
+ DMA_FROM_DEVICE);
+ if (!err) {
+ dev_err(dd->dev, "dma_map_sg() error\n");
+ dma_unmap_sg(dd->dev, dd->in_sg, 1,
+ DMA_TO_DEVICE);
+ return -EINVAL;
+ }
+
+ addr_in = sg_dma_address(dd->in_sg);
+ addr_out = sg_dma_address(dd->out_sg);
+
+ dd->flags |= AES_FLAGS_FAST;
- if (dd->flags & AES_FLAGS_CFB8) {
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_1_BYTE;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_1_BYTE;
- } else if (dd->flags & AES_FLAGS_CFB16) {
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_2_BYTES;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_2_BYTES;
} else {
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
+ /* use cache buffers */
+ count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset,
+ dd->buf_in, dd->buflen, dd->total, 0);
+
+ addr_in = dd->dma_addr_in;
+ addr_out = dd->dma_addr_out;
+
+ dd->flags &= ~AES_FLAGS_FAST;
}
- dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
- dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
+ dd->total -= count;
- dd->flags |= AES_FLAGS_DMA;
- err = atmel_aes_crypt_dma(dd);
+ err = atmel_aes_crypt_dma(dd, addr_in, addr_out, count);
+
+ if (err && (dd->flags & AES_FLAGS_FAST)) {
+ dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+ dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
+ }
return err;
}
@@ -410,6 +509,8 @@ static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
valmr |= AES_MR_CFBS_32b;
else if (dd->flags & AES_FLAGS_CFB64)
valmr |= AES_MR_CFBS_64b;
+ else if (dd->flags & AES_FLAGS_CFB128)
+ valmr |= AES_MR_CFBS_128b;
} else if (dd->flags & AES_FLAGS_OFB) {
valmr |= AES_MR_OPMOD_OFB;
} else if (dd->flags & AES_FLAGS_CTR) {
@@ -423,7 +524,7 @@ static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
if (dd->total > ATMEL_AES_DMA_THRESHOLD) {
valmr |= AES_MR_SMOD_IDATAR0;
- if (dd->flags & AES_FLAGS_DUALBUFF)
+ if (dd->caps.has_dualbuff)
valmr |= AES_MR_DUALBUFF;
} else {
valmr |= AES_MR_SMOD_AUTO;
@@ -477,7 +578,9 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
/* assign new request to device */
dd->req = req;
dd->total = req->nbytes;
+ dd->in_offset = 0;
dd->in_sg = req->src;
+ dd->out_offset = 0;
dd->out_sg = req->dst;
rctx = ablkcipher_request_ctx(req);
@@ -506,18 +609,86 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
{
int err = -EINVAL;
+ size_t count;
if (dd->flags & AES_FLAGS_DMA) {
- dma_unmap_sg(dd->dev, dd->out_sg,
- dd->nb_out_sg, DMA_FROM_DEVICE);
- dma_unmap_sg(dd->dev, dd->in_sg,
- dd->nb_in_sg, DMA_TO_DEVICE);
err = 0;
+ if (dd->flags & AES_FLAGS_FAST) {
+ dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
+ dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+ } else {
+ dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
+ dd->dma_size, DMA_FROM_DEVICE);
+
+ /* copy data */
+ count = atmel_aes_sg_copy(&dd->out_sg, &dd->out_offset,
+ dd->buf_out, dd->buflen, dd->dma_size, 1);
+ if (count != dd->dma_size) {
+ err = -EINVAL;
+ pr_err("not all data converted: %u\n", count);
+ }
+ }
}
return err;
}
+
+static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
+{
+ int err = -ENOMEM;
+
+ dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
+ dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
+ dd->buflen = PAGE_SIZE;
+ dd->buflen &= ~(AES_BLOCK_SIZE - 1);
+
+ if (!dd->buf_in || !dd->buf_out) {
+ dev_err(dd->dev, "unable to alloc pages.\n");
+ goto err_alloc;
+ }
+
+ /* MAP here */
+ dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
+ dd->buflen, DMA_TO_DEVICE);
+ if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
+ dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
+ err = -EINVAL;
+ goto err_map_in;
+ }
+
+ dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
+ dd->buflen, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
+ dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
+ err = -EINVAL;
+ goto err_map_out;
+ }
+
+ return 0;
+
+err_map_out:
+ dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
+ DMA_TO_DEVICE);
+err_map_in:
+ free_page((unsigned long)dd->buf_out);
+ free_page((unsigned long)dd->buf_in);
+err_alloc:
+ if (err)
+ pr_err("error: %d\n", err);
+ return err;
+}
+
+static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
+{
+ dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
+ DMA_TO_DEVICE);
+ free_page((unsigned long)dd->buf_out);
+ free_page((unsigned long)dd->buf_in);
+}
+
static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(
@@ -525,9 +696,30 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
struct atmel_aes_dev *dd;
- if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
- pr_err("request size is not exact amount of AES blocks\n");
- return -EINVAL;
+ if (mode & AES_FLAGS_CFB8) {
+ if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
+ pr_err("request size is not exact amount of CFB8 blocks\n");
+ return -EINVAL;
+ }
+ ctx->block_size = CFB8_BLOCK_SIZE;
+ } else if (mode & AES_FLAGS_CFB16) {
+ if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
+ pr_err("request size is not exact amount of CFB16 blocks\n");
+ return -EINVAL;
+ }
+ ctx->block_size = CFB16_BLOCK_SIZE;
+ } else if (mode & AES_FLAGS_CFB32) {
+ if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
+ pr_err("request size is not exact amount of CFB32 blocks\n");
+ return -EINVAL;
+ }
+ ctx->block_size = CFB32_BLOCK_SIZE;
+ } else {
+ if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+ pr_err("request size is not exact amount of AES blocks\n");
+ return -EINVAL;
+ }
+ ctx->block_size = AES_BLOCK_SIZE;
}
dd = atmel_aes_find_dev(ctx);
@@ -551,14 +743,12 @@ static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
}
}
-static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
+static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
+ struct crypto_platform_data *pdata)
{
int err = -ENOMEM;
- struct aes_platform_data *pdata;
dma_cap_mask_t mask_in, mask_out;
- pdata = dd->dev->platform_data;
-
if (pdata && pdata->dma_slave->txdata.dma_dev &&
pdata->dma_slave->rxdata.dma_dev) {
@@ -568,28 +758,38 @@ static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
dd->dma_lch_in.chan = dma_request_channel(mask_in,
atmel_aes_filter, &pdata->dma_slave->rxdata);
+
if (!dd->dma_lch_in.chan)
goto err_dma_in;
dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
AES_IDATAR(0);
- dd->dma_lch_in.dma_conf.src_maxburst = 1;
- dd->dma_lch_in.dma_conf.dst_maxburst = 1;
+ dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_in.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
dd->dma_lch_in.dma_conf.device_fc = false;
dma_cap_zero(mask_out);
dma_cap_set(DMA_SLAVE, mask_out);
dd->dma_lch_out.chan = dma_request_channel(mask_out,
atmel_aes_filter, &pdata->dma_slave->txdata);
+
if (!dd->dma_lch_out.chan)
goto err_dma_out;
dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
AES_ODATAR(0);
- dd->dma_lch_out.dma_conf.src_maxburst = 1;
- dd->dma_lch_out.dma_conf.dst_maxburst = 1;
+ dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_out.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
dd->dma_lch_out.dma_conf.device_fc = false;
return 0;
@@ -665,13 +865,13 @@ static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
{
return atmel_aes_crypt(req,
- AES_FLAGS_ENCRYPT | AES_FLAGS_CFB);
+ AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB128);
}
static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
{
return atmel_aes_crypt(req,
- AES_FLAGS_CFB);
+ AES_FLAGS_CFB | AES_FLAGS_CFB128);
}
static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
@@ -753,7 +953,7 @@ static struct crypto_alg aes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
+ .cra_alignmask = 0xf,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
@@ -773,7 +973,7 @@ static struct crypto_alg aes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
+ .cra_alignmask = 0xf,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
@@ -794,7 +994,7 @@ static struct crypto_alg aes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
+ .cra_alignmask = 0xf,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
@@ -815,7 +1015,7 @@ static struct crypto_alg aes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
+ .cra_alignmask = 0xf,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
@@ -836,7 +1036,7 @@ static struct crypto_alg aes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB32_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
+ .cra_alignmask = 0x3,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
@@ -857,7 +1057,7 @@ static struct crypto_alg aes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB16_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
+ .cra_alignmask = 0x1,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
@@ -899,7 +1099,7 @@ static struct crypto_alg aes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
+ .cra_alignmask = 0xf,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
@@ -915,15 +1115,14 @@ static struct crypto_alg aes_algs[] = {
},
};
-static struct crypto_alg aes_cfb64_alg[] = {
-{
+static struct crypto_alg aes_cfb64_alg = {
.cra_name = "cfb64(aes)",
.cra_driver_name = "atmel-cfb64-aes",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB64_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
@@ -936,7 +1135,6 @@ static struct crypto_alg aes_cfb64_alg[] = {
.encrypt = atmel_aes_cfb64_encrypt,
.decrypt = atmel_aes_cfb64_decrypt,
}
-},
};
static void atmel_aes_queue_task(unsigned long data)
@@ -969,7 +1167,14 @@ static void atmel_aes_done_task(unsigned long data)
err = dd->err ? : err;
if (dd->total && !err) {
- err = atmel_aes_crypt_dma_start(dd);
+ if (dd->flags & AES_FLAGS_FAST) {
+ dd->in_sg = sg_next(dd->in_sg);
+ dd->out_sg = sg_next(dd->out_sg);
+ if (!dd->in_sg || !dd->out_sg)
+ err = -EINVAL;
+ }
+ if (!err)
+ err = atmel_aes_crypt_dma_start(dd);
if (!err)
return; /* DMA started. Not finishing. */
}
@@ -1003,8 +1208,8 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
crypto_unregister_alg(&aes_algs[i]);
- if (dd->hw_version >= 0x130)
- crypto_unregister_alg(&aes_cfb64_alg[0]);
+ if (dd->caps.has_cfb64)
+ crypto_unregister_alg(&aes_cfb64_alg);
}
static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
@@ -1017,10 +1222,8 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
goto err_aes_algs;
}
- atmel_aes_hw_version_init(dd);
-
- if (dd->hw_version >= 0x130) {
- err = crypto_register_alg(&aes_cfb64_alg[0]);
+ if (dd->caps.has_cfb64) {
+ err = crypto_register_alg(&aes_cfb64_alg);
if (err)
goto err_aes_cfb64_alg;
}
@@ -1036,10 +1239,32 @@ err_aes_algs:
return err;
}
+static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
+{
+ dd->caps.has_dualbuff = 0;
+ dd->caps.has_cfb64 = 0;
+ dd->caps.max_burst_size = 1;
+
+ /* keep only major version number */
+ switch (dd->hw_version & 0xff0) {
+ case 0x130:
+ dd->caps.has_dualbuff = 1;
+ dd->caps.has_cfb64 = 1;
+ dd->caps.max_burst_size = 4;
+ break;
+ case 0x120:
+ break;
+ default:
+ dev_warn(dd->dev,
+ "Unmanaged aes version, set minimum capabilities\n");
+ break;
+ }
+}
+
static int atmel_aes_probe(struct platform_device *pdev)
{
struct atmel_aes_dev *aes_dd;
- struct aes_platform_data *pdata;
+ struct crypto_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *aes_res;
unsigned long aes_phys_size;
@@ -1099,7 +1324,7 @@ static int atmel_aes_probe(struct platform_device *pdev)
}
/* Initializing the clock */
- aes_dd->iclk = clk_get(&pdev->dev, NULL);
+ aes_dd->iclk = clk_get(&pdev->dev, "aes_clk");
if (IS_ERR(aes_dd->iclk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(aes_dd->iclk);
@@ -1113,7 +1338,15 @@ static int atmel_aes_probe(struct platform_device *pdev)
goto aes_io_err;
}
- err = atmel_aes_dma_init(aes_dd);
+ atmel_aes_hw_version_init(aes_dd);
+
+ atmel_aes_get_cap(aes_dd);
+
+ err = atmel_aes_buff_init(aes_dd);
+ if (err)
+ goto err_aes_buff;
+
+ err = atmel_aes_dma_init(aes_dd, pdata);
if (err)
goto err_aes_dma;
@@ -1135,6 +1368,8 @@ err_algs:
spin_unlock(&atmel_aes.lock);
atmel_aes_dma_cleanup(aes_dd);
err_aes_dma:
+ atmel_aes_buff_cleanup(aes_dd);
+err_aes_buff:
iounmap(aes_dd->io_base);
aes_io_err:
clk_put(aes_dd->iclk);
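
The reworked atmel_aes_crypt_dma_start() above chooses between DMA directly on the caller's scatterlists and copying through the pre-mapped bounce buffers, based on word alignment of the buffer offsets and block-size alignment of the lengths. The sketch below captures that decision in isolation; the sg_entry type and the is_aligned() helper are simplified stand-ins for this illustration, not kernel API.

/*
 * Standalone sketch (not part of the patch) of the fast/slow path choice
 * made in atmel_aes_crypt_dma_start(): DMA in place when both sides are
 * suitably aligned and equally sized, otherwise fall back to the bounce
 * buffer.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sg_entry {
	size_t offset;
	size_t length;
};

static bool is_aligned(size_t value, size_t align)
{
	return (value & (align - 1)) == 0;
}

/* returns true when the hardware can DMA the buffers in place */
static bool use_fast_path(const struct sg_entry *in, const struct sg_entry *out,
			  size_t block_size)
{
	bool in_ok  = is_aligned(in->offset, sizeof(uint32_t)) &&
		      is_aligned(in->length, block_size);
	bool out_ok = is_aligned(out->offset, sizeof(uint32_t)) &&
		      is_aligned(out->length, block_size);

	/* both sides must also describe the same amount of data */
	return in_ok && out_ok && in->length == out->length;
}

int main(void)
{
	struct sg_entry in  = { .offset = 0, .length = 256 };
	struct sg_entry out = { .offset = 0, .length = 256 };

	printf("fast path: %d\n", use_fast_path(&in, &out, 16)); /* 1: AES-block aligned */
	in.offset = 2;						  /* unaligned source */
	printf("fast path: %d\n", use_fast_path(&in, &out, 16)); /* 0: bounce buffer */
	return 0;
}
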
diff --git a/drivers/crypto/atmel-sha-regs.h b/drivers/crypto/atmel-sha-regs.h
index dc53a20..83b2d74 100644
--- a/drivers/crypto/atmel-sha-regs.h
+++ b/drivers/crypto/atmel-sha-regs.h
@@ -14,10 +14,13 @@
#define SHA_MR_MODE_MANUAL 0x0
#define SHA_MR_MODE_AUTO 0x1
#define SHA_MR_MODE_PDC 0x2
-#define SHA_MR_DUALBUFF (1 << 3)
#define SHA_MR_PROCDLY (1 << 4)
#define SHA_MR_ALGO_SHA1 (0 << 8)
#define SHA_MR_ALGO_SHA256 (1 << 8)
+#define SHA_MR_ALGO_SHA384 (2 << 8)
+#define SHA_MR_ALGO_SHA512 (3 << 8)
+#define SHA_MR_ALGO_SHA224 (4 << 8)
+#define SHA_MR_DUALBUFF (1 << 16)
#define SHA_IER 0x10
#define SHA_IDR 0x14
@@ -33,6 +36,8 @@
#define SHA_ISR_URAT_MR (0x2 << 12)
#define SHA_ISR_URAT_WO (0x5 << 12)
+#define SHA_HW_VERSION 0xFC
+
#define SHA_TPR 0x108
#define SHA_TCR 0x10C
#define SHA_TNPR 0x118
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 4918e94..eaed8bf 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -38,6 +38,7 @@
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
+#include <linux/platform_data/crypto-atmel.h>
#include "atmel-sha-regs.h"
/* SHA flags */
@@ -52,11 +53,12 @@
#define SHA_FLAGS_FINUP BIT(16)
#define SHA_FLAGS_SG BIT(17)
#define SHA_FLAGS_SHA1 BIT(18)
-#define SHA_FLAGS_SHA256 BIT(19)
-#define SHA_FLAGS_ERROR BIT(20)
-#define SHA_FLAGS_PAD BIT(21)
-
-#define SHA_FLAGS_DUALBUFF BIT(24)
+#define SHA_FLAGS_SHA224 BIT(19)
+#define SHA_FLAGS_SHA256 BIT(20)
+#define SHA_FLAGS_SHA384 BIT(21)
+#define SHA_FLAGS_SHA512 BIT(22)
+#define SHA_FLAGS_ERROR BIT(23)
+#define SHA_FLAGS_PAD BIT(24)
#define SHA_OP_UPDATE 1
#define SHA_OP_FINAL 2
@@ -65,6 +67,12 @@
#define ATMEL_SHA_DMA_THRESHOLD 56
+struct atmel_sha_caps {
+ bool has_dma;
+ bool has_dualbuff;
+ bool has_sha224;
+ bool has_sha_384_512;
+};
struct atmel_sha_dev;
@@ -73,8 +81,8 @@ struct atmel_sha_reqctx {
unsigned long flags;
unsigned long op;
- u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
- size_t digcnt;
+ u8 digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
+ u64 digcnt[2];
size_t bufcnt;
size_t buflen;
dma_addr_t dma_addr;
@@ -84,6 +92,8 @@ struct atmel_sha_reqctx {
unsigned int offset; /* offset in current sg */
unsigned int total; /* total request */
+ size_t block_size;
+
u8 buffer[0] __aligned(sizeof(u32));
};
@@ -97,7 +107,12 @@ struct atmel_sha_ctx {
};
-#define ATMEL_SHA_QUEUE_LENGTH 1
+#define ATMEL_SHA_QUEUE_LENGTH 50
+
+struct atmel_sha_dma {
+ struct dma_chan *chan;
+ struct dma_slave_config dma_conf;
+};
struct atmel_sha_dev {
struct list_head list;
@@ -114,6 +129,12 @@ struct atmel_sha_dev {
unsigned long flags;
struct crypto_queue queue;
struct ahash_request *req;
+
+ struct atmel_sha_dma dma_lch_in;
+
+ struct atmel_sha_caps caps;
+
+ u32 hw_version;
};
struct atmel_sha_drv {
@@ -137,14 +158,6 @@ static inline void atmel_sha_write(struct atmel_sha_dev *dd,
writel_relaxed(value, dd->io_base + offset);
}
-static void atmel_sha_dualbuff_test(struct atmel_sha_dev *dd)
-{
- atmel_sha_write(dd, SHA_MR, SHA_MR_DUALBUFF);
-
- if (atmel_sha_read(dd, SHA_MR) & SHA_MR_DUALBUFF)
- dd->flags |= SHA_FLAGS_DUALBUFF;
-}
-
static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
{
size_t count;
@@ -176,31 +189,58 @@ static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
}
/*
- * The purpose of this padding is to ensure that the padded message
- * is a multiple of 512 bits. The bit "1" is appended at the end of
- * the message followed by "padlen-1" zero bits. Then a 64 bits block
- * equals to the message length in bits is appended.
+ * The purpose of this padding is to ensure that the padded message is a
+ * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
+ * The bit "1" is appended at the end of the message followed by
+ * "padlen-1" zero bits. Then a 64 bits block (SHA1/SHA224/SHA256) or
+ * 128 bits block (SHA384/SHA512) equals to the message length in bits
+ * is appended.
*
- * padlen is calculated as followed:
+ * For SHA1/SHA224/SHA256, padlen is calculated as follows:
* - if message length < 56 bytes then padlen = 56 - message length
* - else padlen = 64 + 56 - message length
+ *
+ * For SHA384/SHA512, padlen is calculated as followed:
+ * - if message length < 112 bytes then padlen = 112 - message length
+ * - else padlen = 128 + 112 - message length
*/
static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
{
unsigned int index, padlen;
- u64 bits;
- u64 size;
-
- bits = (ctx->bufcnt + ctx->digcnt + length) << 3;
- size = cpu_to_be64(bits);
-
- index = ctx->bufcnt & 0x3f;
- padlen = (index < 56) ? (56 - index) : ((64+56) - index);
- *(ctx->buffer + ctx->bufcnt) = 0x80;
- memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
- memcpy(ctx->buffer + ctx->bufcnt + padlen, &size, 8);
- ctx->bufcnt += padlen + 8;
- ctx->flags |= SHA_FLAGS_PAD;
+ u64 bits[2];
+ u64 size[2];
+
+ size[0] = ctx->digcnt[0];
+ size[1] = ctx->digcnt[1];
+
+ size[0] += ctx->bufcnt;
+ if (size[0] < ctx->bufcnt)
+ size[1]++;
+
+ size[0] += length;
+ if (size[0] < length)
+ size[1]++;
+
+ bits[1] = cpu_to_be64(size[0] << 3);
+ bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);
+
+ if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) {
+ index = ctx->bufcnt & 0x7f;
+ padlen = (index < 112) ? (112 - index) : ((128+112) - index);
+ *(ctx->buffer + ctx->bufcnt) = 0x80;
+ memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
+ memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
+ ctx->bufcnt += padlen + 16;
+ ctx->flags |= SHA_FLAGS_PAD;
+ } else {
+ index = ctx->bufcnt & 0x3f;
+ padlen = (index < 56) ? (56 - index) : ((64+56) - index);
+ *(ctx->buffer + ctx->bufcnt) = 0x80;
+ memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
+ memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
+ ctx->bufcnt += padlen + 8;
+ ctx->flags |= SHA_FLAGS_PAD;
+ }
}
static int atmel_sha_init(struct ahash_request *req)
@@ -231,13 +271,35 @@ static int atmel_sha_init(struct ahash_request *req)
dev_dbg(dd->dev, "init: digest size: %d\n",
crypto_ahash_digestsize(tfm));
- if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
+ switch (crypto_ahash_digestsize(tfm)) {
+ case SHA1_DIGEST_SIZE:
ctx->flags |= SHA_FLAGS_SHA1;
- else if (crypto_ahash_digestsize(tfm) == SHA256_DIGEST_SIZE)
+ ctx->block_size = SHA1_BLOCK_SIZE;
+ break;
+ case SHA224_DIGEST_SIZE:
+ ctx->flags |= SHA_FLAGS_SHA224;
+ ctx->block_size = SHA224_BLOCK_SIZE;
+ break;
+ case SHA256_DIGEST_SIZE:
ctx->flags |= SHA_FLAGS_SHA256;
+ ctx->block_size = SHA256_BLOCK_SIZE;
+ break;
+ case SHA384_DIGEST_SIZE:
+ ctx->flags |= SHA_FLAGS_SHA384;
+ ctx->block_size = SHA384_BLOCK_SIZE;
+ break;
+ case SHA512_DIGEST_SIZE:
+ ctx->flags |= SHA_FLAGS_SHA512;
+ ctx->block_size = SHA512_BLOCK_SIZE;
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
ctx->bufcnt = 0;
- ctx->digcnt = 0;
+ ctx->digcnt[0] = 0;
+ ctx->digcnt[1] = 0;
ctx->buflen = SHA_BUFFER_LEN;
return 0;
@@ -249,19 +311,28 @@ static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
u32 valcr = 0, valmr = SHA_MR_MODE_AUTO;
if (likely(dma)) {
- atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
+ if (!dd->caps.has_dma)
+ atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
valmr = SHA_MR_MODE_PDC;
- if (dd->flags & SHA_FLAGS_DUALBUFF)
- valmr = SHA_MR_DUALBUFF;
+ if (dd->caps.has_dualbuff)
+ valmr |= SHA_MR_DUALBUFF;
} else {
atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
}
- if (ctx->flags & SHA_FLAGS_SHA256)
+ if (ctx->flags & SHA_FLAGS_SHA1)
+ valmr |= SHA_MR_ALGO_SHA1;
+ else if (ctx->flags & SHA_FLAGS_SHA224)
+ valmr |= SHA_MR_ALGO_SHA224;
+ else if (ctx->flags & SHA_FLAGS_SHA256)
valmr |= SHA_MR_ALGO_SHA256;
+ else if (ctx->flags & SHA_FLAGS_SHA384)
+ valmr |= SHA_MR_ALGO_SHA384;
+ else if (ctx->flags & SHA_FLAGS_SHA512)
+ valmr |= SHA_MR_ALGO_SHA512;
/* Setting CR_FIRST only for the first iteration */
- if (!ctx->digcnt)
+ if (!(ctx->digcnt[0] || ctx->digcnt[1]))
valcr = SHA_CR_FIRST;
atmel_sha_write(dd, SHA_CR, valcr);
@@ -275,13 +346,15 @@ static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
int count, len32;
const u32 *buffer = (const u32 *)buf;
- dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
- ctx->digcnt, length, final);
+ dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
+ ctx->digcnt[1], ctx->digcnt[0], length, final);
atmel_sha_write_ctrl(dd, 0);
/* should be non-zero before next lines to disable clocks later */
- ctx->digcnt += length;
+ ctx->digcnt[0] += length;
+ if (ctx->digcnt[0] < length)
+ ctx->digcnt[1]++;
if (final)
dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
@@ -302,8 +375,8 @@ static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
int len32;
- dev_dbg(dd->dev, "xmit_pdc: digcnt: %d, length: %d, final: %d\n",
- ctx->digcnt, length1, final);
+ dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
+ ctx->digcnt[1], ctx->digcnt[0], length1, final);
len32 = DIV_ROUND_UP(length1, sizeof(u32));
atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
@@ -317,7 +390,9 @@ static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
atmel_sha_write_ctrl(dd, 1);
/* should be non-zero before next lines to disable clocks later */
- ctx->digcnt += length1;
+ ctx->digcnt[0] += length1;
+ if (ctx->digcnt[0] < length1)
+ ctx->digcnt[1]++;
if (final)
dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
@@ -330,6 +405,86 @@ static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
return -EINPROGRESS;
}
+static void atmel_sha_dma_callback(void *data)
+{
+ struct atmel_sha_dev *dd = data;
+
+ /* dma_lch_in - completed - wait DATRDY */
+ atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
+}
+
+static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
+ size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
+{
+ struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+ struct dma_async_tx_descriptor *in_desc;
+ struct scatterlist sg[2];
+
+ dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
+ ctx->digcnt[1], ctx->digcnt[0], length1, final);
+
+ if (ctx->flags & (SHA_FLAGS_SHA1 | SHA_FLAGS_SHA224 |
+ SHA_FLAGS_SHA256)) {
+ dd->dma_lch_in.dma_conf.src_maxburst = 16;
+ dd->dma_lch_in.dma_conf.dst_maxburst = 16;
+ } else {
+ dd->dma_lch_in.dma_conf.src_maxburst = 32;
+ dd->dma_lch_in.dma_conf.dst_maxburst = 32;
+ }
+
+ dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
+
+ if (length2) {
+ sg_init_table(sg, 2);
+ sg_dma_address(&sg[0]) = dma_addr1;
+ sg_dma_len(&sg[0]) = length1;
+ sg_dma_address(&sg[1]) = dma_addr2;
+ sg_dma_len(&sg[1]) = length2;
+ in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ } else {
+ sg_init_table(sg, 1);
+ sg_dma_address(&sg[0]) = dma_addr1;
+ sg_dma_len(&sg[0]) = length1;
+ in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ }
+ if (!in_desc)
+ return -EINVAL;
+
+ in_desc->callback = atmel_sha_dma_callback;
+ in_desc->callback_param = dd;
+
+ atmel_sha_write_ctrl(dd, 1);
+
+ /* should be non-zero before next lines to disable clocks later */
+ ctx->digcnt[0] += length1;
+ if (ctx->digcnt[0] < length1)
+ ctx->digcnt[1]++;
+
+ if (final)
+ dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
+
+ dd->flags |= SHA_FLAGS_DMA_ACTIVE;
+
+ /* Start DMA transfer */
+ dmaengine_submit(in_desc);
+ dma_async_issue_pending(dd->dma_lch_in.chan);
+
+ return -EINPROGRESS;
+}
+
+static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
+ size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
+{
+ if (dd->caps.has_dma)
+ return atmel_sha_xmit_dma(dd, dma_addr1, length1,
+ dma_addr2, length2, final);
+ else
+ return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
+ dma_addr2, length2, final);
+}
+
static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
{
struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
@@ -337,7 +492,6 @@ static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
atmel_sha_append_sg(ctx);
atmel_sha_fill_padding(ctx, 0);
-
bufcnt = ctx->bufcnt;
ctx->bufcnt = 0;
@@ -349,17 +503,17 @@ static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
size_t length, int final)
{
ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
- ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
+ ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen +
- SHA1_BLOCK_SIZE);
+ ctx->block_size);
return -EINVAL;
}
ctx->flags &= ~SHA_FLAGS_SG;
/* next call does not fail... so no unmap in the case of error */
- return atmel_sha_xmit_pdc(dd, ctx->dma_addr, length, 0, 0, final);
+ return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
}
static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
@@ -372,8 +526,8 @@ static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
- dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
- ctx->bufcnt, ctx->digcnt, final);
+ dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: 0x%llx 0x%llx, final: %d\n",
+ ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);
if (final)
atmel_sha_fill_padding(ctx, 0);
@@ -400,30 +554,25 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
if (ctx->bufcnt || ctx->offset)
return atmel_sha_update_dma_slow(dd);
- dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
- ctx->digcnt, ctx->bufcnt, ctx->total);
+ dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %u, total: %u\n",
+ ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);
sg = ctx->sg;
if (!IS_ALIGNED(sg->offset, sizeof(u32)))
return atmel_sha_update_dma_slow(dd);
- if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, SHA1_BLOCK_SIZE))
- /* size is not SHA1_BLOCK_SIZE aligned */
+ if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
+ /* size is not ctx->block_size aligned */
return atmel_sha_update_dma_slow(dd);
length = min(ctx->total, sg->length);
if (sg_is_last(sg)) {
if (!(ctx->flags & SHA_FLAGS_FINUP)) {
- /* not last sg must be SHA1_BLOCK_SIZE aligned */
- tail = length & (SHA1_BLOCK_SIZE - 1);
+ /* not last sg must be ctx->block_size aligned */
+ tail = length & (ctx->block_size - 1);
length -= tail;
- if (length == 0) {
- /* offset where to start slow */
- ctx->offset = length;
- return atmel_sha_update_dma_slow(dd);
- }
}
}
@@ -434,7 +583,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
/* Add padding */
if (final) {
- tail = length & (SHA1_BLOCK_SIZE - 1);
+ tail = length & (ctx->block_size - 1);
length -= tail;
ctx->total += tail;
ctx->offset = length; /* offset where to start slow */
@@ -445,10 +594,10 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
atmel_sha_fill_padding(ctx, length);
ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
- ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
+ ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
dev_err(dd->dev, "dma %u bytes error\n",
- ctx->buflen + SHA1_BLOCK_SIZE);
+ ctx->buflen + ctx->block_size);
return -EINVAL;
}
@@ -456,7 +605,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
ctx->flags &= ~SHA_FLAGS_SG;
count = ctx->bufcnt;
ctx->bufcnt = 0;
- return atmel_sha_xmit_pdc(dd, ctx->dma_addr, count, 0,
+ return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
0, final);
} else {
ctx->sg = sg;
@@ -470,7 +619,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
count = ctx->bufcnt;
ctx->bufcnt = 0;
- return atmel_sha_xmit_pdc(dd, sg_dma_address(ctx->sg),
+ return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
length, ctx->dma_addr, count, final);
}
}
@@ -483,7 +632,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
ctx->flags |= SHA_FLAGS_SG;
/* next call does not fail... so no unmap in the case of error */
- return atmel_sha_xmit_pdc(dd, sg_dma_address(ctx->sg), length, 0,
+ return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
0, final);
}
@@ -498,12 +647,13 @@ static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
if (ctx->sg)
ctx->offset = 0;
}
- if (ctx->flags & SHA_FLAGS_PAD)
+ if (ctx->flags & SHA_FLAGS_PAD) {
dma_unmap_single(dd->dev, ctx->dma_addr,
- ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
+ ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
+ }
} else {
dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
- SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
+ ctx->block_size, DMA_TO_DEVICE);
}
return 0;
@@ -515,8 +665,8 @@ static int atmel_sha_update_req(struct atmel_sha_dev *dd)
struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
int err;
- dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
- ctx->total, ctx->digcnt, (ctx->flags & SHA_FLAGS_FINUP) != 0);
+ dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
+ ctx->total, ctx->digcnt[1], ctx->digcnt[0]);
if (ctx->flags & SHA_FLAGS_CPU)
err = atmel_sha_update_cpu(dd);
@@ -524,8 +674,8 @@ static int atmel_sha_update_req(struct atmel_sha_dev *dd)
err = atmel_sha_update_dma_start(dd);
/* wait for dma completion before can take more data */
- dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n",
- err, ctx->digcnt);
+ dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0x%llx\n",
+ err, ctx->digcnt[1], ctx->digcnt[0]);
return err;
}
@@ -562,12 +712,21 @@ static void atmel_sha_copy_hash(struct ahash_request *req)
u32 *hash = (u32 *)ctx->digest;
int i;
- if (likely(ctx->flags & SHA_FLAGS_SHA1))
+ if (ctx->flags & SHA_FLAGS_SHA1)
for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
- else
+ else if (ctx->flags & SHA_FLAGS_SHA224)
+ for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(u32); i++)
+ hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+ else if (ctx->flags & SHA_FLAGS_SHA256)
for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++)
hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+ else if (ctx->flags & SHA_FLAGS_SHA384)
+ for (i = 0; i < SHA384_DIGEST_SIZE / sizeof(u32); i++)
+ hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+ else
+ for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(u32); i++)
+ hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
}
static void atmel_sha_copy_ready_hash(struct ahash_request *req)
@@ -577,10 +736,16 @@ static void atmel_sha_copy_ready_hash(struct ahash_request *req)
if (!req->result)
return;
- if (likely(ctx->flags & SHA_FLAGS_SHA1))
+ if (ctx->flags & SHA_FLAGS_SHA1)
memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
- else
+ else if (ctx->flags & SHA_FLAGS_SHA224)
+ memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
+ else if (ctx->flags & SHA_FLAGS_SHA256)
memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
+ else if (ctx->flags & SHA_FLAGS_SHA384)
+ memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
+ else
+ memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
}
static int atmel_sha_finish(struct ahash_request *req)
@@ -589,11 +754,11 @@ static int atmel_sha_finish(struct ahash_request *req)
struct atmel_sha_dev *dd = ctx->dd;
int err = 0;
- if (ctx->digcnt)
+ if (ctx->digcnt[0] || ctx->digcnt[1])
atmel_sha_copy_ready_hash(req);
- dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt,
- ctx->bufcnt);
+ dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1],
+ ctx->digcnt[0], ctx->bufcnt);
return err;
}
@@ -628,9 +793,8 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
{
clk_prepare_enable(dd->iclk);
- if (SHA_FLAGS_INIT & dd->flags) {
+ if (!(SHA_FLAGS_INIT & dd->flags)) {
atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
- atmel_sha_dualbuff_test(dd);
dd->flags |= SHA_FLAGS_INIT;
dd->err = 0;
}
@@ -638,6 +802,23 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
return 0;
}
+static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
+{
+ return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
+}
+
+static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
+{
+ atmel_sha_hw_init(dd);
+
+ dd->hw_version = atmel_sha_get_version(dd);
+
+ dev_info(dd->dev,
+ "version: 0x%x\n", dd->hw_version);
+
+ clk_disable_unprepare(dd->iclk);
+}
+
static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
struct ahash_request *req)
{
@@ -682,10 +863,9 @@ static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
if (ctx->op == SHA_OP_UPDATE) {
err = atmel_sha_update_req(dd);
- if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP)) {
+ if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
/* no final() after finup() */
err = atmel_sha_final_req(dd);
- }
} else if (ctx->op == SHA_OP_FINAL) {
err = atmel_sha_final_req(dd);
}
@@ -808,7 +988,7 @@ static int atmel_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
}
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct atmel_sha_reqctx) +
- SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
+ SHA_BUFFER_LEN + SHA512_BLOCK_SIZE);
return 0;
}
@@ -826,7 +1006,7 @@ static void atmel_sha_cra_exit(struct crypto_tfm *tfm)
tctx->fallback = NULL;
}
-static struct ahash_alg sha_algs[] = {
+static struct ahash_alg sha_1_256_algs[] = {
{
.init = atmel_sha_init,
.update = atmel_sha_update,
@@ -875,6 +1055,79 @@ static struct ahash_alg sha_algs[] = {
},
};
+static struct ahash_alg sha_224_alg = {
+ .init = atmel_sha_init,
+ .update = atmel_sha_update,
+ .final = atmel_sha_final,
+ .finup = atmel_sha_finup,
+ .digest = atmel_sha_digest,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "atmel-sha224",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_sha_cra_init,
+ .cra_exit = atmel_sha_cra_exit,
+ }
+ }
+};
+
+static struct ahash_alg sha_384_512_algs[] = {
+{
+ .init = atmel_sha_init,
+ .update = atmel_sha_update,
+ .final = atmel_sha_final,
+ .finup = atmel_sha_finup,
+ .digest = atmel_sha_digest,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "atmel-sha384",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_sha_ctx),
+ .cra_alignmask = 0x3,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_sha_cra_init,
+ .cra_exit = atmel_sha_cra_exit,
+ }
+ }
+},
+{
+ .init = atmel_sha_init,
+ .update = atmel_sha_update,
+ .final = atmel_sha_final,
+ .finup = atmel_sha_finup,
+ .digest = atmel_sha_digest,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "atmel-sha512",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_sha_ctx),
+ .cra_alignmask = 0x3,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_sha_cra_init,
+ .cra_exit = atmel_sha_cra_exit,
+ }
+ }
+},
+};
+
static void atmel_sha_done_task(unsigned long data)
{
struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
@@ -941,32 +1194,142 @@ static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
{
int i;
- for (i = 0; i < ARRAY_SIZE(sha_algs); i++)
- crypto_unregister_ahash(&sha_algs[i]);
+ for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++)
+ crypto_unregister_ahash(&sha_1_256_algs[i]);
+
+ if (dd->caps.has_sha224)
+ crypto_unregister_ahash(&sha_224_alg);
+
+ if (dd->caps.has_sha_384_512) {
+ for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++)
+ crypto_unregister_ahash(&sha_384_512_algs[i]);
+ }
}
static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
{
int err, i, j;
- for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
- err = crypto_register_ahash(&sha_algs[i]);
+ for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
+ err = crypto_register_ahash(&sha_1_256_algs[i]);
if (err)
- goto err_sha_algs;
+ goto err_sha_1_256_algs;
+ }
+
+ if (dd->caps.has_sha224) {
+ err = crypto_register_ahash(&sha_224_alg);
+ if (err)
+ goto err_sha_224_algs;
+ }
+
+ if (dd->caps.has_sha_384_512) {
+ for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
+ err = crypto_register_ahash(&sha_384_512_algs[i]);
+ if (err)
+ goto err_sha_384_512_algs;
+ }
}
return 0;
-err_sha_algs:
+err_sha_384_512_algs:
+ for (j = 0; j < i; j++)
+ crypto_unregister_ahash(&sha_384_512_algs[j]);
+ crypto_unregister_ahash(&sha_224_alg);
+err_sha_224_algs:
+ i = ARRAY_SIZE(sha_1_256_algs);
+err_sha_1_256_algs:
for (j = 0; j < i; j++)
- crypto_unregister_ahash(&sha_algs[j]);
+ crypto_unregister_ahash(&sha_1_256_algs[j]);
return err;
}
+static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
+{
+ struct at_dma_slave *sl = slave;
+
+ if (sl && sl->dma_dev == chan->device->dev) {
+ chan->private = sl;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
+ struct crypto_platform_data *pdata)
+{
+ int err = -ENOMEM;
+ dma_cap_mask_t mask_in;
+
+ if (pdata && pdata->dma_slave->rxdata.dma_dev) {
+ /* Try to grab DMA channel */
+ dma_cap_zero(mask_in);
+ dma_cap_set(DMA_SLAVE, mask_in);
+
+ dd->dma_lch_in.chan = dma_request_channel(mask_in,
+ atmel_sha_filter, &pdata->dma_slave->rxdata);
+
+ if (!dd->dma_lch_in.chan)
+ return err;
+
+ dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
+ dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
+ SHA_REG_DIN(0);
+ dd->dma_lch_in.dma_conf.src_maxburst = 1;
+ dd->dma_lch_in.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.dst_maxburst = 1;
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.device_fc = false;
+
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
+{
+ dma_release_channel(dd->dma_lch_in.chan);
+}
+
+static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
+{
+
+ dd->caps.has_dma = 0;
+ dd->caps.has_dualbuff = 0;
+ dd->caps.has_sha224 = 0;
+ dd->caps.has_sha_384_512 = 0;
+
+ /* keep only major version number */
+ switch (dd->hw_version & 0xff0) {
+ case 0x410:
+ dd->caps.has_dma = 1;
+ dd->caps.has_dualbuff = 1;
+ dd->caps.has_sha224 = 1;
+ dd->caps.has_sha_384_512 = 1;
+ break;
+ case 0x400:
+ dd->caps.has_dma = 1;
+ dd->caps.has_dualbuff = 1;
+ dd->caps.has_sha224 = 1;
+ break;
+ case 0x320:
+ break;
+ default:
+ dev_warn(dd->dev,
+ "Unmanaged sha version, set minimum capabilities\n");
+ break;
+ }
+}
+
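A quick aside on the capability logic above: atmel_sha_get_cap() keys off the major IP version only, so minor/patch digits are masked away before the switch. A minimal stand-alone illustration (not driver code; the sample version values are made up):

	#include <stdio.h>

	/* Mirrors the masking in atmel_sha_get_cap(): only bits 11:4
	 * (the major version) select the capability set. */
	int main(void)
	{
		unsigned int samples[] = { 0x413, 0x402, 0x32a };
		int i;

		for (i = 0; i < 3; i++)
			printf("hw_version 0x%03x -> major 0x%03x\n",
			       samples[i], samples[i] & 0xff0);
		return 0;
	}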
static int atmel_sha_probe(struct platform_device *pdev)
{
struct atmel_sha_dev *sha_dd;
+ struct crypto_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *sha_res;
unsigned long sha_phys_size;
@@ -1018,7 +1381,7 @@ static int atmel_sha_probe(struct platform_device *pdev)
}
/* Initializing the clock */
- sha_dd->iclk = clk_get(&pdev->dev, NULL);
+ sha_dd->iclk = clk_get(&pdev->dev, "sha_clk");
if (IS_ERR(sha_dd->iclk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(sha_dd->iclk);
@@ -1032,6 +1395,22 @@ static int atmel_sha_probe(struct platform_device *pdev)
goto sha_io_err;
}
+ atmel_sha_hw_version_init(sha_dd);
+
+ atmel_sha_get_cap(sha_dd);
+
+ if (sha_dd->caps.has_dma) {
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform data not available\n");
+ err = -ENXIO;
+ goto err_pdata;
+ }
+ err = atmel_sha_dma_init(sha_dd, pdata);
+ if (err)
+ goto err_sha_dma;
+ }
+
spin_lock(&atmel_sha.lock);
list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
spin_unlock(&atmel_sha.lock);
@@ -1048,6 +1427,10 @@ err_algs:
spin_lock(&atmel_sha.lock);
list_del(&sha_dd->list);
spin_unlock(&atmel_sha.lock);
+ if (sha_dd->caps.has_dma)
+ atmel_sha_dma_cleanup(sha_dd);
+err_sha_dma:
+err_pdata:
iounmap(sha_dd->io_base);
sha_io_err:
clk_put(sha_dd->iclk);
@@ -1078,6 +1461,9 @@ static int atmel_sha_remove(struct platform_device *pdev)
tasklet_kill(&sha_dd->done_task);
+ if (sha_dd->caps.has_dma)
+ atmel_sha_dma_cleanup(sha_dd);
+
iounmap(sha_dd->io_base);
clk_put(sha_dd->iclk);
@@ -1102,6 +1488,6 @@ static struct platform_driver atmel_sha_driver = {
module_platform_driver(atmel_sha_driver);
-MODULE_DESCRIPTION("Atmel SHA1/SHA256 hw acceleration support.");
+MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
diff --git a/drivers/crypto/atmel-tdes-regs.h b/drivers/crypto/atmel-tdes-regs.h
index 5ac2a90..f86734d 100644
--- a/drivers/crypto/atmel-tdes-regs.h
+++ b/drivers/crypto/atmel-tdes-regs.h
@@ -69,6 +69,8 @@
#define TDES_XTEARNDR_XTEA_RNDS_MASK (0x3F << 0)
#define TDES_XTEARNDR_XTEA_RNDS_OFFSET 0
+#define TDES_HW_VERSION 0xFC
+
#define TDES_RPR 0x100
#define TDES_RCR 0x104
#define TDES_TPR 0x108
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 7c73fbb..4a99564 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -38,29 +38,35 @@
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
+#include <linux/platform_data/crypto-atmel.h>
#include "atmel-tdes-regs.h"
/* TDES flags */
-#define TDES_FLAGS_MODE_MASK 0x007f
+#define TDES_FLAGS_MODE_MASK 0x00ff
#define TDES_FLAGS_ENCRYPT BIT(0)
#define TDES_FLAGS_CBC BIT(1)
#define TDES_FLAGS_CFB BIT(2)
#define TDES_FLAGS_CFB8 BIT(3)
#define TDES_FLAGS_CFB16 BIT(4)
#define TDES_FLAGS_CFB32 BIT(5)
-#define TDES_FLAGS_OFB BIT(6)
+#define TDES_FLAGS_CFB64 BIT(6)
+#define TDES_FLAGS_OFB BIT(7)
#define TDES_FLAGS_INIT BIT(16)
#define TDES_FLAGS_FAST BIT(17)
#define TDES_FLAGS_BUSY BIT(18)
+#define TDES_FLAGS_DMA BIT(19)
-#define ATMEL_TDES_QUEUE_LENGTH 1
+#define ATMEL_TDES_QUEUE_LENGTH 50
#define CFB8_BLOCK_SIZE 1
#define CFB16_BLOCK_SIZE 2
#define CFB32_BLOCK_SIZE 4
-#define CFB64_BLOCK_SIZE 8
+struct atmel_tdes_caps {
+ bool has_dma;
+ u32 has_cfb_3keys;
+};
struct atmel_tdes_dev;
@@ -70,12 +76,19 @@ struct atmel_tdes_ctx {
int keylen;
u32 key[3*DES_KEY_SIZE / sizeof(u32)];
unsigned long flags;
+
+ u16 block_size;
};
struct atmel_tdes_reqctx {
unsigned long mode;
};
+struct atmel_tdes_dma {
+ struct dma_chan *chan;
+ struct dma_slave_config dma_conf;
+};
+
struct atmel_tdes_dev {
struct list_head list;
unsigned long phys_base;
@@ -99,8 +112,10 @@ struct atmel_tdes_dev {
size_t total;
struct scatterlist *in_sg;
+ unsigned int nb_in_sg;
size_t in_offset;
struct scatterlist *out_sg;
+ unsigned int nb_out_sg;
size_t out_offset;
size_t buflen;
@@ -109,10 +124,16 @@ struct atmel_tdes_dev {
void *buf_in;
int dma_in;
dma_addr_t dma_addr_in;
+ struct atmel_tdes_dma dma_lch_in;
void *buf_out;
int dma_out;
dma_addr_t dma_addr_out;
+ struct atmel_tdes_dma dma_lch_out;
+
+ struct atmel_tdes_caps caps;
+
+ u32 hw_version;
};
struct atmel_tdes_drv {
@@ -207,6 +228,31 @@ static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
return 0;
}
+static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
+{
+ return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
+}
+
+static void atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
+{
+ atmel_tdes_hw_init(dd);
+
+ dd->hw_version = atmel_tdes_get_version(dd);
+
+ dev_info(dd->dev,
+ "version: 0x%x\n", dd->hw_version);
+
+ clk_disable_unprepare(dd->iclk);
+}
+
+static void atmel_tdes_dma_callback(void *data)
+{
+ struct atmel_tdes_dev *dd = data;
+
+ /* dma_lch_out - completed */
+ tasklet_schedule(&dd->done_task);
+}
+
static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
{
int err;
@@ -217,7 +263,9 @@ static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
if (err)
return err;
- atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
+ if (!dd->caps.has_dma)
+ atmel_tdes_write(dd, TDES_PTCR,
+ TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);
/* MR register must be set before IV registers */
if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) {
@@ -241,6 +289,8 @@ static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
valmr |= TDES_MR_CFBS_16b;
else if (dd->flags & TDES_FLAGS_CFB32)
valmr |= TDES_MR_CFBS_32b;
+ else if (dd->flags & TDES_FLAGS_CFB64)
+ valmr |= TDES_MR_CFBS_64b;
} else if (dd->flags & TDES_FLAGS_OFB) {
valmr |= TDES_MR_OPMOD_OFB;
}
@@ -262,7 +312,7 @@ static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
return 0;
}
-static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
+static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
{
int err = 0;
size_t count;
@@ -288,7 +338,7 @@ static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
return err;
}
-static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
+static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
{
int err = -ENOMEM;
@@ -333,7 +383,7 @@ err_alloc:
return err;
}
-static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
+static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
{
dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
DMA_FROM_DEVICE);
@@ -343,7 +393,7 @@ static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
free_page((unsigned long)dd->buf_in);
}
-static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
+static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
dma_addr_t dma_addr_out, int length)
{
struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -379,7 +429,76 @@ static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
return 0;
}
-static int atmel_tdes_crypt_dma_start(struct atmel_tdes_dev *dd)
+static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
+ dma_addr_t dma_addr_out, int length)
+{
+ struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct atmel_tdes_dev *dd = ctx->dd;
+ struct scatterlist sg[2];
+ struct dma_async_tx_descriptor *in_desc, *out_desc;
+
+ dd->dma_size = length;
+
+ if (!(dd->flags & TDES_FLAGS_FAST)) {
+ dma_sync_single_for_device(dd->dev, dma_addr_in, length,
+ DMA_TO_DEVICE);
+ }
+
+ if (dd->flags & TDES_FLAGS_CFB8) {
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_1_BYTE;
+ } else if (dd->flags & TDES_FLAGS_CFB16) {
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_2_BYTES;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_2_BYTES;
+ } else {
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ }
+
+ dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
+ dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
+
+ dd->flags |= TDES_FLAGS_DMA;
+
+ sg_init_table(&sg[0], 1);
+ sg_dma_address(&sg[0]) = dma_addr_in;
+ sg_dma_len(&sg[0]) = length;
+
+ sg_init_table(&sg[1], 1);
+ sg_dma_address(&sg[1]) = dma_addr_out;
+ sg_dma_len(&sg[1]) = length;
+
+ in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
+ 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!in_desc)
+ return -EINVAL;
+
+ out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
+ 1, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!out_desc)
+ return -EINVAL;
+
+ out_desc->callback = atmel_tdes_dma_callback;
+ out_desc->callback_param = dd;
+
+ dmaengine_submit(out_desc);
+ dma_async_issue_pending(dd->dma_lch_out.chan);
+
+ dmaengine_submit(in_desc);
+ dma_async_issue_pending(dd->dma_lch_in.chan);
+
+ return 0;
+}
+
+static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
{
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
crypto_ablkcipher_reqtfm(dd->req));
@@ -387,23 +506,23 @@ static int atmel_tdes_crypt_dma_start(struct atmel_tdes_dev *dd)
size_t count;
dma_addr_t addr_in, addr_out;
- if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
+ if ((!dd->in_offset) && (!dd->out_offset)) {
/* check for alignment */
- in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
- out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));
-
+ in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
+ IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
+ out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
+ IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
fast = in && out;
+
+ if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
+ fast = 0;
}
+
if (fast) {
count = min(dd->total, sg_dma_len(dd->in_sg));
count = min(count, sg_dma_len(dd->out_sg));
- if (count != dd->total) {
- pr_err("request length != buffer length\n");
- return -EINVAL;
- }
-
err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
if (!err) {
dev_err(dd->dev, "dma_map_sg() error\n");
@@ -433,13 +552,16 @@ static int atmel_tdes_crypt_dma_start(struct atmel_tdes_dev *dd)
addr_out = dd->dma_addr_out;
dd->flags &= ~TDES_FLAGS_FAST;
-
}
dd->total -= count;
- err = atmel_tdes_crypt_dma(tfm, addr_in, addr_out, count);
- if (err) {
+ if (dd->caps.has_dma)
+ err = atmel_tdes_crypt_dma(tfm, addr_in, addr_out, count);
+ else
+ err = atmel_tdes_crypt_pdc(tfm, addr_in, addr_out, count);
+
+ if (err && (dd->flags & TDES_FLAGS_FAST)) {
dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
}
@@ -447,7 +569,6 @@ static int atmel_tdes_crypt_dma_start(struct atmel_tdes_dev *dd)
return err;
}
-
static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
{
struct ablkcipher_request *req = dd->req;
@@ -506,7 +627,7 @@ static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
err = atmel_tdes_write_ctrl(dd);
if (!err)
- err = atmel_tdes_crypt_dma_start(dd);
+ err = atmel_tdes_crypt_start(dd);
if (err) {
/* des_task will not finish it, so do it here */
atmel_tdes_finish_req(dd, err);
@@ -516,41 +637,145 @@ static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
return ret;
}
+static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
+{
+ int err = -EINVAL;
+ size_t count;
+
+ if (dd->flags & TDES_FLAGS_DMA) {
+ err = 0;
+ if (dd->flags & TDES_FLAGS_FAST) {
+ dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
+ dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+ } else {
+ dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
+ dd->dma_size, DMA_FROM_DEVICE);
+
+ /* copy data */
+ count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
+ dd->buf_out, dd->buflen, dd->dma_size, 1);
+ if (count != dd->dma_size) {
+ err = -EINVAL;
+ pr_err("not all data converted: %zu\n", count);
+ }
+ }
+ }
+ return err;
+}
static int atmel_tdes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
struct atmel_tdes_reqctx *rctx = ablkcipher_request_ctx(req);
- struct atmel_tdes_dev *dd;
if (mode & TDES_FLAGS_CFB8) {
if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB8 blocks\n");
return -EINVAL;
}
+ ctx->block_size = CFB8_BLOCK_SIZE;
} else if (mode & TDES_FLAGS_CFB16) {
if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB16 blocks\n");
return -EINVAL;
}
+ ctx->block_size = CFB16_BLOCK_SIZE;
} else if (mode & TDES_FLAGS_CFB32) {
if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB32 blocks\n");
return -EINVAL;
}
- } else if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) {
- pr_err("request size is not exact amount of DES blocks\n");
- return -EINVAL;
+ ctx->block_size = CFB32_BLOCK_SIZE;
+ } else {
+ if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) {
+ pr_err("request size is not exact amount of DES blocks\n");
+ return -EINVAL;
+ }
+ ctx->block_size = DES_BLOCK_SIZE;
}
- dd = atmel_tdes_find_dev(ctx);
- if (!dd)
+ rctx->mode = mode;
+
+ return atmel_tdes_handle_queue(ctx->dd, req);
+}
+
+static bool atmel_tdes_filter(struct dma_chan *chan, void *slave)
+{
+ struct at_dma_slave *sl = slave;
+
+ if (sl && sl->dma_dev == chan->device->dev) {
+ chan->private = sl;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
+ struct crypto_platform_data *pdata)
+{
+ int err = -ENOMEM;
+ dma_cap_mask_t mask_in, mask_out;
+
+ if (pdata && pdata->dma_slave->txdata.dma_dev &&
+ pdata->dma_slave->rxdata.dma_dev) {
+
+ /* Try to grab 2 DMA channels */
+ dma_cap_zero(mask_in);
+ dma_cap_set(DMA_SLAVE, mask_in);
+
+ dd->dma_lch_in.chan = dma_request_channel(mask_in,
+ atmel_tdes_filter, &pdata->dma_slave->rxdata);
+
+ if (!dd->dma_lch_in.chan)
+ goto err_dma_in;
+
+ dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
+ dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
+ TDES_IDATA1R;
+ dd->dma_lch_in.dma_conf.src_maxburst = 1;
+ dd->dma_lch_in.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.dst_maxburst = 1;
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.device_fc = false;
+
+ dma_cap_zero(mask_out);
+ dma_cap_set(DMA_SLAVE, mask_out);
+ dd->dma_lch_out.chan = dma_request_channel(mask_out,
+ atmel_tdes_filter, &pdata->dma_slave->txdata);
+
+ if (!dd->dma_lch_out.chan)
+ goto err_dma_out;
+
+ dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
+ dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
+ TDES_ODATA1R;
+ dd->dma_lch_out.dma_conf.src_maxburst = 1;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.dst_maxburst = 1;
+ dd->dma_lch_out.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.device_fc = false;
+
+ return 0;
+ } else {
return -ENODEV;
+ }
- rctx->mode = mode;
+err_dma_out:
+ dma_release_channel(dd->dma_lch_in.chan);
+err_dma_in:
+ return err;
+}
- return atmel_tdes_handle_queue(dd, req);
+static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
+{
+ dma_release_channel(dd->dma_lch_in.chan);
+ dma_release_channel(dd->dma_lch_out.chan);
}
static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
@@ -590,7 +815,8 @@ static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
/*
* HW bug in cfb 3-keys mode.
*/
- if (strstr(alg_name, "cfb") && (keylen != 2*DES_KEY_SIZE)) {
+ if (!ctx->dd->caps.has_cfb_3keys && strstr(alg_name, "cfb")
+ && (keylen != 2*DES_KEY_SIZE)) {
crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
} else if ((keylen != 2*DES_KEY_SIZE) && (keylen != 3*DES_KEY_SIZE)) {
@@ -678,8 +904,15 @@ static int atmel_tdes_ofb_decrypt(struct ablkcipher_request *req)
static int atmel_tdes_cra_init(struct crypto_tfm *tfm)
{
+ struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct atmel_tdes_dev *dd;
+
tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_tdes_reqctx);
+ dd = atmel_tdes_find_dev(ctx);
+ if (!dd)
+ return -ENODEV;
+
return 0;
}
@@ -695,7 +928,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -715,7 +948,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -736,7 +969,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -778,7 +1011,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB16_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x1,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -799,7 +1032,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB32_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x3,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -820,7 +1053,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -841,7 +1074,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -861,7 +1094,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -882,7 +1115,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -924,7 +1157,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB16_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x1,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -945,7 +1178,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB32_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x3,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -966,7 +1199,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -994,14 +1227,24 @@ static void atmel_tdes_done_task(unsigned long data)
struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data;
int err;
- err = atmel_tdes_crypt_dma_stop(dd);
+ if (!(dd->flags & TDES_FLAGS_DMA))
+ err = atmel_tdes_crypt_pdc_stop(dd);
+ else
+ err = atmel_tdes_crypt_dma_stop(dd);
err = dd->err ? : err;
if (dd->total && !err) {
- err = atmel_tdes_crypt_dma_start(dd);
+ if (dd->flags & TDES_FLAGS_FAST) {
+ dd->in_sg = sg_next(dd->in_sg);
+ dd->out_sg = sg_next(dd->out_sg);
+ if (!dd->in_sg || !dd->out_sg)
+ err = -EINVAL;
+ }
if (!err)
- return;
+ err = atmel_tdes_crypt_start(dd);
+ if (!err)
+ return; /* DMA started. Not finishing the request yet. */
}
atmel_tdes_finish_req(dd, err);
@@ -1053,9 +1296,31 @@ err_tdes_algs:
return err;
}
+static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
+{
+
+ dd->caps.has_dma = 0;
+ dd->caps.has_cfb_3keys = 0;
+
+ /* keep only major version number */
+ switch (dd->hw_version & 0xf00) {
+ case 0x700:
+ dd->caps.has_dma = 1;
+ dd->caps.has_cfb_3keys = 1;
+ break;
+ case 0x600:
+ break;
+ default:
+ dev_warn(dd->dev,
+ "Unmanaged tdes version, set minimum capabilities\n");
+ break;
+ }
+}
+
static int atmel_tdes_probe(struct platform_device *pdev)
{
struct atmel_tdes_dev *tdes_dd;
+ struct crypto_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *tdes_res;
unsigned long tdes_phys_size;
@@ -1109,7 +1374,7 @@ static int atmel_tdes_probe(struct platform_device *pdev)
}
/* Initializing the clock */
- tdes_dd->iclk = clk_get(&pdev->dev, NULL);
+ tdes_dd->iclk = clk_get(&pdev->dev, "tdes_clk");
if (IS_ERR(tdes_dd->iclk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(tdes_dd->iclk);
@@ -1123,9 +1388,25 @@ static int atmel_tdes_probe(struct platform_device *pdev)
goto tdes_io_err;
}
- err = atmel_tdes_dma_init(tdes_dd);
+ atmel_tdes_hw_version_init(tdes_dd);
+
+ atmel_tdes_get_cap(tdes_dd);
+
+ err = atmel_tdes_buff_init(tdes_dd);
if (err)
- goto err_tdes_dma;
+ goto err_tdes_buff;
+
+ if (tdes_dd->caps.has_dma) {
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform data not available\n");
+ err = -ENXIO;
+ goto err_pdata;
+ }
+ err = atmel_tdes_dma_init(tdes_dd, pdata);
+ if (err)
+ goto err_tdes_dma;
+ }
spin_lock(&atmel_tdes.lock);
list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list);
@@ -1143,8 +1424,12 @@ err_algs:
spin_lock(&atmel_tdes.lock);
list_del(&tdes_dd->list);
spin_unlock(&atmel_tdes.lock);
- atmel_tdes_dma_cleanup(tdes_dd);
+ if (tdes_dd->caps.has_dma)
+ atmel_tdes_dma_cleanup(tdes_dd);
err_tdes_dma:
+err_pdata:
+ atmel_tdes_buff_cleanup(tdes_dd);
+err_tdes_buff:
iounmap(tdes_dd->io_base);
tdes_io_err:
clk_put(tdes_dd->iclk);
@@ -1178,7 +1463,10 @@ static int atmel_tdes_remove(struct platform_device *pdev)
tasklet_kill(&tdes_dd->done_task);
tasklet_kill(&tdes_dd->queue_task);
- atmel_tdes_dma_cleanup(tdes_dd);
+ if (tdes_dd->caps.has_dma)
+ atmel_tdes_dma_cleanup(tdes_dd);
+
+ atmel_tdes_buff_cleanup(tdes_dd);
iounmap(tdes_dd->io_base);
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index 827913d..d797f31 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -151,7 +151,7 @@ static int bfin_crypto_crc_init(struct ahash_request *req)
struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
struct bfin_crypto_crc *crc;
- dev_dbg(crc->dev, "crc_init\n");
+ dev_dbg(ctx->crc->dev, "crc_init\n");
spin_lock_bh(&crc_list.lock);
list_for_each_entry(crc, &crc_list.dev_list, list) {
crc_ctx->crc = crc;
@@ -160,7 +160,7 @@ static int bfin_crypto_crc_init(struct ahash_request *req)
spin_unlock_bh(&crc_list.lock);
if (sg_count(req->src) > CRC_MAX_DMA_DESC) {
- dev_dbg(crc->dev, "init: requested sg list is too big > %d\n",
+ dev_dbg(ctx->crc->dev, "init: requested sg list is too big > %d\n",
CRC_MAX_DMA_DESC);
return -EINVAL;
}
@@ -175,7 +175,7 @@ static int bfin_crypto_crc_init(struct ahash_request *req)
/* init crc results */
put_unaligned_le32(crc_ctx->key, req->result);
- dev_dbg(crc->dev, "init: digest size: %d\n",
+ dev_dbg(ctx->crc->dev, "init: digest size: %d\n",
crypto_ahash_digestsize(tfm));
return bfin_crypto_crc_init_hw(crc, crc_ctx->key);
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 65c7668..b44091c 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -78,7 +78,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
tristate "Register hash algorithm implementations with Crypto API"
depends on CRYPTO_DEV_FSL_CAAM
default y
- select CRYPTO_AHASH
+ select CRYPTO_HASH
help
Selecting this will offload ahash for users of the
scatterlist crypto API to the SEC4 via job ring.
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index cf268b1..765fdf5 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1693,6 +1693,7 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha224),cbc(aes))",
.driver_name = "authenc-hmac-sha224-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
@@ -1732,6 +1733,7 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha384),cbc(aes))",
.driver_name = "authenc-hmac-sha384-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
@@ -1810,6 +1812,7 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha224),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
.blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
@@ -1849,6 +1852,7 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha384),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
.blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
@@ -1926,6 +1930,7 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha224),cbc(des))",
.driver_name = "authenc-hmac-sha224-cbc-des-caam",
.blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
@@ -1965,6 +1970,7 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha384),cbc(des))",
.driver_name = "authenc-hmac-sha384-cbc-des-caam",
.blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 32aba7a..5996521 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -411,7 +411,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
return 0;
}
-static u32 gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
u32 keylen)
{
return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
@@ -420,7 +420,7 @@ static u32 gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
}
/* Digest hash size if it is too large */
-static u32 hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
u32 *keylen, u8 *key_out, u32 digestsize)
{
struct device *jrdev = ctx->jrdev;
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 8acf004..6e94bcd 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -304,6 +304,9 @@ static int caam_probe(struct platform_device *pdev)
caam_remove(pdev);
return ret;
}
+
+ /* Enable RDB bit so that RNG works faster */
+ setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE);
}
/* NOTE: RTIC detection ought to go here, around Si time */
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 30b8f74..9f25f52 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -36,7 +36,7 @@ static void report_jump_idx(u32 status, char *outstr)
static void report_ccb_status(u32 status, char *outstr)
{
- char *cha_id_list[] = {
+ static const char * const cha_id_list[] = {
"",
"AES",
"DES",
@@ -51,7 +51,7 @@ static void report_ccb_status(u32 status, char *outstr)
"ZUCE",
"ZUCA",
};
- char *err_id_list[] = {
+ static const char * const err_id_list[] = {
"No error.",
"Mode error.",
"Data size error.",
@@ -69,7 +69,7 @@ static void report_ccb_status(u32 status, char *outstr)
"Invalid CHA combination was selected",
"Invalid CHA selected.",
};
- char *rng_err_id_list[] = {
+ static const char * const rng_err_id_list[] = {
"",
"",
"",
@@ -117,7 +117,7 @@ static void report_jump_status(u32 status, char *outstr)
static void report_deco_status(u32 status, char *outstr)
{
- const struct {
+ static const struct {
u8 value;
char *error_text;
} desc_error_list[] = {
@@ -245,7 +245,7 @@ static void report_cond_code_status(u32 status, char *outstr)
char *caam_jr_strstatus(char *outstr, u32 status)
{
- struct stat_src {
+ static const struct stat_src {
void (*report_ssed)(u32 status, char *outstr);
char *error;
} status_src[] = {
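The report_*_status() hunks above turn per-call string tables into static const data. A minimal stand-alone sketch of what that buys (names are illustrative, not the CAAM tables):

	#include <stdio.h>

	/* 'static const char * const' puts the pointer table in
	 * read-only data once; a plain local array would be rebuilt
	 * on the stack every time the function is entered. */
	static const char * const cha_names[] = { "", "AES", "DES", "MD" };

	static void report(unsigned int idx)
	{
		if (idx < sizeof(cha_names) / sizeof(cha_names[0]))
			printf("CHA: %s\n", cha_names[idx]);
	}

	int main(void)
	{
		report(1);
		return 0;
	}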
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 5cd4c1b..e4a16b7 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -41,6 +41,7 @@ struct caam_jrentry_info {
/* Private sub-storage for a single JobR */
struct caam_drv_private_jr {
struct device *parentdev; /* points back to controller dev */
+ struct platform_device *jr_pdev;/* points to platform device for JR */
int ridx;
struct caam_job_ring __iomem *rregs; /* JobR's register space */
struct tasklet_struct irqtask;
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 93d1407..b4aa773e 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -407,6 +407,7 @@ int caam_jr_shutdown(struct device *dev)
dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
jrp->outring, outbusaddr);
kfree(jrp->entinfo);
+ of_device_unregister(jrp->jr_pdev);
return ret;
}
@@ -454,6 +455,8 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
kfree(jrpriv);
return -EINVAL;
}
+
+ jrpriv->jr_pdev = jr_pdev;
jrdev = &jr_pdev->dev;
dev_set_drvdata(jrdev, jrpriv);
ctrlpriv->jrdev[ring] = jrdev;
@@ -472,6 +475,7 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
/* Now do the platform independent part */
error = caam_jr_init(jrdev); /* now turn on hardware */
if (error) {
+ of_device_unregister(jr_pdev);
kfree(jrpriv);
return error;
}
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index f6dba10..87138d2 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -44,7 +44,7 @@ Split key generation-----------------------------------------------
[06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
@0xffe04000
*/
-u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
+int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
int split_key_pad_len, const u8 *key_in, u32 keylen,
u32 alg_op)
{
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
index d95d290..c5588f6 100644
--- a/drivers/crypto/caam/key_gen.h
+++ b/drivers/crypto/caam/key_gen.h
@@ -12,6 +12,6 @@ struct split_key_result {
void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
-u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
+int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
int split_key_pad_len, const u8 *key_in, u32 keylen,
u32 alg_op);
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 3223fc6..cd6feda 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -252,7 +252,8 @@ struct caam_ctrl {
/* Read/Writable */
u32 rsvd1;
u32 mcr; /* MCFG Master Config Register */
- u32 rsvd2[2];
+ u32 rsvd2;
+ u32 scfgr; /* SCFGR, Security Config Register */
/* Bus Access Configuration Section 010-11f */
/* Read/Writable */
@@ -299,6 +300,7 @@ struct caam_ctrl {
#define MCFGR_WDFAIL 0x20000000 /* DECO watchdog force-fail */
#define MCFGR_DMA_RESET 0x10000000
#define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */
+#define SCFGR_RDBENABLE 0x00000400
/* AXI read cache control */
#define MCFGR_ARCACHE_SHIFT 12
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 6aa425f..ee15b0f 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -636,7 +636,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
pr_debug("err: %d\n", err);
- pm_runtime_put_sync(dd->dev);
+ pm_runtime_put(dd->dev);
dd->flags &= ~FLAGS_BUSY;
req->base.complete(&req->base, err);
@@ -1248,18 +1248,7 @@ static struct platform_driver omap_aes_driver = {
},
};
-static int __init omap_aes_mod_init(void)
-{
- return platform_driver_register(&omap_aes_driver);
-}
-
-static void __exit omap_aes_mod_exit(void)
-{
- platform_driver_unregister(&omap_aes_driver);
-}
-
-module_init(omap_aes_mod_init);
-module_exit(omap_aes_mod_exit);
+module_platform_driver(omap_aes_driver);
MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
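The omap-aes conversion above drops the hand-rolled init/exit pair because module_platform_driver() emits the same boilerplate. Its approximate expansion (paraphrased from the platform device headers of this era; check the exact tree in use):

	/* Approximate expansion of module_platform_driver(omap_aes_driver) */
	static int __init omap_aes_driver_init(void)
	{
		return platform_driver_register(&omap_aes_driver);
	}
	module_init(omap_aes_driver_init);

	static void __exit omap_aes_driver_exit(void)
	{
		platform_driver_unregister(&omap_aes_driver);
	}
	module_exit(omap_aes_driver_exit);

The same reasoning applies to the identical omap-sham conversion below.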
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 3d1611f..a1e1b47 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -923,7 +923,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
- pm_runtime_put_sync(dd->dev);
+ pm_runtime_put(dd->dev);
if (req->base.complete)
req->base.complete(&req->base, err);
@@ -1813,18 +1813,7 @@ static struct platform_driver omap_sham_driver = {
},
};
-static int __init omap_sham_mod_init(void)
-{
- return platform_driver_register(&omap_sham_driver);
-}
-
-static void __exit omap_sham_mod_exit(void)
-{
- platform_driver_unregister(&omap_sham_driver);
-}
-
-module_init(omap_sham_mod_init);
-module_exit(omap_sham_mod_exit);
+module_platform_driver(omap_sham_driver);
MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 2096d46..ac30724 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1688,8 +1688,6 @@ static const struct of_device_id spacc_of_id_table[] = {
{ .compatible = "picochip,spacc-l2" },
{}
};
-#else /* CONFIG_OF */
-#define spacc_of_id_table NULL
#endif /* CONFIG_OF */
static bool spacc_is_compatible(struct platform_device *pdev,
@@ -1874,7 +1872,7 @@ static struct platform_driver spacc_driver = {
#ifdef CONFIG_PM
.pm = &spacc_pm_ops,
#endif /* CONFIG_PM */
- .of_match_table = spacc_of_id_table,
+ .of_match_table = of_match_ptr(spacc_of_id_table),
},
.id_table = spacc_id_table,
};
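The picoxcell hunk above can delete the "#define spacc_of_id_table NULL" fallback because of_match_ptr() already collapses to NULL when CONFIG_OF is disabled, and the argument is discarded textually in that case, so the undefined table symbol is never referenced. Roughly (paraphrased from include/linux/of.h):

	#ifdef CONFIG_OF
	#define of_match_ptr(_ptr)	(_ptr)
	#else
	#define of_match_ptr(_ptr)	NULL
	#endif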
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
new file mode 100644
index 0000000..a97bb6c
--- /dev/null
+++ b/drivers/crypto/sahara.c
@@ -0,0 +1,1070 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for SAHARA cryptographic accelerator.
+ *
+ * Copyright (c) 2013 Vista Silicon S.L.
+ * Author: Javier Martin <javier.martin@vista-silicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Based on omap-aes.c and tegra-aes.c
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define SAHARA_NAME "sahara"
+#define SAHARA_VERSION_3 3
+#define SAHARA_TIMEOUT_MS 1000
+#define SAHARA_MAX_HW_DESC 2
+#define SAHARA_MAX_HW_LINK 20
+
+#define FLAGS_MODE_MASK 0x000f
+#define FLAGS_ENCRYPT BIT(0)
+#define FLAGS_CBC BIT(1)
+#define FLAGS_NEW_KEY BIT(3)
+#define FLAGS_BUSY 4
+
+#define SAHARA_HDR_BASE 0x00800000
+#define SAHARA_HDR_SKHA_ALG_AES 0
+#define SAHARA_HDR_SKHA_OP_ENC (1 << 2)
+#define SAHARA_HDR_SKHA_MODE_ECB (0 << 3)
+#define SAHARA_HDR_SKHA_MODE_CBC (1 << 3)
+#define SAHARA_HDR_FORM_DATA (5 << 16)
+#define SAHARA_HDR_FORM_KEY (8 << 16)
+#define SAHARA_HDR_LLO (1 << 24)
+#define SAHARA_HDR_CHA_SKHA (1 << 28)
+#define SAHARA_HDR_CHA_MDHA (2 << 28)
+#define SAHARA_HDR_PARITY_BIT (1 << 31)
+
+/* SAHARA can only process one request at a time */
+#define SAHARA_QUEUE_LENGTH 1
+
+#define SAHARA_REG_VERSION 0x00
+#define SAHARA_REG_DAR 0x04
+#define SAHARA_REG_CONTROL 0x08
+#define SAHARA_CONTROL_SET_THROTTLE(x) (((x) & 0xff) << 24)
+#define SAHARA_CONTROL_SET_MAXBURST(x) (((x) & 0xff) << 16)
+#define SAHARA_CONTROL_RNG_AUTORSD (1 << 7)
+#define SAHARA_CONTROL_ENABLE_INT (1 << 4)
+#define SAHARA_REG_CMD 0x0C
+#define SAHARA_CMD_RESET (1 << 0)
+#define SAHARA_CMD_CLEAR_INT (1 << 8)
+#define SAHARA_CMD_CLEAR_ERR (1 << 9)
+#define SAHARA_CMD_SINGLE_STEP (1 << 10)
+#define SAHARA_CMD_MODE_BATCH (1 << 16)
+#define SAHARA_CMD_MODE_DEBUG (1 << 18)
+#define SAHARA_REG_STATUS 0x10
+#define SAHARA_STATUS_GET_STATE(x) ((x) & 0x7)
+#define SAHARA_STATE_IDLE 0
+#define SAHARA_STATE_BUSY 1
+#define SAHARA_STATE_ERR 2
+#define SAHARA_STATE_FAULT 3
+#define SAHARA_STATE_COMPLETE 4
+#define SAHARA_STATE_COMP_FLAG (1 << 2)
+#define SAHARA_STATUS_DAR_FULL (1 << 3)
+#define SAHARA_STATUS_ERROR (1 << 4)
+#define SAHARA_STATUS_SECURE (1 << 5)
+#define SAHARA_STATUS_FAIL (1 << 6)
+#define SAHARA_STATUS_INIT (1 << 7)
+#define SAHARA_STATUS_RNG_RESEED (1 << 8)
+#define SAHARA_STATUS_ACTIVE_RNG (1 << 9)
+#define SAHARA_STATUS_ACTIVE_MDHA (1 << 10)
+#define SAHARA_STATUS_ACTIVE_SKHA (1 << 11)
+#define SAHARA_STATUS_MODE_BATCH (1 << 16)
+#define SAHARA_STATUS_MODE_DEDICATED (1 << 17)
+#define SAHARA_STATUS_MODE_DEBUG (1 << 18)
+#define SAHARA_STATUS_GET_ISTATE(x) (((x) >> 24) & 0xff)
+#define SAHARA_REG_ERRSTATUS 0x14
+#define SAHARA_ERRSTATUS_GET_SOURCE(x) ((x) & 0xf)
+#define SAHARA_ERRSOURCE_CHA 14
+#define SAHARA_ERRSOURCE_DMA 15
+#define SAHARA_ERRSTATUS_DMA_DIR (1 << 8)
+#define SAHARA_ERRSTATUS_GET_DMASZ(x) (((x) >> 9) & 0x3)
+#define SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
+#define SAHARA_ERRSTATUS_GET_CHASRC(x) (((x) >> 16) & 0xfff)
+#define SAHARA_ERRSTATUS_GET_CHAERR(x) (((x) >> 28) & 0x3)
+#define SAHARA_REG_FADDR 0x18
+#define SAHARA_REG_CDAR 0x1C
+#define SAHARA_REG_IDAR 0x20
+
+struct sahara_hw_desc {
+ u32 hdr;
+ u32 len1;
+ dma_addr_t p1;
+ u32 len2;
+ dma_addr_t p2;
+ dma_addr_t next;
+};
+
+struct sahara_hw_link {
+ u32 len;
+ dma_addr_t p;
+ dma_addr_t next;
+};
+
+struct sahara_ctx {
+ struct sahara_dev *dev;
+ unsigned long flags;
+ int keylen;
+ u8 key[AES_KEYSIZE_128];
+ struct crypto_ablkcipher *fallback;
+};
+
+struct sahara_aes_reqctx {
+ unsigned long mode;
+};
+
+struct sahara_dev {
+ struct device *device;
+ void __iomem *regs_base;
+ struct clk *clk_ipg;
+ struct clk *clk_ahb;
+
+ struct sahara_ctx *ctx;
+ spinlock_t lock;
+ struct crypto_queue queue;
+ unsigned long flags;
+
+ struct tasklet_struct done_task;
+ struct tasklet_struct queue_task;
+
+ struct sahara_hw_desc *hw_desc[SAHARA_MAX_HW_DESC];
+ dma_addr_t hw_phys_desc[SAHARA_MAX_HW_DESC];
+
+ u8 *key_base;
+ dma_addr_t key_phys_base;
+
+ u8 *iv_base;
+ dma_addr_t iv_phys_base;
+
+ struct sahara_hw_link *hw_link[SAHARA_MAX_HW_LINK];
+ dma_addr_t hw_phys_link[SAHARA_MAX_HW_LINK];
+
+ struct ablkcipher_request *req;
+ size_t total;
+ struct scatterlist *in_sg;
+ unsigned int nb_in_sg;
+ struct scatterlist *out_sg;
+ unsigned int nb_out_sg;
+
+ u32 error;
+ struct timer_list watchdog;
+};
+
+static struct sahara_dev *dev_ptr;
+
+static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
+{
+ writel(data, dev->regs_base + reg);
+}
+
+static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
+{
+ return readl(dev->regs_base + reg);
+}
+
+static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
+{
+ u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
+ SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
+ SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
+
+ if (dev->flags & FLAGS_CBC) {
+ hdr |= SAHARA_HDR_SKHA_MODE_CBC;
+ hdr ^= SAHARA_HDR_PARITY_BIT;
+ }
+
+ if (dev->flags & FLAGS_ENCRYPT) {
+ hdr |= SAHARA_HDR_SKHA_OP_ENC;
+ hdr ^= SAHARA_HDR_PARITY_BIT;
+ }
+
+ return hdr;
+}
+
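A note on sahara_aes_key_hdr() above: each time a mode bit is ORed into the header, SAHARA_HDR_PARITY_BIT is XOR-toggled, so the assembled header always ends up with an odd number of set bits (the header parity apparently matters to the hardware; treat that reading as an assumption). A stand-alone check of the invariant using the same bit values:

	#include <stdio.h>

	#define HDR_BASE	0x00800000
	#define HDR_FORM_KEY	(8 << 16)
	#define HDR_LLO		(1 << 24)
	#define HDR_CHA_SKHA	(1 << 28)
	#define HDR_PARITY	(1u << 31)
	#define HDR_MODE_CBC	(1 << 3)
	#define HDR_OP_ENC	(1 << 2)

	int main(void)
	{
		unsigned int base = HDR_BASE | HDR_FORM_KEY | HDR_LLO |
				    HDR_CHA_SKHA | HDR_PARITY;
		unsigned int v[4];
		int i;

		v[0] = base;
		v[1] = (base | HDR_MODE_CBC) ^ HDR_PARITY;
		v[2] = (base | HDR_OP_ENC) ^ HDR_PARITY;
		v[3] = (((base | HDR_MODE_CBC) ^ HDR_PARITY) | HDR_OP_ENC)
			^ HDR_PARITY;
		for (i = 0; i < 4; i++)	/* __builtin_popcount: GCC/Clang */
			printf("0x%08x set bits are %s\n", v[i],
			       __builtin_popcount(v[i]) & 1 ? "odd" : "even");
		return 0;
	}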
+static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
+{
+ return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
+ SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
+}
+
+static int sahara_sg_length(struct scatterlist *sg,
+ unsigned int total)
+{
+ int sg_nb;
+ unsigned int len;
+ struct scatterlist *sg_list;
+
+ sg_nb = 0;
+ sg_list = sg;
+
+ while (total) {
+ len = min(sg_list->length, total);
+
+ sg_nb++;
+ total -= len;
+
+ sg_list = sg_next(sg_list);
+ if (!sg_list)
+ total = 0;
+ }
+
+ return sg_nb;
+}
+
+static char *sahara_err_src[16] = {
+ "No error",
+ "Header error",
+ "Descriptor length error",
+ "Descriptor length or pointer error",
+ "Link length error",
+ "Link pointer error",
+ "Input buffer error",
+ "Output buffer error",
+ "Output buffer starvation",
+ "Internal state fault",
+ "General descriptor problem",
+ "Reserved",
+ "Descriptor address error",
+ "Link address error",
+ "CHA error",
+ "DMA error"
+};
+
+static char *sahara_err_dmasize[4] = {
+ "Byte transfer",
+ "Half-word transfer",
+ "Word transfer",
+ "Reserved"
+};
+
+static char *sahara_err_dmasrc[8] = {
+ "No error",
+ "AHB bus error",
+ "Internal IP bus error",
+ "Parity error",
+ "DMA crosses 256 byte boundary",
+ "DMA is busy",
+ "Reserved",
+ "DMA HW error"
+};
+
+static char *sahara_cha_errsrc[12] = {
+ "Input buffer non-empty",
+ "Illegal address",
+ "Illegal mode",
+ "Illegal data size",
+ "Illegal key size",
+ "Write during processing",
+ "CTX read during processing",
+ "HW error",
+ "Input buffer disabled/underflow",
+ "Output buffer disabled/overflow",
+ "DES key parity error",
+ "Reserved"
+};
+
+static char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
+
+static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
+{
+ u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
+ u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
+
+ dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
+
+ dev_err(dev->device, " - %s.\n", sahara_err_src[source]);
+
+ if (source == SAHARA_ERRSOURCE_DMA) {
+ if (error & SAHARA_ERRSTATUS_DMA_DIR)
+ dev_err(dev->device, " * DMA read.\n");
+ else
+ dev_err(dev->device, " * DMA write.\n");
+
+ dev_err(dev->device, " * %s.\n",
+ sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
+ dev_err(dev->device, " * %s.\n",
+ sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
+ } else if (source == SAHARA_ERRSOURCE_CHA) {
+ dev_err(dev->device, " * %s.\n",
+ sahara_cha_errsrc[chasrc]);
+ dev_err(dev->device, " * %s.\n",
+ sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
+ }
+ dev_err(dev->device, "\n");
+}
+
+static char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
+
+static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
+{
+ u8 state;
+
+ if (!IS_ENABLED(DEBUG))
+ return;
+
+ state = SAHARA_STATUS_GET_STATE(status);
+
+ dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
+ __func__, status);
+
+ dev_dbg(dev->device, " - State = %d:\n", state);
+ if (state & SAHARA_STATE_COMP_FLAG)
+ dev_dbg(dev->device, " * Descriptor completed. IRQ pending.\n");
+
+ dev_dbg(dev->device, " * %s.\n",
+ sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);
+
+ if (status & SAHARA_STATUS_DAR_FULL)
+ dev_dbg(dev->device, " - DAR Full.\n");
+ if (status & SAHARA_STATUS_ERROR)
+ dev_dbg(dev->device, " - Error.\n");
+ if (status & SAHARA_STATUS_SECURE)
+ dev_dbg(dev->device, " - Secure.\n");
+ if (status & SAHARA_STATUS_FAIL)
+ dev_dbg(dev->device, " - Fail.\n");
+ if (status & SAHARA_STATUS_RNG_RESEED)
+ dev_dbg(dev->device, " - RNG Reseed Request.\n");
+ if (status & SAHARA_STATUS_ACTIVE_RNG)
+ dev_dbg(dev->device, " - RNG Active.\n");
+ if (status & SAHARA_STATUS_ACTIVE_MDHA)
+ dev_dbg(dev->device, " - MDHA Active.\n");
+ if (status & SAHARA_STATUS_ACTIVE_SKHA)
+ dev_dbg(dev->device, " - SKHA Active.\n");
+
+ if (status & SAHARA_STATUS_MODE_BATCH)
+ dev_dbg(dev->device, " - Batch Mode.\n");
+ else if (status & SAHARA_STATUS_MODE_DEDICATED)
+ dev_dbg(dev->device, " - Dedicated Mode.\n");
+ else if (status & SAHARA_STATUS_MODE_DEBUG)
+ dev_dbg(dev->device, " - Debug Mode.\n");
+
+ dev_dbg(dev->device, " - Internal state = 0x%02x\n",
+ SAHARA_STATUS_GET_ISTATE(status));
+
+ dev_dbg(dev->device, "Current DAR: 0x%08x\n",
+ sahara_read(dev, SAHARA_REG_CDAR));
+ dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
+ sahara_read(dev, SAHARA_REG_IDAR));
+}
+
+static void sahara_dump_descriptors(struct sahara_dev *dev)
+{
+ int i;
+
+ if (!IS_ENABLED(DEBUG))
+ return;
+
+ for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
+ dev_dbg(dev->device, "Descriptor (%d) (0x%08x):\n",
+ i, dev->hw_phys_desc[i]);
+ dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
+ dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
+ dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
+ dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
+ dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
+ dev_dbg(dev->device, "\tnext = 0x%08x\n",
+ dev->hw_desc[i]->next);
+ }
+ dev_dbg(dev->device, "\n");
+}
+
+static void sahara_dump_links(struct sahara_dev *dev)
+{
+ int i;
+
+ if (!IS_ENABLED(DEBUG))
+ return;
+
+ for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
+ dev_dbg(dev->device, "Link (%d) (0x%08x):\n",
+ i, dev->hw_phys_link[i]);
+ dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
+ dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
+ dev_dbg(dev->device, "\tnext = 0x%08x\n",
+ dev->hw_link[i]->next);
+ }
+ dev_dbg(dev->device, "\n");
+}
+
+static void sahara_aes_done_task(unsigned long data)
+{
+ struct sahara_dev *dev = (struct sahara_dev *)data;
+
+ /* Unmap with the same directions used when mapping the
+ * scatterlists in sahara_hw_descriptor_create(). */
+ dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
+ DMA_FROM_DEVICE);
+ dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ DMA_TO_DEVICE);
+
+ spin_lock(&dev->lock);
+ clear_bit(FLAGS_BUSY, &dev->flags);
+ spin_unlock(&dev->lock);
+
+ dev->req->base.complete(&dev->req->base, dev->error);
+}
+
+static void sahara_watchdog(unsigned long data)
+{
+ struct sahara_dev *dev = (struct sahara_dev *)data;
+ unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
+ unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
+
+ sahara_decode_status(dev, stat);
+ sahara_decode_error(dev, err);
+ dev->error = -ETIMEDOUT;
+ sahara_aes_done_task(data);
+}
+
+static int sahara_hw_descriptor_create(struct sahara_dev *dev)
+{
+ struct sahara_ctx *ctx = dev->ctx;
+ struct scatterlist *sg;
+ int ret;
+ int i, j;
+
+ /* Copy new key if necessary */
+ if (ctx->flags & FLAGS_NEW_KEY) {
+ memcpy(dev->key_base, ctx->key, ctx->keylen);
+ ctx->flags &= ~FLAGS_NEW_KEY;
+
+ if (dev->flags & FLAGS_CBC) {
+ dev->hw_desc[0]->len1 = AES_BLOCK_SIZE;
+ dev->hw_desc[0]->p1 = dev->iv_phys_base;
+ } else {
+ dev->hw_desc[0]->len1 = 0;
+ dev->hw_desc[0]->p1 = 0;
+ }
+ dev->hw_desc[0]->len2 = ctx->keylen;
+ dev->hw_desc[0]->p2 = dev->key_phys_base;
+ dev->hw_desc[0]->next = dev->hw_phys_desc[1];
+ }
+ dev->hw_desc[0]->hdr = sahara_aes_key_hdr(dev);
+
+ dev->nb_in_sg = sahara_sg_length(dev->in_sg, dev->total);
+ dev->nb_out_sg = sahara_sg_length(dev->out_sg, dev->total);
+ if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
+ dev_err(dev->device, "not enough hw links (%d)\n",
+ dev->nb_in_sg + dev->nb_out_sg);
+ return -EINVAL;
+ }
+
+ ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ DMA_TO_DEVICE);
+ if (ret != dev->nb_in_sg) {
+ dev_err(dev->device, "couldn't map in sg\n");
+ goto unmap_in;
+ }
+ ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
+ DMA_FROM_DEVICE);
+ if (ret != dev->nb_out_sg) {
+ dev_err(dev->device, "couldn't map out sg\n");
+ goto unmap_out;
+ }
+
+ /* Create input links */
+ dev->hw_desc[1]->p1 = dev->hw_phys_link[0];
+ sg = dev->in_sg;
+ for (i = 0; i < dev->nb_in_sg; i++) {
+ dev->hw_link[i]->len = sg->length;
+ dev->hw_link[i]->p = sg->dma_address;
+ if (i == (dev->nb_in_sg - 1)) {
+ dev->hw_link[i]->next = 0;
+ } else {
+ dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
+ sg = sg_next(sg);
+ }
+ }
+
+ /* Create output links */
+ dev->hw_desc[1]->p2 = dev->hw_phys_link[i];
+ sg = dev->out_sg;
+ for (j = i; j < dev->nb_out_sg + i; j++) {
+ dev->hw_link[j]->len = sg->length;
+ dev->hw_link[j]->p = sg->dma_address;
+ if (j == (dev->nb_out_sg + i - 1)) {
+ dev->hw_link[j]->next = 0;
+ } else {
+ dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
+ sg = sg_next(sg);
+ }
+ }
+
+ /* Fill remaining fields of hw_desc[1] */
+ dev->hw_desc[1]->hdr = sahara_aes_data_link_hdr(dev);
+ dev->hw_desc[1]->len1 = dev->total;
+ dev->hw_desc[1]->len2 = dev->total;
+ dev->hw_desc[1]->next = 0;
+
+ sahara_dump_descriptors(dev);
+ sahara_dump_links(dev);
+
+ /* Start processing descriptor chain. */
+ mod_timer(&dev->watchdog,
+ jiffies + msecs_to_jiffies(SAHARA_TIMEOUT_MS));
+ sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
+
+ return 0;
+
+unmap_out:
+ dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
+ DMA_FROM_DEVICE);
+unmap_in:
+ dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ DMA_TO_DEVICE);
+
+ return -EINVAL;
+}
+
+static void sahara_aes_queue_task(unsigned long data)
+{
+ struct sahara_dev *dev = (struct sahara_dev *)data;
+ struct crypto_async_request *async_req, *backlog;
+ struct sahara_ctx *ctx;
+ struct sahara_aes_reqctx *rctx;
+ struct ablkcipher_request *req;
+ int ret;
+
+ spin_lock(&dev->lock);
+ backlog = crypto_get_backlog(&dev->queue);
+ async_req = crypto_dequeue_request(&dev->queue);
+ if (!async_req)
+ clear_bit(FLAGS_BUSY, &dev->flags);
+ spin_unlock(&dev->lock);
+
+ if (!async_req)
+ return;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ req = ablkcipher_request_cast(async_req);
+
+ /* Request is ready to be dispatched by the device */
+ dev_dbg(dev->device,
+ "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
+ req->nbytes, req->src, req->dst);
+
+ /* assign new request to device */
+ dev->req = req;
+ dev->total = req->nbytes;
+ dev->in_sg = req->src;
+ dev->out_sg = req->dst;
+
+ rctx = ablkcipher_request_ctx(req);
+ ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ rctx->mode &= FLAGS_MODE_MASK;
+ dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
+
+ if ((dev->flags & FLAGS_CBC) && req->info)
+ memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);
+
+ /* assign new context to device */
+ ctx->dev = dev;
+ dev->ctx = ctx;
+
+ ret = sahara_hw_descriptor_create(dev);
+ if (ret < 0) {
+ spin_lock(&dev->lock);
+ clear_bit(FLAGS_BUSY, &dev->flags);
+ spin_unlock(&dev->lock);
+ dev->req->base.complete(&dev->req->base, ret);
+ }
+}
+
+static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ int ret;
+
+ ctx->keylen = keylen;
+
+ /* SAHARA only supports 128bit keys */
+ if (keylen == AES_KEYSIZE_128) {
+ memcpy(ctx->key, key, keylen);
+ ctx->flags |= FLAGS_NEW_KEY;
+ return 0;
+ }
+
+ if (keylen != AES_KEYSIZE_128 &&
+ keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+
+ /*
+ * The requested key size is not supported by HW, do a fallback.
+ */
+ ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ ctx->fallback->base.crt_flags |=
+ (tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
+ if (ret) {
+ struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);
+
+ tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+ tfm_aux->crt_flags |=
+ (ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
+ }
+ return ret;
+}
+
+static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+ struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct sahara_dev *dev = dev_ptr;
+ int err = 0;
+ int busy;
+
+ dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
+ req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
+
+ if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+ dev_err(dev->device,
+ "request size is not exact amount of AES blocks\n");
+ return -EINVAL;
+ }
+
+ ctx->dev = dev;
+
+ rctx->mode = mode;
+ spin_lock_bh(&dev->lock);
+ err = ablkcipher_enqueue_request(&dev->queue, req);
+ busy = test_and_set_bit(FLAGS_BUSY, &dev->flags);
+ spin_unlock_bh(&dev->lock);
+
+ if (!busy)
+ tasklet_schedule(&dev->queue_task);
+
+ return err;
+}
+
+static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto_tfm *tfm =
+ crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+ struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ int err;
+
+ if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+ ablkcipher_request_set_tfm(req, ctx->fallback);
+ err = crypto_ablkcipher_encrypt(req);
+ ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+ return err;
+ }
+
+ return sahara_aes_crypt(req, FLAGS_ENCRYPT);
+}
+
+static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto_tfm *tfm =
+ crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+ struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ int err;
+
+ if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+ ablkcipher_request_set_tfm(req, ctx->fallback);
+ err = crypto_ablkcipher_decrypt(req);
+ ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+ return err;
+ }
+
+ return sahara_aes_crypt(req, 0);
+}
+
+static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto_tfm *tfm =
+ crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+ struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ int err;
+
+ if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+ ablkcipher_request_set_tfm(req, ctx->fallback);
+ err = crypto_ablkcipher_encrypt(req);
+ ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+ return err;
+ }
+
+ return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
+}
+
+static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto_tfm *tfm =
+ crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+ struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ int err;
+
+ if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+ ablkcipher_request_set_tfm(req, ctx->fallback);
+ err = crypto_ablkcipher_decrypt(req);
+ ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+ return err;
+ }
+
+ return sahara_aes_crypt(req, FLAGS_CBC);
+}
+
+static int sahara_aes_cra_init(struct crypto_tfm *tfm)
+{
+ const char *name = tfm->__crt_alg->cra_name;
+ struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->fallback = crypto_alloc_ablkcipher(name, 0,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback)) {
+ pr_err("Error allocating fallback algo %s\n", name);
+ return PTR_ERR(ctx->fallback);
+ }
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);
+
+ return 0;
+}
+
+static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
+{
+ struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->fallback)
+ crypto_free_ablkcipher(ctx->fallback);
+ ctx->fallback = NULL;
+}
+
+static struct crypto_alg aes_algs[] = {
+{
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "sahara-ecb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sahara_ctx),
+ .cra_alignmask = 0x0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = sahara_aes_cra_init,
+ .cra_exit = sahara_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sahara_aes_setkey,
+ .encrypt = sahara_aes_ecb_encrypt,
+ .decrypt = sahara_aes_ecb_decrypt,
+ }
+}, {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "sahara-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sahara_ctx),
+ .cra_alignmask = 0x0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = sahara_aes_cra_init,
+ .cra_exit = sahara_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = sahara_aes_setkey,
+ .encrypt = sahara_aes_cbc_encrypt,
+ .decrypt = sahara_aes_cbc_decrypt,
+ }
+}
+};
+
+static irqreturn_t sahara_irq_handler(int irq, void *data)
+{
+ struct sahara_dev *dev = (struct sahara_dev *)data;
+ unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
+ unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
+
+ del_timer(&dev->watchdog);
+
+ sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
+ SAHARA_REG_CMD);
+
+ sahara_decode_status(dev, stat);
+
+ if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
+ return IRQ_NONE;
+ } else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
+ dev->error = 0;
+ } else {
+ sahara_decode_error(dev, err);
+ dev->error = -EINVAL;
+ }
+
+ tasklet_schedule(&dev->done_task);
+
+ return IRQ_HANDLED;
+}
+
+
+static int sahara_register_algs(struct sahara_dev *dev)
+{
+ int err, i, j;
+
+ for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+ INIT_LIST_HEAD(&aes_algs[i].cra_list);
+ err = crypto_register_alg(&aes_algs[i]);
+ if (err)
+ goto err_aes_algs;
+ }
+
+ return 0;
+
+err_aes_algs:
+ for (j = 0; j < i; j++)
+ crypto_unregister_alg(&aes_algs[j]);
+
+ return err;
+}
+
+static void sahara_unregister_algs(struct sahara_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
+ crypto_unregister_alg(&aes_algs[i]);
+}
+
+static struct platform_device_id sahara_platform_ids[] = {
+ { .name = "sahara-imx27" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, sahara_platform_ids);
+
+static struct of_device_id sahara_dt_ids[] = {
+ { .compatible = "fsl,imx27-sahara" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sahara_dt_ids);
+
+static int sahara_probe(struct platform_device *pdev)
+{
+ struct sahara_dev *dev;
+ struct resource *res;
+ u32 version;
+ int irq;
+ int err;
+ int i;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
+ if (dev == NULL) {
+ dev_err(&pdev->dev, "unable to alloc data struct.\n");
+ return -ENOMEM;
+ }
+
+ dev->device = &pdev->dev;
+ platform_set_drvdata(pdev, dev);
+
+ /* Get the base address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get memory region resource\n");
+ return -ENODEV;
+ }
+
+ if (devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), SAHARA_NAME) == NULL) {
+ dev_err(&pdev->dev, "failed to request memory region\n");
+ return -ENOENT;
+ }
+ dev->regs_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!dev->regs_base) {
+ dev_err(&pdev->dev, "failed to ioremap address region\n");
+ return -ENOENT;
+ }
+
+ /* Get the IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq resource\n");
+ return irq;
+ }
+
+ if (devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
+ 0, SAHARA_NAME, dev) < 0) {
+ dev_err(&pdev->dev, "failed to request irq\n");
+ return -ENOENT;
+ }
+
+ /* clocks */
+ dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(dev->clk_ipg)) {
+ dev_err(&pdev->dev, "Could not get ipg clock\n");
+ return PTR_ERR(dev->clk_ipg);
+ }
+
+ dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(dev->clk_ahb)) {
+ dev_err(&pdev->dev, "Could not get ahb clock\n");
+ return PTR_ERR(dev->clk_ahb);
+ }
+
+ /* Allocate HW descriptors */
+ dev->hw_desc[0] = dma_alloc_coherent(&pdev->dev,
+ SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
+ &dev->hw_phys_desc[0], GFP_KERNEL);
+ if (!dev->hw_desc[0]) {
+ dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
+ return -ENOMEM;
+ }
+ dev->hw_desc[1] = dev->hw_desc[0] + 1;
+ dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
+ sizeof(struct sahara_hw_desc);
+
+ /* Allocate space for iv and key */
+ dev->key_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
+ &dev->key_phys_base, GFP_KERNEL);
+ if (!dev->key_base) {
+ dev_err(&pdev->dev, "Could not allocate memory for key\n");
+ err = -ENOMEM;
+ goto err_key;
+ }
+ dev->iv_base = dev->key_base + AES_KEYSIZE_128;
+ dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
+
+ /* Allocate space for HW links */
+ dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
+ SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
+ &dev->hw_phys_link[0], GFP_KERNEL);
+ if (!dev->hw_link[0]) {
+ dev_err(&pdev->dev, "Could not allocate hw links\n");
+ err = -ENOMEM;
+ goto err_link;
+ }
+ for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
+ dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
+ sizeof(struct sahara_hw_link);
+ dev->hw_link[i] = dev->hw_link[i - 1] + 1;
+ }
+
+ crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
+
+ dev_ptr = dev;
+
+ tasklet_init(&dev->queue_task, sahara_aes_queue_task,
+ (unsigned long)dev);
+ tasklet_init(&dev->done_task, sahara_aes_done_task,
+ (unsigned long)dev);
+
+ init_timer(&dev->watchdog);
+ dev->watchdog.function = &sahara_watchdog;
+ dev->watchdog.data = (unsigned long)dev;
+
+ clk_prepare_enable(dev->clk_ipg);
+ clk_prepare_enable(dev->clk_ahb);
+
+ version = sahara_read(dev, SAHARA_REG_VERSION);
+ if (version != SAHARA_VERSION_3) {
+ dev_err(&pdev->dev, "SAHARA version %d not supported\n",
+ version);
+ err = -ENODEV;
+ goto err_algs;
+ }
+
+ sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
+ SAHARA_REG_CMD);
+ sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
+ SAHARA_CONTROL_SET_MAXBURST(8) |
+ SAHARA_CONTROL_RNG_AUTORSD |
+ SAHARA_CONTROL_ENABLE_INT,
+ SAHARA_REG_CONTROL);
+
+ err = sahara_register_algs(dev);
+ if (err)
+ goto err_algs;
+
+ dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
+
+ return 0;
+
+err_algs:
+ dma_free_coherent(&pdev->dev,
+ SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
+ dev->hw_link[0], dev->hw_phys_link[0]);
+ clk_disable_unprepare(dev->clk_ipg);
+ clk_disable_unprepare(dev->clk_ahb);
+ dev_ptr = NULL;
+err_link:
+ dma_free_coherent(&pdev->dev,
+ 2 * AES_KEYSIZE_128,
+ dev->key_base, dev->key_phys_base);
+err_key:
+ dma_free_coherent(&pdev->dev,
+ SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
+ dev->hw_desc[0], dev->hw_phys_desc[0]);
+
+ return err;
+}
+
+static int sahara_remove(struct platform_device *pdev)
+{
+ struct sahara_dev *dev = platform_get_drvdata(pdev);
+
+ dma_free_coherent(&pdev->dev,
+ SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
+ dev->hw_link[0], dev->hw_phys_link[0]);
+ dma_free_coherent(&pdev->dev,
+ 2 * AES_KEYSIZE_128,
+ dev->key_base, dev->key_phys_base);
+ dma_free_coherent(&pdev->dev,
+ SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
+ dev->hw_desc[0], dev->hw_phys_desc[0]);
+
+ tasklet_kill(&dev->done_task);
+ tasklet_kill(&dev->queue_task);
+
+ sahara_unregister_algs(dev);
+
+ clk_disable_unprepare(dev->clk_ipg);
+ clk_disable_unprepare(dev->clk_ahb);
+
+ dev_ptr = NULL;
+
+ return 0;
+}
+
+static struct platform_driver sahara_driver = {
+ .probe = sahara_probe,
+ .remove = sahara_remove,
+ .driver = {
+ .name = SAHARA_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(sahara_dt_ids),
+ },
+ .id_table = sahara_platform_ids,
+};
+
+module_platform_driver(sahara_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
+MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c
index e208cea..3eafa90 100644
--- a/drivers/crypto/ux500/cryp/cryp.c
+++ b/drivers/crypto/ux500/cryp/cryp.c
@@ -12,8 +12,6 @@
#include <linux/kernel.h>
#include <linux/types.h>
-#include <mach/hardware.h>
-
#include "cryp_p.h"
#include "cryp.h"
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 22c9063..32f4806 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -32,7 +32,6 @@
#include <crypto/scatterwalk.h>
#include <linux/platform_data/crypto-ux500.h>
-#include <mach/hardware.h>
#include "cryp_p.h"
#include "cryp.h"
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 632c333..cf55089 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -32,7 +32,6 @@
#include <crypto/algapi.h>
#include <linux/platform_data/crypto-ux500.h>
-#include <mach/hardware.h>
#include "hash_alg.h"
@@ -939,6 +938,7 @@ static int hash_dma_final(struct ahash_request *req)
if (!ctx->device->dma.nents) {
dev_err(device_data->dev, "[%s] "
"ctx->device->dma.nents = 0", __func__);
+ ret = ctx->device->dma.nents;
goto out;
}
@@ -946,6 +946,7 @@ static int hash_dma_final(struct ahash_request *req)
if (bytes_written != req->nbytes) {
dev_err(device_data->dev, "[%s] "
"hash_dma_write() failed!", __func__);
+ ret = bytes_written;
goto out;
}
@@ -1368,14 +1369,12 @@ static int hash_setkey(struct crypto_ahash *tfm,
/**
* Freed in final.
*/
- ctx->key = kmalloc(keylen, GFP_KERNEL);
+ ctx->key = kmemdup(key, keylen, GFP_KERNEL);
if (!ctx->key) {
pr_err(DEV_DBG_NAME " [%s] Failed to allocate ctx->key "
"for %d\n", __func__, alg);
return -ENOMEM;
}
-
- memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
return ret;
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 8f6d30d..b48a79c 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -27,6 +27,7 @@
#include <linux/stmp_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_dma.h>
#include <asm/irq.h>
@@ -139,6 +140,8 @@ struct mxs_dma_engine {
struct dma_device dma_device;
struct device_dma_parameters dma_parms;
struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
+ struct platform_device *pdev;
+ unsigned int nr_channels;
};
struct mxs_dma_type {
@@ -350,10 +353,8 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int ret;
- if (!data)
- return -EINVAL;
-
- mxs_chan->chan_irq = data->chan_irq;
+ if (data)
+ mxs_chan->chan_irq = data->chan_irq;
mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
CCW_BLOCK_SIZE, &mxs_chan->ccw_phys,
@@ -665,8 +666,55 @@ err_out:
return ret;
}
+struct mxs_dma_filter_param {
+ struct device_node *of_node;
+ unsigned int chan_id;
+};
+
+static bool mxs_dma_filter_fn(struct dma_chan *chan, void *fn_param)
+{
+ struct mxs_dma_filter_param *param = fn_param;
+ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+ int chan_irq;
+
+ if (mxs_dma->dma_device.dev->of_node != param->of_node)
+ return false;
+
+ if (chan->chan_id != param->chan_id)
+ return false;
+
+ chan_irq = platform_get_irq(mxs_dma->pdev, param->chan_id);
+ if (chan_irq < 0)
+ return false;
+
+ mxs_chan->chan_irq = chan_irq;
+
+ return true;
+}
+
+struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct mxs_dma_engine *mxs_dma = ofdma->of_dma_data;
+ dma_cap_mask_t mask = mxs_dma->dma_device.cap_mask;
+ struct mxs_dma_filter_param param;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ param.of_node = ofdma->of_node;
+ param.chan_id = dma_spec->args[0];
+
+ if (param.chan_id >= mxs_dma->nr_channels)
+ return NULL;
+
+ return dma_request_channel(mask, mxs_dma_filter_fn, &param);
+}
+
static int __init mxs_dma_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
const struct platform_device_id *id_entry;
const struct of_device_id *of_id;
const struct mxs_dma_type *dma_type;
@@ -674,10 +722,16 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
struct resource *iores;
int ret, i;
- mxs_dma = kzalloc(sizeof(*mxs_dma), GFP_KERNEL);
+ mxs_dma = devm_kzalloc(&pdev->dev, sizeof(*mxs_dma), GFP_KERNEL);
if (!mxs_dma)
return -ENOMEM;
+ ret = of_property_read_u32(np, "dma-channels", &mxs_dma->nr_channels);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to read dma-channels\n");
+ return ret;
+ }
+
of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev);
if (of_id)
id_entry = of_id->data;
@@ -689,24 +743,13 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
mxs_dma->dev_id = dma_type->id;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mxs_dma->base = devm_ioremap_resource(&pdev->dev, iores);
+ if (IS_ERR(mxs_dma->base))
+ return PTR_ERR(mxs_dma->base);
- if (!request_mem_region(iores->start, resource_size(iores),
- pdev->name)) {
- ret = -EBUSY;
- goto err_request_region;
- }
-
- mxs_dma->base = ioremap(iores->start, resource_size(iores));
- if (!mxs_dma->base) {
- ret = -ENOMEM;
- goto err_ioremap;
- }
-
- mxs_dma->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(mxs_dma->clk)) {
- ret = PTR_ERR(mxs_dma->clk);
- goto err_clk;
- }
+ mxs_dma->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(mxs_dma->clk))
+ return PTR_ERR(mxs_dma->clk);
dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask);
dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask);
@@ -732,8 +775,9 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
ret = mxs_dma_init(mxs_dma);
if (ret)
- goto err_init;
+ return ret;
+ mxs_dma->pdev = pdev;
mxs_dma->dma_device.dev = &pdev->dev;
/* mxs_dma gets 65535 bytes maximum sg size */
@@ -751,22 +795,19 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
ret = dma_async_device_register(&mxs_dma->dma_device);
if (ret) {
dev_err(mxs_dma->dma_device.dev, "unable to register\n");
- goto err_init;
+ return ret;
+ }
+
+ ret = of_dma_controller_register(np, mxs_dma_xlate, mxs_dma);
+ if (ret) {
+ dev_err(mxs_dma->dma_device.dev,
+ "failed to register controller\n");
+ dma_async_device_unregister(&mxs_dma->dma_device);
}
dev_info(mxs_dma->dma_device.dev, "initialized\n");
return 0;
-
-err_init:
- clk_put(mxs_dma->clk);
-err_clk:
- iounmap(mxs_dma->base);
-err_ioremap:
- release_mem_region(iores->start, resource_size(iores));
-err_request_region:
- kfree(mxs_dma);
- return ret;
}
static struct platform_driver mxs_dma_driver = {
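With of_dma_controller_register() in place, a client device whose node carries a "dmas" phandle (one cell, as enforced by the args_count check in mxs_dma_xlate()) plus a "dma-names" entry can obtain its channel through the generic dmaengine helper instead of the old platform-data filter. The snippet below is only an illustration; the "rx" request name and the wrapper function are assumptions, not part of this patch.

/*
 * Illustrative sketch only: a client driver resolving an mxs-dma
 * channel via the device tree.  The "rx" name is an assumption.
 */
#include <linux/err.h>
#include <linux/device.h>
#include <linux/dmaengine.h>

static struct dma_chan *sketch_get_rx_chan(struct device *dev)
{
	struct dma_chan *chan;

	/* Resolved through mxs_dma_xlate() for a one-cell "dmas" specifier */
	chan = dma_request_slave_channel(dev, "rx");
	if (!chan)
		return ERR_PTR(-ENODEV);

	return chan;
}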
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 3e53200..9387630 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -36,42 +36,6 @@ config FIRMWARE_MEMMAP
See also Documentation/ABI/testing/sysfs-firmware-memmap.
-config EFI_VARS
- tristate "EFI Variable Support via sysfs"
- depends on EFI
- select UCS2_STRING
- default n
- help
- If you say Y here, you are able to get EFI (Extensible Firmware
- Interface) variable information via sysfs. You may read,
- write, create, and destroy EFI variables through this interface.
-
- Note that using this driver in concert with efibootmgr requires
- at least test release version 0.5.0-test3 or later, which is
- available from Matt Domsch's website located at:
- <http://linux.dell.com/efibootmgr/testing/efibootmgr-0.5.0-test3.tar.gz>
-
- Subsequent efibootmgr releases may be found at:
- <http://linux.dell.com/efibootmgr>
-
-config EFI_VARS_PSTORE
- bool "Register efivars backend for pstore"
- depends on EFI_VARS && PSTORE
- default y
- help
- Say Y here to enable use efivars as a backend to pstore. This
- will allow writing console messages, crash dumps, or anything
- else supported by pstore to EFI variables.
-
-config EFI_VARS_PSTORE_DEFAULT_DISABLE
- bool "Disable using efivars as a pstore backend by default"
- depends on EFI_VARS_PSTORE
- default n
- help
- Saying Y here will disable the use of efivars as a storage
- backend for pstore by default. This setting can be overridden
- using the efivars module's pstore_disable parameter.
-
config EFI_PCDP
bool "Console device selection via EFI PCDP or HCDP table"
depends on ACPI && EFI && IA64
@@ -165,5 +129,6 @@ config ISCSI_IBFT
Otherwise, say N.
source "drivers/firmware/google/Kconfig"
+source "drivers/firmware/efi/Kconfig"
endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 5a7e273..299fad6 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -4,7 +4,6 @@
obj-$(CONFIG_DMI) += dmi_scan.o
obj-$(CONFIG_DMI_SYSFS) += dmi-sysfs.o
obj-$(CONFIG_EDD) += edd.o
-obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_EFI_PCDP) += pcdp.o
obj-$(CONFIG_DELL_RBU) += dell_rbu.o
obj-$(CONFIG_DCDBAS) += dcdbas.o
@@ -14,3 +13,4 @@ obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
+obj-$(CONFIG_EFI) += efi/
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
new file mode 100644
index 0000000..b0fc7c7
--- /dev/null
+++ b/drivers/firmware/efi/Kconfig
@@ -0,0 +1,39 @@
+menu "EFI (Extensible Firmware Interface) Support"
+ depends on EFI
+
+config EFI_VARS
+ tristate "EFI Variable Support via sysfs"
+ depends on EFI
+ default n
+ help
+ If you say Y here, you are able to get EFI (Extensible Firmware
+ Interface) variable information via sysfs. You may read,
+ write, create, and destroy EFI variables through this interface.
+
+ Note that using this driver in concert with efibootmgr requires
+ at least test release version 0.5.0-test3 or later, which is
+ available from Matt Domsch's website located at:
+ <http://linux.dell.com/efibootmgr/testing/efibootmgr-0.5.0-test3.tar.gz>
+
+ Subsequent efibootmgr releases may be found at:
+ <http://linux.dell.com/efibootmgr>
+
+config EFI_VARS_PSTORE
+ tristate "Register efivars backend for pstore"
+ depends on EFI_VARS && PSTORE
+ default y
+ help
+ Say Y here to enable the use of efivars as a backend to pstore. This
+ will allow writing console messages, crash dumps, or anything
+ else supported by pstore to EFI variables.
+
+config EFI_VARS_PSTORE_DEFAULT_DISABLE
+ bool "Disable using efivars as a pstore backend by default"
+ depends on EFI_VARS_PSTORE
+ default n
+ help
+ Saying Y here will disable the use of efivars as a storage
+ backend for pstore by default. This setting can be overridden
+ using the efivars module's pstore_disable parameter.
+
+endmenu
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
new file mode 100644
index 0000000..99245ab
--- /dev/null
+++ b/drivers/firmware/efi/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for linux kernel
+#
+obj-y += efi.o vars.o
+obj-$(CONFIG_EFI_VARS) += efivars.o
+obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
new file mode 100644
index 0000000..202d2c8
--- /dev/null
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -0,0 +1,252 @@
+#include <linux/efi.h>
+#include <linux/module.h>
+#include <linux/pstore.h>
+#include <linux/slab.h>
+#include <linux/ucs2_string.h>
+
+#define DUMP_NAME_LEN 52
+
+static bool efivars_pstore_disable =
+ IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE);
+
+module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
+
+#define PSTORE_EFI_ATTRIBUTES \
+ (EFI_VARIABLE_NON_VOLATILE | \
+ EFI_VARIABLE_BOOTSERVICE_ACCESS | \
+ EFI_VARIABLE_RUNTIME_ACCESS)
+
+static int efi_pstore_open(struct pstore_info *psi)
+{
+ efivar_entry_iter_begin();
+ psi->data = NULL;
+ return 0;
+}
+
+static int efi_pstore_close(struct pstore_info *psi)
+{
+ efivar_entry_iter_end();
+ psi->data = NULL;
+ return 0;
+}
+
+struct pstore_read_data {
+ u64 *id;
+ enum pstore_type_id *type;
+ int *count;
+ struct timespec *timespec;
+ char **buf;
+};
+
+static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
+{
+ efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
+ struct pstore_read_data *cb_data = data;
+ char name[DUMP_NAME_LEN];
+ int i;
+ int cnt;
+ unsigned int part;
+ unsigned long time, size;
+
+ if (efi_guidcmp(entry->var.VendorGuid, vendor))
+ return 0;
+
+ for (i = 0; i < DUMP_NAME_LEN; i++)
+ name[i] = entry->var.VariableName[i];
+
+ if (sscanf(name, "dump-type%u-%u-%d-%lu",
+ cb_data->type, &part, &cnt, &time) == 4) {
+ *cb_data->id = part;
+ *cb_data->count = cnt;
+ cb_data->timespec->tv_sec = time;
+ cb_data->timespec->tv_nsec = 0;
+ } else if (sscanf(name, "dump-type%u-%u-%lu",
+ cb_data->type, &part, &time) == 3) {
+ /*
+ * Check if an old format,
+ * which doesn't support holding
+ * multiple logs, remains.
+ */
+ *cb_data->id = part;
+ *cb_data->count = 0;
+ cb_data->timespec->tv_sec = time;
+ cb_data->timespec->tv_nsec = 0;
+ } else
+ return 0;
+
+ entry->var.DataSize = 1024;
+ __efivar_entry_get(entry, &entry->var.Attributes,
+ &entry->var.DataSize, entry->var.Data);
+ size = entry->var.DataSize;
+
+ *cb_data->buf = kmalloc(size, GFP_KERNEL);
+ if (*cb_data->buf == NULL)
+ return -ENOMEM;
+ memcpy(*cb_data->buf, entry->var.Data, size);
+ return size;
+}
+
+static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
+ int *count, struct timespec *timespec,
+ char **buf, struct pstore_info *psi)
+{
+ struct pstore_read_data data;
+
+ data.id = id;
+ data.type = type;
+ data.count = count;
+ data.timespec = timespec;
+ data.buf = buf;
+
+ return __efivar_entry_iter(efi_pstore_read_func, &efivar_sysfs_list, &data,
+ (struct efivar_entry **)&psi->data);
+}
+
+static int efi_pstore_write(enum pstore_type_id type,
+ enum kmsg_dump_reason reason, u64 *id,
+ unsigned int part, int count, size_t size,
+ struct pstore_info *psi)
+{
+ char name[DUMP_NAME_LEN];
+ efi_char16_t efi_name[DUMP_NAME_LEN];
+ efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
+ int i, ret = 0;
+
+ sprintf(name, "dump-type%u-%u-%d-%lu", type, part, count,
+ get_seconds());
+
+ for (i = 0; i < DUMP_NAME_LEN; i++)
+ efi_name[i] = name[i];
+
+ efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES,
+ !pstore_cannot_block_path(reason),
+ size, psi->buf);
+
+ if (reason == KMSG_DUMP_OOPS)
+ efivar_run_worker();
+
+ *id = part;
+ return ret;
+}
+
+struct pstore_erase_data {
+ u64 id;
+ enum pstore_type_id type;
+ int count;
+ struct timespec time;
+ efi_char16_t *name;
+};
+
+/*
+ * Clean up an entry with the same name
+ */
+static int efi_pstore_erase_func(struct efivar_entry *entry, void *data)
+{
+ struct pstore_erase_data *ed = data;
+ efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
+ efi_char16_t efi_name_old[DUMP_NAME_LEN];
+ efi_char16_t *efi_name = ed->name;
+ unsigned long ucs2_len = ucs2_strlen(ed->name);
+ char name_old[DUMP_NAME_LEN];
+ int i;
+
+ if (efi_guidcmp(entry->var.VendorGuid, vendor))
+ return 0;
+
+ if (ucs2_strncmp(entry->var.VariableName,
+ efi_name, (size_t)ucs2_len)) {
+ /*
+ * Check if an old format, which doesn't support
+ * holding multiple logs, remains.
+ */
+ sprintf(name_old, "dump-type%u-%u-%lu", ed->type,
+ (unsigned int)ed->id, ed->time.tv_sec);
+
+ for (i = 0; i < DUMP_NAME_LEN; i++)
+ efi_name_old[i] = name_old[i];
+
+ if (ucs2_strncmp(entry->var.VariableName, efi_name_old,
+ ucs2_strlen(efi_name_old)))
+ return 0;
+ }
+
+ /* found */
+ __efivar_entry_delete(entry);
+ list_del(&entry->list);
+
+ return 1;
+}
+
+static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
+ struct timespec time, struct pstore_info *psi)
+{
+ struct pstore_erase_data edata;
+ struct efivar_entry *entry = NULL;
+ char name[DUMP_NAME_LEN];
+ efi_char16_t efi_name[DUMP_NAME_LEN];
+ int found, i;
+
+ sprintf(name, "dump-type%u-%u-%d-%lu", type, (unsigned int)id, count,
+ time.tv_sec);
+
+ for (i = 0; i < DUMP_NAME_LEN; i++)
+ efi_name[i] = name[i];
+
+ edata.id = id;
+ edata.type = type;
+ edata.count = count;
+ edata.time = time;
+ edata.name = efi_name;
+
+ efivar_entry_iter_begin();
+ found = __efivar_entry_iter(efi_pstore_erase_func, &efivar_sysfs_list, &edata, &entry);
+ efivar_entry_iter_end();
+
+ if (found)
+ efivar_unregister(entry);
+
+ return 0;
+}
+
+static struct pstore_info efi_pstore_info = {
+ .owner = THIS_MODULE,
+ .name = "efi",
+ .open = efi_pstore_open,
+ .close = efi_pstore_close,
+ .read = efi_pstore_read,
+ .write = efi_pstore_write,
+ .erase = efi_pstore_erase,
+};
+
+static __init int efivars_pstore_init(void)
+{
+ if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ return 0;
+
+ if (!efivars_kobject())
+ return 0;
+
+ if (efivars_pstore_disable)
+ return 0;
+
+ efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
+ if (!efi_pstore_info.buf)
+ return -ENOMEM;
+
+ efi_pstore_info.bufsize = 1024;
+ spin_lock_init(&efi_pstore_info.buf_lock);
+
+ pstore_register(&efi_pstore_info);
+
+ return 0;
+}
+
+static __exit void efivars_pstore_exit(void)
+{
+}
+
+module_init(efivars_pstore_init);
+module_exit(efivars_pstore_exit);
+
+MODULE_DESCRIPTION("EFI variable backend for pstore");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
new file mode 100644
index 0000000..5145fa3
--- /dev/null
+++ b/drivers/firmware/efi/efi.c
@@ -0,0 +1,134 @@
+/*
+ * efi.c - EFI subsystem
+ *
+ * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
+ * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
+ * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
+ *
+ * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
+ * allowing the efivarfs to be mounted or the efivars module to be loaded.
+ * The existence of /sys/firmware/efi may also be used by userspace to
+ * determine that the system supports EFI.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/efi.h>
+
+static struct kobject *efi_kobj;
+static struct kobject *efivars_kobj;
+
+/*
+ * Let's not leave out systab information that snuck into
+ * the efivars driver
+ */
+static ssize_t systab_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ char *str = buf;
+
+ if (!kobj || !buf)
+ return -EINVAL;
+
+ if (efi.mps != EFI_INVALID_TABLE_ADDR)
+ str += sprintf(str, "MPS=0x%lx\n", efi.mps);
+ if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
+ str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
+ if (efi.acpi != EFI_INVALID_TABLE_ADDR)
+ str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
+ if (efi.smbios != EFI_INVALID_TABLE_ADDR)
+ str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
+ if (efi.hcdp != EFI_INVALID_TABLE_ADDR)
+ str += sprintf(str, "HCDP=0x%lx\n", efi.hcdp);
+ if (efi.boot_info != EFI_INVALID_TABLE_ADDR)
+ str += sprintf(str, "BOOTINFO=0x%lx\n", efi.boot_info);
+ if (efi.uga != EFI_INVALID_TABLE_ADDR)
+ str += sprintf(str, "UGA=0x%lx\n", efi.uga);
+
+ return str - buf;
+}
+
+static struct kobj_attribute efi_attr_systab =
+ __ATTR(systab, 0400, systab_show, NULL);
+
+static struct attribute *efi_subsys_attrs[] = {
+ &efi_attr_systab.attr,
+ NULL, /* maybe more in the future? */
+};
+
+static struct attribute_group efi_subsys_attr_group = {
+ .attrs = efi_subsys_attrs,
+};
+
+static struct efivars generic_efivars;
+static struct efivar_operations generic_ops;
+
+static int generic_ops_register(void)
+{
+ generic_ops.get_variable = efi.get_variable;
+ generic_ops.set_variable = efi.set_variable;
+ generic_ops.get_next_variable = efi.get_next_variable;
+ generic_ops.query_variable_store = efi_query_variable_store;
+
+ return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
+}
+
+static void generic_ops_unregister(void)
+{
+ efivars_unregister(&generic_efivars);
+}
+
+/*
+ * We register the efi subsystem with the firmware subsystem and the
+ * efivars subsystem with the efi subsystem, if the system was booted with
+ * EFI.
+ */
+static int __init efisubsys_init(void)
+{
+ int error;
+
+ if (!efi_enabled(EFI_BOOT))
+ return 0;
+
+ /* We register the efi directory at /sys/firmware/efi */
+ efi_kobj = kobject_create_and_add("efi", firmware_kobj);
+ if (!efi_kobj) {
+ pr_err("efi: Firmware registration failed.\n");
+ return -ENOMEM;
+ }
+
+ error = generic_ops_register();
+ if (error)
+ goto err_put;
+
+ error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
+ if (error) {
+ pr_err("efi: Sysfs attribute export failed with error %d.\n",
+ error);
+ goto err_unregister;
+ }
+
+ /* and the standard mountpoint for efivarfs */
+ efivars_kobj = kobject_create_and_add("efivars", efi_kobj);
+ if (!efivars_kobj) {
+ pr_err("efivars: Subsystem registration failed.\n");
+ error = -ENOMEM;
+ goto err_remove_group;
+ }
+
+ return 0;
+
+err_remove_group:
+ sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
+err_unregister:
+ generic_ops_unregister();
+err_put:
+ kobject_put(efi_kobj);
+ return error;
+}
+
+subsys_initcall(efisubsys_init);
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
new file mode 100644
index 0000000..b623c59
--- /dev/null
+++ b/drivers/firmware/efi/efivars.c
@@ -0,0 +1,617 @@
+/*
+ * Originally from efivars.c,
+ *
+ * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
+ * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
+ *
+ * This code takes all variables accessible from EFI runtime and
+ * exports them via sysfs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Changelog:
+ *
+ * 17 May 2004 - Matt Domsch <Matt_Domsch@dell.com>
+ * remove check for efi_enabled in exit
+ * add MODULE_VERSION
+ *
+ * 26 Apr 2004 - Matt Domsch <Matt_Domsch@dell.com>
+ * minor bug fixes
+ *
+ * 21 Apr 2004 - Matt Tolentino <matthew.e.tolentino@intel.com>
+ * converted driver to export variable information via sysfs
+ * and moved to drivers/firmware directory
+ * bumped revision number to v0.07 to reflect conversion & move
+ *
+ * 10 Dec 2002 - Matt Domsch <Matt_Domsch@dell.com>
+ * fix locking per Peter Chubb's findings
+ *
+ * 25 Mar 2002 - Matt Domsch <Matt_Domsch@dell.com>
+ * move uuid_unparse() to include/asm-ia64/efi.h:efi_guid_unparse()
+ *
+ * 12 Feb 2002 - Matt Domsch <Matt_Domsch@dell.com>
+ * use list_for_each_safe when deleting vars.
+ * remove ifdef CONFIG_SMP around include <linux/smp.h>
+ * v0.04 release to linux-ia64@linuxia64.org
+ *
+ * 20 April 2001 - Matt Domsch <Matt_Domsch@dell.com>
+ * Moved vars from /proc/efi to /proc/efi/vars, and made
+ * efi.c own the /proc/efi directory.
+ * v0.03 release to linux-ia64@linuxia64.org
+ *
+ * 26 March 2001 - Matt Domsch <Matt_Domsch@dell.com>
+ * At the request of Stephane, moved ownership of /proc/efi
+ * to efi.c, and now efivars lives under /proc/efi/vars.
+ *
+ * 12 March 2001 - Matt Domsch <Matt_Domsch@dell.com>
+ * Feedback received from Stephane Eranian incorporated.
+ * efivar_write() checks copy_from_user() return value.
+ * efivar_read/write() returns proper errno.
+ * v0.02 release to linux-ia64@linuxia64.org
+ *
+ * 26 February 2001 - Matt Domsch <Matt_Domsch@dell.com>
+ * v0.01 release to linux-ia64@linuxia64.org
+ */
+
+#include <linux/efi.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/ucs2_string.h>
+
+#define EFIVARS_VERSION "0.08"
+#define EFIVARS_DATE "2004-May-17"
+
+MODULE_AUTHOR("Matt Domsch <Matt_Domsch@Dell.com>");
+MODULE_DESCRIPTION("sysfs interface to EFI Variables");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(EFIVARS_VERSION);
+
+LIST_HEAD(efivar_sysfs_list);
+EXPORT_SYMBOL_GPL(efivar_sysfs_list);
+
+static struct kset *efivars_kset;
+
+static struct bin_attribute *efivars_new_var;
+static struct bin_attribute *efivars_del_var;
+
+struct efivar_attribute {
+ struct attribute attr;
+ ssize_t (*show) (struct efivar_entry *entry, char *buf);
+ ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count);
+};
+
+#define EFIVAR_ATTR(_name, _mode, _show, _store) \
+struct efivar_attribute efivar_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode}, \
+ .show = _show, \
+ .store = _store, \
+};
+
+#define to_efivar_attr(_attr) container_of(_attr, struct efivar_attribute, attr)
+#define to_efivar_entry(obj) container_of(obj, struct efivar_entry, kobj)
+
+/*
+ * Prototype for sysfs creation function
+ */
+static int
+efivar_create_sysfs_entry(struct efivar_entry *new_var);
+
+static ssize_t
+efivar_guid_read(struct efivar_entry *entry, char *buf)
+{
+ struct efi_variable *var = &entry->var;
+ char *str = buf;
+
+ if (!entry || !buf)
+ return 0;
+
+ efi_guid_unparse(&var->VendorGuid, str);
+ str += strlen(str);
+ str += sprintf(str, "\n");
+
+ return str - buf;
+}
+
+static ssize_t
+efivar_attr_read(struct efivar_entry *entry, char *buf)
+{
+ struct efi_variable *var = &entry->var;
+ char *str = buf;
+
+ if (!entry || !buf)
+ return -EINVAL;
+
+ var->DataSize = 1024;
+ if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
+ return -EIO;
+
+ if (var->Attributes & EFI_VARIABLE_NON_VOLATILE)
+ str += sprintf(str, "EFI_VARIABLE_NON_VOLATILE\n");
+ if (var->Attributes & EFI_VARIABLE_BOOTSERVICE_ACCESS)
+ str += sprintf(str, "EFI_VARIABLE_BOOTSERVICE_ACCESS\n");
+ if (var->Attributes & EFI_VARIABLE_RUNTIME_ACCESS)
+ str += sprintf(str, "EFI_VARIABLE_RUNTIME_ACCESS\n");
+ if (var->Attributes & EFI_VARIABLE_HARDWARE_ERROR_RECORD)
+ str += sprintf(str, "EFI_VARIABLE_HARDWARE_ERROR_RECORD\n");
+ if (var->Attributes & EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS)
+ str += sprintf(str,
+ "EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS\n");
+ if (var->Attributes &
+ EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS)
+ str += sprintf(str,
+ "EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS\n");
+ if (var->Attributes & EFI_VARIABLE_APPEND_WRITE)
+ str += sprintf(str, "EFI_VARIABLE_APPEND_WRITE\n");
+ return str - buf;
+}
+
+static ssize_t
+efivar_size_read(struct efivar_entry *entry, char *buf)
+{
+ struct efi_variable *var = &entry->var;
+ char *str = buf;
+
+ if (!entry || !buf)
+ return -EINVAL;
+
+ var->DataSize = 1024;
+ if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
+ return -EIO;
+
+ str += sprintf(str, "0x%lx\n", var->DataSize);
+ return str - buf;
+}
+
+static ssize_t
+efivar_data_read(struct efivar_entry *entry, char *buf)
+{
+ struct efi_variable *var = &entry->var;
+
+ if (!entry || !buf)
+ return -EINVAL;
+
+ var->DataSize = 1024;
+ if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
+ return -EIO;
+
+ memcpy(buf, var->Data, var->DataSize);
+ return var->DataSize;
+}
+/*
+ * We allow each variable to be edited via rewriting the
+ * entire efi variable structure.
+ */
+static ssize_t
+efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
+{
+ struct efi_variable *new_var, *var = &entry->var;
+ int err;
+
+ if (count != sizeof(struct efi_variable))
+ return -EINVAL;
+
+ new_var = (struct efi_variable *)buf;
+ /*
+ * If only updating the variable data, then the name
+ * and guid should remain the same
+ */
+ if (memcmp(new_var->VariableName, var->VariableName, sizeof(var->VariableName)) ||
+ efi_guidcmp(new_var->VendorGuid, var->VendorGuid)) {
+ printk(KERN_ERR "efivars: Cannot edit the wrong variable!\n");
+ return -EINVAL;
+ }
+
+ if ((new_var->DataSize <= 0) || (new_var->Attributes == 0)){
+ printk(KERN_ERR "efivars: DataSize & Attributes must be valid!\n");
+ return -EINVAL;
+ }
+
+ if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
+ efivar_validate(new_var, new_var->Data, new_var->DataSize) == false) {
+ printk(KERN_ERR "efivars: Malformed variable content\n");
+ return -EINVAL;
+ }
+
+ memcpy(&entry->var, new_var, count);
+
+ err = efivar_entry_set(entry, new_var->Attributes,
+ new_var->DataSize, new_var->Data, false);
+ if (err) {
+ printk(KERN_WARNING "efivars: set_variable() failed: status=%d\n", err);
+ return -EIO;
+ }
+
+ return count;
+}
+
+static ssize_t
+efivar_show_raw(struct efivar_entry *entry, char *buf)
+{
+ struct efi_variable *var = &entry->var;
+
+ if (!entry || !buf)
+ return 0;
+
+ var->DataSize = 1024;
+ if (efivar_entry_get(entry, &entry->var.Attributes,
+ &entry->var.DataSize, entry->var.Data))
+ return -EIO;
+
+ memcpy(buf, var, sizeof(*var));
+
+ return sizeof(*var);
+}
+
+/*
+ * Generic read/write functions that call the specific functions of
+ * the attributes...
+ */
+static ssize_t efivar_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct efivar_entry *var = to_efivar_entry(kobj);
+ struct efivar_attribute *efivar_attr = to_efivar_attr(attr);
+ ssize_t ret = -EIO;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (efivar_attr->show) {
+ ret = efivar_attr->show(var, buf);
+ }
+ return ret;
+}
+
+static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct efivar_entry *var = to_efivar_entry(kobj);
+ struct efivar_attribute *efivar_attr = to_efivar_attr(attr);
+ ssize_t ret = -EIO;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (efivar_attr->store)
+ ret = efivar_attr->store(var, buf, count);
+
+ return ret;
+}
+
+static const struct sysfs_ops efivar_attr_ops = {
+ .show = efivar_attr_show,
+ .store = efivar_attr_store,
+};
+
+static void efivar_release(struct kobject *kobj)
+{
+ struct efivar_entry *var = container_of(kobj, struct efivar_entry, kobj);
+ kfree(var);
+}
+
+static EFIVAR_ATTR(guid, 0400, efivar_guid_read, NULL);
+static EFIVAR_ATTR(attributes, 0400, efivar_attr_read, NULL);
+static EFIVAR_ATTR(size, 0400, efivar_size_read, NULL);
+static EFIVAR_ATTR(data, 0400, efivar_data_read, NULL);
+static EFIVAR_ATTR(raw_var, 0600, efivar_show_raw, efivar_store_raw);
+
+static struct attribute *def_attrs[] = {
+ &efivar_attr_guid.attr,
+ &efivar_attr_size.attr,
+ &efivar_attr_attributes.attr,
+ &efivar_attr_data.attr,
+ &efivar_attr_raw_var.attr,
+ NULL,
+};
+
+static struct kobj_type efivar_ktype = {
+ .release = efivar_release,
+ .sysfs_ops = &efivar_attr_ops,
+ .default_attrs = def_attrs,
+};
+
+static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ struct efi_variable *new_var = (struct efi_variable *)buf;
+ struct efivar_entry *new_entry;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
+ efivar_validate(new_var, new_var->Data, new_var->DataSize) == false) {
+ printk(KERN_ERR "efivars: Malformed variable content\n");
+ return -EINVAL;
+ }
+
+ new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
+ if (!new_entry)
+ return -ENOMEM;
+
+ memcpy(&new_entry->var, new_var, sizeof(*new_var));
+
+ err = efivar_entry_set(new_entry, new_var->Attributes, new_var->DataSize,
+ new_var->Data, &efivar_sysfs_list);
+ if (err) {
+ if (err == -EEXIST)
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (efivar_create_sysfs_entry(new_entry)) {
+ printk(KERN_WARNING "efivars: failed to create sysfs entry.\n");
+ kfree(new_entry);
+ }
+ return count;
+
+out:
+ kfree(new_entry);
+ return err;
+}
+
+static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ struct efi_variable *del_var = (struct efi_variable *)buf;
+ struct efivar_entry *entry;
+ int err = 0;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ efivar_entry_iter_begin();
+ entry = efivar_entry_find(del_var->VariableName, del_var->VendorGuid,
+ &efivar_sysfs_list, true);
+ if (!entry)
+ err = -EINVAL;
+ else if (__efivar_entry_delete(entry))
+ err = -EIO;
+
+ efivar_entry_iter_end();
+
+ if (err)
+ return err;
+
+ efivar_unregister(entry);
+
+ /* It's dead Jim.... */
+ return count;
+}
+
+/**
+ * efivar_create_sysfs_entry - create a new entry in sysfs
+ * @new_var: efivar entry to create
+ *
+ * Returns 1 on failure, 0 on success
+ */
+static int
+efivar_create_sysfs_entry(struct efivar_entry *new_var)
+{
+ int i, short_name_size;
+ char *short_name;
+ unsigned long variable_name_size;
+ efi_char16_t *variable_name;
+
+ variable_name = new_var->var.VariableName;
+ variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t);
+
+ /*
+ * Length of the variable bytes in ASCII, plus the '-' separator,
+ * plus the GUID, plus trailing NUL
+ */
+ short_name_size = variable_name_size / sizeof(efi_char16_t)
+ + 1 + EFI_VARIABLE_GUID_LEN + 1;
+
+ short_name = kzalloc(short_name_size, GFP_KERNEL);
+
+ if (!short_name)
+ return 1;
+
+ /* Convert Unicode to normal chars (assume top bits are 0),
+ ala UTF-8 */
+ for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) {
+ short_name[i] = variable_name[i] & 0xFF;
+ }
+ /* This is ugly, but necessary to separate one vendor's
+ private variables from another's. */
+
+ *(short_name + strlen(short_name)) = '-';
+ efi_guid_unparse(&new_var->var.VendorGuid,
+ short_name + strlen(short_name));
+
+ new_var->kobj.kset = efivars_kset;
+
+ i = kobject_init_and_add(&new_var->kobj, &efivar_ktype,
+ NULL, "%s", short_name);
+ kfree(short_name);
+ if (i)
+ return 1;
+
+ kobject_uevent(&new_var->kobj, KOBJ_ADD);
+ efivar_entry_add(new_var, &efivar_sysfs_list);
+
+ return 0;
+}
+
+static int
+create_efivars_bin_attributes(void)
+{
+ struct bin_attribute *attr;
+ int error;
+
+ /* new_var */
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+ if (!attr)
+ return -ENOMEM;
+
+ attr->attr.name = "new_var";
+ attr->attr.mode = 0200;
+ attr->write = efivar_create;
+ efivars_new_var = attr;
+
+ /* del_var */
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+ if (!attr) {
+ error = -ENOMEM;
+ goto out_free;
+ }
+ attr->attr.name = "del_var";
+ attr->attr.mode = 0200;
+ attr->write = efivar_delete;
+ efivars_del_var = attr;
+
+ sysfs_bin_attr_init(efivars_new_var);
+ sysfs_bin_attr_init(efivars_del_var);
+
+ /* Register */
+ error = sysfs_create_bin_file(&efivars_kset->kobj, efivars_new_var);
+ if (error) {
+ printk(KERN_ERR "efivars: unable to create new_var sysfs file"
+ " due to error %d\n", error);
+ goto out_free;
+ }
+
+ error = sysfs_create_bin_file(&efivars_kset->kobj, efivars_del_var);
+ if (error) {
+ printk(KERN_ERR "efivars: unable to create del_var sysfs file"
+ " due to error %d\n", error);
+ sysfs_remove_bin_file(&efivars_kset->kobj, efivars_new_var);
+ goto out_free;
+ }
+
+ return 0;
+out_free:
+ kfree(efivars_del_var);
+ efivars_del_var = NULL;
+ kfree(efivars_new_var);
+ efivars_new_var = NULL;
+ return error;
+}
+
+static int efivar_update_sysfs_entry(efi_char16_t *name, efi_guid_t vendor,
+ unsigned long name_size, void *data)
+{
+ struct efivar_entry *entry = data;
+
+ if (efivar_entry_find(name, vendor, &efivar_sysfs_list, false))
+ return 0;
+
+ memcpy(entry->var.VariableName, name, name_size);
+ memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
+
+ return 1;
+}
+
+static void efivar_update_sysfs_entries(struct work_struct *work)
+{
+ struct efivar_entry *entry;
+ int err;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return;
+
+ /* Add new sysfs entries */
+ while (1) {
+ memset(entry, 0, sizeof(*entry));
+
+ err = efivar_init(efivar_update_sysfs_entry, entry,
+ true, false, &efivar_sysfs_list);
+ if (!err)
+ break;
+
+ efivar_create_sysfs_entry(entry);
+ }
+
+ kfree(entry);
+}
+
+static int efivars_sysfs_callback(efi_char16_t *name, efi_guid_t vendor,
+ unsigned long name_size, void *data)
+{
+ struct efivar_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ memcpy(entry->var.VariableName, name, name_size);
+ memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
+
+ efivar_create_sysfs_entry(entry);
+
+ return 0;
+}
+
+static int efivar_sysfs_destroy(struct efivar_entry *entry, void *data)
+{
+ efivar_entry_remove(entry);
+ efivar_unregister(entry);
+ return 0;
+}
+
+void efivars_sysfs_exit(void)
+{
+ /* Remove all entries and destroy */
+ __efivar_entry_iter(efivar_sysfs_destroy, &efivar_sysfs_list, NULL, NULL);
+
+ if (efivars_new_var)
+ sysfs_remove_bin_file(&efivars_kset->kobj, efivars_new_var);
+ if (efivars_del_var)
+ sysfs_remove_bin_file(&efivars_kset->kobj, efivars_del_var);
+ kfree(efivars_new_var);
+ kfree(efivars_del_var);
+ kset_unregister(efivars_kset);
+}
+
+int efivars_sysfs_init(void)
+{
+ struct kobject *parent_kobj = efivars_kobject();
+ int error = 0;
+
+ /* No efivars has been registered yet */
+ if (!parent_kobj)
+ return 0;
+
+ printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,
+ EFIVARS_DATE);
+
+ efivars_kset = kset_create_and_add("vars", NULL, parent_kobj);
+ if (!efivars_kset) {
+ printk(KERN_ERR "efivars: Subsystem registration failed.\n");
+ return -ENOMEM;
+ }
+
+ efivar_init(efivars_sysfs_callback, NULL, false,
+ true, &efivar_sysfs_list);
+
+ error = create_efivars_bin_attributes();
+ if (error) {
+ efivars_sysfs_exit();
+ return error;
+ }
+
+ INIT_WORK(&efivar_work, efivar_update_sysfs_entries);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(efivars_sysfs_init);
+
+module_init(efivars_sysfs_init);
+module_exit(efivars_sysfs_exit);
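Each variable registered by efivar_create_sysfs_entry() ends up as a directory /sys/firmware/efi/vars/<Name>-<VendorGuid>/ containing the guid, size, attributes, data and raw_var attributes defined above, alongside the new_var/del_var control files at the top of the kset. The userspace sketch below simply reads one such attribute; the BootOrder name and the EFI global-variable GUID in the path are assumptions for the example, not something this code hardcodes.

/*
 * Illustrative sketch only: reading the "size" attribute of one
 * exported variable.  The variable name and GUID are assumptions.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/firmware/efi/vars/"
		"BootOrder-8be4df61-93ca-11d2-aa0d-00e098032b8c/size";
	char buf[32];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("DataSize: %s", buf);	/* printed by efivar_size_read() as "0x%lx\n" */
	fclose(f);
	return EXIT_SUCCESS;
}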
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
new file mode 100644
index 0000000..391c67b
--- /dev/null
+++ b/drivers/firmware/efi/vars.c
@@ -0,0 +1,1041 @@
+/*
+ * Originally from efivars.c
+ *
+ * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
+ * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/capability.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/smp.h>
+#include <linux/efi.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/ucs2_string.h>
+
+/* Private pointer to registered efivars */
+static struct efivars *__efivars;
+
+static bool efivar_wq_enabled = true;
+DECLARE_WORK(efivar_work, NULL);
+EXPORT_SYMBOL_GPL(efivar_work);
+
+static bool
+validate_device_path(struct efi_variable *var, int match, u8 *buffer,
+ unsigned long len)
+{
+ struct efi_generic_dev_path *node;
+ int offset = 0;
+
+ node = (struct efi_generic_dev_path *)buffer;
+
+ if (len < sizeof(*node))
+ return false;
+
+ while (offset <= len - sizeof(*node) &&
+ node->length >= sizeof(*node) &&
+ node->length <= len - offset) {
+ offset += node->length;
+
+ if ((node->type == EFI_DEV_END_PATH ||
+ node->type == EFI_DEV_END_PATH2) &&
+ node->sub_type == EFI_DEV_END_ENTIRE)
+ return true;
+
+ node = (struct efi_generic_dev_path *)(buffer + offset);
+ }
+
+ /*
+ * If we're here then either node->length pointed past the end
+ * of the buffer or we reached the end of the buffer without
+ * finding a device path end node.
+ */
+ return false;
+}
+
+static bool
+validate_boot_order(struct efi_variable *var, int match, u8 *buffer,
+ unsigned long len)
+{
+ /* An array of 16-bit integers */
+ if ((len % 2) != 0)
+ return false;
+
+ return true;
+}
+
+static bool
+validate_load_option(struct efi_variable *var, int match, u8 *buffer,
+ unsigned long len)
+{
+ u16 filepathlength;
+ int i, desclength = 0, namelen;
+
+ namelen = ucs2_strnlen(var->VariableName, sizeof(var->VariableName));
+
+ /* Either "Boot" or "Driver" followed by four digits of hex */
+ for (i = match; i < match+4; i++) {
+ if (var->VariableName[i] > 127 ||
+ hex_to_bin(var->VariableName[i] & 0xff) < 0)
+ return true;
+ }
+
+ /* Reject it if there's 4 digits of hex and then further content */
+ if (namelen > match + 4)
+ return false;
+
+ /* A valid entry must be at least 8 bytes */
+ if (len < 8)
+ return false;
+
+ filepathlength = buffer[4] | buffer[5] << 8;
+
+ /*
+ * There's no stored length for the description, so it has to be
+ * found by hand
+ */
+ desclength = ucs2_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
+
+ /* Each boot entry must have a descriptor */
+ if (!desclength)
+ return false;
+
+ /*
+ * If the sum of the length of the description, the claimed filepath
+ * length and the original header are greater than the length of the
+ * variable, it's malformed
+ */
+ if ((desclength + filepathlength + 6) > len)
+ return false;
+
+ /*
+ * And, finally, check the filepath
+ */
+ return validate_device_path(var, match, buffer + desclength + 6,
+ filepathlength);
+}
+
+static bool
+validate_uint16(struct efi_variable *var, int match, u8 *buffer,
+ unsigned long len)
+{
+ /* A single 16-bit integer */
+ if (len != 2)
+ return false;
+
+ return true;
+}
+
+static bool
+validate_ascii_string(struct efi_variable *var, int match, u8 *buffer,
+ unsigned long len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (buffer[i] > 127)
+ return false;
+
+ if (buffer[i] == 0)
+ return true;
+ }
+
+ return false;
+}
+
+struct variable_validate {
+ char *name;
+ bool (*validate)(struct efi_variable *var, int match, u8 *data,
+ unsigned long len);
+};
+
+static const struct variable_validate variable_validate[] = {
+ { "BootNext", validate_uint16 },
+ { "BootOrder", validate_boot_order },
+ { "DriverOrder", validate_boot_order },
+ { "Boot*", validate_load_option },
+ { "Driver*", validate_load_option },
+ { "ConIn", validate_device_path },
+ { "ConInDev", validate_device_path },
+ { "ConOut", validate_device_path },
+ { "ConOutDev", validate_device_path },
+ { "ErrOut", validate_device_path },
+ { "ErrOutDev", validate_device_path },
+ { "Timeout", validate_uint16 },
+ { "Lang", validate_ascii_string },
+ { "PlatformLang", validate_ascii_string },
+ { "", NULL },
+};
+
+bool
+efivar_validate(struct efi_variable *var, u8 *data, unsigned long len)
+{
+ int i;
+ u16 *unicode_name = var->VariableName;
+
+ for (i = 0; variable_validate[i].validate != NULL; i++) {
+ const char *name = variable_validate[i].name;
+ int match;
+
+ for (match = 0; ; match++) {
+ char c = name[match];
+ u16 u = unicode_name[match];
+
+ /* All special variables are plain ascii */
+ if (u > 127)
+ return true;
+
+ /* Wildcard in the matching name means we've matched */
+ if (c == '*')
+ return variable_validate[i].validate(var,
+ match, data, len);
+
+ /* Case sensitive match */
+ if (c != u)
+ break;
+
+ /* Reached the end of the string while matching */
+ if (!c)
+ return variable_validate[i].validate(var,
+ match, data, len);
+ }
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(efivar_validate);
+
+static efi_status_t
+check_var_size(u32 attributes, unsigned long size)
+{
+ const struct efivar_operations *fops = __efivars->ops;
+
+ if (!fops->query_variable_store)
+ return EFI_UNSUPPORTED;
+
+ return fops->query_variable_store(attributes, size);
+}
+
+static int efi_status_to_err(efi_status_t status)
+{
+ int err;
+
+ switch (status) {
+ case EFI_SUCCESS:
+ err = 0;
+ break;
+ case EFI_INVALID_PARAMETER:
+ err = -EINVAL;
+ break;
+ case EFI_OUT_OF_RESOURCES:
+ err = -ENOSPC;
+ break;
+ case EFI_DEVICE_ERROR:
+ err = -EIO;
+ break;
+ case EFI_WRITE_PROTECTED:
+ err = -EROFS;
+ break;
+ case EFI_SECURITY_VIOLATION:
+ err = -EACCES;
+ break;
+ case EFI_NOT_FOUND:
+ err = -ENOENT;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor,
+ struct list_head *head)
+{
+ struct efivar_entry *entry, *n;
+ unsigned long strsize1, strsize2;
+ bool found = false;
+
+ strsize1 = ucs2_strsize(variable_name, 1024);
+ list_for_each_entry_safe(entry, n, head, list) {
+ strsize2 = ucs2_strsize(entry->var.VariableName, 1024);
+ if (strsize1 == strsize2 &&
+ !memcmp(variable_name, &(entry->var.VariableName),
+ strsize2) &&
+ !efi_guidcmp(entry->var.VendorGuid,
+ *vendor)) {
+ found = true;
+ break;
+ }
+ }
+ return found;
+}
+
+/*
+ * Returns the size of variable_name, in bytes, including the
+ * terminating NULL character, or variable_name_size if no NULL
+ * character is found among the first variable_name_size bytes.
+ */
+static unsigned long var_name_strnsize(efi_char16_t *variable_name,
+ unsigned long variable_name_size)
+{
+ unsigned long len;
+ efi_char16_t c;
+
+ /*
+ * The variable name is, by definition, a NULL-terminated
+ * string, so make absolutely sure that variable_name_size is
+ * the value we expect it to be. If not, return the real size.
+ */
+ for (len = 2; len <= variable_name_size; len += sizeof(c)) {
+ c = variable_name[(len / sizeof(c)) - 1];
+ if (!c)
+ break;
+ }
+
+ return min(len, variable_name_size);
+}
+
+/*
+ * Print a warning when duplicate EFI variables are encountered and
+ * disable the sysfs workqueue since the firmware is buggy.
+ */
+static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid,
+ unsigned long len16)
+{
+ size_t i, len8 = len16 / sizeof(efi_char16_t);
+ char *s8;
+
+ /*
+ * Disable the workqueue since the algorithm it uses for
+ * detecting new variables won't work with this buggy
+ * implementation of GetNextVariableName().
+ */
+ efivar_wq_enabled = false;
+
+ s8 = kzalloc(len8, GFP_KERNEL);
+ if (!s8)
+ return;
+
+ for (i = 0; i < len8; i++)
+ s8[i] = s16[i];
+
+ printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n",
+ s8, vendor_guid);
+ kfree(s8);
+}
+
+/**
+ * efivar_init - build the initial list of EFI variables
+ * @func: callback function to invoke for every variable
+ * @data: function-specific data to pass to @func
+ * @atomic: do we need to execute the @func-loop atomically?
+ * @duplicates: error if we encounter duplicates on @head?
+ * @head: initialised head of variable list
+ *
+ * Get every EFI variable from the firmware and invoke @func. @func
+ * should call efivar_entry_add() to build the list of variables.
+ *
+ * Returns 0 on success, or a kernel error code on failure.
+ */
+int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
+ void *data, bool atomic, bool duplicates,
+ struct list_head *head)
+{
+ const struct efivar_operations *ops = __efivars->ops;
+ unsigned long variable_name_size = 1024;
+ efi_char16_t *variable_name;
+ efi_status_t status;
+ efi_guid_t vendor_guid;
+ int err = 0;
+
+ variable_name = kzalloc(variable_name_size, GFP_KERNEL);
+ if (!variable_name) {
+ printk(KERN_ERR "efivars: Memory allocation failed.\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_irq(&__efivars->lock);
+
+ /*
+ * Per EFI spec, the maximum storage allocated for both
+ * the variable name and variable data is 1024 bytes.
+ */
+
+ do {
+ variable_name_size = 1024;
+
+ status = ops->get_next_variable(&variable_name_size,
+ variable_name,
+ &vendor_guid);
+ switch (status) {
+ case EFI_SUCCESS:
+ if (!atomic)
+ spin_unlock_irq(&__efivars->lock);
+
+ variable_name_size = var_name_strnsize(variable_name,
+ variable_name_size);
+
+ /*
+ * Some firmware implementations return the
+ * same variable name on multiple calls to
+ * get_next_variable(). Terminate the loop
+ * immediately as there is no guarantee that
+ * we'll ever see a different variable name,
+ * and may end up looping here forever.
+ */
+ if (duplicates &&
+ variable_is_present(variable_name, &vendor_guid, head)) {
+ dup_variable_bug(variable_name, &vendor_guid,
+ variable_name_size);
+ if (!atomic)
+ spin_lock_irq(&__efivars->lock);
+
+ status = EFI_NOT_FOUND;
+ break;
+ }
+
+ err = func(variable_name, vendor_guid, variable_name_size, data);
+ if (err)
+ status = EFI_NOT_FOUND;
+
+ if (!atomic)
+ spin_lock_irq(&__efivars->lock);
+
+ break;
+ case EFI_NOT_FOUND:
+ break;
+ default:
+ printk(KERN_WARNING "efivars: get_next_variable: status=%lx\n",
+ status);
+ status = EFI_NOT_FOUND;
+ break;
+ }
+
+ } while (status != EFI_NOT_FOUND);
+
+ spin_unlock_irq(&__efivars->lock);
+
+ kfree(variable_name);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(efivar_init);
+
+/**
+ * efivar_entry_add - add entry to variable list
+ * @entry: entry to add to list
+ * @head: list head
+ */
+void efivar_entry_add(struct efivar_entry *entry, struct list_head *head)
+{
+ spin_lock_irq(&__efivars->lock);
+ list_add(&entry->list, head);
+ spin_unlock_irq(&__efivars->lock);
+}
+EXPORT_SYMBOL_GPL(efivar_entry_add);
+
+/**
+ * efivar_entry_remove - remove entry from variable list
+ * @entry: entry to remove from list
+ */
+void efivar_entry_remove(struct efivar_entry *entry)
+{
+ spin_lock_irq(&__efivars->lock);
+ list_del(&entry->list);
+ spin_unlock_irq(&__efivars->lock);
+}
+EXPORT_SYMBOL_GPL(efivar_entry_remove);
+
+/*
+ * efivar_entry_list_del_unlock - remove entry from variable list
+ * @entry: entry to remove
+ *
+ * Remove @entry from the variable list and release the list lock.
+ *
+ * NOTE: slightly weird locking semantics here - we expect to be
+ * called with the efivars lock already held, and we release it before
+ * returning. This is because this function is usually called after
+ * set_variable() while the lock is still held.
+ */
+static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
+{
+ WARN_ON(!spin_is_locked(&__efivars->lock));
+
+ list_del(&entry->list);
+ spin_unlock_irq(&__efivars->lock);
+}
+
+/**
+ * __efivar_entry_delete - delete an EFI variable
+ * @entry: entry containing EFI variable to delete
+ *
+ * Delete the variable from the firmware but leave @entry on the
+ * variable list.
+ *
+ * This function differs from efivar_entry_delete() because it does
+ * not remove @entry from the variable list. Also, unlike
+ * efivar_entry_delete(), it is safe to call from within an
+ * efivar_entry_iter_begin()/efivar_entry_iter_end() region.
+ *
+ * Returns 0 on success, or a converted EFI status code if
+ * set_variable() fails.
+ */
+int __efivar_entry_delete(struct efivar_entry *entry)
+{
+ const struct efivar_operations *ops = __efivars->ops;
+ efi_status_t status;
+
+ WARN_ON(!spin_is_locked(&__efivars->lock));
+
+ status = ops->set_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ 0, 0, NULL);
+
+ return efi_status_to_err(status);
+}
+EXPORT_SYMBOL_GPL(__efivar_entry_delete);
+
+/**
+ * efivar_entry_delete - delete variable and remove entry from list
+ * @entry: entry containing variable to delete
+ *
+ * Delete the variable from the firmware and remove @entry from the
+ * variable list. It is the caller's responsibility to free @entry
+ * once we return.
+ *
+ * Returns 0 on success, or a converted EFI status code if
+ * set_variable() fails.
+ */
+int efivar_entry_delete(struct efivar_entry *entry)
+{
+ const struct efivar_operations *ops = __efivars->ops;
+ efi_status_t status;
+
+ spin_lock_irq(&__efivars->lock);
+ status = ops->set_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ 0, 0, NULL);
+ if (!(status == EFI_SUCCESS || status == EFI_NOT_FOUND)) {
+ spin_unlock_irq(&__efivars->lock);
+ return efi_status_to_err(status);
+ }
+
+ efivar_entry_list_del_unlock(entry);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(efivar_entry_delete);
+
+/**
+ * efivar_entry_set - call set_variable()
+ * @entry: entry containing the EFI variable to write
+ * @attributes: variable attributes
+ * @size: size of @data buffer
+ * @data: buffer containing variable data
+ * @head: head of variable list
+ *
+ * Calls set_variable() for an EFI variable. If creating a new EFI
+ * variable, this function is usually followed by efivar_entry_add().
+ *
+ * Before writing the variable, the remaining EFI variable storage
+ * space is checked to ensure there is enough room available.
+ *
+ * If @head is not NULL a lookup is performed to determine whether
+ * the entry is already on the list.
+ *
+ * Returns 0 on success, -EEXIST if a lookup is performed and the entry
+ * already exists on the list, or a converted EFI status code if
+ * set_variable() fails.
+ */
+int efivar_entry_set(struct efivar_entry *entry, u32 attributes,
+ unsigned long size, void *data, struct list_head *head)
+{
+ const struct efivar_operations *ops = __efivars->ops;
+ efi_status_t status;
+ efi_char16_t *name = entry->var.VariableName;
+ efi_guid_t vendor = entry->var.VendorGuid;
+
+ spin_lock_irq(&__efivars->lock);
+
+ if (head && efivar_entry_find(name, vendor, head, false)) {
+ spin_unlock_irq(&__efivars->lock);
+ return -EEXIST;
+ }
+
+ status = check_var_size(attributes, size + ucs2_strsize(name, 1024));
+ if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED)
+ status = ops->set_variable(name, &vendor,
+ attributes, size, data);
+
+ spin_unlock_irq(&__efivars->lock);
+
+ return efi_status_to_err(status);
+}
+EXPORT_SYMBOL_GPL(efivar_entry_set);
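+
+/*
+ * Usage sketch (illustrative only, entry and my_list are hypothetical):
+ * creating a brand new variable and publishing it on the list. On
+ * -EEXIST the variable is already on @my_list and the unused entry is
+ * simply freed.
+ *
+ *    err = efivar_entry_set(entry, attributes, size, data, &my_list);
+ *    if (!err)
+ *        efivar_entry_add(entry, &my_list);
+ *    else if (err == -EEXIST)
+ *        kfree(entry);
+ */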
+
+/**
+ * efivar_entry_set_safe - call set_variable() if enough space in firmware
+ * @name: buffer containing the variable name
+ * @vendor: variable vendor guid
+ * @attributes: variable attributes
+ * @block: can we block in this context?
+ * @size: size of @data buffer
+ * @data: buffer containing variable data
+ *
+ * Ensures there is enough free storage in the firmware for this variable, and
+ * if so, calls set_variable(). If creating a new EFI variable, this function
+ * is usually followed by efivar_entry_add().
+ *
+ * Returns 0 on success, -ENOSPC if the firmware does not have enough
+ * space for set_variable() to succeed, or a converted EFI status code
+ * if set_variable() fails.
+ */
+int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
+ bool block, unsigned long size, void *data)
+{
+ const struct efivar_operations *ops = __efivars->ops;
+ unsigned long flags;
+ efi_status_t status;
+
+ if (!ops->query_variable_store)
+ return -ENOSYS;
+
+ if (!block) {
+ if (!spin_trylock_irqsave(&__efivars->lock, flags))
+ return -EBUSY;
+ } else {
+ spin_lock_irqsave(&__efivars->lock, flags);
+ }
+
+ status = check_var_size(attributes, size + ucs2_strsize(name, 1024));
+ if (status != EFI_SUCCESS) {
+ spin_unlock_irqrestore(&__efivars->lock, flags);
+ return -ENOSPC;
+ }
+
+ status = ops->set_variable(name, &vendor, attributes, size, data);
+
+ spin_unlock_irqrestore(&__efivars->lock, flags);
+
+ return efi_status_to_err(status);
+}
+EXPORT_SYMBOL_GPL(efivar_entry_set_safe);
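+
+/*
+ * Usage sketch (illustrative only): writing a crash dump variable from
+ * a context that may not block, as a pstore-style backend would.
+ * efi_name, size and buf are hypothetical; -EBUSY means the lock was
+ * contended and the write should be retried later.
+ *
+ *    efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
+ *
+ *    err = efivar_entry_set_safe(efi_name, vendor,
+ *                                EFI_VARIABLE_NON_VOLATILE |
+ *                                EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ *                                EFI_VARIABLE_RUNTIME_ACCESS,
+ *                                false, size, buf);
+ */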
+
+/**
+ * efivar_entry_find - search for an entry
+ * @name: the EFI variable name
+ * @guid: the EFI variable vendor's guid
+ * @head: head of the variable list
+ * @remove: should we remove the entry from the list?
+ *
+ * Search for an entry on the variable list that has the EFI variable
+ * name @name and vendor guid @guid. If an entry is found on the list
+ * and @remove is true, the entry is removed from the list.
+ *
+ * The caller MUST call efivar_entry_iter_begin() and
+ * efivar_entry_iter_end() before and after the invocation of this
+ * function, respectively.
+ *
+ * Returns the entry if found on the list, %NULL otherwise.
+ */
+struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
+ struct list_head *head, bool remove)
+{
+ struct efivar_entry *entry, *n;
+ int strsize1, strsize2;
+ bool found = false;
+
+ WARN_ON(!spin_is_locked(&__efivars->lock));
+
+ list_for_each_entry_safe(entry, n, head, list) {
+ strsize1 = ucs2_strsize(name, 1024);
+ strsize2 = ucs2_strsize(entry->var.VariableName, 1024);
+ if (strsize1 == strsize2 &&
+ !memcmp(name, &(entry->var.VariableName), strsize1) &&
+ !efi_guidcmp(guid, entry->var.VendorGuid)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return NULL;
+
+ if (remove)
+ list_del(&entry->list);
+
+ return entry;
+}
+EXPORT_SYMBOL_GPL(efivar_entry_find);
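+
+/*
+ * Usage sketch (illustrative only, name, guid and my_list are
+ * hypothetical): look up a variable and delete it from the firmware
+ * while holding the iterator lock, then free the unlinked entry once
+ * the lock has been dropped.
+ *
+ *    efivar_entry_iter_begin();
+ *    entry = efivar_entry_find(name, guid, &my_list, true);
+ *    if (entry)
+ *        err = __efivar_entry_delete(entry);
+ *    efivar_entry_iter_end();
+ *
+ *    kfree(entry);
+ */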
+
+/**
+ * efivar_entry_size - obtain the size of a variable
+ * @entry: entry for this variable
+ * @size: location to store the variable's size
+ */
+int efivar_entry_size(struct efivar_entry *entry, unsigned long *size)
+{
+ const struct efivar_operations *ops = __efivars->ops;
+ efi_status_t status;
+
+ *size = 0;
+
+ spin_lock_irq(&__efivars->lock);
+ status = ops->get_variable(entry->var.VariableName,
+ &entry->var.VendorGuid, NULL, size, NULL);
+ spin_unlock_irq(&__efivars->lock);
+
+ if (status != EFI_BUFFER_TOO_SMALL)
+ return efi_status_to_err(status);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(efivar_entry_size);
+
+/**
+ * __efivar_entry_get - call get_variable()
+ * @entry: read data for this variable
+ * @attributes: variable attributes
+ * @size: size of @data buffer
+ * @data: buffer to store variable data
+ *
+ * The caller MUST call efivar_entry_iter_begin() and
+ * efivar_entry_iter_end() before and after the invocation of this
+ * function, respectively.
+ */
+int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
+ unsigned long *size, void *data)
+{
+ const struct efivar_operations *ops = __efivars->ops;
+ efi_status_t status;
+
+ WARN_ON(!spin_is_locked(&__efivars->lock));
+
+ status = ops->get_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ attributes, size, data);
+
+ return efi_status_to_err(status);
+}
+EXPORT_SYMBOL_GPL(__efivar_entry_get);
+
+/**
+ * efivar_entry_get - call get_variable()
+ * @entry: read data for this variable
+ * @attributes: variable attributes
+ * @size: size of @data buffer
+ * @data: buffer to store variable data
+ */
+int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
+ unsigned long *size, void *data)
+{
+ const struct efivar_operations *ops = __efivars->ops;
+ efi_status_t status;
+
+ spin_lock_irq(&__efivars->lock);
+ status = ops->get_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ attributes, size, data);
+ spin_unlock_irq(&__efivars->lock);
+
+ return efi_status_to_err(status);
+}
+EXPORT_SYMBOL_GPL(efivar_entry_get);
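+
+/*
+ * Usage sketch (illustrative only, size, attributes and buf are
+ * hypothetical): the usual two-call pattern for reading a variable of
+ * unknown size.
+ *
+ *    err = efivar_entry_size(entry, &size);
+ *    if (err)
+ *        return err;
+ *
+ *    buf = kmalloc(size, GFP_KERNEL);
+ *    if (!buf)
+ *        return -ENOMEM;
+ *
+ *    err = efivar_entry_get(entry, &attributes, &size, buf);
+ */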
+
+/**
+ * efivar_entry_set_get_size - call set_variable() and get new size (atomic)
+ * @entry: entry containing variable to set and get
+ * @attributes: attributes of variable to be written
+ * @size: size of data buffer
+ * @data: buffer containing data to write
+ * @set: did the set_variable() call succeed?
+ *
+ * This is a pretty special (complex) function. See efivarfs_file_write().
+ *
+ * Atomically call set_variable() for @entry and if the call is
+ * successful, return the new size of the variable from get_variable()
+ * in @size. The success of set_variable() is indicated by @set.
+ *
+ * Returns 0 on success, -EINVAL if the variable data is invalid,
+ * -ENOSPC if the firmware does not have enough available space, or a
+ * converted EFI status code if either set_variable() or
+ * get_variable() fails.
+ *
+ * If the EFI variable does not exist when calling set_variable()
+ * (EFI_NOT_FOUND), @entry is removed from the variable list.
+ */
+int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
+ unsigned long *size, void *data, bool *set)
+{
+ const struct efivar_operations *ops = __efivars->ops;
+ efi_char16_t *name = entry->var.VariableName;
+ efi_guid_t *vendor = &entry->var.VendorGuid;
+ efi_status_t status;
+ int err;
+
+ *set = false;
+
+ if (!efivar_validate(&entry->var, data, *size))
+ return -EINVAL;
+
+ /*
+ * The lock here protects the get_variable call, the conditional
+ * set_variable call, and removal of the variable from the efivars
+ * list (in the case of an authenticated delete).
+ */
+ spin_lock_irq(&__efivars->lock);
+
+ /*
+ * Ensure that the available space hasn't shrunk below the safe level
+ */
+ status = check_var_size(attributes, *size + ucs2_strsize(name, 1024));
+ if (status != EFI_SUCCESS) {
+ if (status != EFI_UNSUPPORTED) {
+ err = efi_status_to_err(status);
+ goto out;
+ }
+
+ if (*size > 65536) {
+ err = -ENOSPC;
+ goto out;
+ }
+ }
+
+ status = ops->set_variable(name, vendor, attributes, *size, data);
+ if (status != EFI_SUCCESS) {
+ err = efi_status_to_err(status);
+ goto out;
+ }
+
+ *set = true;
+
+ /*
+ * Writing to the variable may have caused a change in size (which
+ * could either be an append or an overwrite), or the variable to be
+ * deleted. Perform a GetVariable() so we can tell what actually
+ * happened.
+ */
+ *size = 0;
+ status = ops->get_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ NULL, size, NULL);
+
+ if (status == EFI_NOT_FOUND)
+ efivar_entry_list_del_unlock(entry);
+ else
+ spin_unlock_irq(&__efivars->lock);
+
+ if (status && status != EFI_BUFFER_TOO_SMALL)
+ return efi_status_to_err(status);
+
+ return 0;
+
+out:
+ spin_unlock_irq(&__efivars->lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(efivar_entry_set_get_size);
+
+/**
+ * efivar_entry_iter_begin - begin iterating the variable list
+ *
+ * Lock the variable list to prevent entry insertion and removal until
+ * efivar_entry_iter_end() is called. This function is usually used in
+ * conjunction with __efivar_entry_iter() or efivar_entry_iter().
+ */
+void efivar_entry_iter_begin(void)
+{
+ spin_lock_irq(&__efivars->lock);
+}
+EXPORT_SYMBOL_GPL(efivar_entry_iter_begin);
+
+/**
+ * efivar_entry_iter_end - finish iterating the variable list
+ *
+ * Unlock the variable list and allow modifications to the list again.
+ */
+void efivar_entry_iter_end(void)
+{
+ spin_unlock_irq(&__efivars->lock);
+}
+EXPORT_SYMBOL_GPL(efivar_entry_iter_end);
+
+/**
+ * __efivar_entry_iter - iterate over variable list
+ * @func: callback function
+ * @head: head of the variable list
+ * @data: function-specific data to pass to callback
+ * @prev: entry to begin iterating from
+ *
+ * Iterate over the list of EFI variables and call @func with every
+ * entry on the list. It is safe for @func to remove entries in the
+ * list via efivar_entry_delete().
+ *
+ * You MUST call efivar_entry_iter_begin() before this function, and
+ * efivar_entry_iter_end() afterwards.
+ *
+ * It is possible to begin iteration from an arbitrary entry within
+ * the list by passing @prev. @prev is updated on return to point to
+ * the last entry passed to @func. To begin iterating from the
+ * beginning of the list @prev must be %NULL.
+ *
+ * The restrictions for @func are the same as documented for
+ * efivar_entry_iter().
+ */
+int __efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
+ struct list_head *head, void *data,
+ struct efivar_entry **prev)
+{
+ struct efivar_entry *entry, *n;
+ int err = 0;
+
+ if (!prev || !*prev) {
+ list_for_each_entry_safe(entry, n, head, list) {
+ err = func(entry, data);
+ if (err)
+ break;
+ }
+
+ if (prev)
+ *prev = entry;
+
+ return err;
+ }
+
+ list_for_each_entry_safe_continue((*prev), n, head, list) {
+ err = func(*prev, data);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(__efivar_entry_iter);
+
+/**
+ * efivar_entry_iter - iterate over variable list
+ * @func: callback function
+ * @head: head of variable list
+ * @data: function-specific data to pass to callback
+ *
+ * Iterate over the list of EFI variables and call @func with every
+ * entry on the list. It is safe for @func to remove entries in the
+ * list via efivar_entry_delete() while iterating.
+ *
+ * Some notes for the callback function:
+ * - a non-zero return value indicates an error and terminates the loop
+ * - @func is called from atomic context
+ */
+int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
+ struct list_head *head, void *data)
+{
+ int err = 0;
+
+ efivar_entry_iter_begin();
+ err = __efivar_entry_iter(func, head, data, NULL);
+ efivar_entry_iter_end();
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(efivar_entry_iter);
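+
+/*
+ * Usage sketch (illustrative only, count_entry() and my_list are
+ * hypothetical): a callback that counts the entries on a list. The
+ * callback runs from atomic context with the list lock held, so it
+ * must not sleep.
+ *
+ *    static int count_entry(struct efivar_entry *entry, void *data)
+ *    {
+ *        unsigned int *count = data;
+ *
+ *        (*count)++;
+ *        return 0;
+ *    }
+ *
+ *    unsigned int count = 0;
+ *
+ *    efivar_entry_iter(count_entry, &my_list, &count);
+ */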
+
+/**
+ * efivars_kobject - get the kobject for the registered efivars
+ *
+ * If efivars_register() has not been called we return NULL;
+ * otherwise we return the kobject used at registration time.
+ */
+struct kobject *efivars_kobject(void)
+{
+ if (!__efivars)
+ return NULL;
+
+ return __efivars->kobject;
+}
+EXPORT_SYMBOL_GPL(efivars_kobject);
+
+/**
+ * efivar_run_worker - schedule the efivar worker thread
+ */
+void efivar_run_worker(void)
+{
+ if (efivar_wq_enabled)
+ schedule_work(&efivar_work);
+}
+EXPORT_SYMBOL_GPL(efivar_run_worker);
+
+/**
+ * efivars_register - register an efivars
+ * @efivars: efivars to register
+ * @ops: efivars operations
+ * @kobject: @efivars-specific kobject
+ *
+ * Only a single efivars can be registered at any time.
+ */
+int efivars_register(struct efivars *efivars,
+ const struct efivar_operations *ops,
+ struct kobject *kobject)
+{
+ spin_lock_init(&efivars->lock);
+ efivars->ops = ops;
+ efivars->kobject = kobject;
+
+ __efivars = efivars;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(efivars_register);
+
+/**
+ * efivars_unregister - unregister an efivars
+ * @efivars: efivars to unregister
+ *
+ * The caller must have already removed every entry from the list;
+ * failure to do so is an error.
+ */
+int efivars_unregister(struct efivars *efivars)
+{
+ int rv;
+
+ if (!__efivars) {
+ printk(KERN_ERR "efivars not registered\n");
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if (__efivars != efivars) {
+ rv = -EINVAL;
+ goto out;
+ }
+
+ __efivars = NULL;
+
+ rv = 0;
+out:
+ return rv;
+}
+EXPORT_SYMBOL_GPL(efivars_unregister);
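+
+/*
+ * Registration sketch (illustrative only, generic_ops, generic_efivars
+ * and my_kobject are hypothetical): a front end wires the EFI runtime
+ * service pointers from the global struct efi into an
+ * efivar_operations and registers them together with its kobject.
+ *
+ *    static struct efivar_operations generic_ops;
+ *    static struct efivars generic_efivars;
+ *
+ *    generic_ops.get_variable = efi.get_variable;
+ *    generic_ops.set_variable = efi.set_variable;
+ *    generic_ops.get_next_variable = efi.get_next_variable;
+ *
+ *    err = efivars_register(&generic_efivars, &generic_ops, my_kobject);
+ */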
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
deleted file mode 100644
index f4baa11..0000000
--- a/drivers/firmware/efivars.c
+++ /dev/null
@@ -1,2117 +0,0 @@
-/*
- * EFI Variables - efivars.c
- *
- * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
- * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
- *
- * This code takes all variables accessible from EFI runtime and
- * exports them via sysfs
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Changelog:
- *
- * 17 May 2004 - Matt Domsch <Matt_Domsch@dell.com>
- * remove check for efi_enabled in exit
- * add MODULE_VERSION
- *
- * 26 Apr 2004 - Matt Domsch <Matt_Domsch@dell.com>
- * minor bug fixes
- *
- * 21 Apr 2004 - Matt Tolentino <matthew.e.tolentino@intel.com)
- * converted driver to export variable information via sysfs
- * and moved to drivers/firmware directory
- * bumped revision number to v0.07 to reflect conversion & move
- *
- * 10 Dec 2002 - Matt Domsch <Matt_Domsch@dell.com>
- * fix locking per Peter Chubb's findings
- *
- * 25 Mar 2002 - Matt Domsch <Matt_Domsch@dell.com>
- * move uuid_unparse() to include/asm-ia64/efi.h:efi_guid_unparse()
- *
- * 12 Feb 2002 - Matt Domsch <Matt_Domsch@dell.com>
- * use list_for_each_safe when deleting vars.
- * remove ifdef CONFIG_SMP around include <linux/smp.h>
- * v0.04 release to linux-ia64@linuxia64.org
- *
- * 20 April 2001 - Matt Domsch <Matt_Domsch@dell.com>
- * Moved vars from /proc/efi to /proc/efi/vars, and made
- * efi.c own the /proc/efi directory.
- * v0.03 release to linux-ia64@linuxia64.org
- *
- * 26 March 2001 - Matt Domsch <Matt_Domsch@dell.com>
- * At the request of Stephane, moved ownership of /proc/efi
- * to efi.c, and now efivars lives under /proc/efi/vars.
- *
- * 12 March 2001 - Matt Domsch <Matt_Domsch@dell.com>
- * Feedback received from Stephane Eranian incorporated.
- * efivar_write() checks copy_from_user() return value.
- * efivar_read/write() returns proper errno.
- * v0.02 release to linux-ia64@linuxia64.org
- *
- * 26 February 2001 - Matt Domsch <Matt_Domsch@dell.com>
- * v0.01 release to linux-ia64@linuxia64.org
- */
-
-#include <linux/capability.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/smp.h>
-#include <linux/efi.h>
-#include <linux/sysfs.h>
-#include <linux/kobject.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/pstore.h>
-#include <linux/ctype.h>
-#include <linux/ucs2_string.h>
-
-#include <linux/fs.h>
-#include <linux/ramfs.h>
-#include <linux/pagemap.h>
-
-#include <asm/uaccess.h>
-
-#define EFIVARS_VERSION "0.08"
-#define EFIVARS_DATE "2004-May-17"
-
-MODULE_AUTHOR("Matt Domsch <Matt_Domsch@Dell.com>");
-MODULE_DESCRIPTION("sysfs interface to EFI Variables");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(EFIVARS_VERSION);
-
-#define DUMP_NAME_LEN 52
-
-/*
- * Length of a GUID string (strlen("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"))
- * not including trailing NUL
- */
-#define GUID_LEN 36
-
-static bool efivars_pstore_disable =
- IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE);
-
-module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
-
-/*
- * The maximum size of VariableName + Data = 1024
- * Therefore, it's reasonable to save that much
- * space in each part of the structure,
- * and we use a page for reading/writing.
- */
-
-struct efi_variable {
- efi_char16_t VariableName[1024/sizeof(efi_char16_t)];
- efi_guid_t VendorGuid;
- unsigned long DataSize;
- __u8 Data[1024];
- efi_status_t Status;
- __u32 Attributes;
-} __attribute__((packed));
-
-struct efivar_entry {
- struct efivars *efivars;
- struct efi_variable var;
- struct list_head list;
- struct kobject kobj;
-};
-
-struct efivar_attribute {
- struct attribute attr;
- ssize_t (*show) (struct efivar_entry *entry, char *buf);
- ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count);
-};
-
-static struct efivars __efivars;
-static struct efivar_operations ops;
-
-#define PSTORE_EFI_ATTRIBUTES \
- (EFI_VARIABLE_NON_VOLATILE | \
- EFI_VARIABLE_BOOTSERVICE_ACCESS | \
- EFI_VARIABLE_RUNTIME_ACCESS)
-
-#define EFIVAR_ATTR(_name, _mode, _show, _store) \
-struct efivar_attribute efivar_attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode}, \
- .show = _show, \
- .store = _store, \
-};
-
-#define to_efivar_attr(_attr) container_of(_attr, struct efivar_attribute, attr)
-#define to_efivar_entry(obj) container_of(obj, struct efivar_entry, kobj)
-
-/*
- * Prototype for sysfs creation function
- */
-static int
-efivar_create_sysfs_entry(struct efivars *efivars,
- unsigned long variable_name_size,
- efi_char16_t *variable_name,
- efi_guid_t *vendor_guid);
-
-/*
- * Prototype for workqueue functions updating sysfs entry
- */
-
-static void efivar_update_sysfs_entries(struct work_struct *);
-static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries);
-static bool efivar_wq_enabled = true;
-
-static bool
-validate_device_path(struct efi_variable *var, int match, u8 *buffer,
- unsigned long len)
-{
- struct efi_generic_dev_path *node;
- int offset = 0;
-
- node = (struct efi_generic_dev_path *)buffer;
-
- if (len < sizeof(*node))
- return false;
-
- while (offset <= len - sizeof(*node) &&
- node->length >= sizeof(*node) &&
- node->length <= len - offset) {
- offset += node->length;
-
- if ((node->type == EFI_DEV_END_PATH ||
- node->type == EFI_DEV_END_PATH2) &&
- node->sub_type == EFI_DEV_END_ENTIRE)
- return true;
-
- node = (struct efi_generic_dev_path *)(buffer + offset);
- }
-
- /*
- * If we're here then either node->length pointed past the end
- * of the buffer or we reached the end of the buffer without
- * finding a device path end node.
- */
- return false;
-}
-
-static bool
-validate_boot_order(struct efi_variable *var, int match, u8 *buffer,
- unsigned long len)
-{
- /* An array of 16-bit integers */
- if ((len % 2) != 0)
- return false;
-
- return true;
-}
-
-static bool
-validate_load_option(struct efi_variable *var, int match, u8 *buffer,
- unsigned long len)
-{
- u16 filepathlength;
- int i, desclength = 0, namelen;
-
- namelen = ucs2_strnlen(var->VariableName, sizeof(var->VariableName));
-
- /* Either "Boot" or "Driver" followed by four digits of hex */
- for (i = match; i < match+4; i++) {
- if (var->VariableName[i] > 127 ||
- hex_to_bin(var->VariableName[i] & 0xff) < 0)
- return true;
- }
-
- /* Reject it if there's 4 digits of hex and then further content */
- if (namelen > match + 4)
- return false;
-
- /* A valid entry must be at least 8 bytes */
- if (len < 8)
- return false;
-
- filepathlength = buffer[4] | buffer[5] << 8;
-
- /*
- * There's no stored length for the description, so it has to be
- * found by hand
- */
- desclength = ucs2_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
-
- /* Each boot entry must have a descriptor */
- if (!desclength)
- return false;
-
- /*
- * If the sum of the length of the description, the claimed filepath
- * length and the original header are greater than the length of the
- * variable, it's malformed
- */
- if ((desclength + filepathlength + 6) > len)
- return false;
-
- /*
- * And, finally, check the filepath
- */
- return validate_device_path(var, match, buffer + desclength + 6,
- filepathlength);
-}
-
-static bool
-validate_uint16(struct efi_variable *var, int match, u8 *buffer,
- unsigned long len)
-{
- /* A single 16-bit integer */
- if (len != 2)
- return false;
-
- return true;
-}
-
-static bool
-validate_ascii_string(struct efi_variable *var, int match, u8 *buffer,
- unsigned long len)
-{
- int i;
-
- for (i = 0; i < len; i++) {
- if (buffer[i] > 127)
- return false;
-
- if (buffer[i] == 0)
- return true;
- }
-
- return false;
-}
-
-struct variable_validate {
- char *name;
- bool (*validate)(struct efi_variable *var, int match, u8 *data,
- unsigned long len);
-};
-
-static const struct variable_validate variable_validate[] = {
- { "BootNext", validate_uint16 },
- { "BootOrder", validate_boot_order },
- { "DriverOrder", validate_boot_order },
- { "Boot*", validate_load_option },
- { "Driver*", validate_load_option },
- { "ConIn", validate_device_path },
- { "ConInDev", validate_device_path },
- { "ConOut", validate_device_path },
- { "ConOutDev", validate_device_path },
- { "ErrOut", validate_device_path },
- { "ErrOutDev", validate_device_path },
- { "Timeout", validate_uint16 },
- { "Lang", validate_ascii_string },
- { "PlatformLang", validate_ascii_string },
- { "", NULL },
-};
-
-static bool
-validate_var(struct efi_variable *var, u8 *data, unsigned long len)
-{
- int i;
- u16 *unicode_name = var->VariableName;
-
- for (i = 0; variable_validate[i].validate != NULL; i++) {
- const char *name = variable_validate[i].name;
- int match;
-
- for (match = 0; ; match++) {
- char c = name[match];
- u16 u = unicode_name[match];
-
- /* All special variables are plain ascii */
- if (u > 127)
- return true;
-
- /* Wildcard in the matching name means we've matched */
- if (c == '*')
- return variable_validate[i].validate(var,
- match, data, len);
-
- /* Case sensitive match */
- if (c != u)
- break;
-
- /* Reached the end of the string while matching */
- if (!c)
- return variable_validate[i].validate(var,
- match, data, len);
- }
- }
-
- return true;
-}
-
-static efi_status_t
-get_var_data_locked(struct efivars *efivars, struct efi_variable *var)
-{
- efi_status_t status;
-
- var->DataSize = 1024;
- status = efivars->ops->get_variable(var->VariableName,
- &var->VendorGuid,
- &var->Attributes,
- &var->DataSize,
- var->Data);
- return status;
-}
-
-static efi_status_t
-get_var_data(struct efivars *efivars, struct efi_variable *var)
-{
- efi_status_t status;
- unsigned long flags;
-
- spin_lock_irqsave(&efivars->lock, flags);
- status = get_var_data_locked(efivars, var);
- spin_unlock_irqrestore(&efivars->lock, flags);
-
- if (status != EFI_SUCCESS) {
- printk(KERN_WARNING "efivars: get_variable() failed 0x%lx!\n",
- status);
- }
- return status;
-}
-
-static efi_status_t
-check_var_size_locked(struct efivars *efivars, u32 attributes,
- unsigned long size)
-{
- const struct efivar_operations *fops = efivars->ops;
-
- if (!efivars->ops->query_variable_store)
- return EFI_UNSUPPORTED;
-
- return fops->query_variable_store(attributes, size);
-}
-
-
-static efi_status_t
-check_var_size(struct efivars *efivars, u32 attributes, unsigned long size)
-{
- efi_status_t status;
- unsigned long flags;
-
- spin_lock_irqsave(&efivars->lock, flags);
- status = check_var_size_locked(efivars, attributes, size);
- spin_unlock_irqrestore(&efivars->lock, flags);
-
- return status;
-}
-
-static ssize_t
-efivar_guid_read(struct efivar_entry *entry, char *buf)
-{
- struct efi_variable *var = &entry->var;
- char *str = buf;
-
- if (!entry || !buf)
- return 0;
-
- efi_guid_unparse(&var->VendorGuid, str);
- str += strlen(str);
- str += sprintf(str, "\n");
-
- return str - buf;
-}
-
-static ssize_t
-efivar_attr_read(struct efivar_entry *entry, char *buf)
-{
- struct efi_variable *var = &entry->var;
- char *str = buf;
- efi_status_t status;
-
- if (!entry || !buf)
- return -EINVAL;
-
- status = get_var_data(entry->efivars, var);
- if (status != EFI_SUCCESS)
- return -EIO;
-
- if (var->Attributes & EFI_VARIABLE_NON_VOLATILE)
- str += sprintf(str, "EFI_VARIABLE_NON_VOLATILE\n");
- if (var->Attributes & EFI_VARIABLE_BOOTSERVICE_ACCESS)
- str += sprintf(str, "EFI_VARIABLE_BOOTSERVICE_ACCESS\n");
- if (var->Attributes & EFI_VARIABLE_RUNTIME_ACCESS)
- str += sprintf(str, "EFI_VARIABLE_RUNTIME_ACCESS\n");
- if (var->Attributes & EFI_VARIABLE_HARDWARE_ERROR_RECORD)
- str += sprintf(str, "EFI_VARIABLE_HARDWARE_ERROR_RECORD\n");
- if (var->Attributes & EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS)
- str += sprintf(str,
- "EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS\n");
- if (var->Attributes &
- EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS)
- str += sprintf(str,
- "EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS\n");
- if (var->Attributes & EFI_VARIABLE_APPEND_WRITE)
- str += sprintf(str, "EFI_VARIABLE_APPEND_WRITE\n");
- return str - buf;
-}
-
-static ssize_t
-efivar_size_read(struct efivar_entry *entry, char *buf)
-{
- struct efi_variable *var = &entry->var;
- char *str = buf;
- efi_status_t status;
-
- if (!entry || !buf)
- return -EINVAL;
-
- status = get_var_data(entry->efivars, var);
- if (status != EFI_SUCCESS)
- return -EIO;
-
- str += sprintf(str, "0x%lx\n", var->DataSize);
- return str - buf;
-}
-
-static ssize_t
-efivar_data_read(struct efivar_entry *entry, char *buf)
-{
- struct efi_variable *var = &entry->var;
- efi_status_t status;
-
- if (!entry || !buf)
- return -EINVAL;
-
- status = get_var_data(entry->efivars, var);
- if (status != EFI_SUCCESS)
- return -EIO;
-
- memcpy(buf, var->Data, var->DataSize);
- return var->DataSize;
-}
-/*
- * We allow each variable to be edited via rewriting the
- * entire efi variable structure.
- */
-static ssize_t
-efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
-{
- struct efi_variable *new_var, *var = &entry->var;
- struct efivars *efivars = entry->efivars;
- efi_status_t status = EFI_NOT_FOUND;
-
- if (count != sizeof(struct efi_variable))
- return -EINVAL;
-
- new_var = (struct efi_variable *)buf;
- /*
- * If only updating the variable data, then the name
- * and guid should remain the same
- */
- if (memcmp(new_var->VariableName, var->VariableName, sizeof(var->VariableName)) ||
- efi_guidcmp(new_var->VendorGuid, var->VendorGuid)) {
- printk(KERN_ERR "efivars: Cannot edit the wrong variable!\n");
- return -EINVAL;
- }
-
- if ((new_var->DataSize <= 0) || (new_var->Attributes == 0)){
- printk(KERN_ERR "efivars: DataSize & Attributes must be valid!\n");
- return -EINVAL;
- }
-
- if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
- validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
- printk(KERN_ERR "efivars: Malformed variable content\n");
- return -EINVAL;
- }
-
- spin_lock_irq(&efivars->lock);
-
- status = check_var_size_locked(efivars, new_var->Attributes,
- new_var->DataSize + ucs2_strsize(new_var->VariableName, 1024));
-
- if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED)
- status = efivars->ops->set_variable(new_var->VariableName,
- &new_var->VendorGuid,
- new_var->Attributes,
- new_var->DataSize,
- new_var->Data);
-
- spin_unlock_irq(&efivars->lock);
-
- if (status != EFI_SUCCESS) {
- printk(KERN_WARNING "efivars: set_variable() failed: status=%lx\n",
- status);
- return -EIO;
- }
-
- memcpy(&entry->var, new_var, count);
- return count;
-}
-
-static ssize_t
-efivar_show_raw(struct efivar_entry *entry, char *buf)
-{
- struct efi_variable *var = &entry->var;
- efi_status_t status;
-
- if (!entry || !buf)
- return 0;
-
- status = get_var_data(entry->efivars, var);
- if (status != EFI_SUCCESS)
- return -EIO;
-
- memcpy(buf, var, sizeof(*var));
- return sizeof(*var);
-}
-
-/*
- * Generic read/write functions that call the specific functions of
- * the attributes...
- */
-static ssize_t efivar_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct efivar_entry *var = to_efivar_entry(kobj);
- struct efivar_attribute *efivar_attr = to_efivar_attr(attr);
- ssize_t ret = -EIO;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- if (efivar_attr->show) {
- ret = efivar_attr->show(var, buf);
- }
- return ret;
-}
-
-static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
-{
- struct efivar_entry *var = to_efivar_entry(kobj);
- struct efivar_attribute *efivar_attr = to_efivar_attr(attr);
- ssize_t ret = -EIO;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- if (efivar_attr->store)
- ret = efivar_attr->store(var, buf, count);
-
- return ret;
-}
-
-static const struct sysfs_ops efivar_attr_ops = {
- .show = efivar_attr_show,
- .store = efivar_attr_store,
-};
-
-static void efivar_release(struct kobject *kobj)
-{
- struct efivar_entry *var = container_of(kobj, struct efivar_entry, kobj);
- kfree(var);
-}
-
-static EFIVAR_ATTR(guid, 0400, efivar_guid_read, NULL);
-static EFIVAR_ATTR(attributes, 0400, efivar_attr_read, NULL);
-static EFIVAR_ATTR(size, 0400, efivar_size_read, NULL);
-static EFIVAR_ATTR(data, 0400, efivar_data_read, NULL);
-static EFIVAR_ATTR(raw_var, 0600, efivar_show_raw, efivar_store_raw);
-
-static struct attribute *def_attrs[] = {
- &efivar_attr_guid.attr,
- &efivar_attr_size.attr,
- &efivar_attr_attributes.attr,
- &efivar_attr_data.attr,
- &efivar_attr_raw_var.attr,
- NULL,
-};
-
-static struct kobj_type efivar_ktype = {
- .release = efivar_release,
- .sysfs_ops = &efivar_attr_ops,
- .default_attrs = def_attrs,
-};
-
-static inline void
-efivar_unregister(struct efivar_entry *var)
-{
- kobject_put(&var->kobj);
-}
-
-static int efivarfs_file_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
-static int efi_status_to_err(efi_status_t status)
-{
- int err;
-
- switch (status) {
- case EFI_INVALID_PARAMETER:
- err = -EINVAL;
- break;
- case EFI_OUT_OF_RESOURCES:
- err = -ENOSPC;
- break;
- case EFI_DEVICE_ERROR:
- err = -EIO;
- break;
- case EFI_WRITE_PROTECTED:
- err = -EROFS;
- break;
- case EFI_SECURITY_VIOLATION:
- err = -EACCES;
- break;
- case EFI_NOT_FOUND:
- err = -EIO;
- break;
- default:
- err = -EINVAL;
- }
-
- return err;
-}
-
-static ssize_t efivarfs_file_write(struct file *file,
- const char __user *userbuf, size_t count, loff_t *ppos)
-{
- struct efivar_entry *var = file->private_data;
- struct efivars *efivars;
- efi_status_t status;
- void *data;
- u32 attributes;
- struct inode *inode = file->f_mapping->host;
- unsigned long datasize = count - sizeof(attributes);
- unsigned long newdatasize, varsize;
- ssize_t bytes = 0;
-
- if (count < sizeof(attributes))
- return -EINVAL;
-
- if (copy_from_user(&attributes, userbuf, sizeof(attributes)))
- return -EFAULT;
-
- if (attributes & ~(EFI_VARIABLE_MASK))
- return -EINVAL;
-
- efivars = var->efivars;
-
- /*
- * Ensure that the user can't allocate arbitrarily large
- * amounts of memory. Pick a default size of 64K if
- * QueryVariableInfo() isn't supported by the firmware.
- */
-
- varsize = datasize + ucs2_strsize(var->var.VariableName, 1024);
- status = check_var_size(efivars, attributes, varsize);
-
- if (status != EFI_SUCCESS) {
- if (status != EFI_UNSUPPORTED)
- return efi_status_to_err(status);
-
- if (datasize > 65536)
- return -ENOSPC;
- }
-
- data = kmalloc(datasize, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- if (copy_from_user(data, userbuf + sizeof(attributes), datasize)) {
- bytes = -EFAULT;
- goto out;
- }
-
- if (validate_var(&var->var, data, datasize) == false) {
- bytes = -EINVAL;
- goto out;
- }
-
- /*
- * The lock here protects the get_variable call, the conditional
- * set_variable call, and removal of the variable from the efivars
- * list (in the case of an authenticated delete).
- */
- spin_lock_irq(&efivars->lock);
-
- /*
- * Ensure that the available space hasn't shrunk below the safe level
- */
-
- status = check_var_size_locked(efivars, attributes, varsize);
-
- if (status != EFI_SUCCESS && status != EFI_UNSUPPORTED) {
- spin_unlock_irq(&efivars->lock);
- kfree(data);
-
- return efi_status_to_err(status);
- }
-
- status = efivars->ops->set_variable(var->var.VariableName,
- &var->var.VendorGuid,
- attributes, datasize,
- data);
-
- if (status != EFI_SUCCESS) {
- spin_unlock_irq(&efivars->lock);
- kfree(data);
-
- return efi_status_to_err(status);
- }
-
- bytes = count;
-
- /*
- * Writing to the variable may have caused a change in size (which
- * could either be an append or an overwrite), or the variable to be
- * deleted. Perform a GetVariable() so we can tell what actually
- * happened.
- */
- newdatasize = 0;
- status = efivars->ops->get_variable(var->var.VariableName,
- &var->var.VendorGuid,
- NULL, &newdatasize,
- NULL);
-
- if (status == EFI_BUFFER_TOO_SMALL) {
- spin_unlock_irq(&efivars->lock);
- mutex_lock(&inode->i_mutex);
- i_size_write(inode, newdatasize + sizeof(attributes));
- mutex_unlock(&inode->i_mutex);
-
- } else if (status == EFI_NOT_FOUND) {
- list_del(&var->list);
- spin_unlock_irq(&efivars->lock);
- efivar_unregister(var);
- drop_nlink(inode);
- d_delete(file->f_dentry);
- dput(file->f_dentry);
-
- } else {
- spin_unlock_irq(&efivars->lock);
- pr_warn("efivarfs: inconsistent EFI variable implementation? "
- "status = %lx\n", status);
- }
-
-out:
- kfree(data);
-
- return bytes;
-}
-
-static ssize_t efivarfs_file_read(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- struct efivar_entry *var = file->private_data;
- struct efivars *efivars = var->efivars;
- efi_status_t status;
- unsigned long datasize = 0;
- u32 attributes;
- void *data;
- ssize_t size = 0;
-
- spin_lock_irq(&efivars->lock);
- status = efivars->ops->get_variable(var->var.VariableName,
- &var->var.VendorGuid,
- &attributes, &datasize, NULL);
- spin_unlock_irq(&efivars->lock);
-
- if (status != EFI_BUFFER_TOO_SMALL)
- return efi_status_to_err(status);
-
- data = kmalloc(datasize + sizeof(attributes), GFP_KERNEL);
-
- if (!data)
- return -ENOMEM;
-
- spin_lock_irq(&efivars->lock);
- status = efivars->ops->get_variable(var->var.VariableName,
- &var->var.VendorGuid,
- &attributes, &datasize,
- (data + sizeof(attributes)));
- spin_unlock_irq(&efivars->lock);
-
- if (status != EFI_SUCCESS) {
- size = efi_status_to_err(status);
- goto out_free;
- }
-
- memcpy(data, &attributes, sizeof(attributes));
- size = simple_read_from_buffer(userbuf, count, ppos,
- data, datasize + sizeof(attributes));
-out_free:
- kfree(data);
-
- return size;
-}
-
-static void efivarfs_evict_inode(struct inode *inode)
-{
- clear_inode(inode);
-}
-
-static const struct super_operations efivarfs_ops = {
- .statfs = simple_statfs,
- .drop_inode = generic_delete_inode,
- .evict_inode = efivarfs_evict_inode,
- .show_options = generic_show_options,
-};
-
-static struct super_block *efivarfs_sb;
-
-static const struct inode_operations efivarfs_dir_inode_operations;
-
-static const struct file_operations efivarfs_file_operations = {
- .open = efivarfs_file_open,
- .read = efivarfs_file_read,
- .write = efivarfs_file_write,
- .llseek = no_llseek,
-};
-
-static struct inode *efivarfs_get_inode(struct super_block *sb,
- const struct inode *dir, int mode, dev_t dev)
-{
- struct inode *inode = new_inode(sb);
-
- if (inode) {
- inode->i_ino = get_next_ino();
- inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- switch (mode & S_IFMT) {
- case S_IFREG:
- inode->i_fop = &efivarfs_file_operations;
- break;
- case S_IFDIR:
- inode->i_op = &efivarfs_dir_inode_operations;
- inode->i_fop = &simple_dir_operations;
- inc_nlink(inode);
- break;
- }
- }
- return inode;
-}
-
-/*
- * Return true if 'str' is a valid efivarfs filename of the form,
- *
- * VariableName-12345678-1234-1234-1234-1234567891bc
- */
-static bool efivarfs_valid_name(const char *str, int len)
-{
- static const char dashes[GUID_LEN] = {
- [8] = 1, [13] = 1, [18] = 1, [23] = 1
- };
- const char *s = str + len - GUID_LEN;
- int i;
-
- /*
- * We need a GUID, plus at least one letter for the variable name,
- * plus the '-' separator
- */
- if (len < GUID_LEN + 2)
- return false;
-
- /* GUID must be preceded by a '-' */
- if (*(s - 1) != '-')
- return false;
-
- /*
- * Validate that 's' is of the correct format, e.g.
- *
- * 12345678-1234-1234-1234-123456789abc
- */
- for (i = 0; i < GUID_LEN; i++) {
- if (dashes[i]) {
- if (*s++ != '-')
- return false;
- } else {
- if (!isxdigit(*s++))
- return false;
- }
- }
-
- return true;
-}
-
-static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid)
-{
- guid->b[0] = hex_to_bin(str[6]) << 4 | hex_to_bin(str[7]);
- guid->b[1] = hex_to_bin(str[4]) << 4 | hex_to_bin(str[5]);
- guid->b[2] = hex_to_bin(str[2]) << 4 | hex_to_bin(str[3]);
- guid->b[3] = hex_to_bin(str[0]) << 4 | hex_to_bin(str[1]);
- guid->b[4] = hex_to_bin(str[11]) << 4 | hex_to_bin(str[12]);
- guid->b[5] = hex_to_bin(str[9]) << 4 | hex_to_bin(str[10]);
- guid->b[6] = hex_to_bin(str[16]) << 4 | hex_to_bin(str[17]);
- guid->b[7] = hex_to_bin(str[14]) << 4 | hex_to_bin(str[15]);
- guid->b[8] = hex_to_bin(str[19]) << 4 | hex_to_bin(str[20]);
- guid->b[9] = hex_to_bin(str[21]) << 4 | hex_to_bin(str[22]);
- guid->b[10] = hex_to_bin(str[24]) << 4 | hex_to_bin(str[25]);
- guid->b[11] = hex_to_bin(str[26]) << 4 | hex_to_bin(str[27]);
- guid->b[12] = hex_to_bin(str[28]) << 4 | hex_to_bin(str[29]);
- guid->b[13] = hex_to_bin(str[30]) << 4 | hex_to_bin(str[31]);
- guid->b[14] = hex_to_bin(str[32]) << 4 | hex_to_bin(str[33]);
- guid->b[15] = hex_to_bin(str[34]) << 4 | hex_to_bin(str[35]);
-}
-
-static int efivarfs_create(struct inode *dir, struct dentry *dentry,
- umode_t mode, bool excl)
-{
- struct inode *inode;
- struct efivars *efivars = &__efivars;
- struct efivar_entry *var;
- int namelen, i = 0, err = 0;
-
- if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len))
- return -EINVAL;
-
- inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0);
- if (!inode)
- return -ENOMEM;
-
- var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
- if (!var) {
- err = -ENOMEM;
- goto out;
- }
-
- /* length of the variable name itself: remove GUID and separator */
- namelen = dentry->d_name.len - GUID_LEN - 1;
-
- efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1,
- &var->var.VendorGuid);
-
- for (i = 0; i < namelen; i++)
- var->var.VariableName[i] = dentry->d_name.name[i];
-
- var->var.VariableName[i] = '\0';
-
- inode->i_private = var;
- var->efivars = efivars;
- var->kobj.kset = efivars->kset;
-
- err = kobject_init_and_add(&var->kobj, &efivar_ktype, NULL, "%s",
- dentry->d_name.name);
- if (err)
- goto out;
-
- kobject_uevent(&var->kobj, KOBJ_ADD);
- spin_lock_irq(&efivars->lock);
- list_add(&var->list, &efivars->list);
- spin_unlock_irq(&efivars->lock);
- d_instantiate(dentry, inode);
- dget(dentry);
-out:
- if (err) {
- kfree(var);
- iput(inode);
- }
- return err;
-}
-
-static int efivarfs_unlink(struct inode *dir, struct dentry *dentry)
-{
- struct efivar_entry *var = dentry->d_inode->i_private;
- struct efivars *efivars = var->efivars;
- efi_status_t status;
-
- spin_lock_irq(&efivars->lock);
-
- status = efivars->ops->set_variable(var->var.VariableName,
- &var->var.VendorGuid,
- 0, 0, NULL);
-
- if (status == EFI_SUCCESS || status == EFI_NOT_FOUND) {
- list_del(&var->list);
- spin_unlock_irq(&efivars->lock);
- efivar_unregister(var);
- drop_nlink(dentry->d_inode);
- dput(dentry);
- return 0;
- }
-
- spin_unlock_irq(&efivars->lock);
- return -EINVAL;
-};
-
-/*
- * Compare two efivarfs file names.
- *
- * An efivarfs filename is composed of two parts,
- *
- * 1. A case-sensitive variable name
- * 2. A case-insensitive GUID
- *
- * So we need to perform a case-sensitive match on part 1 and a
- * case-insensitive match on part 2.
- */
-static int efivarfs_d_compare(const struct dentry *parent, const struct inode *pinode,
- const struct dentry *dentry, const struct inode *inode,
- unsigned int len, const char *str,
- const struct qstr *name)
-{
- int guid = len - GUID_LEN;
-
- if (name->len != len)
- return 1;
-
- /* Case-sensitive compare for the variable name */
- if (memcmp(str, name->name, guid))
- return 1;
-
- /* Case-insensitive compare for the GUID */
- return strncasecmp(name->name + guid, str + guid, GUID_LEN);
-}
-
-static int efivarfs_d_hash(const struct dentry *dentry,
- const struct inode *inode, struct qstr *qstr)
-{
- unsigned long hash = init_name_hash();
- const unsigned char *s = qstr->name;
- unsigned int len = qstr->len;
-
- if (!efivarfs_valid_name(s, len))
- return -EINVAL;
-
- while (len-- > GUID_LEN)
- hash = partial_name_hash(*s++, hash);
-
- /* GUID is case-insensitive. */
- while (len--)
- hash = partial_name_hash(tolower(*s++), hash);
-
- qstr->hash = end_name_hash(hash);
- return 0;
-}
-
-/*
- * Retaining negative dentries for an in-memory filesystem just wastes
- * memory and lookup time: arrange for them to be deleted immediately.
- */
-static int efivarfs_delete_dentry(const struct dentry *dentry)
-{
- return 1;
-}
-
-static struct dentry_operations efivarfs_d_ops = {
- .d_compare = efivarfs_d_compare,
- .d_hash = efivarfs_d_hash,
- .d_delete = efivarfs_delete_dentry,
-};
-
-static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name)
-{
- struct dentry *d;
- struct qstr q;
- int err;
-
- q.name = name;
- q.len = strlen(name);
-
- err = efivarfs_d_hash(NULL, NULL, &q);
- if (err)
- return ERR_PTR(err);
-
- d = d_alloc(parent, &q);
- if (d)
- return d;
-
- return ERR_PTR(-ENOMEM);
-}
-
-static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
-{
- struct inode *inode = NULL;
- struct dentry *root;
- struct efivar_entry *entry, *n;
- struct efivars *efivars = &__efivars;
- char *name;
- int err = -ENOMEM;
-
- efivarfs_sb = sb;
-
- sb->s_maxbytes = MAX_LFS_FILESIZE;
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
- sb->s_magic = EFIVARFS_MAGIC;
- sb->s_op = &efivarfs_ops;
- sb->s_d_op = &efivarfs_d_ops;
- sb->s_time_gran = 1;
-
- inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0);
- if (!inode)
- return -ENOMEM;
- inode->i_op = &efivarfs_dir_inode_operations;
-
- root = d_make_root(inode);
- sb->s_root = root;
- if (!root)
- return -ENOMEM;
-
- list_for_each_entry_safe(entry, n, &efivars->list, list) {
- struct dentry *dentry, *root = efivarfs_sb->s_root;
- unsigned long size = 0;
- int len, i;
-
- inode = NULL;
-
- len = ucs2_strlen(entry->var.VariableName);
-
- /* name, plus '-', plus GUID, plus NUL*/
- name = kmalloc(len + 1 + GUID_LEN + 1, GFP_ATOMIC);
- if (!name)
- goto fail;
-
- for (i = 0; i < len; i++)
- name[i] = entry->var.VariableName[i] & 0xFF;
-
- name[len] = '-';
-
- efi_guid_unparse(&entry->var.VendorGuid, name + len + 1);
-
- name[len+GUID_LEN+1] = '\0';
-
- inode = efivarfs_get_inode(efivarfs_sb, root->d_inode,
- S_IFREG | 0644, 0);
- if (!inode)
- goto fail_name;
-
- dentry = efivarfs_alloc_dentry(root, name);
- if (IS_ERR(dentry)) {
- err = PTR_ERR(dentry);
- goto fail_inode;
- }
-
- /* copied by the above to local storage in the dentry. */
- kfree(name);
-
- spin_lock_irq(&efivars->lock);
- efivars->ops->get_variable(entry->var.VariableName,
- &entry->var.VendorGuid,
- &entry->var.Attributes,
- &size,
- NULL);
- spin_unlock_irq(&efivars->lock);
-
- mutex_lock(&inode->i_mutex);
- inode->i_private = entry;
- i_size_write(inode, size + sizeof(entry->var.Attributes));
- mutex_unlock(&inode->i_mutex);
- d_add(dentry, inode);
- }
-
- return 0;
-
-fail_inode:
- iput(inode);
-fail_name:
- kfree(name);
-fail:
- return err;
-}
-
-static struct dentry *efivarfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
-{
- return mount_single(fs_type, flags, data, efivarfs_fill_super);
-}
-
-static void efivarfs_kill_sb(struct super_block *sb)
-{
- kill_litter_super(sb);
- efivarfs_sb = NULL;
-}
-
-static struct file_system_type efivarfs_type = {
- .name = "efivarfs",
- .mount = efivarfs_mount,
- .kill_sb = efivarfs_kill_sb,
-};
-MODULE_ALIAS_FS("efivarfs");
-
-/*
- * Handle negative dentry.
- */
-static struct dentry *efivarfs_lookup(struct inode *dir, struct dentry *dentry,
- unsigned int flags)
-{
- if (dentry->d_name.len > NAME_MAX)
- return ERR_PTR(-ENAMETOOLONG);
- d_add(dentry, NULL);
- return NULL;
-}
-
-static const struct inode_operations efivarfs_dir_inode_operations = {
- .lookup = efivarfs_lookup,
- .unlink = efivarfs_unlink,
- .create = efivarfs_create,
-};
-
-#ifdef CONFIG_EFI_VARS_PSTORE
-
-static int efi_pstore_open(struct pstore_info *psi)
-{
- struct efivars *efivars = psi->data;
-
- spin_lock_irq(&efivars->lock);
- efivars->walk_entry = list_first_entry(&efivars->list,
- struct efivar_entry, list);
- return 0;
-}
-
-static int efi_pstore_close(struct pstore_info *psi)
-{
- struct efivars *efivars = psi->data;
-
- spin_unlock_irq(&efivars->lock);
- return 0;
-}
-
-static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
- int *count, struct timespec *timespec,
- char **buf, struct pstore_info *psi)
-{
- efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
- struct efivars *efivars = psi->data;
- char name[DUMP_NAME_LEN];
- int i;
- int cnt;
- unsigned int part, size;
- unsigned long time;
-
- while (&efivars->walk_entry->list != &efivars->list) {
- if (!efi_guidcmp(efivars->walk_entry->var.VendorGuid,
- vendor)) {
- for (i = 0; i < DUMP_NAME_LEN; i++) {
- name[i] = efivars->walk_entry->var.VariableName[i];
- }
- if (sscanf(name, "dump-type%u-%u-%d-%lu",
- type, &part, &cnt, &time) == 4) {
- *id = part;
- *count = cnt;
- timespec->tv_sec = time;
- timespec->tv_nsec = 0;
- } else if (sscanf(name, "dump-type%u-%u-%lu",
- type, &part, &time) == 3) {
- /*
- * Check if an old format,
- * which doesn't support holding
- * multiple logs, remains.
- */
- *id = part;
- *count = 0;
- timespec->tv_sec = time;
- timespec->tv_nsec = 0;
- } else {
- efivars->walk_entry = list_entry(
- efivars->walk_entry->list.next,
- struct efivar_entry, list);
- continue;
- }
-
- get_var_data_locked(efivars, &efivars->walk_entry->var);
- size = efivars->walk_entry->var.DataSize;
- *buf = kmalloc(size, GFP_KERNEL);
- if (*buf == NULL)
- return -ENOMEM;
- memcpy(*buf, efivars->walk_entry->var.Data,
- size);
- efivars->walk_entry = list_entry(
- efivars->walk_entry->list.next,
- struct efivar_entry, list);
- return size;
- }
- efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
- struct efivar_entry, list);
- }
- return 0;
-}
-
-static int efi_pstore_write(enum pstore_type_id type,
- enum kmsg_dump_reason reason, u64 *id,
- unsigned int part, int count, size_t size,
- struct pstore_info *psi)
-{
- char name[DUMP_NAME_LEN];
- efi_char16_t efi_name[DUMP_NAME_LEN];
- efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
- struct efivars *efivars = psi->data;
- int i, ret = 0;
- efi_status_t status = EFI_NOT_FOUND;
- unsigned long flags;
-
- if (pstore_cannot_block_path(reason)) {
- /*
- * If the lock is taken by another cpu in non-blocking path,
- * this driver returns without entering firmware to avoid
- * hanging up.
- */
- if (!spin_trylock_irqsave(&efivars->lock, flags))
- return -EBUSY;
- } else
- spin_lock_irqsave(&efivars->lock, flags);
-
- /*
- * Check if there is a space enough to log.
- * size: a size of logging data
- * DUMP_NAME_LEN * 2: a maximum size of variable name
- */
-
- status = check_var_size_locked(efivars, PSTORE_EFI_ATTRIBUTES,
- size + DUMP_NAME_LEN * 2);
-
- if (status) {
- spin_unlock_irqrestore(&efivars->lock, flags);
- *id = part;
- return -ENOSPC;
- }
-
- sprintf(name, "dump-type%u-%u-%d-%lu", type, part, count,
- get_seconds());
-
- for (i = 0; i < DUMP_NAME_LEN; i++)
- efi_name[i] = name[i];
-
- efivars->ops->set_variable(efi_name, &vendor, PSTORE_EFI_ATTRIBUTES,
- size, psi->buf);
-
- spin_unlock_irqrestore(&efivars->lock, flags);
-
- if (reason == KMSG_DUMP_OOPS && efivar_wq_enabled)
- schedule_work(&efivar_work);
-
- *id = part;
- return ret;
-};
-
-static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
- struct timespec time, struct pstore_info *psi)
-{
- char name[DUMP_NAME_LEN];
- efi_char16_t efi_name[DUMP_NAME_LEN];
- char name_old[DUMP_NAME_LEN];
- efi_char16_t efi_name_old[DUMP_NAME_LEN];
- efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
- struct efivars *efivars = psi->data;
- struct efivar_entry *entry, *found = NULL;
- int i;
-
- sprintf(name, "dump-type%u-%u-%d-%lu", type, (unsigned int)id, count,
- time.tv_sec);
-
- spin_lock_irq(&efivars->lock);
-
- for (i = 0; i < DUMP_NAME_LEN; i++)
- efi_name[i] = name[i];
-
- /*
- * Clean up an entry with the same name
- */
-
- list_for_each_entry(entry, &efivars->list, list) {
- get_var_data_locked(efivars, &entry->var);
-
- if (efi_guidcmp(entry->var.VendorGuid, vendor))
- continue;
- if (ucs2_strncmp(entry->var.VariableName, efi_name,
- ucs2_strlen(efi_name))) {
- /*
- * Check if an old format,
- * which doesn't support holding
- * multiple logs, remains.
- */
- sprintf(name_old, "dump-type%u-%u-%lu", type,
- (unsigned int)id, time.tv_sec);
-
- for (i = 0; i < DUMP_NAME_LEN; i++)
- efi_name_old[i] = name_old[i];
-
- if (ucs2_strncmp(entry->var.VariableName, efi_name_old,
- ucs2_strlen(efi_name_old)))
- continue;
- }
-
- /* found */
- found = entry;
- efivars->ops->set_variable(entry->var.VariableName,
- &entry->var.VendorGuid,
- PSTORE_EFI_ATTRIBUTES,
- 0, NULL);
- break;
- }
-
- if (found)
- list_del(&found->list);
-
- spin_unlock_irq(&efivars->lock);
-
- if (found)
- efivar_unregister(found);
-
- return 0;
-}
-
-static struct pstore_info efi_pstore_info = {
- .owner = THIS_MODULE,
- .name = "efi",
- .open = efi_pstore_open,
- .close = efi_pstore_close,
- .read = efi_pstore_read,
- .write = efi_pstore_write,
- .erase = efi_pstore_erase,
-};
-
-static void efivar_pstore_register(struct efivars *efivars)
-{
- efivars->efi_pstore_info = efi_pstore_info;
- efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
- if (efivars->efi_pstore_info.buf) {
- efivars->efi_pstore_info.bufsize = 1024;
- efivars->efi_pstore_info.data = efivars;
- spin_lock_init(&efivars->efi_pstore_info.buf_lock);
- pstore_register(&efivars->efi_pstore_info);
- }
-}
-#else
-static void efivar_pstore_register(struct efivars *efivars)
-{
- return;
-}
-#endif
-
-static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
-{
- struct efi_variable *new_var = (struct efi_variable *)buf;
- struct efivars *efivars = bin_attr->private;
- struct efivar_entry *search_efivar, *n;
- unsigned long strsize1, strsize2;
- efi_status_t status = EFI_NOT_FOUND;
- int found = 0;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
- validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
- printk(KERN_ERR "efivars: Malformed variable content\n");
- return -EINVAL;
- }
-
- spin_lock_irq(&efivars->lock);
-
- /*
- * Does this variable already exist?
- */
- list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
- strsize1 = ucs2_strsize(search_efivar->var.VariableName, 1024);
- strsize2 = ucs2_strsize(new_var->VariableName, 1024);
- if (strsize1 == strsize2 &&
- !memcmp(&(search_efivar->var.VariableName),
- new_var->VariableName, strsize1) &&
- !efi_guidcmp(search_efivar->var.VendorGuid,
- new_var->VendorGuid)) {
- found = 1;
- break;
- }
- }
- if (found) {
- spin_unlock_irq(&efivars->lock);
- return -EINVAL;
- }
-
- status = check_var_size_locked(efivars, new_var->Attributes,
- new_var->DataSize + ucs2_strsize(new_var->VariableName, 1024));
-
- if (status && status != EFI_UNSUPPORTED) {
- spin_unlock_irq(&efivars->lock);
- return efi_status_to_err(status);
- }
-
- /* now *really* create the variable via EFI */
- status = efivars->ops->set_variable(new_var->VariableName,
- &new_var->VendorGuid,
- new_var->Attributes,
- new_var->DataSize,
- new_var->Data);
-
- if (status != EFI_SUCCESS) {
- printk(KERN_WARNING "efivars: set_variable() failed: status=%lx\n",
- status);
- spin_unlock_irq(&efivars->lock);
- return -EIO;
- }
- spin_unlock_irq(&efivars->lock);
-
- /* Create the entry in sysfs. Locking is not required here */
- status = efivar_create_sysfs_entry(efivars,
- ucs2_strsize(new_var->VariableName,
- 1024),
- new_var->VariableName,
- &new_var->VendorGuid);
- if (status) {
- printk(KERN_WARNING "efivars: variable created, but sysfs entry wasn't.\n");
- }
- return count;
-}
-
-static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
-{
- struct efi_variable *del_var = (struct efi_variable *)buf;
- struct efivars *efivars = bin_attr->private;
- struct efivar_entry *search_efivar, *n;
- unsigned long strsize1, strsize2;
- efi_status_t status = EFI_NOT_FOUND;
- int found = 0;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- spin_lock_irq(&efivars->lock);
-
- /*
- * Does this variable already exist?
- */
- list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
- strsize1 = ucs2_strsize(search_efivar->var.VariableName, 1024);
- strsize2 = ucs2_strsize(del_var->VariableName, 1024);
- if (strsize1 == strsize2 &&
- !memcmp(&(search_efivar->var.VariableName),
- del_var->VariableName, strsize1) &&
- !efi_guidcmp(search_efivar->var.VendorGuid,
- del_var->VendorGuid)) {
- found = 1;
- break;
- }
- }
- if (!found) {
- spin_unlock_irq(&efivars->lock);
- return -EINVAL;
- }
- /* force the Attributes/DataSize to 0 to ensure deletion */
- del_var->Attributes = 0;
- del_var->DataSize = 0;
-
- status = efivars->ops->set_variable(del_var->VariableName,
- &del_var->VendorGuid,
- del_var->Attributes,
- del_var->DataSize,
- del_var->Data);
-
- if (status != EFI_SUCCESS) {
- printk(KERN_WARNING "efivars: set_variable() failed: status=%lx\n",
- status);
- spin_unlock_irq(&efivars->lock);
- return -EIO;
- }
- list_del(&search_efivar->list);
- /* We need to release this lock before unregistering. */
- spin_unlock_irq(&efivars->lock);
- efivar_unregister(search_efivar);
-
- /* It's dead Jim.... */
- return count;
-}
-
-static bool variable_is_present(struct efivars *efivars,
- efi_char16_t *variable_name,
- efi_guid_t *vendor)
-{
- struct efivar_entry *entry, *n;
- unsigned long strsize1, strsize2;
- bool found = false;
-
- strsize1 = ucs2_strsize(variable_name, 1024);
- list_for_each_entry_safe(entry, n, &efivars->list, list) {
- strsize2 = ucs2_strsize(entry->var.VariableName, 1024);
- if (strsize1 == strsize2 &&
- !memcmp(variable_name, &(entry->var.VariableName),
- strsize2) &&
- !efi_guidcmp(entry->var.VendorGuid,
- *vendor)) {
- found = true;
- break;
- }
- }
- return found;
-}
-
-/*
- * Returns the size of variable_name, in bytes, including the
- * terminating NULL character, or variable_name_size if no NULL
- * character is found among the first variable_name_size bytes.
- */
-static unsigned long var_name_strnsize(efi_char16_t *variable_name,
- unsigned long variable_name_size)
-{
- unsigned long len;
- efi_char16_t c;
-
- /*
- * The variable name is, by definition, a NULL-terminated
- * string, so make absolutely sure that variable_name_size is
- * the value we expect it to be. If not, return the real size.
- */
- for (len = 2; len <= variable_name_size; len += sizeof(c)) {
- c = variable_name[(len / sizeof(c)) - 1];
- if (!c)
- break;
- }
-
- return min(len, variable_name_size);
-}
-
-static void efivar_update_sysfs_entries(struct work_struct *work)
-{
- struct efivars *efivars = &__efivars;
- efi_guid_t vendor;
- efi_char16_t *variable_name;
- unsigned long variable_name_size = 1024;
- efi_status_t status = EFI_NOT_FOUND;
- bool found;
-
- /* Add new sysfs entries */
- while (1) {
- variable_name = kzalloc(variable_name_size, GFP_KERNEL);
- if (!variable_name) {
- pr_err("efivars: Memory allocation failed.\n");
- return;
- }
-
- spin_lock_irq(&efivars->lock);
- found = false;
- while (1) {
- variable_name_size = 1024;
- status = efivars->ops->get_next_variable(
- &variable_name_size,
- variable_name,
- &vendor);
- if (status != EFI_SUCCESS) {
- break;
- } else {
- if (!variable_is_present(efivars,
- variable_name, &vendor)) {
- found = true;
- break;
- }
- }
- }
- spin_unlock_irq(&efivars->lock);
-
- if (!found) {
- kfree(variable_name);
- break;
- } else {
- variable_name_size = var_name_strnsize(variable_name,
- variable_name_size);
- efivar_create_sysfs_entry(efivars,
- variable_name_size,
- variable_name, &vendor);
- }
- }
-}
-
-/*
- * Let's not leave out systab information that snuck into
- * the efivars driver
- */
-static ssize_t systab_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- char *str = buf;
-
- if (!kobj || !buf)
- return -EINVAL;
-
- if (efi.mps != EFI_INVALID_TABLE_ADDR)
- str += sprintf(str, "MPS=0x%lx\n", efi.mps);
- if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
- str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
- if (efi.acpi != EFI_INVALID_TABLE_ADDR)
- str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
- if (efi.smbios != EFI_INVALID_TABLE_ADDR)
- str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
- if (efi.hcdp != EFI_INVALID_TABLE_ADDR)
- str += sprintf(str, "HCDP=0x%lx\n", efi.hcdp);
- if (efi.boot_info != EFI_INVALID_TABLE_ADDR)
- str += sprintf(str, "BOOTINFO=0x%lx\n", efi.boot_info);
- if (efi.uga != EFI_INVALID_TABLE_ADDR)
- str += sprintf(str, "UGA=0x%lx\n", efi.uga);
-
- return str - buf;
-}
-
-static struct kobj_attribute efi_attr_systab =
- __ATTR(systab, 0400, systab_show, NULL);
-
-static struct attribute *efi_subsys_attrs[] = {
- &efi_attr_systab.attr,
- NULL, /* maybe more in the future? */
-};
-
-static struct attribute_group efi_subsys_attr_group = {
- .attrs = efi_subsys_attrs,
-};
-
-static struct kobject *efi_kobj;
-
-/*
- * efivar_create_sysfs_entry()
- * Requires:
- * variable_name_size = number of bytes required to hold
- * variable_name (not counting the NULL
- * character at the end.
- * efivars->lock is not held on entry or exit.
- * Returns 1 on failure, 0 on success
- */
-static int
-efivar_create_sysfs_entry(struct efivars *efivars,
- unsigned long variable_name_size,
- efi_char16_t *variable_name,
- efi_guid_t *vendor_guid)
-{
- int i, short_name_size;
- char *short_name;
- struct efivar_entry *new_efivar;
-
- /*
- * Length of the variable bytes in ASCII, plus the '-' separator,
- * plus the GUID, plus trailing NUL
- */
- short_name_size = variable_name_size / sizeof(efi_char16_t)
- + 1 + GUID_LEN + 1;
-
- short_name = kzalloc(short_name_size, GFP_KERNEL);
- new_efivar = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
-
- if (!short_name || !new_efivar) {
- kfree(short_name);
- kfree(new_efivar);
- return 1;
- }
-
- new_efivar->efivars = efivars;
- memcpy(new_efivar->var.VariableName, variable_name,
- variable_name_size);
- memcpy(&(new_efivar->var.VendorGuid), vendor_guid, sizeof(efi_guid_t));
-
- /* Convert Unicode to normal chars (assume top bits are 0),
- ala UTF-8 */
- for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) {
- short_name[i] = variable_name[i] & 0xFF;
- }
- /* This is ugly, but necessary to separate one vendor's
- private variables from another's. */
-
- *(short_name + strlen(short_name)) = '-';
- efi_guid_unparse(vendor_guid, short_name + strlen(short_name));
-
- new_efivar->kobj.kset = efivars->kset;
- i = kobject_init_and_add(&new_efivar->kobj, &efivar_ktype, NULL,
- "%s", short_name);
- if (i) {
- kfree(short_name);
- kfree(new_efivar);
- return 1;
- }
-
- kobject_uevent(&new_efivar->kobj, KOBJ_ADD);
- kfree(short_name);
- short_name = NULL;
-
- spin_lock_irq(&efivars->lock);
- list_add(&new_efivar->list, &efivars->list);
- spin_unlock_irq(&efivars->lock);
-
- return 0;
-}
-
-static int
-create_efivars_bin_attributes(struct efivars *efivars)
-{
- struct bin_attribute *attr;
- int error;
-
- /* new_var */
- attr = kzalloc(sizeof(*attr), GFP_KERNEL);
- if (!attr)
- return -ENOMEM;
-
- attr->attr.name = "new_var";
- attr->attr.mode = 0200;
- attr->write = efivar_create;
- attr->private = efivars;
- efivars->new_var = attr;
-
- /* del_var */
- attr = kzalloc(sizeof(*attr), GFP_KERNEL);
- if (!attr) {
- error = -ENOMEM;
- goto out_free;
- }
- attr->attr.name = "del_var";
- attr->attr.mode = 0200;
- attr->write = efivar_delete;
- attr->private = efivars;
- efivars->del_var = attr;
-
- sysfs_bin_attr_init(efivars->new_var);
- sysfs_bin_attr_init(efivars->del_var);
-
- /* Register */
- error = sysfs_create_bin_file(&efivars->kset->kobj,
- efivars->new_var);
- if (error) {
- printk(KERN_ERR "efivars: unable to create new_var sysfs file"
- " due to error %d\n", error);
- goto out_free;
- }
- error = sysfs_create_bin_file(&efivars->kset->kobj,
- efivars->del_var);
- if (error) {
- printk(KERN_ERR "efivars: unable to create del_var sysfs file"
- " due to error %d\n", error);
- sysfs_remove_bin_file(&efivars->kset->kobj,
- efivars->new_var);
- goto out_free;
- }
-
- return 0;
-out_free:
- kfree(efivars->del_var);
- efivars->del_var = NULL;
- kfree(efivars->new_var);
- efivars->new_var = NULL;
- return error;
-}
-
-void unregister_efivars(struct efivars *efivars)
-{
- struct efivar_entry *entry, *n;
-
- list_for_each_entry_safe(entry, n, &efivars->list, list) {
- spin_lock_irq(&efivars->lock);
- list_del(&entry->list);
- spin_unlock_irq(&efivars->lock);
- efivar_unregister(entry);
- }
- if (efivars->new_var)
- sysfs_remove_bin_file(&efivars->kset->kobj, efivars->new_var);
- if (efivars->del_var)
- sysfs_remove_bin_file(&efivars->kset->kobj, efivars->del_var);
- kfree(efivars->new_var);
- kfree(efivars->del_var);
- kobject_put(efivars->kobject);
- kset_unregister(efivars->kset);
-}
-EXPORT_SYMBOL_GPL(unregister_efivars);
-
-/*
- * Print a warning when duplicate EFI variables are encountered and
- * disable the sysfs workqueue since the firmware is buggy.
- */
-static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid,
- unsigned long len16)
-{
- size_t i, len8 = len16 / sizeof(efi_char16_t);
- char *s8;
-
- /*
- * Disable the workqueue since the algorithm it uses for
- * detecting new variables won't work with this buggy
- * implementation of GetNextVariableName().
- */
- efivar_wq_enabled = false;
-
- s8 = kzalloc(len8, GFP_KERNEL);
- if (!s8)
- return;
-
- for (i = 0; i < len8; i++)
- s8[i] = s16[i];
-
- printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n",
- s8, vendor_guid);
- kfree(s8);
-}
-
-int register_efivars(struct efivars *efivars,
- const struct efivar_operations *ops,
- struct kobject *parent_kobj)
-{
- efi_status_t status = EFI_NOT_FOUND;
- efi_guid_t vendor_guid;
- efi_char16_t *variable_name;
- unsigned long variable_name_size = 1024;
- int error = 0;
-
- variable_name = kzalloc(variable_name_size, GFP_KERNEL);
- if (!variable_name) {
- printk(KERN_ERR "efivars: Memory allocation failed.\n");
- return -ENOMEM;
- }
-
- spin_lock_init(&efivars->lock);
- INIT_LIST_HEAD(&efivars->list);
- efivars->ops = ops;
-
- efivars->kset = kset_create_and_add("vars", NULL, parent_kobj);
- if (!efivars->kset) {
- printk(KERN_ERR "efivars: Subsystem registration failed.\n");
- error = -ENOMEM;
- goto out;
- }
-
- efivars->kobject = kobject_create_and_add("efivars", parent_kobj);
- if (!efivars->kobject) {
- pr_err("efivars: Subsystem registration failed.\n");
- error = -ENOMEM;
- kset_unregister(efivars->kset);
- goto out;
- }
-
- /*
- * Per EFI spec, the maximum storage allocated for both
- * the variable name and variable data is 1024 bytes.
- */
-
- do {
- variable_name_size = 1024;
-
- status = ops->get_next_variable(&variable_name_size,
- variable_name,
- &vendor_guid);
- switch (status) {
- case EFI_SUCCESS:
- variable_name_size = var_name_strnsize(variable_name,
- variable_name_size);
-
- /*
- * Some firmware implementations return the
- * same variable name on multiple calls to
- * get_next_variable(). Terminate the loop
- * immediately as there is no guarantee that
- * we'll ever see a different variable name,
- * and may end up looping here forever.
- */
- if (variable_is_present(efivars, variable_name,
- &vendor_guid)) {
- dup_variable_bug(variable_name, &vendor_guid,
- variable_name_size);
- status = EFI_NOT_FOUND;
- break;
- }
-
- efivar_create_sysfs_entry(efivars,
- variable_name_size,
- variable_name,
- &vendor_guid);
- break;
- case EFI_NOT_FOUND:
- break;
- default:
- printk(KERN_WARNING "efivars: get_next_variable: status=%lx\n",
- status);
- status = EFI_NOT_FOUND;
- break;
- }
- } while (status != EFI_NOT_FOUND);
-
- error = create_efivars_bin_attributes(efivars);
- if (error)
- unregister_efivars(efivars);
-
- if (!efivars_pstore_disable)
- efivar_pstore_register(efivars);
-
- register_filesystem(&efivarfs_type);
-
-out:
- kfree(variable_name);
-
- return error;
-}
-EXPORT_SYMBOL_GPL(register_efivars);
-
-/*
- * For now we register the efi subsystem with the firmware subsystem
- * and the vars subsystem with the efi subsystem. In the future, it
- * might make sense to split off the efi subsystem into its own
- * driver, but for now only efivars will register with it, so just
- * include it here.
- */
-
-static int __init
-efivars_init(void)
-{
- int error = 0;
-
- printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,
- EFIVARS_DATE);
-
- if (!efi_enabled(EFI_RUNTIME_SERVICES))
- return 0;
-
- /* For now we'll register the efi directory at /sys/firmware/efi */
- efi_kobj = kobject_create_and_add("efi", firmware_kobj);
- if (!efi_kobj) {
- printk(KERN_ERR "efivars: Firmware registration failed.\n");
- return -ENOMEM;
- }
-
- ops.get_variable = efi.get_variable;
- ops.set_variable = efi.set_variable;
- ops.get_next_variable = efi.get_next_variable;
- ops.query_variable_store = efi_query_variable_store;
-
- error = register_efivars(&__efivars, &ops, efi_kobj);
- if (error)
- goto err_put;
-
- /* Don't forget the systab entry */
- error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
- if (error) {
- printk(KERN_ERR
- "efivars: Sysfs attribute export failed with error %d.\n",
- error);
- goto err_unregister;
- }
-
- return 0;
-
-err_unregister:
- unregister_efivars(&__efivars);
-err_put:
- kobject_put(efi_kobj);
- return error;
-}
-
-static void __exit
-efivars_exit(void)
-{
- cancel_work_sync(&efivar_work);
-
- if (efi_enabled(EFI_RUNTIME_SERVICES)) {
- unregister_efivars(&__efivars);
- kobject_put(efi_kobj);
- }
-}
-
-module_init(efivars_init);
-module_exit(efivars_exit);
-
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index 91ddf0f..acba0b9 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -28,6 +28,7 @@
#include <linux/reboot.h>
#include <linux/efi.h>
#include <linux/module.h>
+#include <linux/ucs2_string.h>
#define GSMI_SHUTDOWN_CLEAN 0 /* Clean Shutdown */
/* TODO(mikew@google.com): Tie in HARDLOCKUP_DETECTOR with NMIWDT */
@@ -288,17 +289,6 @@ static int gsmi_exec(u8 func, u8 sub)
return rc;
}
-/* Return the number of unicode characters in data */
-static size_t
-utf16_strlen(efi_char16_t *data, unsigned long maxlength)
-{
- unsigned long length = 0;
-
- while (*data++ != 0 && length < maxlength)
- length++;
- return length;
-}
-
static efi_status_t gsmi_get_variable(efi_char16_t *name,
efi_guid_t *vendor, u32 *attr,
unsigned long *data_size,
@@ -311,7 +301,7 @@ static efi_status_t gsmi_get_variable(efi_char16_t *name,
};
efi_status_t ret = EFI_SUCCESS;
unsigned long flags;
- size_t name_len = utf16_strlen(name, GSMI_BUF_SIZE / 2);
+ size_t name_len = ucs2_strnlen(name, GSMI_BUF_SIZE / 2);
int rc;
if (name_len >= GSMI_BUF_SIZE / 2)
@@ -380,7 +370,7 @@ static efi_status_t gsmi_get_next_variable(unsigned long *name_size,
return EFI_BAD_BUFFER_SIZE;
/* Let's make sure the thing is at least null-terminated */
- if (utf16_strlen(name, GSMI_BUF_SIZE / 2) == GSMI_BUF_SIZE / 2)
+ if (ucs2_strnlen(name, GSMI_BUF_SIZE / 2) == GSMI_BUF_SIZE / 2)
return EFI_INVALID_PARAMETER;
spin_lock_irqsave(&gsmi_dev.lock, flags);
@@ -408,7 +398,7 @@ static efi_status_t gsmi_get_next_variable(unsigned long *name_size,
/* Copy the name back */
memcpy(name, gsmi_dev.name_buf->start, GSMI_BUF_SIZE);
- *name_size = utf16_strlen(name, GSMI_BUF_SIZE / 2) * 2;
+ *name_size = ucs2_strnlen(name, GSMI_BUF_SIZE / 2) * 2;
/* copy guid to return buffer */
memcpy(vendor, &param.guid, sizeof(param.guid));
@@ -434,7 +424,7 @@ static efi_status_t gsmi_set_variable(efi_char16_t *name,
EFI_VARIABLE_BOOTSERVICE_ACCESS |
EFI_VARIABLE_RUNTIME_ACCESS,
};
- size_t name_len = utf16_strlen(name, GSMI_BUF_SIZE / 2);
+ size_t name_len = ucs2_strnlen(name, GSMI_BUF_SIZE / 2);
efi_status_t ret = EFI_SUCCESS;
int rc;
unsigned long flags;
@@ -893,12 +883,19 @@ static __init int gsmi_init(void)
goto out_remove_bin_file;
}
- ret = register_efivars(&efivars, &efivar_ops, gsmi_kobj);
+ ret = efivars_register(&efivars, &efivar_ops, gsmi_kobj);
if (ret) {
printk(KERN_INFO "gsmi: Failed to register efivars\n");
goto out_remove_sysfs_files;
}
+ ret = efivars_sysfs_init();
+ if (ret) {
+ printk(KERN_INFO "gsmi: Failed to create efivars files\n");
+ efivars_unregister(&efivars);
+ goto out_remove_sysfs_files;
+ }
+
register_reboot_notifier(&gsmi_reboot_notifier);
register_die_notifier(&gsmi_die_notifier);
atomic_notifier_chain_register(&panic_notifier_list,
@@ -930,7 +927,7 @@ static void __exit gsmi_exit(void)
unregister_die_notifier(&gsmi_die_notifier);
atomic_notifier_chain_unregister(&panic_notifier_list,
&gsmi_panic_notifier);
- unregister_efivars(&efivars);
+ efivars_unregister(&efivars);
sysfs_remove_files(gsmi_kobj, gsmi_attrs);
sysfs_remove_bin_file(gsmi_kobj, &eventlog_bin_attr);
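The gsmi hunk above replaces the driver's private utf16_strlen() helper with ucs2_strnlen() from the shared lib/ucs2_string code. A minimal sketch of the equivalence, assuming ucs2_strnlen() counts 16-bit code units up to a caller-supplied maximum exactly as the removed helper did (standalone, with efi_char16_t approximated by uint16_t):

	/* Sketch only: mirrors the removed utf16_strlen() body above. */
	#include <stdint.h>
	#include <stddef.h>

	typedef uint16_t efi_char16_t;

	static size_t ucs2_strnlen_sketch(const efi_char16_t *s, size_t maxlength)
	{
		size_t length = 0;

		while (*s++ != 0 && length < maxlength)
			length++;
		return length;
	}

With that behaviour, checks such as name_len >= GSMI_BUF_SIZE / 2 keep the same meaning across the rename.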
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 93aaadf..c22eed9 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -204,6 +204,12 @@ config GPIO_PXA
help
Say yes here to support the PXA GPIO device
+config GPIO_RCAR
+ tristate "Renesas R-Car GPIO"
+ depends on ARM
+ help
+ Say yes here to support GPIO on Renesas R-Car SoCs.
+
config GPIO_SPEAR_SPICS
bool "ST SPEAr13xx SPI Chip Select as GPIO support"
depends on PLAT_SPEAR
@@ -227,12 +233,6 @@ config GPIO_TS5500
blocks of the TS-5500: DIO1, DIO2 and the LCD port, and the TS-5600
LCD port.
-config GPIO_VT8500
- bool "VIA/Wondermedia SoC GPIO Support"
- depends on ARCH_VT8500
- help
- Say yes here to support the VT8500/WM8505/WM8650 GPIO controller.
-
config GPIO_XILINX
bool "Xilinx GPIO support"
depends on PPC_OF || MICROBLAZE
@@ -303,12 +303,21 @@ config GPIO_GE_FPGA
config GPIO_LYNXPOINT
bool "Intel Lynxpoint GPIO support"
- depends on ACPI
+ depends on ACPI && X86
select IRQ_DOMAIN
help
driver for GPIO functionality on Intel Lynxpoint PCH chipset
Requires ACPI device enumeration code to set up a platform device.
+config GPIO_GRGPIO
+ tristate "Aeroflex Gaisler GRGPIO support"
+ depends on OF
+ select GPIO_GENERIC
+ select IRQ_DOMAIN
+ help
+ Select this to support Aeroflex Gaisler GRGPIO cores from the GRLIB
+ VHDL IP core library.
+
comment "I2C GPIO expanders:"
config GPIO_ARIZONA
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 22e07bc..0cb2d65 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_ARCH_DAVINCI) += gpio-davinci.o
obj-$(CONFIG_GPIO_EM) += gpio-em.o
obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
obj-$(CONFIG_GPIO_GE_FPGA) += gpio-ge.o
+obj-$(CONFIG_GPIO_GRGPIO) += gpio-grgpio.o
obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
@@ -57,6 +58,7 @@ obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
obj-$(CONFIG_GPIO_RC5T583) += gpio-rc5t583.o
obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o
+obj-$(CONFIG_GPIO_RCAR) += gpio-rcar.o
obj-$(CONFIG_PLAT_SAMSUNG) += gpio-samsung.o
obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
obj-$(CONFIG_GPIO_SCH) += gpio-sch.o
@@ -80,7 +82,6 @@ obj-$(CONFIG_GPIO_TWL6040) += gpio-twl6040.o
obj-$(CONFIG_GPIO_UCB1400) += gpio-ucb1400.o
obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
-obj-$(CONFIG_GPIO_VT8500) += gpio-vt8500.o
obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
obj-$(CONFIG_GPIO_WM831X) += gpio-wm831x.o
obj-$(CONFIG_GPIO_WM8350) += gpio-wm8350.o
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index 464be96..7216079 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -137,7 +137,7 @@ static int gen_74x164_probe(struct spi_device *spi)
mutex_init(&chip->lock);
- dev_set_drvdata(&spi->dev, chip);
+ spi_set_drvdata(spi, chip);
chip->spi = spi;
@@ -176,7 +176,7 @@ static int gen_74x164_probe(struct spi_device *spi)
return ret;
exit_destroy:
- dev_set_drvdata(&spi->dev, NULL);
+ spi_set_drvdata(spi, NULL);
mutex_destroy(&chip->lock);
return ret;
}
@@ -186,11 +186,11 @@ static int gen_74x164_remove(struct spi_device *spi)
struct gen_74x164_chip *chip;
int ret;
- chip = dev_get_drvdata(&spi->dev);
+ chip = spi_get_drvdata(spi);
if (chip == NULL)
return -ENODEV;
- dev_set_drvdata(&spi->dev, NULL);
+ spi_set_drvdata(spi, NULL);
ret = gpiochip_remove(&chip->gpio_chip);
if (!ret)
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index 8afa95f..f33f78d 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -105,7 +105,7 @@ static int adp5520_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
dev_err(&pdev->dev, "failed to alloc memory\n");
return -ENOMEM;
@@ -163,7 +163,6 @@ static int adp5520_gpio_probe(struct platform_device *pdev)
return 0;
err:
- kfree(dev);
return ret;
}
@@ -180,7 +179,6 @@ static int adp5520_gpio_remove(struct platform_device *pdev)
return ret;
}
- kfree(dev);
return 0;
}
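The adp5520 conversion above, and the em, max7300/max7301, max732x and mc33880 hunks below, follow the same pattern: private-data allocations move to devm_kzalloc(), so error paths and remove() no longer need matching kfree() calls. A minimal probe() sketch of that pattern, with hypothetical foo_* names:

	/* Hypothetical driver: the allocation is bound to &pdev->dev and is
	 * released automatically on probe failure or device unbind. */
	#include <linux/platform_device.h>
	#include <linux/slab.h>

	struct foo_priv {
		int state;
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_priv *priv;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		platform_set_drvdata(pdev, priv);
		return 0;	/* no kfree() needed on any exit path */
	}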
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index deca78f..5cba855 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -231,10 +231,12 @@ static int em_gio_irq_domain_map(struct irq_domain *h, unsigned int virq,
static struct irq_domain_ops em_gio_irq_domain_ops = {
.map = em_gio_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
};
static int em_gio_probe(struct platform_device *pdev)
{
+ struct gpio_em_config pdata_dt;
struct gpio_em_config *pdata = pdev->dev.platform_data;
struct em_gio_priv *p;
struct resource *io[2], *irq[2];
@@ -243,7 +245,7 @@ static int em_gio_probe(struct platform_device *pdev)
const char *name = dev_name(&pdev->dev);
int ret;
- p = kzalloc(sizeof(*p), GFP_KERNEL);
+ p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
if (!p) {
dev_err(&pdev->dev, "failed to allocate driver data\n");
ret = -ENOMEM;
@@ -259,24 +261,45 @@ static int em_gio_probe(struct platform_device *pdev)
irq[0] = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
irq[1] = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
- if (!io[0] || !io[1] || !irq[0] || !irq[1] || !pdata) {
- dev_err(&pdev->dev, "missing IRQ, IOMEM or configuration\n");
+ if (!io[0] || !io[1] || !irq[0] || !irq[1]) {
+ dev_err(&pdev->dev, "missing IRQ or IOMEM\n");
ret = -EINVAL;
- goto err1;
+ goto err0;
}
- p->base0 = ioremap_nocache(io[0]->start, resource_size(io[0]));
+ p->base0 = devm_ioremap_nocache(&pdev->dev, io[0]->start,
+ resource_size(io[0]));
if (!p->base0) {
dev_err(&pdev->dev, "failed to remap low I/O memory\n");
ret = -ENXIO;
- goto err1;
+ goto err0;
}
- p->base1 = ioremap_nocache(io[1]->start, resource_size(io[1]));
+ p->base1 = devm_ioremap_nocache(&pdev->dev, io[1]->start,
+ resource_size(io[1]));
if (!p->base1) {
dev_err(&pdev->dev, "failed to remap high I/O memory\n");
ret = -ENXIO;
- goto err2;
+ goto err0;
+ }
+
+ if (!pdata) {
+ memset(&pdata_dt, 0, sizeof(pdata_dt));
+ pdata = &pdata_dt;
+
+ if (of_property_read_u32(pdev->dev.of_node, "ngpios",
+ &pdata->number_of_pins)) {
+ dev_err(&pdev->dev, "Missing ngpios OF property\n");
+ ret = -EINVAL;
+ goto err0;
+ }
+
+ ret = of_alias_get_id(pdev->dev.of_node, "gpio");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Couldn't get OF id\n");
+ goto err0;
+ }
+ pdata->gpio_base = ret * 32; /* 32 GPIOs per instance */
}
gpio_chip = &p->gpio_chip;
@@ -306,40 +329,32 @@ static int em_gio_probe(struct platform_device *pdev)
if (!p->irq_domain) {
ret = -ENXIO;
dev_err(&pdev->dev, "cannot initialize irq domain\n");
- goto err3;
+ goto err0;
}
- if (request_irq(irq[0]->start, em_gio_irq_handler, 0, name, p)) {
+ if (devm_request_irq(&pdev->dev, irq[0]->start,
+ em_gio_irq_handler, 0, name, p)) {
dev_err(&pdev->dev, "failed to request low IRQ\n");
ret = -ENOENT;
- goto err4;
+ goto err1;
}
- if (request_irq(irq[1]->start, em_gio_irq_handler, 0, name, p)) {
+ if (devm_request_irq(&pdev->dev, irq[1]->start,
+ em_gio_irq_handler, 0, name, p)) {
dev_err(&pdev->dev, "failed to request high IRQ\n");
ret = -ENOENT;
- goto err5;
+ goto err1;
}
ret = gpiochip_add(gpio_chip);
if (ret) {
dev_err(&pdev->dev, "failed to add GPIO controller\n");
- goto err6;
+ goto err1;
}
return 0;
-err6:
- free_irq(irq[1]->start, pdev);
-err5:
- free_irq(irq[0]->start, pdev);
-err4:
- irq_domain_remove(p->irq_domain);
-err3:
- iounmap(p->base1);
-err2:
- iounmap(p->base0);
err1:
- kfree(p);
+ irq_domain_remove(p->irq_domain);
err0:
return ret;
}
@@ -347,34 +362,43 @@ err0:
static int em_gio_remove(struct platform_device *pdev)
{
struct em_gio_priv *p = platform_get_drvdata(pdev);
- struct resource *irq[2];
int ret;
ret = gpiochip_remove(&p->gpio_chip);
if (ret)
return ret;
- irq[0] = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- irq[1] = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
-
- free_irq(irq[1]->start, pdev);
- free_irq(irq[0]->start, pdev);
irq_domain_remove(p->irq_domain);
- iounmap(p->base1);
- iounmap(p->base0);
- kfree(p);
return 0;
}
+static const struct of_device_id em_gio_dt_ids[] = {
+ { .compatible = "renesas,em-gio", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, em_gio_dt_ids);
+
static struct platform_driver em_gio_device_driver = {
.probe = em_gio_probe,
.remove = em_gio_remove,
.driver = {
.name = "em_gio",
+ .of_match_table = em_gio_dt_ids,
+ .owner = THIS_MODULE,
}
};
-module_platform_driver(em_gio_device_driver);
+static int __init em_gio_init(void)
+{
+ return platform_driver_register(&em_gio_device_driver);
+}
+postcore_initcall(em_gio_init);
+
+static void __exit em_gio_exit(void)
+{
+ platform_driver_unregister(&em_gio_device_driver);
+}
+module_exit(em_gio_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Renesas Emma Mobile GIO Driver");
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index 05fcc0f..d2196bf 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -104,6 +104,26 @@ static unsigned long bgpio_read64(void __iomem *reg)
}
#endif /* BITS_PER_LONG >= 64 */
+static void bgpio_write16be(void __iomem *reg, unsigned long data)
+{
+ iowrite16be(data, reg);
+}
+
+static unsigned long bgpio_read16be(void __iomem *reg)
+{
+ return ioread16be(reg);
+}
+
+static void bgpio_write32be(void __iomem *reg, unsigned long data)
+{
+ iowrite32be(data, reg);
+}
+
+static unsigned long bgpio_read32be(void __iomem *reg)
+{
+ return ioread32be(reg);
+}
+
static unsigned long bgpio_pin2mask(struct bgpio_chip *bgc, unsigned int pin)
{
return 1 << pin;
@@ -249,7 +269,8 @@ static int bgpio_dir_out_inv(struct gpio_chip *gc, unsigned int gpio, int val)
static int bgpio_setup_accessors(struct device *dev,
struct bgpio_chip *bgc,
- bool be)
+ bool bit_be,
+ bool byte_be)
{
switch (bgc->bits) {
@@ -258,17 +279,33 @@ static int bgpio_setup_accessors(struct device *dev,
bgc->write_reg = bgpio_write8;
break;
case 16:
- bgc->read_reg = bgpio_read16;
- bgc->write_reg = bgpio_write16;
+ if (byte_be) {
+ bgc->read_reg = bgpio_read16be;
+ bgc->write_reg = bgpio_write16be;
+ } else {
+ bgc->read_reg = bgpio_read16;
+ bgc->write_reg = bgpio_write16;
+ }
break;
case 32:
- bgc->read_reg = bgpio_read32;
- bgc->write_reg = bgpio_write32;
+ if (byte_be) {
+ bgc->read_reg = bgpio_read32be;
+ bgc->write_reg = bgpio_write32be;
+ } else {
+ bgc->read_reg = bgpio_read32;
+ bgc->write_reg = bgpio_write32;
+ }
break;
#if BITS_PER_LONG >= 64
case 64:
- bgc->read_reg = bgpio_read64;
- bgc->write_reg = bgpio_write64;
+ if (byte_be) {
+ dev_err(dev,
+ "64 bit big endian byte order unsupported\n");
+ return -EINVAL;
+ } else {
+ bgc->read_reg = bgpio_read64;
+ bgc->write_reg = bgpio_write64;
+ }
break;
#endif /* BITS_PER_LONG >= 64 */
default:
@@ -276,7 +313,7 @@ static int bgpio_setup_accessors(struct device *dev,
return -EINVAL;
}
- bgc->pin2mask = be ? bgpio_pin2mask_be : bgpio_pin2mask;
+ bgc->pin2mask = bit_be ? bgpio_pin2mask_be : bgpio_pin2mask;
return 0;
}
@@ -353,11 +390,7 @@ static int bgpio_setup_direction(struct bgpio_chip *bgc,
int bgpio_remove(struct bgpio_chip *bgc)
{
- int err = gpiochip_remove(&bgc->gc);
-
- kfree(bgc);
-
- return err;
+ return gpiochip_remove(&bgc->gc);
}
EXPORT_SYMBOL_GPL(bgpio_remove);
@@ -385,7 +418,8 @@ int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
if (ret)
return ret;
- ret = bgpio_setup_accessors(dev, bgc, flags & BGPIOF_BIG_ENDIAN);
+ ret = bgpio_setup_accessors(dev, bgc, flags & BGPIOF_BIG_ENDIAN,
+ flags & BGPIOF_BIG_ENDIAN_BYTE_ORDER);
if (ret)
return ret;
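The new BGPIOF_BIG_ENDIAN_BYTE_ORDER flag selects the ioread*be()/iowrite*be() accessors added above; the grgpio driver introduced next is its first user. A minimal sketch of a caller asking for byte-swapped register access, patterned on the grgpio bgpio_init() call below (the 0x0/0x4/0x8 offsets are illustrative assumptions):

	#include <linux/basic_mmio_gpio.h>

	/* Sketch: a 32-bit-wide GPIO block whose registers are big-endian
	 * with respect to the CPU. */
	static int foo_bgpio_setup(struct device *dev, void __iomem *regs,
				   struct bgpio_chip *bgc)
	{
		return bgpio_init(bgc, dev, 4,
				  regs + 0x0,	/* data (in) */
				  regs + 0x4,	/* set (out) */
				  NULL,		/* no clear register */
				  regs + 0x8,	/* direction-out register */
				  NULL,		/* no direction-in register */
				  BGPIOF_BIG_ENDIAN_BYTE_ORDER);
	}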
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
new file mode 100644
index 0000000..8e08b86
--- /dev/null
+++ b/drivers/gpio/gpio-grgpio.c
@@ -0,0 +1,505 @@
+/*
+ * Driver for Aeroflex Gaisler GRGPIO General Purpose I/O cores.
+ *
+ * 2013 (c) Aeroflex Gaisler AB
+ *
+ * This driver supports the GRGPIO GPIO core available in the GRLIB VHDL
+ * IP core library.
+ *
+ * Full documentation of the GRGPIO core can be found here:
+ * http://www.gaisler.com/products/grlib/grip.pdf
+ *
+ * See "Documentation/devicetree/bindings/gpio/gpio-grgpio.txt" for
+ * information on open firmware properties.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Contributors: Andreas Larsson <andreas@gaisler.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/basic_mmio_gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+
+#define GRGPIO_MAX_NGPIO 32
+
+#define GRGPIO_DATA 0x00
+#define GRGPIO_OUTPUT 0x04
+#define GRGPIO_DIR 0x08
+#define GRGPIO_IMASK 0x0c
+#define GRGPIO_IPOL 0x10
+#define GRGPIO_IEDGE 0x14
+#define GRGPIO_BYPASS 0x18
+#define GRGPIO_IMAP_BASE 0x20
+
+/* Structure for an irq of the core - called an underlying irq */
+struct grgpio_uirq {
+ u8 refcnt; /* Reference counter to manage requesting/freeing of uirq */
+ u8 uirq; /* Underlying irq of the gpio driver */
+};
+
+/*
+ * Structure for an irq of a gpio line handed out by this driver. The index is
+ * used to map to the corresponding underlying irq.
+ */
+struct grgpio_lirq {
+ s8 index; /* Index into struct grgpio_priv's uirqs, or -1 */
+ u8 irq; /* irq for the gpio line */
+};
+
+struct grgpio_priv {
+ struct bgpio_chip bgc;
+ void __iomem *regs;
+ struct device *dev;
+
+ u32 imask; /* irq mask shadow register */
+
+ /*
+ * The grgpio core can have multiple "underlying" irqs. The gpio lines
+ * can be mapped to any one or none of these underlying irqs
+ * independently of each other. This driver sets up an irq domain and
+ * hands out separate irqs to each gpio line
+ */
+ struct irq_domain *domain;
+
+ /*
+ * This array contains information on each underlying irq, each
+ * irq of the grgpio core itself.
+ */
+ struct grgpio_uirq uirqs[GRGPIO_MAX_NGPIO];
+
+ /*
+ * This array contains information for each gpio line on the irqs
+	 * obtained from this driver. An index value of -1 for a certain gpio
+ * line indicates that the line has no irq. Otherwise the index connects
+ * the irq to the underlying irq by pointing into the uirqs array.
+ */
+ struct grgpio_lirq lirqs[GRGPIO_MAX_NGPIO];
+};
+
+static inline struct grgpio_priv *grgpio_gc_to_priv(struct gpio_chip *gc)
+{
+ struct bgpio_chip *bgc = to_bgpio_chip(gc);
+
+ return container_of(bgc, struct grgpio_priv, bgc);
+}
+
+static void grgpio_set_imask(struct grgpio_priv *priv, unsigned int offset,
+ int val)
+{
+ struct bgpio_chip *bgc = &priv->bgc;
+ unsigned long mask = bgc->pin2mask(bgc, offset);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bgc->lock, flags);
+
+ if (val)
+ priv->imask |= mask;
+ else
+ priv->imask &= ~mask;
+ bgc->write_reg(priv->regs + GRGPIO_IMASK, priv->imask);
+
+ spin_unlock_irqrestore(&bgc->lock, flags);
+}
+
+static int grgpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct grgpio_priv *priv = grgpio_gc_to_priv(gc);
+
+	if (offset >= gc->ngpio)
+ return -ENXIO;
+
+ if (priv->lirqs[offset].index < 0)
+ return -ENXIO;
+
+ return irq_create_mapping(priv->domain, offset);
+}
+
+/* -------------------- IRQ chip functions -------------------- */
+
+static int grgpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+ u32 mask = BIT(d->hwirq);
+ u32 ipol;
+ u32 iedge;
+ u32 pol;
+ u32 edge;
+
+ switch (type) {
+ case IRQ_TYPE_LEVEL_LOW:
+ pol = 0;
+ edge = 0;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ pol = mask;
+ edge = 0;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ pol = 0;
+ edge = mask;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ pol = mask;
+ edge = mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&priv->bgc.lock, flags);
+
+ ipol = priv->bgc.read_reg(priv->regs + GRGPIO_IPOL) & ~mask;
+ iedge = priv->bgc.read_reg(priv->regs + GRGPIO_IEDGE) & ~mask;
+
+ priv->bgc.write_reg(priv->regs + GRGPIO_IPOL, ipol | pol);
+ priv->bgc.write_reg(priv->regs + GRGPIO_IEDGE, iedge | edge);
+
+ spin_unlock_irqrestore(&priv->bgc.lock, flags);
+
+ return 0;
+}
+
+static void grgpio_irq_mask(struct irq_data *d)
+{
+ struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
+ int offset = d->hwirq;
+
+ grgpio_set_imask(priv, offset, 0);
+}
+
+static void grgpio_irq_unmask(struct irq_data *d)
+{
+ struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
+ int offset = d->hwirq;
+
+ grgpio_set_imask(priv, offset, 1);
+}
+
+static struct irq_chip grgpio_irq_chip = {
+ .name = "grgpio",
+ .irq_mask = grgpio_irq_mask,
+ .irq_unmask = grgpio_irq_unmask,
+ .irq_set_type = grgpio_irq_set_type,
+};
+
+static irqreturn_t grgpio_irq_handler(int irq, void *dev)
+{
+ struct grgpio_priv *priv = dev;
+ int ngpio = priv->bgc.gc.ngpio;
+ unsigned long flags;
+ int i;
+ int match = 0;
+
+ spin_lock_irqsave(&priv->bgc.lock, flags);
+
+ /*
+	 * For each gpio line, call its interrupt handler if its underlying
+	 * irq matches the irq currently being handled.
+ */
+ for (i = 0; i < ngpio; i++) {
+ struct grgpio_lirq *lirq = &priv->lirqs[i];
+
+ if (priv->imask & BIT(i) && lirq->index >= 0 &&
+ priv->uirqs[lirq->index].uirq == irq) {
+ generic_handle_irq(lirq->irq);
+ match = 1;
+ }
+ }
+
+ spin_unlock_irqrestore(&priv->bgc.lock, flags);
+
+ if (!match)
+ dev_warn(priv->dev, "No gpio line matched irq %d\n", irq);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * This function will be called as a consequence of the call to
+ * irq_create_mapping in grgpio_to_irq
+ */
+int grgpio_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct grgpio_priv *priv = d->host_data;
+ struct grgpio_lirq *lirq;
+ struct grgpio_uirq *uirq;
+ unsigned long flags;
+ int offset = hwirq;
+ int ret = 0;
+
+ if (!priv)
+ return -EINVAL;
+
+ lirq = &priv->lirqs[offset];
+ if (lirq->index < 0)
+ return -EINVAL;
+
+ dev_dbg(priv->dev, "Mapping irq %d for gpio line %d\n",
+ irq, offset);
+
+ spin_lock_irqsave(&priv->bgc.lock, flags);
+
+ /* Request underlying irq if not already requested */
+ lirq->irq = irq;
+ uirq = &priv->uirqs[lirq->index];
+ if (uirq->refcnt == 0) {
+ ret = request_irq(uirq->uirq, grgpio_irq_handler, 0,
+ dev_name(priv->dev), priv);
+ if (ret) {
+ dev_err(priv->dev,
+ "Could not request underlying irq %d\n",
+ uirq->uirq);
+
+ spin_unlock_irqrestore(&priv->bgc.lock, flags);
+
+ return ret;
+ }
+ }
+ uirq->refcnt++;
+
+ spin_unlock_irqrestore(&priv->bgc.lock, flags);
+
+ /* Setup irq */
+ irq_set_chip_data(irq, priv);
+ irq_set_chip_and_handler(irq, &grgpio_irq_chip,
+ handle_simple_irq);
+ irq_clear_status_flags(irq, IRQ_NOREQUEST);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ irq_set_noprobe(irq);
+#endif
+
+ return ret;
+}
+
+void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)
+{
+ struct grgpio_priv *priv = d->host_data;
+ int index;
+ struct grgpio_lirq *lirq;
+ struct grgpio_uirq *uirq;
+ unsigned long flags;
+ int ngpio = priv->bgc.gc.ngpio;
+ int i;
+
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, 0);
+#endif
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+
+ spin_lock_irqsave(&priv->bgc.lock, flags);
+
+ /* Free underlying irq if last user unmapped */
+ index = -1;
+ for (i = 0; i < ngpio; i++) {
+ lirq = &priv->lirqs[i];
+ if (lirq->irq == irq) {
+ grgpio_set_imask(priv, i, 0);
+ lirq->irq = 0;
+ index = lirq->index;
+ break;
+ }
+ }
+ WARN_ON(index < 0);
+
+ if (index >= 0) {
+ uirq = &priv->uirqs[lirq->index];
+ uirq->refcnt--;
+ if (uirq->refcnt == 0)
+ free_irq(uirq->uirq, priv);
+ }
+
+ spin_unlock_irqrestore(&priv->bgc.lock, flags);
+}
+
+static struct irq_domain_ops grgpio_irq_domain_ops = {
+ .map = grgpio_irq_map,
+ .unmap = grgpio_irq_unmap,
+};
+
+/* ------------------------------------------------------------ */
+
+static int grgpio_probe(struct platform_device *ofdev)
+{
+ struct device_node *np = ofdev->dev.of_node;
+ void __iomem *regs;
+ struct gpio_chip *gc;
+ struct bgpio_chip *bgc;
+ struct grgpio_priv *priv;
+ struct resource *res;
+ int err;
+ u32 prop;
+ s32 *irqmap;
+ int size;
+ int i;
+
+ priv = devm_kzalloc(&ofdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(&ofdev->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ bgc = &priv->bgc;
+ err = bgpio_init(bgc, &ofdev->dev, 4, regs + GRGPIO_DATA,
+ regs + GRGPIO_OUTPUT, NULL, regs + GRGPIO_DIR, NULL,
+ BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ if (err) {
+ dev_err(&ofdev->dev, "bgpio_init() failed\n");
+ return err;
+ }
+
+ priv->regs = regs;
+ priv->imask = bgc->read_reg(regs + GRGPIO_IMASK);
+ priv->dev = &ofdev->dev;
+
+ gc = &bgc->gc;
+ gc->of_node = np;
+ gc->owner = THIS_MODULE;
+ gc->to_irq = grgpio_to_irq;
+ gc->label = np->full_name;
+ gc->base = -1;
+
+ err = of_property_read_u32(np, "nbits", &prop);
+ if (err || prop <= 0 || prop > GRGPIO_MAX_NGPIO) {
+ gc->ngpio = GRGPIO_MAX_NGPIO;
+ dev_dbg(&ofdev->dev,
+ "No or invalid nbits property: assume %d\n", gc->ngpio);
+ } else {
+ gc->ngpio = prop;
+ }
+
+ /*
+	 * The irqmap contains index values indicating which underlying irq,
+	 * if any, is connected to each line.
+ */
+ irqmap = (s32 *)of_get_property(np, "irqmap", &size);
+ if (irqmap) {
+ if (size < gc->ngpio) {
+ dev_err(&ofdev->dev,
+ "irqmap shorter than ngpio (%d < %d)\n",
+ size, gc->ngpio);
+ return -EINVAL;
+ }
+
+ priv->domain = irq_domain_add_linear(np, gc->ngpio,
+ &grgpio_irq_domain_ops,
+ priv);
+ if (!priv->domain) {
+ dev_err(&ofdev->dev, "Could not add irq domain\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < gc->ngpio; i++) {
+ struct grgpio_lirq *lirq;
+ int ret;
+
+ lirq = &priv->lirqs[i];
+ lirq->index = irqmap[i];
+
+ if (lirq->index < 0)
+ continue;
+
+ ret = platform_get_irq(ofdev, lirq->index);
+ if (ret <= 0) {
+ /*
+ * Continue without irq functionality for that
+ * gpio line
+ */
+ dev_err(priv->dev,
+ "Failed to get irq for offset %d\n", i);
+ continue;
+ }
+ priv->uirqs[lirq->index].uirq = ret;
+ }
+ }
+
+ platform_set_drvdata(ofdev, priv);
+
+ err = gpiochip_add(gc);
+ if (err) {
+ dev_err(&ofdev->dev, "Could not add gpiochip\n");
+ return err;
+ }
+
+ dev_info(&ofdev->dev, "regs=0x%p, base=%d, ngpio=%d, irqs=%s\n",
+ priv->regs, gc->base, gc->ngpio, priv->domain ? "on" : "off");
+
+ return 0;
+}
+
+static int grgpio_remove(struct platform_device *ofdev)
+{
+ struct grgpio_priv *priv = platform_get_drvdata(ofdev);
+ unsigned long flags;
+ int i;
+ int ret = 0;
+
+ spin_lock_irqsave(&priv->bgc.lock, flags);
+
+ if (priv->domain) {
+ for (i = 0; i < GRGPIO_MAX_NGPIO; i++) {
+ if (priv->uirqs[i].refcnt != 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+ }
+ }
+
+ ret = gpiochip_remove(&priv->bgc.gc);
+ if (ret)
+ goto out;
+
+ if (priv->domain)
+ irq_domain_remove(priv->domain);
+
+out:
+ spin_unlock_irqrestore(&priv->bgc.lock, flags);
+
+ return ret;
+}
+
+static struct of_device_id grgpio_match[] = {
+ {.name = "GAISLER_GPIO"},
+ {.name = "01_01a"},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, grgpio_match);
+
+static struct platform_driver grgpio_driver = {
+ .driver = {
+ .name = "grgpio",
+ .owner = THIS_MODULE,
+ .of_match_table = grgpio_match,
+ },
+ .probe = grgpio_probe,
+ .remove = grgpio_remove,
+};
+module_platform_driver(grgpio_driver);
+
+MODULE_AUTHOR("Aeroflex Gaisler AB.");
+MODULE_DESCRIPTION("Driver for Aeroflex Gaisler GRGPIO");
+MODULE_LICENSE("GPL");
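The grgpio core above hands out one virtual irq per gpio line through its irq domain, so consumers use the ordinary gpiolib/genirq path: gpio_to_irq() lands in grgpio_to_irq(), which creates the mapping and, on first use, requests the underlying core irq. A hypothetical consumer sketch (the gpio number, handler and "demo" label are assumptions):

	#include <linux/gpio.h>
	#include <linux/interrupt.h>

	static irqreturn_t demo_handler(int irq, void *data)
	{
		/* handle the event signalled on this gpio line */
		return IRQ_HANDLED;
	}

	static int demo_attach(unsigned gpio)
	{
		int irq, err;

		err = gpio_request_one(gpio, GPIOF_IN, "demo");
		if (err)
			return err;

		irq = gpio_to_irq(gpio);	/* -ENXIO if the line has no underlying irq */
		if (irq < 0)
			return irq;

		/* IRQ_TYPE_EDGE_RISING is one of the types grgpio_irq_set_type() accepts */
		return request_irq(irq, demo_handler, IRQF_TRIGGER_RISING,
				   "demo", NULL);
	}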
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index de3c317..e16d932 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -130,14 +130,11 @@ static int ichx_read_bit(int reg, unsigned nr)
static bool ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr)
{
- return ichx_priv.use_gpio & (1 << (nr / 32));
+ return !!(ichx_priv.use_gpio & (1 << (nr / 32)));
}
static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
{
- if (!ichx_gpio_check_available(gpio, nr))
- return -ENXIO;
-
/*
* Try setting pin as an input and verify it worked since many pins
* are output-only.
@@ -151,9 +148,6 @@ static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
static int ichx_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
int val)
{
- if (!ichx_gpio_check_available(gpio, nr))
- return -ENXIO;
-
/* Set GPIO output value. */
ichx_write_bit(GPIO_LVL, nr, val, 0);
@@ -169,9 +163,6 @@ static int ichx_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
static int ichx_gpio_get(struct gpio_chip *chip, unsigned nr)
{
- if (!ichx_gpio_check_available(chip, nr))
- return -ENXIO;
-
return ichx_read_bit(GPIO_LVL, nr);
}
@@ -180,9 +171,6 @@ static int ich6_gpio_get(struct gpio_chip *chip, unsigned nr)
unsigned long flags;
u32 data;
- if (!ichx_gpio_check_available(chip, nr))
- return -ENXIO;
-
/*
* GPI 0 - 15 need to be read from the power management registers on
* a ICH6/3100 bridge.
@@ -207,6 +195,9 @@ static int ich6_gpio_get(struct gpio_chip *chip, unsigned nr)
static int ichx_gpio_request(struct gpio_chip *chip, unsigned nr)
{
+ if (!ichx_gpio_check_available(chip, nr))
+ return -ENXIO;
+
/*
* Note we assume the BIOS properly set a bridge's USE value. Some
* chips (eg Intel 3100) have bogus USE values though, so first see if
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 36d7dee..dda6a75 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -533,7 +533,7 @@ static int lpc32xx_of_xlate(struct gpio_chip *gc,
{
/* Is this the correct bank? */
u32 bank = gpiospec->args[0];
- if ((bank > ARRAY_SIZE(lpc32xx_gpiochip) ||
+ if ((bank >= ARRAY_SIZE(lpc32xx_gpiochip) ||
(gc != &lpc32xx_gpiochip[bank].chip)))
return -EINVAL;
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index 3472b05..86c17de 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -32,6 +32,7 @@
#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/io.h>
/* LynxPoint chipset has support for 94 gpio pins */
diff --git a/drivers/gpio/gpio-max7300.c b/drivers/gpio/gpio-max7300.c
index 4b6b9a0..40ab6df 100644
--- a/drivers/gpio/gpio-max7300.c
+++ b/drivers/gpio/gpio-max7300.c
@@ -41,7 +41,7 @@ static int max7300_probe(struct i2c_client *client,
I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
- ts = kzalloc(sizeof(struct max7301), GFP_KERNEL);
+ ts = devm_kzalloc(&client->dev, sizeof(struct max7301), GFP_KERNEL);
if (!ts)
return -ENOMEM;
@@ -50,8 +50,6 @@ static int max7300_probe(struct i2c_client *client,
ts->dev = &client->dev;
ret = __max730x_probe(ts);
- if (ret)
- kfree(ts);
return ret;
}
diff --git a/drivers/gpio/gpio-max7301.c b/drivers/gpio/gpio-max7301.c
index c6c535c..3b16ab7 100644
--- a/drivers/gpio/gpio-max7301.c
+++ b/drivers/gpio/gpio-max7301.c
@@ -56,12 +56,13 @@ static int max7301_probe(struct spi_device *spi)
int ret;
/* bits_per_word cannot be configured in platform data */
- spi->bits_per_word = 16;
+ if (spi->dev.platform_data)
+ spi->bits_per_word = 16;
ret = spi_setup(spi);
if (ret < 0)
return ret;
- ts = kzalloc(sizeof(struct max7301), GFP_KERNEL);
+ ts = devm_kzalloc(&spi->dev, sizeof(struct max7301), GFP_KERNEL);
if (!ts)
return -ENOMEM;
@@ -70,8 +71,6 @@ static int max7301_probe(struct spi_device *spi)
ts->dev = &spi->dev;
ret = __max730x_probe(ts);
- if (ret)
- kfree(ts);
return ret;
}
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index 1e0467c..d4b51b1 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -589,7 +589,8 @@ static int max732x_probe(struct i2c_client *client,
return -EINVAL;
}
- chip = kzalloc(sizeof(struct max732x_chip), GFP_KERNEL);
+ chip = devm_kzalloc(&client->dev, sizeof(struct max732x_chip),
+ GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
chip->client = client;
@@ -647,7 +648,6 @@ static int max732x_probe(struct i2c_client *client,
out_failed:
max732x_irq_teardown(chip);
- kfree(chip);
return ret;
}
@@ -680,7 +680,6 @@ static int max732x_remove(struct i2c_client *client)
if (chip->client_dummy)
i2c_unregister_device(chip->client_dummy);
- kfree(chip);
return 0;
}
diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c
index 6a8fdc2..63a7a1b 100644
--- a/drivers/gpio/gpio-mc33880.c
+++ b/drivers/gpio/gpio-mc33880.c
@@ -101,13 +101,13 @@ static int mc33880_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- mc = kzalloc(sizeof(struct mc33880), GFP_KERNEL);
+ mc = devm_kzalloc(&spi->dev, sizeof(struct mc33880), GFP_KERNEL);
if (!mc)
return -ENOMEM;
mutex_init(&mc->lock);
- dev_set_drvdata(&spi->dev, mc);
+ spi_set_drvdata(spi, mc);
mc->spi = spi;
@@ -130,7 +130,8 @@ static int mc33880_probe(struct spi_device *spi)
ret = mc33880_write_config(mc);
if (ret) {
- printk(KERN_ERR "Failed writing to " DRIVER_NAME ": %d\n", ret);
+ dev_err(&spi->dev, "Failed writing to " DRIVER_NAME ": %d\n",
+ ret);
goto exit_destroy;
}
@@ -141,9 +142,8 @@ static int mc33880_probe(struct spi_device *spi)
return ret;
exit_destroy:
- dev_set_drvdata(&spi->dev, NULL);
+ spi_set_drvdata(spi, NULL);
mutex_destroy(&mc->lock);
- kfree(mc);
return ret;
}
@@ -152,17 +152,16 @@ static int mc33880_remove(struct spi_device *spi)
struct mc33880 *mc;
int ret;
- mc = dev_get_drvdata(&spi->dev);
+ mc = spi_get_drvdata(spi);
if (mc == NULL)
return -ENODEV;
- dev_set_drvdata(&spi->dev, NULL);
+ spi_set_drvdata(spi, NULL);
ret = gpiochip_remove(&mc->chip);
- if (!ret) {
+ if (!ret)
mutex_destroy(&mc->lock);
- kfree(mc);
- } else
+ else
dev_err(&spi->dev, "Failed to remove the GPIO controller: %d\n",
ret);
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index 3cea0ea..6a4470b 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -12,6 +12,8 @@
#include <linux/spi/mcp23s08.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
/**
* MCP types supported by driver
@@ -383,6 +385,10 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
mcp->chip.direction_output = mcp23s08_direction_output;
mcp->chip.set = mcp23s08_set;
mcp->chip.dbg_show = mcp23s08_dbg_show;
+#ifdef CONFIG_OF
+ mcp->chip.of_gpio_n_cells = 2;
+ mcp->chip.of_node = dev->of_node;
+#endif
switch (type) {
#ifdef CONFIG_SPI_MASTER
@@ -473,6 +479,35 @@ fail:
/*----------------------------------------------------------------------*/
+#ifdef CONFIG_OF
+#ifdef CONFIG_SPI_MASTER
+static struct of_device_id mcp23s08_spi_of_match[] = {
+ {
+ .compatible = "mcp,mcp23s08", .data = (void *) MCP_TYPE_S08,
+ },
+ {
+ .compatible = "mcp,mcp23s17", .data = (void *) MCP_TYPE_S17,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mcp23s08_spi_of_match);
+#endif
+
+#if IS_ENABLED(CONFIG_I2C)
+static struct of_device_id mcp23s08_i2c_of_match[] = {
+ {
+ .compatible = "mcp,mcp23008", .data = (void *) MCP_TYPE_008,
+ },
+ {
+ .compatible = "mcp,mcp23017", .data = (void *) MCP_TYPE_017,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mcp23s08_i2c_of_match);
+#endif
+#endif /* CONFIG_OF */
+
+
#if IS_ENABLED(CONFIG_I2C)
static int mcp230xx_probe(struct i2c_client *client,
@@ -480,12 +515,23 @@ static int mcp230xx_probe(struct i2c_client *client,
{
struct mcp23s08_platform_data *pdata;
struct mcp23s08 *mcp;
- int status;
-
- pdata = client->dev.platform_data;
- if (!pdata || !gpio_is_valid(pdata->base)) {
- dev_dbg(&client->dev, "invalid or missing platform data\n");
- return -EINVAL;
+ int status, base, pullups;
+ const struct of_device_id *match;
+
+ match = of_match_device(of_match_ptr(mcp23s08_i2c_of_match),
+ &client->dev);
+ if (match) {
+ base = -1;
+ pullups = 0;
+ } else {
+ pdata = client->dev.platform_data;
+ if (!pdata || !gpio_is_valid(pdata->base)) {
+ dev_dbg(&client->dev,
+ "invalid or missing platform data\n");
+ return -EINVAL;
+ }
+ base = pdata->base;
+ pullups = pdata->chip[0].pullups;
}
mcp = kzalloc(sizeof *mcp, GFP_KERNEL);
@@ -493,8 +539,7 @@ static int mcp230xx_probe(struct i2c_client *client,
return -ENOMEM;
status = mcp23s08_probe_one(mcp, &client->dev, client, client->addr,
- id->driver_data, pdata->base,
- pdata->chip[0].pullups);
+ id->driver_data, base, pullups);
if (status)
goto fail;
@@ -531,6 +576,7 @@ static struct i2c_driver mcp230xx_driver = {
.driver = {
.name = "mcp230xx",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(mcp23s08_i2c_of_match),
},
.probe = mcp230xx_probe,
.remove = mcp230xx_remove,
@@ -565,28 +611,55 @@ static int mcp23s08_probe(struct spi_device *spi)
unsigned chips = 0;
struct mcp23s08_driver_data *data;
int status, type;
- unsigned base;
-
- type = spi_get_device_id(spi)->driver_data;
-
- pdata = spi->dev.platform_data;
- if (!pdata || !gpio_is_valid(pdata->base)) {
- dev_dbg(&spi->dev, "invalid or missing platform data\n");
- return -EINVAL;
- }
+ unsigned base = -1,
+ ngpio = 0,
+ pullups[ARRAY_SIZE(pdata->chip)];
+ const struct of_device_id *match;
+ u32 spi_present_mask = 0;
+
+ match = of_match_device(of_match_ptr(mcp23s08_spi_of_match), &spi->dev);
+ if (match) {
+ type = (int)match->data;
+ status = of_property_read_u32(spi->dev.of_node,
+ "mcp,spi-present-mask", &spi_present_mask);
+ if (status) {
+ dev_err(&spi->dev, "DT has no spi-present-mask\n");
+ return -ENODEV;
+ }
+ if ((spi_present_mask <= 0) || (spi_present_mask >= 256)) {
+ dev_err(&spi->dev, "invalid spi-present-mask\n");
+ return -ENODEV;
+ }
- for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
- if (!pdata->chip[addr].is_present)
- continue;
- chips++;
- if ((type == MCP_TYPE_S08) && (addr > 3)) {
- dev_err(&spi->dev,
- "mcp23s08 only supports address 0..3\n");
+ for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++)
+ pullups[addr] = 0;
+ } else {
+ type = spi_get_device_id(spi)->driver_data;
+ pdata = spi->dev.platform_data;
+ if (!pdata || !gpio_is_valid(pdata->base)) {
+ dev_dbg(&spi->dev,
+ "invalid or missing platform data\n");
return -EINVAL;
}
+
+ for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
+ if (!pdata->chip[addr].is_present)
+ continue;
+ chips++;
+ if ((type == MCP_TYPE_S08) && (addr > 3)) {
+ dev_err(&spi->dev,
+ "mcp23s08 only supports address 0..3\n");
+ return -EINVAL;
+ }
+ spi_present_mask |= 1 << addr;
+ pullups[addr] = pdata->chip[addr].pullups;
+ }
+
+ if (!chips)
+ return -ENODEV;
+
+ base = pdata->base;
}
- if (!chips)
- return -ENODEV;
data = kzalloc(sizeof *data + chips * sizeof(struct mcp23s08),
GFP_KERNEL);
@@ -594,21 +667,22 @@ static int mcp23s08_probe(struct spi_device *spi)
return -ENOMEM;
spi_set_drvdata(spi, data);
- base = pdata->base;
for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
- if (!pdata->chip[addr].is_present)
+ if (!(spi_present_mask & (1 << addr)))
continue;
chips--;
data->mcp[addr] = &data->chip[chips];
status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi,
0x40 | (addr << 1), type, base,
- pdata->chip[addr].pullups);
+ pullups[addr]);
if (status < 0)
goto fail;
- base += (type == MCP_TYPE_S17) ? 16 : 8;
+ if (base != -1)
+ base += (type == MCP_TYPE_S17) ? 16 : 8;
+ ngpio += (type == MCP_TYPE_S17) ? 16 : 8;
}
- data->ngpio = base - pdata->base;
+ data->ngpio = ngpio;
/* NOTE: these chips have a relatively sane IRQ framework, with
* per-signal masking and level/edge triggering. It's not yet
@@ -668,6 +742,7 @@ static struct spi_driver mcp23s08_driver = {
.driver = {
.name = "mcp23s08",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(mcp23s08_spi_of_match),
},
};
diff --git a/drivers/gpio/gpio-msm-v1.c b/drivers/gpio/gpio-msm-v1.c
index 52a4d42..c798585 100644
--- a/drivers/gpio/gpio-msm-v1.c
+++ b/drivers/gpio/gpio-msm-v1.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -19,9 +19,10 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
-#include <mach/cpu.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+
#include <mach/msm_gpiomux.h>
-#include <mach/msm_iomap.h>
/* see 80-VA736-2 Rev C pp 695-751
**
@@ -34,10 +35,10 @@
** macros.
*/
-#define MSM_GPIO1_REG(off) (MSM_GPIO1_BASE + (off))
-#define MSM_GPIO2_REG(off) (MSM_GPIO2_BASE + 0x400 + (off))
-#define MSM_GPIO1_SHADOW_REG(off) (MSM_GPIO1_BASE + 0x800 + (off))
-#define MSM_GPIO2_SHADOW_REG(off) (MSM_GPIO2_BASE + 0xC00 + (off))
+#define MSM_GPIO1_REG(off) (off)
+#define MSM_GPIO2_REG(off) (off)
+#define MSM_GPIO1_SHADOW_REG(off) (off)
+#define MSM_GPIO2_SHADOW_REG(off) (off)
/*
* MSM7X00 registers
@@ -276,16 +277,14 @@
#define MSM_GPIO_BANK(soc, bank, first, last) \
{ \
- .regs = { \
- .out = soc##_GPIO_OUT_##bank, \
- .in = soc##_GPIO_IN_##bank, \
- .int_status = soc##_GPIO_INT_STATUS_##bank, \
- .int_clear = soc##_GPIO_INT_CLEAR_##bank, \
- .int_en = soc##_GPIO_INT_EN_##bank, \
- .int_edge = soc##_GPIO_INT_EDGE_##bank, \
- .int_pos = soc##_GPIO_INT_POS_##bank, \
- .oe = soc##_GPIO_OE_##bank, \
- }, \
+ .regs[MSM_GPIO_OUT] = soc##_GPIO_OUT_##bank, \
+ .regs[MSM_GPIO_IN] = soc##_GPIO_IN_##bank, \
+ .regs[MSM_GPIO_INT_STATUS] = soc##_GPIO_INT_STATUS_##bank, \
+ .regs[MSM_GPIO_INT_CLEAR] = soc##_GPIO_INT_CLEAR_##bank, \
+ .regs[MSM_GPIO_INT_EN] = soc##_GPIO_INT_EN_##bank, \
+ .regs[MSM_GPIO_INT_EDGE] = soc##_GPIO_INT_EDGE_##bank, \
+ .regs[MSM_GPIO_INT_POS] = soc##_GPIO_INT_POS_##bank, \
+ .regs[MSM_GPIO_OE] = soc##_GPIO_OE_##bank, \
.chip = { \
.base = (first), \
.ngpio = (last) - (first) + 1, \
@@ -301,39 +300,57 @@
#define MSM_GPIO_BROKEN_INT_CLEAR 1
-struct msm_gpio_regs {
- void __iomem *out;
- void __iomem *in;
- void __iomem *int_status;
- void __iomem *int_clear;
- void __iomem *int_en;
- void __iomem *int_edge;
- void __iomem *int_pos;
- void __iomem *oe;
+enum msm_gpio_reg {
+ MSM_GPIO_IN,
+ MSM_GPIO_OUT,
+ MSM_GPIO_INT_STATUS,
+ MSM_GPIO_INT_CLEAR,
+ MSM_GPIO_INT_EN,
+ MSM_GPIO_INT_EDGE,
+ MSM_GPIO_INT_POS,
+ MSM_GPIO_OE,
+ MSM_GPIO_REG_NR
};
struct msm_gpio_chip {
spinlock_t lock;
struct gpio_chip chip;
- struct msm_gpio_regs regs;
+ unsigned long regs[MSM_GPIO_REG_NR];
#if MSM_GPIO_BROKEN_INT_CLEAR
unsigned int_status_copy;
#endif
unsigned int both_edge_detect;
unsigned int int_enable[2]; /* 0: awake, 1: sleep */
+ void __iomem *base;
+};
+
+struct msm_gpio_initdata {
+ struct msm_gpio_chip *chips;
+ int count;
};
+static void msm_gpio_writel(struct msm_gpio_chip *chip, u32 val,
+ enum msm_gpio_reg reg)
+{
+ writel(val, chip->base + chip->regs[reg]);
+}
+
+static u32 msm_gpio_readl(struct msm_gpio_chip *chip, enum msm_gpio_reg reg)
+{
+ return readl(chip->base + chip->regs[reg]);
+}
+
static int msm_gpio_write(struct msm_gpio_chip *msm_chip,
unsigned offset, unsigned on)
{
unsigned mask = BIT(offset);
unsigned val;
- val = readl(msm_chip->regs.out);
+ val = msm_gpio_readl(msm_chip, MSM_GPIO_OUT);
if (on)
- writel(val | mask, msm_chip->regs.out);
+ msm_gpio_writel(msm_chip, val | mask, MSM_GPIO_OUT);
else
- writel(val & ~mask, msm_chip->regs.out);
+ msm_gpio_writel(msm_chip, val & ~mask, MSM_GPIO_OUT);
return 0;
}
@@ -342,13 +359,13 @@ static void msm_gpio_update_both_edge_detect(struct msm_gpio_chip *msm_chip)
int loop_limit = 100;
unsigned pol, val, val2, intstat;
do {
- val = readl(msm_chip->regs.in);
- pol = readl(msm_chip->regs.int_pos);
+ val = msm_gpio_readl(msm_chip, MSM_GPIO_IN);
+ pol = msm_gpio_readl(msm_chip, MSM_GPIO_INT_POS);
pol = (pol & ~msm_chip->both_edge_detect) |
(~val & msm_chip->both_edge_detect);
- writel(pol, msm_chip->regs.int_pos);
- intstat = readl(msm_chip->regs.int_status);
- val2 = readl(msm_chip->regs.in);
+ msm_gpio_writel(msm_chip, pol, MSM_GPIO_INT_POS);
+ intstat = msm_gpio_readl(msm_chip, MSM_GPIO_INT_STATUS);
+ val2 = msm_gpio_readl(msm_chip, MSM_GPIO_IN);
if (((val ^ val2) & msm_chip->both_edge_detect & ~intstat) == 0)
return;
} while (loop_limit-- > 0);
@@ -365,10 +382,11 @@ static int msm_gpio_clear_detect_status(struct msm_gpio_chip *msm_chip,
/* Save interrupts that already triggered before we lose them. */
/* Any interrupt that triggers between the read of int_status */
/* and the write to int_clear will still be lost though. */
- msm_chip->int_status_copy |= readl(msm_chip->regs.int_status);
+ msm_chip->int_status_copy |=
+ msm_gpio_readl(msm_chip, MSM_GPIO_INT_STATUS);
msm_chip->int_status_copy &= ~bit;
#endif
- writel(bit, msm_chip->regs.int_clear);
+ msm_gpio_writel(msm_chip, bit, MSM_GPIO_INT_CLEAR);
msm_gpio_update_both_edge_detect(msm_chip);
return 0;
}
@@ -377,10 +395,12 @@ static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
struct msm_gpio_chip *msm_chip;
unsigned long irq_flags;
+ u32 val;
msm_chip = container_of(chip, struct msm_gpio_chip, chip);
spin_lock_irqsave(&msm_chip->lock, irq_flags);
- writel(readl(msm_chip->regs.oe) & ~BIT(offset), msm_chip->regs.oe);
+ val = msm_gpio_readl(msm_chip, MSM_GPIO_OE) & ~BIT(offset);
+ msm_gpio_writel(msm_chip, val, MSM_GPIO_OE);
spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
return 0;
}
@@ -390,11 +410,13 @@ msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value)
{
struct msm_gpio_chip *msm_chip;
unsigned long irq_flags;
+ u32 val;
msm_chip = container_of(chip, struct msm_gpio_chip, chip);
spin_lock_irqsave(&msm_chip->lock, irq_flags);
msm_gpio_write(msm_chip, offset, value);
- writel(readl(msm_chip->regs.oe) | BIT(offset), msm_chip->regs.oe);
+ val = msm_gpio_readl(msm_chip, MSM_GPIO_OE) | BIT(offset);
+ msm_gpio_writel(msm_chip, val, MSM_GPIO_OE);
spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
return 0;
}
@@ -404,7 +426,7 @@ static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
struct msm_gpio_chip *msm_chip;
msm_chip = container_of(chip, struct msm_gpio_chip, chip);
- return (readl(msm_chip->regs.in) & (1U << offset)) ? 1 : 0;
+ return (msm_gpio_readl(msm_chip, MSM_GPIO_IN) & (1U << offset)) ? 1 : 0;
}
static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -450,6 +472,11 @@ static struct msm_gpio_chip msm_gpio_chips_msm7x01[] = {
MSM_GPIO_BANK(MSM7X00, 5, 107, 121),
};
+static struct msm_gpio_initdata msm_gpio_7x01_init = {
+ .chips = msm_gpio_chips_msm7x01,
+ .count = ARRAY_SIZE(msm_gpio_chips_msm7x01),
+};
+
static struct msm_gpio_chip msm_gpio_chips_msm7x30[] = {
MSM_GPIO_BANK(MSM7X30, 0, 0, 15),
MSM_GPIO_BANK(MSM7X30, 1, 16, 43),
@@ -461,6 +488,11 @@ static struct msm_gpio_chip msm_gpio_chips_msm7x30[] = {
MSM_GPIO_BANK(MSM7X30, 7, 151, 181),
};
+static struct msm_gpio_initdata msm_gpio_7x30_init = {
+ .chips = msm_gpio_chips_msm7x30,
+ .count = ARRAY_SIZE(msm_gpio_chips_msm7x30),
+};
+
static struct msm_gpio_chip msm_gpio_chips_qsd8x50[] = {
MSM_GPIO_BANK(QSD8X50, 0, 0, 15),
MSM_GPIO_BANK(QSD8X50, 1, 16, 42),
@@ -472,6 +504,11 @@ static struct msm_gpio_chip msm_gpio_chips_qsd8x50[] = {
MSM_GPIO_BANK(QSD8X50, 7, 153, 164),
};
+static struct msm_gpio_initdata msm_gpio_8x50_init = {
+ .chips = msm_gpio_chips_qsd8x50,
+ .count = ARRAY_SIZE(msm_gpio_chips_qsd8x50),
+};
+
static void msm_gpio_irq_ack(struct irq_data *d)
{
unsigned long irq_flags;
@@ -490,10 +527,10 @@ static void msm_gpio_irq_mask(struct irq_data *d)
spin_lock_irqsave(&msm_chip->lock, irq_flags);
/* level triggered interrupts are also latched */
- if (!(readl(msm_chip->regs.int_edge) & BIT(offset)))
+ if (!(msm_gpio_readl(msm_chip, MSM_GPIO_INT_EDGE) & BIT(offset)))
msm_gpio_clear_detect_status(msm_chip, offset);
msm_chip->int_enable[0] &= ~BIT(offset);
- writel(msm_chip->int_enable[0], msm_chip->regs.int_en);
+ msm_gpio_writel(msm_chip, msm_chip->int_enable[0], MSM_GPIO_INT_EN);
spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
}
@@ -505,10 +542,10 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
spin_lock_irqsave(&msm_chip->lock, irq_flags);
/* level triggered interrupts are also latched */
- if (!(readl(msm_chip->regs.int_edge) & BIT(offset)))
+ if (!(msm_gpio_readl(msm_chip, MSM_GPIO_INT_EDGE) & BIT(offset)))
msm_gpio_clear_detect_status(msm_chip, offset);
msm_chip->int_enable[0] |= BIT(offset);
- writel(msm_chip->int_enable[0], msm_chip->regs.int_en);
+ msm_gpio_writel(msm_chip, msm_chip->int_enable[0], MSM_GPIO_INT_EN);
spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
}
@@ -537,12 +574,12 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
unsigned val, mask = BIT(offset);
spin_lock_irqsave(&msm_chip->lock, irq_flags);
- val = readl(msm_chip->regs.int_edge);
+ val = msm_gpio_readl(msm_chip, MSM_GPIO_INT_EDGE);
if (flow_type & IRQ_TYPE_EDGE_BOTH) {
- writel(val | mask, msm_chip->regs.int_edge);
+ msm_gpio_writel(msm_chip, val | mask, MSM_GPIO_INT_EDGE);
__irq_set_handler_locked(d->irq, handle_edge_irq);
} else {
- writel(val & ~mask, msm_chip->regs.int_edge);
+ msm_gpio_writel(msm_chip, val & ~mask, MSM_GPIO_INT_EDGE);
__irq_set_handler_locked(d->irq, handle_level_irq);
}
if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
@@ -550,11 +587,12 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
msm_gpio_update_both_edge_detect(msm_chip);
} else {
msm_chip->both_edge_detect &= ~mask;
- val = readl(msm_chip->regs.int_pos);
+ val = msm_gpio_readl(msm_chip, MSM_GPIO_INT_POS);
if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_HIGH))
- writel(val | mask, msm_chip->regs.int_pos);
+ val |= mask;
else
- writel(val & ~mask, msm_chip->regs.int_pos);
+ val &= ~mask;
+ msm_gpio_writel(msm_chip, val, MSM_GPIO_INT_POS);
}
spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
return 0;
@@ -567,7 +605,7 @@ static void msm_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
for (i = 0; i < msm_gpio_count; i++) {
struct msm_gpio_chip *msm_chip = &msm_gpio_chips[i];
- val = readl(msm_chip->regs.int_status);
+ val = msm_gpio_readl(msm_chip, MSM_GPIO_INT_STATUS);
val &= msm_chip->int_enable[0];
while (val) {
mask = val & -val;
@@ -592,22 +630,36 @@ static struct irq_chip msm_gpio_irq_chip = {
.irq_set_type = msm_gpio_irq_set_type,
};
-static int __init msm_init_gpio(void)
+static int __devinit gpio_msm_v1_probe(struct platform_device *pdev)
{
int i, j = 0;
-
- if (cpu_is_msm7x01()) {
- msm_gpio_chips = msm_gpio_chips_msm7x01;
- msm_gpio_count = ARRAY_SIZE(msm_gpio_chips_msm7x01);
- } else if (cpu_is_msm7x30()) {
- msm_gpio_chips = msm_gpio_chips_msm7x30;
- msm_gpio_count = ARRAY_SIZE(msm_gpio_chips_msm7x30);
- } else if (cpu_is_qsd8x50()) {
- msm_gpio_chips = msm_gpio_chips_qsd8x50;
- msm_gpio_count = ARRAY_SIZE(msm_gpio_chips_qsd8x50);
- } else {
- return 0;
- }
+ const struct platform_device_id *dev_id = platform_get_device_id(pdev);
+ struct msm_gpio_initdata *data;
+ int irq1, irq2;
+ struct resource *res;
+ void __iomem *base1, __iomem *base2;
+
+ data = (struct msm_gpio_initdata *)dev_id->driver_data;
+ msm_gpio_chips = data->chips;
+ msm_gpio_count = data->count;
+
+ irq1 = platform_get_irq(pdev, 0);
+ if (irq1 < 0)
+ return irq1;
+
+ irq2 = platform_get_irq(pdev, 1);
+ if (irq2 < 0)
+ return irq2;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base1 = devm_request_and_ioremap(&pdev->dev, res);
+ if (!base1)
+ return -EADDRNOTAVAIL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ base2 = devm_request_and_ioremap(&pdev->dev, res);
+ if (!base2)
+ return -EADDRNOTAVAIL;
for (i = FIRST_GPIO_IRQ; i < FIRST_GPIO_IRQ + NR_GPIO_IRQS; i++) {
if (i - FIRST_GPIO_IRQ >=
@@ -621,16 +673,42 @@ static int __init msm_init_gpio(void)
}
for (i = 0; i < msm_gpio_count; i++) {
+ if (i == 1)
+ msm_gpio_chips[i].base = base2;
+ else
+ msm_gpio_chips[i].base = base1;
spin_lock_init(&msm_gpio_chips[i].lock);
- writel(0, msm_gpio_chips[i].regs.int_en);
+ msm_gpio_writel(&msm_gpio_chips[i], 0, MSM_GPIO_INT_EN);
gpiochip_add(&msm_gpio_chips[i].chip);
}
- irq_set_chained_handler(INT_GPIO_GROUP1, msm_gpio_irq_handler);
- irq_set_chained_handler(INT_GPIO_GROUP2, msm_gpio_irq_handler);
- irq_set_irq_wake(INT_GPIO_GROUP1, 1);
- irq_set_irq_wake(INT_GPIO_GROUP2, 2);
+ irq_set_chained_handler(irq1, msm_gpio_irq_handler);
+ irq_set_chained_handler(irq2, msm_gpio_irq_handler);
+ irq_set_irq_wake(irq1, 1);
+ irq_set_irq_wake(irq2, 2);
return 0;
}
-postcore_initcall(msm_init_gpio);
+static struct platform_device_id gpio_msm_v1_device_ids[] = {
+ { "gpio-msm-7201", (unsigned long)&msm_gpio_7x01_init },
+ { "gpio-msm-7x30", (unsigned long)&msm_gpio_7x30_init },
+ { "gpio-msm-8x50", (unsigned long)&msm_gpio_8x50_init },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, gpio_msm_v1_device_ids);
+
+static struct platform_driver gpio_msm_v1_driver = {
+ .driver = {
+ .name = "gpio-msm-v1",
+ .owner = THIS_MODULE,
+ },
+ .probe = gpio_msm_v1_probe,
+ .id_table = gpio_msm_v1_device_ids,
+};
+
+static int __init gpio_msm_v1_init(void)
+{
+ return platform_driver_register(&gpio_msm_v1_driver);
+}
+postcore_initcall(gpio_msm_v1_init);
+MODULE_LICENSE("GPL v2");
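
As an illustrative aside (not part of the patch): the gpio-msm-v1 conversion above
replaces the per-register pointer struct with an enum-indexed table of offsets
applied to one ioremapped base. The standalone sketch below only models that lookup
pattern; the register block is a plain array instead of real MMIO, and every name
in it is invented for the example.

#include <stdint.h>
#include <stdio.h>

/* Offsets into a register block, indexed by symbolic name. */
enum demo_reg { DEMO_IN, DEMO_OUT, DEMO_OE, DEMO_REG_NR };

struct demo_chip {
	uint32_t *base;                  /* stands in for the driver's void __iomem *base */
	unsigned int regs[DEMO_REG_NR];  /* per-bank word indices, filled at init time */
};

static uint32_t demo_readl(struct demo_chip *c, enum demo_reg r)
{
	return c->base[c->regs[r]];      /* readl(chip->base + chip->regs[reg]) in the driver */
}

static void demo_writel(struct demo_chip *c, uint32_t v, enum demo_reg r)
{
	c->base[c->regs[r]] = v;         /* writel(val, chip->base + chip->regs[reg]) in the driver */
}

int main(void)
{
	uint32_t fake_mmio[16] = { 0 };
	struct demo_chip chip = {
		.base = fake_mmio,
		.regs = { [DEMO_IN] = 0, [DEMO_OUT] = 1, [DEMO_OE] = 2 },
	};

	/* Read-modify-write of the OUT register, as msm_gpio_write() does. */
	demo_writel(&chip, demo_readl(&chip, DEMO_OUT) | 1u, DEMO_OUT);
	printf("OUT=%#x\n", (unsigned)demo_readl(&chip, DEMO_OUT));
	return 0;
}
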
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
index 55a7e77..dd2edde 100644
--- a/drivers/gpio/gpio-msm-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -23,13 +23,12 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
-#include <asm/mach/irq.h>
-
#include <mach/msm_gpiomux.h>
#include <mach/msm_iomap.h>
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 61a6fde..bf69a7e 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -117,7 +117,7 @@ static inline void __iomem *mvebu_gpioreg_edge_cause(struct mvebu_gpio_chip *mvc
{
int cpu;
- switch(mvchip->soc_variant) {
+ switch (mvchip->soc_variant) {
case MVEBU_GPIO_SOC_VARIANT_ORION:
case MVEBU_GPIO_SOC_VARIANT_MV78200:
return mvchip->membase + GPIO_EDGE_CAUSE_OFF;
@@ -133,7 +133,7 @@ static inline void __iomem *mvebu_gpioreg_edge_mask(struct mvebu_gpio_chip *mvch
{
int cpu;
- switch(mvchip->soc_variant) {
+ switch (mvchip->soc_variant) {
case MVEBU_GPIO_SOC_VARIANT_ORION:
return mvchip->membase + GPIO_EDGE_MASK_OFF;
case MVEBU_GPIO_SOC_VARIANT_MV78200:
@@ -151,7 +151,7 @@ static void __iomem *mvebu_gpioreg_level_mask(struct mvebu_gpio_chip *mvchip)
{
int cpu;
- switch(mvchip->soc_variant) {
+ switch (mvchip->soc_variant) {
case MVEBU_GPIO_SOC_VARIANT_ORION:
return mvchip->membase + GPIO_LEVEL_MASK_OFF;
case MVEBU_GPIO_SOC_VARIANT_MV78200:
@@ -401,7 +401,7 @@ static int mvebu_gpio_irq_set_type(struct irq_data *d, unsigned int type)
/*
* Configure interrupt polarity.
*/
- switch(type) {
+ switch (type) {
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_LEVEL_HIGH:
u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
@@ -470,18 +470,76 @@ static void mvebu_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
}
}
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+static void mvebu_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ struct mvebu_gpio_chip *mvchip =
+ container_of(chip, struct mvebu_gpio_chip, chip);
+ u32 out, io_conf, blink, in_pol, data_in, cause, edg_msk, lvl_msk;
+ int i;
+
+ out = readl_relaxed(mvebu_gpioreg_out(mvchip));
+ io_conf = readl_relaxed(mvebu_gpioreg_io_conf(mvchip));
+ blink = readl_relaxed(mvebu_gpioreg_blink(mvchip));
+ in_pol = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
+ data_in = readl_relaxed(mvebu_gpioreg_data_in(mvchip));
+ cause = readl_relaxed(mvebu_gpioreg_edge_cause(mvchip));
+ edg_msk = readl_relaxed(mvebu_gpioreg_edge_mask(mvchip));
+ lvl_msk = readl_relaxed(mvebu_gpioreg_level_mask(mvchip));
+
+ for (i = 0; i < chip->ngpio; i++) {
+ const char *label;
+ u32 msk;
+ bool is_out;
+
+ label = gpiochip_is_requested(chip, i);
+ if (!label)
+ continue;
+
+ msk = 1 << i;
+ is_out = !(io_conf & msk);
+
+ seq_printf(s, " gpio-%-3d (%-20.20s)", chip->base + i, label);
+
+ if (is_out) {
+ seq_printf(s, " out %s %s\n",
+ out & msk ? "hi" : "lo",
+ blink & msk ? "(blink )" : "");
+ continue;
+ }
+
+ seq_printf(s, " in %s (act %s) - IRQ",
+ (data_in ^ in_pol) & msk ? "hi" : "lo",
+ in_pol & msk ? "lo" : "hi");
+ if (!((edg_msk | lvl_msk) & msk)) {
+ seq_printf(s, " disabled\n");
+ continue;
+ }
+ if (edg_msk & msk)
+ seq_printf(s, " edge ");
+ if (lvl_msk & msk)
+ seq_printf(s, " level");
+ seq_printf(s, " (%s)\n", cause & msk ? "pending" : "clear ");
+ }
+}
+#else
+#define mvebu_gpio_dbg_show NULL
+#endif
+
static struct of_device_id mvebu_gpio_of_match[] = {
{
.compatible = "marvell,orion-gpio",
- .data = (void*) MVEBU_GPIO_SOC_VARIANT_ORION,
+ .data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION,
},
{
.compatible = "marvell,mv78200-gpio",
- .data = (void*) MVEBU_GPIO_SOC_VARIANT_MV78200,
+ .data = (void *) MVEBU_GPIO_SOC_VARIANT_MV78200,
},
{
.compatible = "marvell,armadaxp-gpio",
- .data = (void*) MVEBU_GPIO_SOC_VARIANT_ARMADAXP,
+ .data = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP,
},
{
/* sentinel */
@@ -509,13 +567,13 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (! res) {
+ if (!res) {
dev_err(&pdev->dev, "Cannot get memory resource\n");
return -ENODEV;
}
mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip), GFP_KERNEL);
- if (! mvchip){
+ if (!mvchip) {
dev_err(&pdev->dev, "Cannot allocate memory\n");
return -ENOMEM;
}
@@ -550,6 +608,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->chip.ngpio = ngpios;
mvchip->chip.can_sleep = 0;
mvchip->chip.of_node = np;
+ mvchip->chip.dbg_show = mvebu_gpio_dbg_show;
spin_lock_init(&mvchip->lock);
mvchip->membase = devm_ioremap_resource(&pdev->dev, res);
@@ -560,21 +619,21 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
* per-CPU registers */
if (soc_variant == MVEBU_GPIO_SOC_VARIANT_ARMADAXP) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (! res) {
+ if (!res) {
dev_err(&pdev->dev, "Cannot get memory resource\n");
return -ENODEV;
}
mvchip->percpu_membase = devm_ioremap_resource(&pdev->dev,
res);
- if (IS_ERR(mvchip->percpu_membase))
+ if (IS_ERR(mvchip->percpu_membase))
return PTR_ERR(mvchip->percpu_membase);
}
/*
* Mask and clear GPIO interrupts.
*/
- switch(soc_variant) {
+ switch (soc_variant) {
case MVEBU_GPIO_SOC_VARIANT_ORION:
writel_relaxed(0, mvchip->membase + GPIO_EDGE_CAUSE_OFF);
writel_relaxed(0, mvchip->membase + GPIO_EDGE_MASK_OFF);
@@ -632,7 +691,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
gc = irq_alloc_generic_chip("mvebu_gpio_irq", 2, mvchip->irqbase,
mvchip->membase, handle_level_irq);
- if (! gc) {
+ if (!gc) {
dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n");
return -ENOMEM;
}
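
As background for the mvebu change above (illustration only, not from the patch):
dbg_show is the optional gpio_chip callback that gpiolib invokes when producing the
debugfs gpio listing; when it is left NULL, gpiolib falls back to its generic dump.
A minimal, hypothetical wiring of the hook could look like the sketch below; the
demo_* names are invented.

#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>

/* Print one line per requested pin when the debugfs gpio file is read. */
static void demo_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
	int i;

	for (i = 0; i < chip->ngpio; i++) {
		const char *label = gpiochip_is_requested(chip, i);

		if (label)
			seq_printf(s, " gpio-%-3d (%s)\n", chip->base + i, label);
	}
}
#else
#define demo_gpio_dbg_show NULL		/* gpiolib then uses its generic dump */
#endif

/* in probe():  chip->dbg_show = demo_gpio_dbg_show; */
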
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 7877335..7176743 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -24,6 +24,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -32,7 +33,6 @@
#include <linux/of_device.h>
#include <linux/module.h>
#include <asm-generic/bug.h>
-#include <asm/mach/irq.h>
enum mxc_gpio_hwtype {
IMX1_GPIO, /* runs on i.mx1 */
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 159f5c5..2050891 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -25,11 +25,10 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/gpio.h>
#include <linux/platform_data/gpio-omap.h>
-#include <asm/mach/irq.h>
-
#define OFF_MODE 1
static LIST_HEAD(omap_gpio_list);
@@ -53,7 +52,6 @@ struct gpio_bank {
struct list_head node;
void __iomem *base;
u16 irq;
- int irq_base;
struct irq_domain *domain;
u32 non_wakeup_gpios;
u32 enabled_non_wakeup_gpios;
@@ -89,7 +87,14 @@ struct gpio_bank {
static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
{
- return gpio_irq - bank->irq_base + bank->chip.base;
+ return bank->chip.base + gpio_irq;
+}
+
+static int omap_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
+
+ return irq_find_mapping(bank->domain, offset);
}
static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
@@ -421,13 +426,16 @@ static int gpio_irq_type(struct irq_data *d, unsigned type)
int retval;
unsigned long flags;
+ if (WARN_ON(!bank->mod_usage))
+ return -EINVAL;
+
#ifdef CONFIG_ARCH_OMAP1
if (d->irq > IH_MPUIO_BASE)
gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
#endif
if (!gpio)
- gpio = irq_to_gpio(bank, d->irq);
+ gpio = irq_to_gpio(bank, d->hwirq);
if (type & ~IRQ_TYPE_SENSE_MASK)
return -EINVAL;
@@ -580,7 +588,7 @@ static void _reset_gpio(struct gpio_bank *bank, int gpio)
static int gpio_wake_enable(struct irq_data *d, unsigned int enable)
{
struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
- unsigned int gpio = irq_to_gpio(bank, d->irq);
+ unsigned int gpio = irq_to_gpio(bank, d->hwirq);
return _set_gpio_wakeup(bank, gpio, enable);
}
@@ -680,7 +688,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
void __iomem *isr_reg = NULL;
u32 isr;
- unsigned int gpio_irq, gpio_index;
+ unsigned int bit;
struct gpio_bank *bank;
int unmasked = 0;
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -694,7 +702,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
if (WARN_ON(!isr_reg))
goto exit;
- while(1) {
+ while (1) {
u32 isr_saved, level_mask = 0;
u32 enabled;
@@ -721,14 +729,9 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
if (!isr)
break;
- gpio_irq = bank->irq_base;
- for (; isr != 0; isr >>= 1, gpio_irq++) {
- int gpio = irq_to_gpio(bank, gpio_irq);
-
- if (!(isr & 1))
- continue;
-
- gpio_index = GPIO_INDEX(bank, gpio);
+ while (isr) {
+ bit = __ffs(isr);
+ isr &= ~(1 << bit);
/*
* Some chips can't respond to both rising and falling
@@ -737,10 +740,10 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
* to respond to the IRQ for the opposite direction.
* This will be indicated in the bank toggle_mask.
*/
- if (bank->toggle_mask & (1 << gpio_index))
- _toggle_gpio_edge_triggering(bank, gpio_index);
+ if (bank->toggle_mask & (1 << bit))
+ _toggle_gpio_edge_triggering(bank, bit);
- generic_handle_irq(gpio_irq);
+ generic_handle_irq(irq_find_mapping(bank->domain, bit));
}
}
/* if bank has any level sensitive GPIO pin interrupt
@@ -756,7 +759,7 @@ exit:
static void gpio_irq_shutdown(struct irq_data *d)
{
struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
- unsigned int gpio = irq_to_gpio(bank, d->irq);
+ unsigned int gpio = irq_to_gpio(bank, d->hwirq);
unsigned long flags;
spin_lock_irqsave(&bank->lock, flags);
@@ -767,7 +770,7 @@ static void gpio_irq_shutdown(struct irq_data *d)
static void gpio_ack_irq(struct irq_data *d)
{
struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
- unsigned int gpio = irq_to_gpio(bank, d->irq);
+ unsigned int gpio = irq_to_gpio(bank, d->hwirq);
_clear_gpio_irqstatus(bank, gpio);
}
@@ -775,7 +778,7 @@ static void gpio_ack_irq(struct irq_data *d)
static void gpio_mask_irq(struct irq_data *d)
{
struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
- unsigned int gpio = irq_to_gpio(bank, d->irq);
+ unsigned int gpio = irq_to_gpio(bank, d->hwirq);
unsigned long flags;
spin_lock_irqsave(&bank->lock, flags);
@@ -787,7 +790,7 @@ static void gpio_mask_irq(struct irq_data *d)
static void gpio_unmask_irq(struct irq_data *d)
{
struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
- unsigned int gpio = irq_to_gpio(bank, d->irq);
+ unsigned int gpio = irq_to_gpio(bank, d->hwirq);
unsigned int irq_mask = GPIO_BIT(bank, gpio);
u32 trigger = irqd_get_trigger_type(d);
unsigned long flags;
@@ -953,14 +956,6 @@ static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
spin_unlock_irqrestore(&bank->lock, flags);
}
-static int gpio_2irq(struct gpio_chip *chip, unsigned offset)
-{
- struct gpio_bank *bank;
-
- bank = container_of(chip, struct gpio_bank, chip);
- return bank->irq_base + offset;
-}
-
/*---------------------------------------------------------------------*/
static void __init omap_gpio_show_rev(struct gpio_bank *bank)
@@ -1057,7 +1052,7 @@ static void omap_gpio_chip_init(struct gpio_bank *bank)
bank->chip.direction_output = gpio_output;
bank->chip.set_debounce = gpio_debounce;
bank->chip.set = gpio_set;
- bank->chip.to_irq = gpio_2irq;
+ bank->chip.to_irq = omap_gpio_to_irq;
if (bank->is_mpuio) {
bank->chip.label = "mpuio";
if (bank->regs->wkup_en)
@@ -1072,15 +1067,16 @@ static void omap_gpio_chip_init(struct gpio_bank *bank)
gpiochip_add(&bank->chip);
- for (j = bank->irq_base; j < bank->irq_base + bank->width; j++) {
- irq_set_lockdep_class(j, &gpio_lock_class);
- irq_set_chip_data(j, bank);
+ for (j = 0; j < bank->width; j++) {
+ int irq = irq_create_mapping(bank->domain, j);
+ irq_set_lockdep_class(irq, &gpio_lock_class);
+ irq_set_chip_data(irq, bank);
if (bank->is_mpuio) {
- omap_mpuio_alloc_gc(bank, j, bank->width);
+ omap_mpuio_alloc_gc(bank, irq, bank->width);
} else {
- irq_set_chip(j, &gpio_irq_chip);
- irq_set_handler(j, handle_simple_irq);
- set_irq_flags(j, IRQF_VALID);
+ irq_set_chip_and_handler(irq, &gpio_irq_chip,
+ handle_simple_irq);
+ set_irq_flags(irq, IRQF_VALID);
}
}
irq_set_chained_handler(bank->irq, gpio_irq_handler);
@@ -1097,7 +1093,6 @@ static int omap_gpio_probe(struct platform_device *pdev)
const struct omap_gpio_platform_data *pdata;
struct resource *res;
struct gpio_bank *bank;
- int ret = 0;
match = of_match_device(of_match_ptr(omap_gpio_match), dev);
@@ -1124,20 +1119,22 @@ static int omap_gpio_probe(struct platform_device *pdev)
bank->width = pdata->bank_width;
bank->is_mpuio = pdata->is_mpuio;
bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
- bank->loses_context = pdata->loses_context;
bank->regs = pdata->regs;
#ifdef CONFIG_OF_GPIO
bank->chip.of_node = of_node_get(node);
#endif
-
- bank->irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
- if (bank->irq_base < 0) {
- dev_err(dev, "Couldn't allocate IRQ numbers\n");
- return -ENODEV;
+ if (node) {
+ if (!of_property_read_bool(node, "ti,gpio-always-on"))
+ bank->loses_context = true;
+ } else {
+ bank->loses_context = pdata->loses_context;
}
- bank->domain = irq_domain_add_legacy(node, bank->width, bank->irq_base,
- 0, &irq_domain_simple_ops, NULL);
+
+ bank->domain = irq_domain_add_linear(node, bank->width,
+ &irq_domain_simple_ops, NULL);
+ if (!bank->domain)
+ return -ENODEV;
if (bank->regs->set_dataout && bank->regs->clr_dataout)
bank->set_dataout = _set_gpio_dataout_reg;
@@ -1150,18 +1147,21 @@ static int omap_gpio_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (unlikely(!res)) {
dev_err(dev, "Invalid mem resource\n");
+ irq_domain_remove(bank->domain);
return -ENODEV;
}
if (!devm_request_mem_region(dev, res->start, resource_size(res),
pdev->name)) {
dev_err(dev, "Region already claimed\n");
+ irq_domain_remove(bank->domain);
return -EBUSY;
}
bank->base = devm_ioremap(dev, res->start, resource_size(res));
if (!bank->base) {
dev_err(dev, "Could not ioremap\n");
+ irq_domain_remove(bank->domain);
return -ENOMEM;
}
@@ -1185,7 +1185,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
list_add_tail(&bank->node, &omap_gpio_list);
- return ret;
+ return 0;
}
#ifdef CONFIG_ARCH_OMAP2PLUS
@@ -1263,9 +1263,9 @@ static int omap_gpio_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct gpio_bank *bank = platform_get_drvdata(pdev);
- int context_lost_cnt_after;
u32 l = 0, gen, gen0, gen1;
unsigned long flags;
+ int c;
spin_lock_irqsave(&bank->lock, flags);
_gpio_dbck_enable(bank);
@@ -1281,14 +1281,17 @@ static int omap_gpio_runtime_resume(struct device *dev)
__raw_writel(bank->context.risingdetect,
bank->base + bank->regs->risingdetect);
- if (bank->get_context_loss_count) {
- context_lost_cnt_after =
- bank->get_context_loss_count(bank->dev);
- if (context_lost_cnt_after != bank->context_loss_count) {
+ if (bank->loses_context) {
+ if (!bank->get_context_loss_count) {
omap_gpio_restore_context(bank);
} else {
- spin_unlock_irqrestore(&bank->lock, flags);
- return 0;
+ c = bank->get_context_loss_count(bank->dev);
+ if (c != bank->context_loss_count) {
+ omap_gpio_restore_context(bank);
+ } else {
+ spin_unlock_irqrestore(&bank->lock, flags);
+ return 0;
+ }
}
}
@@ -1297,10 +1300,6 @@ static int omap_gpio_runtime_resume(struct device *dev)
return 0;
}
- __raw_writel(bank->context.fallingdetect,
- bank->base + bank->regs->fallingdetect);
- __raw_writel(bank->context.risingdetect,
- bank->base + bank->regs->risingdetect);
l = __raw_readl(bank->base + bank->regs->datain);
/*
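
For context on the gpio-omap rework above (not taken from the patch itself): with
irq_domain_add_linear() the driver no longer reserves a contiguous irq_base; virqs
are created per hardware line with irq_create_mapping() and translated back with
irq_find_mapping(), which is why the chained handler now walks the ISR with __ffs()
and looks up the virq per bit. A minimal, hypothetical .map callback for such a
domain might look like this (demo_* names invented, dummy_irq_chip used only as a
placeholder):

#include <linux/irq.h>
#include <linux/irqdomain.h>

/* Called by the core the first time irq_create_mapping() is used for a hwirq. */
static int demo_irq_domain_map(struct irq_domain *d, unsigned int virq,
			       irq_hw_number_t hwirq)
{
	irq_set_chip_data(virq, d->host_data);
	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
	return 0;
}

static const struct irq_domain_ops demo_irq_domain_ops = {
	.map	= demo_irq_domain_map,
	.xlate	= irq_domain_xlate_onecell,
};
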
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 9391cf1..426c51d 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -146,8 +146,7 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
ret = i2c_smbus_write_i2c_block_data(chip->client,
(reg << bank_shift) | REG_ADDR_AI,
NBANK(chip), val);
- }
- else {
+ } else {
switch (chip->chip_type) {
case PCA953X_TYPE:
ret = i2c_smbus_write_word_data(chip->client,
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index a19b745..e8faf53 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -45,6 +45,7 @@ static const struct i2c_device_id pcf857x_id[] = {
{ "pca9675", 16 },
{ "max7328", 8 },
{ "max7329", 8 },
+ { "tca9554", 8 },
{ }
};
MODULE_DEVICE_TABLE(i2c, pcf857x_id);
@@ -267,7 +268,7 @@ static int pcf857x_probe(struct i2c_client *client,
}
/* Allocate, initialize, and register this gpio_chip. */
- gpio = kzalloc(sizeof *gpio, GFP_KERNEL);
+ gpio = devm_kzalloc(&client->dev, sizeof(*gpio), GFP_KERNEL);
if (!gpio)
return -ENOMEM;
@@ -390,7 +391,6 @@ fail:
if (pdata && client->irq)
pcf857x_irq_domain_cleanup(gpio);
- kfree(gpio);
return status;
}
@@ -415,9 +415,7 @@ static int pcf857x_remove(struct i2c_client *client)
pcf857x_irq_domain_cleanup(gpio);
status = gpiochip_remove(&gpio->chip);
- if (status == 0)
- kfree(gpio);
- else
+ if (status)
dev_err(&client->dev, "%s --> %d\n", "remove", status);
return status;
}
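
A general note on the pcf857x hunks above (illustration only): devm_kzalloc() ties
the allocation's lifetime to the device, which is why the kfree() calls can be
dropped from both the probe failure path and remove(). A hedged, minimal probe
skeleton using that pattern is sketched below; demo_priv and demo_probe are
invented names.

#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/slab.h>

struct demo_priv {
	struct gpio_chip chip;
};

static int demo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct demo_priv *priv;

	/* Freed automatically when the device unbinds or probe fails,
	 * so no kfree() in error paths or in remove(). */
	priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	i2c_set_clientdata(client, priv);
	return 0;
}
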
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index d7008df..6a4bd0d 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -16,6 +16,7 @@
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/gpio.h>
@@ -25,7 +26,6 @@
#include <linux/slab.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm.h>
-#include <asm/mach/irq.h>
#define GPIODIR 0x400
#define GPIOIS 0x404
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 8325f58..df2199d 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -26,8 +27,6 @@
#include <linux/syscore_ops.h>
#include <linux/slab.h>
-#include <asm/mach/irq.h>
-
#include <mach/irqs.h>
/*
@@ -86,20 +85,61 @@ struct pxa_gpio_chip {
#endif
};
-enum {
+enum pxa_gpio_type {
PXA25X_GPIO = 0,
PXA26X_GPIO,
PXA27X_GPIO,
PXA3XX_GPIO,
PXA93X_GPIO,
MMP_GPIO = 0x10,
+ MMP2_GPIO,
+};
+
+struct pxa_gpio_id {
+ enum pxa_gpio_type type;
+ int gpio_nums;
};
static DEFINE_SPINLOCK(gpio_lock);
static struct pxa_gpio_chip *pxa_gpio_chips;
-static int gpio_type;
+static enum pxa_gpio_type gpio_type;
static void __iomem *gpio_reg_base;
+static struct pxa_gpio_id pxa25x_id = {
+ .type = PXA25X_GPIO,
+ .gpio_nums = 85,
+};
+
+static struct pxa_gpio_id pxa26x_id = {
+ .type = PXA26X_GPIO,
+ .gpio_nums = 90,
+};
+
+static struct pxa_gpio_id pxa27x_id = {
+ .type = PXA27X_GPIO,
+ .gpio_nums = 121,
+};
+
+static struct pxa_gpio_id pxa3xx_id = {
+ .type = PXA3XX_GPIO,
+ .gpio_nums = 128,
+};
+
+static struct pxa_gpio_id pxa93x_id = {
+ .type = PXA93X_GPIO,
+ .gpio_nums = 192,
+};
+
+static struct pxa_gpio_id mmp_id = {
+ .type = MMP_GPIO,
+ .gpio_nums = 128,
+};
+
+static struct pxa_gpio_id mmp2_id = {
+ .type = MMP2_GPIO,
+ .gpio_nums = 192,
+};
+
#define for_each_gpio_chip(i, c) \
for (i = 0, c = &pxa_gpio_chips[0]; i <= pxa_last_gpio; i += 32, c++)
@@ -432,47 +472,39 @@ static struct irq_chip pxa_muxed_gpio_chip = {
.irq_set_wake = pxa_gpio_set_wake,
};
-static int pxa_gpio_nums(void)
+static int pxa_gpio_nums(struct platform_device *pdev)
{
+ const struct platform_device_id *id = platform_get_device_id(pdev);
+ struct pxa_gpio_id *pxa_id = (struct pxa_gpio_id *)id->driver_data;
int count = 0;
-#ifdef CONFIG_ARCH_PXA
- if (cpu_is_pxa25x()) {
-#ifdef CONFIG_CPU_PXA26x
- count = 89;
- gpio_type = PXA26X_GPIO;
-#elif defined(CONFIG_PXA25x)
- count = 84;
- gpio_type = PXA26X_GPIO;
-#endif /* CONFIG_CPU_PXA26x */
- } else if (cpu_is_pxa27x()) {
- count = 120;
- gpio_type = PXA27X_GPIO;
- } else if (cpu_is_pxa93x()) {
- count = 191;
- gpio_type = PXA93X_GPIO;
- } else if (cpu_is_pxa3xx()) {
- count = 127;
- gpio_type = PXA3XX_GPIO;
- }
-#endif /* CONFIG_ARCH_PXA */
-
-#ifdef CONFIG_ARCH_MMP
- if (cpu_is_pxa168() || cpu_is_pxa910()) {
- count = 127;
- gpio_type = MMP_GPIO;
- } else if (cpu_is_mmp2()) {
- count = 191;
- gpio_type = MMP_GPIO;
+ switch (pxa_id->type) {
+ case PXA25X_GPIO:
+ case PXA26X_GPIO:
+ case PXA27X_GPIO:
+ case PXA3XX_GPIO:
+ case PXA93X_GPIO:
+ case MMP_GPIO:
+ case MMP2_GPIO:
+ gpio_type = pxa_id->type;
+ count = pxa_id->gpio_nums - 1;
+ break;
+ default:
+ count = -EINVAL;
+ break;
}
-#endif /* CONFIG_ARCH_MMP */
return count;
}
#ifdef CONFIG_OF
static struct of_device_id pxa_gpio_dt_ids[] = {
- { .compatible = "mrvl,pxa-gpio" },
- { .compatible = "mrvl,mmp-gpio", .data = (void *)MMP_GPIO },
+ { .compatible = "intel,pxa25x-gpio", .data = &pxa25x_id, },
+ { .compatible = "intel,pxa26x-gpio", .data = &pxa26x_id, },
+ { .compatible = "intel,pxa27x-gpio", .data = &pxa27x_id, },
+ { .compatible = "intel,pxa3xx-gpio", .data = &pxa3xx_id, },
+ { .compatible = "marvell,pxa93x-gpio", .data = &pxa93x_id, },
+ { .compatible = "marvell,mmp-gpio", .data = &mmp_id, },
+ { .compatible = "marvell,mmp2-gpio", .data = &mmp2_id, },
{}
};
@@ -492,16 +524,18 @@ const struct irq_domain_ops pxa_irq_domain_ops = {
static int pxa_gpio_probe_dt(struct platform_device *pdev)
{
- int ret, nr_banks, nr_gpios;
+ int ret, nr_gpios;
struct device_node *prev, *next, *np = pdev->dev.of_node;
const struct of_device_id *of_id =
of_match_device(pxa_gpio_dt_ids, &pdev->dev);
+ const struct pxa_gpio_id *gpio_id;
- if (!of_id) {
+ if (!of_id || !of_id->data) {
dev_err(&pdev->dev, "Failed to find gpio controller\n");
return -EFAULT;
}
- gpio_type = (int)of_id->data;
+ gpio_id = of_id->data;
+ gpio_type = gpio_id->type;
next = of_get_next_child(np, NULL);
prev = next;
@@ -510,14 +544,8 @@ static int pxa_gpio_probe_dt(struct platform_device *pdev)
ret = -EINVAL;
goto err;
}
- for (nr_banks = 1; ; nr_banks++) {
- next = of_get_next_child(np, prev);
- if (!next)
- break;
- prev = next;
- }
of_node_put(prev);
- nr_gpios = nr_banks << 5;
+ nr_gpios = gpio_id->gpio_nums;
pxa_last_gpio = nr_gpios - 1;
irq_base = irq_alloc_descs(-1, 0, nr_gpios, 0);
@@ -546,19 +574,18 @@ static int pxa_gpio_probe(struct platform_device *pdev)
int gpio, irq, ret, use_of = 0;
int irq0 = 0, irq1 = 0, irq_mux, gpio_offset = 0;
- ret = pxa_gpio_probe_dt(pdev);
- if (ret < 0) {
- pxa_last_gpio = pxa_gpio_nums();
-#ifdef CONFIG_ARCH_PXA
- if (gpio_is_pxa_type(gpio_type))
- irq_base = PXA_GPIO_TO_IRQ(0);
-#endif
-#ifdef CONFIG_ARCH_MMP
- if (gpio_is_mmp_type(gpio_type))
- irq_base = MMP_GPIO_TO_IRQ(0);
-#endif
+ info = dev_get_platdata(&pdev->dev);
+ if (info) {
+ irq_base = info->irq_base;
+ if (irq_base <= 0)
+ return -EINVAL;
+ pxa_last_gpio = pxa_gpio_nums(pdev);
} else {
+ irq_base = 0;
use_of = 1;
+ ret = pxa_gpio_probe_dt(pdev);
+ if (ret < 0)
+ return -EINVAL;
}
if (!pxa_last_gpio)
@@ -595,14 +622,13 @@ static int pxa_gpio_probe(struct platform_device *pdev)
}
/* Initialize GPIO chips */
- info = dev_get_platdata(&pdev->dev);
pxa_init_gpio_chip(pxa_last_gpio, info ? info->gpio_set_wake : NULL);
/* clear all GPIO edge detects */
for_each_gpio_chip(gpio, c) {
writel_relaxed(0, c->regbase + GFER_OFFSET);
writel_relaxed(0, c->regbase + GRER_OFFSET);
- writel_relaxed(~0,c->regbase + GEDR_OFFSET);
+ writel_relaxed(~0, c->regbase + GEDR_OFFSET);
/* unmask GPIO edge detect for AP side */
if (gpio_is_mmp_type(gpio_type))
writel_relaxed(~0, c->regbase + ED_MASK_OFFSET);
@@ -635,12 +661,24 @@ static int pxa_gpio_probe(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id gpio_id_table[] = {
+ { "pxa25x-gpio", (unsigned long)&pxa25x_id },
+ { "pxa26x-gpio", (unsigned long)&pxa26x_id },
+ { "pxa27x-gpio", (unsigned long)&pxa27x_id },
+ { "pxa3xx-gpio", (unsigned long)&pxa3xx_id },
+ { "pxa93x-gpio", (unsigned long)&pxa93x_id },
+ { "mmp-gpio", (unsigned long)&mmp_id },
+ { "mmp2-gpio", (unsigned long)&mmp2_id },
+ { },
+};
+
static struct platform_driver pxa_gpio_driver = {
.probe = pxa_gpio_probe,
.driver = {
.name = "pxa-gpio",
.of_match_table = of_match_ptr(pxa_gpio_dt_ids),
},
+ .id_table = gpio_id_table,
};
static int __init pxa_gpio_init(void)
@@ -674,7 +712,7 @@ static void pxa_gpio_resume(void)
for_each_gpio_chip(gpio, c) {
/* restore level with set/clear */
- writel_relaxed( c->saved_gplr, c->regbase + GPSR_OFFSET);
+ writel_relaxed(c->saved_gplr, c->regbase + GPSR_OFFSET);
writel_relaxed(~c->saved_gplr, c->regbase + GPCR_OFFSET);
writel_relaxed(c->saved_grer, c->regbase + GRER_OFFSET);
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
new file mode 100644
index 0000000..b4ca450
--- /dev/null
+++ b/drivers/gpio/gpio-rcar.c
@@ -0,0 +1,396 @@
+/*
+ * Renesas R-Car GPIO Support
+ *
+ * Copyright (C) 2013 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_data/gpio-rcar.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+
+struct gpio_rcar_priv {
+ void __iomem *base;
+ spinlock_t lock;
+ struct gpio_rcar_config config;
+ struct platform_device *pdev;
+ struct gpio_chip gpio_chip;
+ struct irq_chip irq_chip;
+ struct irq_domain *irq_domain;
+};
+
+#define IOINTSEL 0x00
+#define INOUTSEL 0x04
+#define OUTDT 0x08
+#define INDT 0x0c
+#define INTDT 0x10
+#define INTCLR 0x14
+#define INTMSK 0x18
+#define MSKCLR 0x1c
+#define POSNEG 0x20
+#define EDGLEVEL 0x24
+#define FILONOFF 0x28
+
+static inline u32 gpio_rcar_read(struct gpio_rcar_priv *p, int offs)
+{
+ return ioread32(p->base + offs);
+}
+
+static inline void gpio_rcar_write(struct gpio_rcar_priv *p, int offs,
+ u32 value)
+{
+ iowrite32(value, p->base + offs);
+}
+
+static void gpio_rcar_modify_bit(struct gpio_rcar_priv *p, int offs,
+ int bit, bool value)
+{
+ u32 tmp = gpio_rcar_read(p, offs);
+
+ if (value)
+ tmp |= BIT(bit);
+ else
+ tmp &= ~BIT(bit);
+
+ gpio_rcar_write(p, offs, tmp);
+}
+
+static void gpio_rcar_irq_disable(struct irq_data *d)
+{
+ struct gpio_rcar_priv *p = irq_data_get_irq_chip_data(d);
+
+ gpio_rcar_write(p, INTMSK, ~BIT(irqd_to_hwirq(d)));
+}
+
+static void gpio_rcar_irq_enable(struct irq_data *d)
+{
+ struct gpio_rcar_priv *p = irq_data_get_irq_chip_data(d);
+
+ gpio_rcar_write(p, MSKCLR, BIT(irqd_to_hwirq(d)));
+}
+
+static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p,
+ unsigned int hwirq,
+ bool active_high_rising_edge,
+ bool level_trigger)
+{
+ unsigned long flags;
+
+ /* follow steps in the GPIO documentation for
+ * "Setting Edge-Sensitive Interrupt Input Mode" and
+ * "Setting Level-Sensitive Interrupt Input Mode"
+ */
+
+ spin_lock_irqsave(&p->lock, flags);
+
+ /* Configure positive or negative logic in POSNEG */
+ gpio_rcar_modify_bit(p, POSNEG, hwirq, !active_high_rising_edge);
+
+ /* Configure edge or level trigger in EDGLEVEL */
+ gpio_rcar_modify_bit(p, EDGLEVEL, hwirq, !level_trigger);
+
+ /* Select "Interrupt Input Mode" in IOINTSEL */
+ gpio_rcar_modify_bit(p, IOINTSEL, hwirq, true);
+
+ /* Write INTCLR in case of edge trigger */
+ if (!level_trigger)
+ gpio_rcar_write(p, INTCLR, BIT(hwirq));
+
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static int gpio_rcar_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_rcar_priv *p = irq_data_get_irq_chip_data(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+
+ dev_dbg(&p->pdev->dev, "sense irq = %d, type = %d\n", hwirq, type);
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ gpio_rcar_config_interrupt_input_mode(p, hwirq, true, true);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ gpio_rcar_config_interrupt_input_mode(p, hwirq, false, true);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ gpio_rcar_config_interrupt_input_mode(p, hwirq, true, false);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ gpio_rcar_config_interrupt_input_mode(p, hwirq, false, false);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
+{
+ struct gpio_rcar_priv *p = dev_id;
+ u32 pending;
+ unsigned int offset, irqs_handled = 0;
+
+ while ((pending = gpio_rcar_read(p, INTDT))) {
+ offset = __ffs(pending);
+ gpio_rcar_write(p, INTCLR, BIT(offset));
+ generic_handle_irq(irq_find_mapping(p->irq_domain, offset));
+ irqs_handled++;
+ }
+
+ return irqs_handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static inline struct gpio_rcar_priv *gpio_to_priv(struct gpio_chip *chip)
+{
+ return container_of(chip, struct gpio_rcar_priv, gpio_chip);
+}
+
+static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
+ unsigned int gpio,
+ bool output)
+{
+ struct gpio_rcar_priv *p = gpio_to_priv(chip);
+ unsigned long flags;
+
+ /* follow steps in the GPIO documentation for
+ * "Setting General Output Mode" and
+ * "Setting General Input Mode"
+ */
+
+ spin_lock_irqsave(&p->lock, flags);
+
+ /* Configure positive logic in POSNEG */
+ gpio_rcar_modify_bit(p, POSNEG, gpio, false);
+
+ /* Select "General Input/Output Mode" in IOINTSEL */
+ gpio_rcar_modify_bit(p, IOINTSEL, gpio, false);
+
+ /* Select Input Mode or Output Mode in INOUTSEL */
+ gpio_rcar_modify_bit(p, INOUTSEL, gpio, output);
+
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
+{
+ return pinctrl_request_gpio(chip->base + offset);
+}
+
+static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset)
+{
+ pinctrl_free_gpio(chip->base + offset);
+
+ /* Set the GPIO as an input to ensure that the next GPIO request won't
+ * drive the GPIO pin as an output.
+ */
+ gpio_rcar_config_general_input_output_mode(chip, offset, false);
+}
+
+static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ gpio_rcar_config_general_input_output_mode(chip, offset, false);
+ return 0;
+}
+
+static int gpio_rcar_get(struct gpio_chip *chip, unsigned offset)
+{
+ return (int)(gpio_rcar_read(gpio_to_priv(chip), INDT) & BIT(offset));
+}
+
+static void gpio_rcar_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct gpio_rcar_priv *p = gpio_to_priv(chip);
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+ gpio_rcar_modify_bit(p, OUTDT, offset, value);
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ /* write GPIO value to output before selecting output mode of pin */
+ gpio_rcar_set(chip, offset, value);
+ gpio_rcar_config_general_input_output_mode(chip, offset, true);
+ return 0;
+}
+
+static int gpio_rcar_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ return irq_create_mapping(gpio_to_priv(chip)->irq_domain, offset);
+}
+
+static int gpio_rcar_irq_domain_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct gpio_rcar_priv *p = h->host_data;
+
+ dev_dbg(&p->pdev->dev, "map hw irq = %d, virq = %d\n", (int)hw, virq);
+
+ irq_set_chip_data(virq, h->host_data);
+ irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
+ set_irq_flags(virq, IRQF_VALID); /* kill me now */
+ return 0;
+}
+
+static struct irq_domain_ops gpio_rcar_irq_domain_ops = {
+ .map = gpio_rcar_irq_domain_map,
+};
+
+static int gpio_rcar_probe(struct platform_device *pdev)
+{
+ struct gpio_rcar_config *pdata = pdev->dev.platform_data;
+ struct gpio_rcar_priv *p;
+ struct resource *io, *irq;
+ struct gpio_chip *gpio_chip;
+ struct irq_chip *irq_chip;
+ const char *name = dev_name(&pdev->dev);
+ int ret;
+
+ p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
+ if (!p) {
+ dev_err(&pdev->dev, "failed to allocate driver data\n");
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ /* deal with driver instance configuration */
+ if (pdata)
+ p->config = *pdata;
+
+ p->pdev = pdev;
+ platform_set_drvdata(pdev, p);
+ spin_lock_init(&p->lock);
+
+ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+ if (!io || !irq) {
+ dev_err(&pdev->dev, "missing IRQ or IOMEM\n");
+ ret = -EINVAL;
+ goto err0;
+ }
+
+ p->base = devm_ioremap_nocache(&pdev->dev, io->start,
+ resource_size(io));
+ if (!p->base) {
+ dev_err(&pdev->dev, "failed to remap I/O memory\n");
+ ret = -ENXIO;
+ goto err0;
+ }
+
+ gpio_chip = &p->gpio_chip;
+ gpio_chip->request = gpio_rcar_request;
+ gpio_chip->free = gpio_rcar_free;
+ gpio_chip->direction_input = gpio_rcar_direction_input;
+ gpio_chip->get = gpio_rcar_get;
+ gpio_chip->direction_output = gpio_rcar_direction_output;
+ gpio_chip->set = gpio_rcar_set;
+ gpio_chip->to_irq = gpio_rcar_to_irq;
+ gpio_chip->label = name;
+ gpio_chip->owner = THIS_MODULE;
+ gpio_chip->base = p->config.gpio_base;
+ gpio_chip->ngpio = p->config.number_of_pins;
+
+ irq_chip = &p->irq_chip;
+ irq_chip->name = name;
+ irq_chip->irq_mask = gpio_rcar_irq_disable;
+ irq_chip->irq_unmask = gpio_rcar_irq_enable;
+ irq_chip->irq_enable = gpio_rcar_irq_enable;
+ irq_chip->irq_disable = gpio_rcar_irq_disable;
+ irq_chip->irq_set_type = gpio_rcar_irq_set_type;
+ irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_SET_TYPE_MASKED;
+
+ p->irq_domain = irq_domain_add_simple(pdev->dev.of_node,
+ p->config.number_of_pins,
+ p->config.irq_base,
+ &gpio_rcar_irq_domain_ops, p);
+ if (!p->irq_domain) {
+ ret = -ENXIO;
+ dev_err(&pdev->dev, "cannot initialize irq domain\n");
+ goto err0;
+ }
+
+ if (devm_request_irq(&pdev->dev, irq->start,
+ gpio_rcar_irq_handler, 0, name, p)) {
+ dev_err(&pdev->dev, "failed to request IRQ\n");
+ ret = -ENOENT;
+ goto err1;
+ }
+
+ ret = gpiochip_add(gpio_chip);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add GPIO controller\n");
+ goto err1;
+ }
+
+ dev_info(&pdev->dev, "driving %d GPIOs\n", p->config.number_of_pins);
+
+ /* warn in case of mismatch if irq base is specified */
+ if (p->config.irq_base) {
+ ret = irq_find_mapping(p->irq_domain, 0);
+ if (p->config.irq_base != ret)
+ dev_warn(&pdev->dev, "irq base mismatch (%u/%u)\n",
+ p->config.irq_base, ret);
+ }
+
+ ret = gpiochip_add_pin_range(gpio_chip, p->config.pctl_name, 0,
+ gpio_chip->base, gpio_chip->ngpio);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "failed to add pin range\n");
+
+ return 0;
+
+err1:
+ irq_domain_remove(p->irq_domain);
+err0:
+ return ret;
+}
+
+static int gpio_rcar_remove(struct platform_device *pdev)
+{
+ struct gpio_rcar_priv *p = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = gpiochip_remove(&p->gpio_chip);
+ if (ret)
+ return ret;
+
+ irq_domain_remove(p->irq_domain);
+ return 0;
+}
+
+static struct platform_driver gpio_rcar_device_driver = {
+ .probe = gpio_rcar_probe,
+ .remove = gpio_rcar_remove,
+ .driver = {
+ .name = "gpio_rcar",
+ }
+};
+
+module_platform_driver(gpio_rcar_device_driver);
+
+MODULE_AUTHOR("Magnus Damm");
+MODULE_DESCRIPTION("Renesas R-Car GPIO Driver");
+MODULE_LICENSE("GPL v2");
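
As a usage illustration for the new gpio-rcar driver above (assumed, not part of
the patch): because the driver registers an irq_domain and implements .to_irq via
irq_create_mapping()/irq_find_mapping(), a consumer can obtain an interrupt for a
pin through the stock gpio_to_irq() helper. The sketch below shows that flow with
an invented "demo" label and an arbitrary trigger type.

#include <linux/gpio.h>
#include <linux/interrupt.h>

/* Hypothetical consumer: claim a pin owned by this controller and hook its
 * interrupt; the virq comes from gpio_rcar_to_irq() via the irq domain. */
static int demo_hook_gpio_irq(unsigned gpio, irq_handler_t handler, void *data)
{
	int irq, ret;

	ret = gpio_request_one(gpio, GPIOF_IN, "demo");
	if (ret)
		return ret;

	irq = gpio_to_irq(gpio);
	if (irq < 0) {
		gpio_free(gpio);
		return irq;
	}

	ret = request_irq(irq, handler, IRQF_TRIGGER_FALLING, "demo", data);
	if (ret)
		gpio_free(gpio);
	return ret;
}
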
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index b3643ff..b22ca79 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -1122,8 +1122,12 @@ int samsung_gpiolib_to_irq(struct gpio_chip *chip, unsigned int offset)
#ifdef CONFIG_PLAT_S3C24XX
static int s3c24xx_gpiolib_fbank_to_irq(struct gpio_chip *chip, unsigned offset)
{
- if (offset < 4)
- return IRQ_EINT0 + offset;
+ if (offset < 4) {
+ if (soc_is_s3c2412())
+ return IRQ_EINT0_2412 + offset;
+ else
+ return IRQ_EINT0 + offset;
+ }
if (offset < 8)
return IRQ_EINT4 + offset - 4;
@@ -3024,7 +3028,9 @@ static __init int samsung_gpiolib_init(void)
static const struct of_device_id exynos_pinctrl_ids[] = {
{ .compatible = "samsung,exynos4210-pinctrl", },
{ .compatible = "samsung,exynos4x12-pinctrl", },
+ { .compatible = "samsung,exynos5250-pinctrl", },
{ .compatible = "samsung,exynos5440-pinctrl", },
+ { }
};
for_each_matching_node(pctrl_np, exynos_pinctrl_ids)
if (pctrl_np && of_device_is_available(pctrl_np))
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index edae963..1e4de16 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -125,13 +125,17 @@ static int sch_gpio_resume_direction_in(struct gpio_chip *gc,
unsigned gpio_num)
{
u8 curr_dirs;
+ unsigned short offset, bit;
spin_lock(&gpio_lock);
- curr_dirs = inb(gpio_ba + RGIO);
+ offset = RGIO + gpio_num / 8;
+ bit = gpio_num % 8;
+
+ curr_dirs = inb(gpio_ba + offset);
- if (!(curr_dirs & (1 << gpio_num)))
- outb(curr_dirs | (1 << gpio_num) , gpio_ba + RGIO);
+ if (!(curr_dirs & (1 << bit)))
+ outb(curr_dirs | (1 << bit), gpio_ba + offset);
spin_unlock(&gpio_lock);
return 0;
@@ -139,22 +143,31 @@ static int sch_gpio_resume_direction_in(struct gpio_chip *gc,
static int sch_gpio_resume_get(struct gpio_chip *gc, unsigned gpio_num)
{
- return !!(inb(gpio_ba + RGLV) & (1 << gpio_num));
+ unsigned short offset, bit;
+
+ offset = RGLV + gpio_num / 8;
+ bit = gpio_num % 8;
+
+ return !!(inb(gpio_ba + offset) & (1 << bit));
}
static void sch_gpio_resume_set(struct gpio_chip *gc,
unsigned gpio_num, int val)
{
u8 curr_vals;
+ unsigned short offset, bit;
spin_lock(&gpio_lock);
- curr_vals = inb(gpio_ba + RGLV);
+ offset = RGLV + gpio_num / 8;
+ bit = gpio_num % 8;
+
+ curr_vals = inb(gpio_ba + offset);
if (val)
- outb(curr_vals | (1 << gpio_num), gpio_ba + RGLV);
+ outb(curr_vals | (1 << bit), gpio_ba + offset);
else
- outb((curr_vals & ~(1 << gpio_num)), gpio_ba + RGLV);
+ outb((curr_vals & ~(1 << bit)), gpio_ba + offset);
spin_unlock(&gpio_lock);
}
@@ -163,14 +176,18 @@ static int sch_gpio_resume_direction_out(struct gpio_chip *gc,
unsigned gpio_num, int val)
{
u8 curr_dirs;
+ unsigned short offset, bit;
sch_gpio_resume_set(gc, gpio_num, val);
+ offset = RGIO + gpio_num / 8;
+ bit = gpio_num % 8;
+
spin_lock(&gpio_lock);
- curr_dirs = inb(gpio_ba + RGIO);
- if (curr_dirs & (1 << gpio_num))
- outb(curr_dirs & ~(1 << gpio_num), gpio_ba + RGIO);
+ curr_dirs = inb(gpio_ba + offset);
+ if (curr_dirs & (1 << bit))
+ outb(curr_dirs & ~(1 << bit), gpio_ba + offset);
spin_unlock(&gpio_lock);
return 0;
@@ -204,45 +221,41 @@ static int sch_gpio_probe(struct platform_device *pdev)
gpio_ba = res->start;
switch (id) {
- case PCI_DEVICE_ID_INTEL_SCH_LPC:
- sch_gpio_core.base = 0;
- sch_gpio_core.ngpio = 10;
-
- sch_gpio_resume.base = 10;
- sch_gpio_resume.ngpio = 4;
-
- /*
- * GPIO[6:0] enabled by default
- * GPIO7 is configured by the CMC as SLPIOVR
- * Enable GPIO[9:8] core powered gpios explicitly
- */
- outb(0x3, gpio_ba + CGEN + 1);
- /*
- * SUS_GPIO[2:0] enabled by default
- * Enable SUS_GPIO3 resume powered gpio explicitly
- */
- outb(0x8, gpio_ba + RGEN);
- break;
-
- case PCI_DEVICE_ID_INTEL_ITC_LPC:
- sch_gpio_core.base = 0;
- sch_gpio_core.ngpio = 5;
-
- sch_gpio_resume.base = 5;
- sch_gpio_resume.ngpio = 9;
- break;
-
- case PCI_DEVICE_ID_INTEL_CENTERTON_ILB:
- sch_gpio_core.base = 0;
- sch_gpio_core.ngpio = 21;
-
- sch_gpio_resume.base = 21;
- sch_gpio_resume.ngpio = 9;
- break;
-
- default:
- err = -ENODEV;
- goto err_sch_gpio_core;
+ case PCI_DEVICE_ID_INTEL_SCH_LPC:
+ sch_gpio_core.base = 0;
+ sch_gpio_core.ngpio = 10;
+ sch_gpio_resume.base = 10;
+ sch_gpio_resume.ngpio = 4;
+ /*
+ * GPIO[6:0] enabled by default
+ * GPIO7 is configured by the CMC as SLPIOVR
+ * Enable GPIO[9:8] core powered gpios explicitly
+ */
+ outb(0x3, gpio_ba + CGEN + 1);
+ /*
+ * SUS_GPIO[2:0] enabled by default
+ * Enable SUS_GPIO3 resume powered gpio explicitly
+ */
+ outb(0x8, gpio_ba + RGEN);
+ break;
+
+ case PCI_DEVICE_ID_INTEL_ITC_LPC:
+ sch_gpio_core.base = 0;
+ sch_gpio_core.ngpio = 5;
+ sch_gpio_resume.base = 5;
+ sch_gpio_resume.ngpio = 9;
+ break;
+
+ case PCI_DEVICE_ID_INTEL_CENTERTON_ILB:
+ sch_gpio_core.base = 0;
+ sch_gpio_core.ngpio = 21;
+ sch_gpio_resume.base = 21;
+ sch_gpio_resume.ngpio = 9;
+ break;
+
+ default:
+ err = -ENODEV;
+ goto err_sch_gpio_core;
}
sch_gpio_core.dev = &pdev->dev;
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index c20e051..04882a9 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -217,7 +217,7 @@ static int xway_stp_probe(struct platform_device *pdev)
chip->virt = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(chip->virt))
return PTR_ERR(chip->virt);
-
+
chip->gc.dev = &pdev->dev;
chip->gc.label = "stp-xway";
chip->gc.direction_output = xway_stp_dir_out;
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index c0595bb..d34d80d 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -282,9 +282,9 @@ static void tc3589x_gpio_irq_unmap(struct irq_domain *d, unsigned int virq)
}
static struct irq_domain_ops tc3589x_irq_ops = {
- .map = tc3589x_gpio_irq_map,
- .unmap = tc3589x_gpio_irq_unmap,
- .xlate = irq_domain_xlate_twocell,
+ .map = tc3589x_gpio_irq_map,
+ .unmap = tc3589x_gpio_irq_unmap,
+ .xlate = irq_domain_xlate_twocell,
};
static int tc3589x_gpio_irq_init(struct tc3589x_gpio *tc3589x_gpio,
@@ -344,7 +344,7 @@ static int tc3589x_gpio_probe(struct platform_device *pdev)
tc3589x_gpio->chip.base = (pdata) ? pdata->gpio_base : -1;
#ifdef CONFIG_OF_GPIO
- tc3589x_gpio->chip.of_node = np;
+ tc3589x_gpio->chip.of_node = np;
#endif
tc3589x_gpio->irq_base = tc3589x->irq_base ?
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 414ad91..da4cb5b 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -27,11 +27,10 @@
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm.h>
-#include <asm/mach/irq.h>
-
#define GPIO_BANK(x) ((x) >> 5)
#define GPIO_PORT(x) (((x) >> 3) & 0x3)
#define GPIO_BIT(x) ((x) & 0x7)
@@ -72,6 +71,7 @@ struct tegra_gpio_bank {
u32 oe[4];
u32 int_enb[4];
u32 int_lvl[4];
+ u32 wake_enb[4];
#endif
};
@@ -333,15 +333,31 @@ static int tegra_gpio_suspend(struct device *dev)
bank->oe[p] = tegra_gpio_readl(GPIO_OE(gpio));
bank->int_enb[p] = tegra_gpio_readl(GPIO_INT_ENB(gpio));
bank->int_lvl[p] = tegra_gpio_readl(GPIO_INT_LVL(gpio));
+
+ /* Only leave the wakeup-source GPIO interrupts enabled during suspend */
+ tegra_gpio_writel(bank->wake_enb[p],
+ GPIO_INT_ENB(gpio));
}
}
local_irq_restore(flags);
return 0;
}
-static int tegra_gpio_wake_enable(struct irq_data *d, unsigned int enable)
+static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
{
struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ int gpio = d->hwirq;
+ u32 port, bit, mask;
+
+ port = GPIO_PORT(gpio);
+ bit = GPIO_BIT(gpio);
+ mask = BIT(bit);
+
+ if (enable)
+ bank->wake_enb[port] |= mask;
+ else
+ bank->wake_enb[port] &= ~mask;
+
return irq_set_irq_wake(bank->irq, enable);
}
#endif
@@ -353,7 +369,7 @@ static struct irq_chip tegra_gpio_irq_chip = {
.irq_unmask = tegra_gpio_irq_unmask,
.irq_set_type = tegra_gpio_irq_set_type,
#ifdef CONFIG_PM_SLEEP
- .irq_set_wake = tegra_gpio_wake_enable,
+ .irq_set_wake = tegra_gpio_irq_set_wake,
#endif
};
@@ -398,10 +414,11 @@ static int tegra_gpio_probe(struct platform_device *pdev)
int j;
match = of_match_device(tegra_gpio_of_match, &pdev->dev);
- if (match)
- config = (struct tegra_gpio_soc_config *)match->data;
- else
- config = &tegra20_gpio_config;
+ if (!match) {
+ dev_err(&pdev->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+ config = (struct tegra_gpio_soc_config *)match->data;
tegra_gpio_bank_stride = config->bank_stride;
tegra_gpio_upper_offset = config->upper_offset;
@@ -462,9 +479,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
}
}
-#ifdef CONFIG_OF_GPIO
tegra_gpio_chip.of_node = pdev->dev.of_node;
-#endif
gpiochip_add(&tegra_gpio_chip);
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index 702cca9..4377405 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -167,8 +167,7 @@ static int timbgpio_irq_type(struct irq_data *d, unsigned trigger)
if (ver < 3) {
ret = -EINVAL;
goto out;
- }
- else {
+ } else {
flr |= 1 << offset;
bflr |= 1 << offset;
}
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index 5083825..0614621 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -133,7 +133,7 @@ static int tps65910_gpio_probe(struct platform_device *pdev)
tps65910_gpio->gpio_chip.owner = THIS_MODULE;
tps65910_gpio->gpio_chip.label = tps65910->i2c_client->name;
- switch(tps65910_chip_id(tps65910)) {
+ switch (tps65910_chip_id(tps65910)) {
case TPS65910:
tps65910_gpio->gpio_chip.ngpio = TPS65910_NUM_GPIO;
break;
diff --git a/drivers/gpio/gpio-ucb1400.c b/drivers/gpio/gpio-ucb1400.c
index 26405ef..6d0feb2 100644
--- a/drivers/gpio/gpio-ucb1400.c
+++ b/drivers/gpio/gpio-ucb1400.c
@@ -12,8 +12,6 @@
#include <linux/module.h>
#include <linux/ucb1400.h>
-struct ucb1400_gpio_data *ucbdata;
-
static int ucb1400_gpio_dir_in(struct gpio_chip *gc, unsigned off)
{
struct ucb1400_gpio *gpio;
@@ -50,7 +48,7 @@ static int ucb1400_gpio_probe(struct platform_device *dev)
struct ucb1400_gpio *ucb = dev->dev.platform_data;
int err = 0;
- if (!(ucbdata && ucbdata->gpio_offset)) {
+ if (!(ucb && ucb->gpio_offset)) {
err = -EINVAL;
goto err;
}
@@ -58,7 +56,7 @@ static int ucb1400_gpio_probe(struct platform_device *dev)
platform_set_drvdata(dev, ucb);
ucb->gc.label = "ucb1400_gpio";
- ucb->gc.base = ucbdata->gpio_offset;
+ ucb->gc.base = ucb->gpio_offset;
ucb->gc.ngpio = 10;
ucb->gc.owner = THIS_MODULE;
@@ -72,8 +70,8 @@ static int ucb1400_gpio_probe(struct platform_device *dev)
if (err)
goto err;
- if (ucbdata && ucbdata->gpio_setup)
- err = ucbdata->gpio_setup(&dev->dev, ucb->gc.ngpio);
+ if (ucb && ucb->gpio_setup)
+ err = ucb->gpio_setup(&dev->dev, ucb->gc.ngpio);
err:
return err;
@@ -85,8 +83,8 @@ static int ucb1400_gpio_remove(struct platform_device *dev)
int err = 0;
struct ucb1400_gpio *ucb = platform_get_drvdata(dev);
- if (ucbdata && ucbdata->gpio_teardown) {
- err = ucbdata->gpio_teardown(&dev->dev, ucb->gc.ngpio);
+ if (ucb && ucb->gpio_teardown) {
+ err = ucb->gpio_teardown(&dev->dev, ucb->gc.ngpio);
if (err)
return err;
}
@@ -103,11 +101,6 @@ static struct platform_driver ucb1400_gpio_driver = {
},
};
-void __init ucb1400_gpio_set_data(struct ucb1400_gpio_data *data)
-{
- ucbdata = data;
-}
-
module_platform_driver(ucb1400_gpio_driver);
MODULE_DESCRIPTION("Philips UCB1400 GPIO driver");
diff --git a/drivers/gpio/gpio-viperboard.c b/drivers/gpio/gpio-viperboard.c
index 59d7239..095ab14 100644
--- a/drivers/gpio/gpio-viperboard.c
+++ b/drivers/gpio/gpio-viperboard.c
@@ -380,10 +380,6 @@ static int vprbrd_gpiob_direction_output(struct gpio_chip *chip,
struct vprbrd *vb = gpio->vb;
gpio->gpiob_out |= (1 << offset);
- if (value)
- gpio->gpiob_val |= (1 << offset);
- else
- gpio->gpiob_val &= ~(1 << offset);
mutex_lock(&vb->lock);
diff --git a/drivers/gpio/gpio-vt8500.c b/drivers/gpio/gpio-vt8500.c
deleted file mode 100644
index 81683ca..0000000
--- a/drivers/gpio/gpio-vt8500.c
+++ /dev/null
@@ -1,355 +0,0 @@
-/* drivers/gpio/gpio-vt8500.c
- *
- * Copyright (C) 2012 Tony Prisk <linux@prisktech.co.nz>
- * Based on arch/arm/mach-vt8500/gpio.c:
- * - Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-#include <linux/platform_device.h>
-#include <linux/bitops.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_device.h>
-
-/*
- We handle GPIOs by bank, each bank containing up to 32 GPIOs covered
- by one set of registers (although not all may be valid).
-
- Because different SoC's have different register offsets, we pass the
- register offsets as data in vt8500_gpio_dt_ids[].
-
- A value of NO_REG is used to indicate that this register is not
- supported. Only used for ->en at the moment.
-*/
-
-#define NO_REG 0xFFFF
-
-/*
- * struct vt8500_gpio_bank_regoffsets
- * @en: offset to enable register of the bank
- * @dir: offset to direction register of the bank
- * @data_out: offset to the data out register of the bank
- * @data_in: offset to the data in register of the bank
- * @ngpio: highest valid pin in this bank
- */
-
-struct vt8500_gpio_bank_regoffsets {
- unsigned int en;
- unsigned int dir;
- unsigned int data_out;
- unsigned int data_in;
- unsigned char ngpio;
-};
-
-struct vt8500_gpio_data {
- unsigned int num_banks;
- struct vt8500_gpio_bank_regoffsets banks[];
-};
-
-#define VT8500_BANK(__en, __dir, __out, __in, __ngpio) \
-{ \
- .en = __en, \
- .dir = __dir, \
- .data_out = __out, \
- .data_in = __in, \
- .ngpio = __ngpio, \
-}
-
-static struct vt8500_gpio_data vt8500_data = {
- .num_banks = 7,
- .banks = {
- VT8500_BANK(NO_REG, 0x3C, 0x5C, 0x7C, 9),
- VT8500_BANK(0x00, 0x20, 0x40, 0x60, 26),
- VT8500_BANK(0x04, 0x24, 0x44, 0x64, 28),
- VT8500_BANK(0x08, 0x28, 0x48, 0x68, 31),
- VT8500_BANK(0x0C, 0x2C, 0x4C, 0x6C, 19),
- VT8500_BANK(0x10, 0x30, 0x50, 0x70, 19),
- VT8500_BANK(0x14, 0x34, 0x54, 0x74, 23),
- },
-};
-
-static struct vt8500_gpio_data wm8505_data = {
- .num_banks = 10,
- .banks = {
- VT8500_BANK(0x64, 0x8C, 0xB4, 0xDC, 22),
- VT8500_BANK(0x40, 0x68, 0x90, 0xB8, 8),
- VT8500_BANK(0x44, 0x6C, 0x94, 0xBC, 32),
- VT8500_BANK(0x48, 0x70, 0x98, 0xC0, 6),
- VT8500_BANK(0x4C, 0x74, 0x9C, 0xC4, 16),
- VT8500_BANK(0x50, 0x78, 0xA0, 0xC8, 25),
- VT8500_BANK(0x54, 0x7C, 0xA4, 0xCC, 5),
- VT8500_BANK(0x58, 0x80, 0xA8, 0xD0, 5),
- VT8500_BANK(0x5C, 0x84, 0xAC, 0xD4, 12),
- VT8500_BANK(0x60, 0x88, 0xB0, 0xD8, 16),
- VT8500_BANK(0x500, 0x504, 0x508, 0x50C, 6),
- },
-};
-
-/*
- * No information about which bits are valid so we just make
- * them all available until it's figured out.
- */
-static struct vt8500_gpio_data wm8650_data = {
- .num_banks = 9,
- .banks = {
- VT8500_BANK(0x40, 0x80, 0xC0, 0x00, 32),
- VT8500_BANK(0x44, 0x84, 0xC4, 0x04, 32),
- VT8500_BANK(0x48, 0x88, 0xC8, 0x08, 32),
- VT8500_BANK(0x4C, 0x8C, 0xCC, 0x0C, 32),
- VT8500_BANK(0x50, 0x90, 0xD0, 0x10, 32),
- VT8500_BANK(0x54, 0x94, 0xD4, 0x14, 32),
- VT8500_BANK(0x58, 0x98, 0xD8, 0x18, 32),
- VT8500_BANK(0x5C, 0x9C, 0xDC, 0x1C, 32),
- VT8500_BANK(0x7C, 0xBC, 0xFC, 0x3C, 32),
- VT8500_BANK(0x500, 0x504, 0x508, 0x50C, 6),
- },
-};
-
-struct vt8500_gpio_chip {
- struct gpio_chip chip;
-
- const struct vt8500_gpio_bank_regoffsets *regs;
- void __iomem *base;
-};
-
-struct vt8500_data {
- struct vt8500_gpio_chip *chip;
- void __iomem *iobase;
- int num_banks;
-};
-
-
-#define to_vt8500(__chip) container_of(__chip, struct vt8500_gpio_chip, chip)
-
-static int vt8500_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
- u32 val;
- struct vt8500_gpio_chip *vt8500_chip = to_vt8500(chip);
-
- if (vt8500_chip->regs->en == NO_REG)
- return 0;
-
- val = readl_relaxed(vt8500_chip->base + vt8500_chip->regs->en);
- val |= BIT(offset);
- writel_relaxed(val, vt8500_chip->base + vt8500_chip->regs->en);
-
- return 0;
-}
-
-static void vt8500_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
- struct vt8500_gpio_chip *vt8500_chip = to_vt8500(chip);
- u32 val;
-
- if (vt8500_chip->regs->en == NO_REG)
- return;
-
- val = readl_relaxed(vt8500_chip->base + vt8500_chip->regs->en);
- val &= ~BIT(offset);
- writel_relaxed(val, vt8500_chip->base + vt8500_chip->regs->en);
-}
-
-static int vt8500_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- struct vt8500_gpio_chip *vt8500_chip = to_vt8500(chip);
-
- u32 val = readl_relaxed(vt8500_chip->base + vt8500_chip->regs->dir);
- val &= ~BIT(offset);
- writel_relaxed(val, vt8500_chip->base + vt8500_chip->regs->dir);
-
- return 0;
-}
-
-static int vt8500_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- struct vt8500_gpio_chip *vt8500_chip = to_vt8500(chip);
-
- u32 val = readl_relaxed(vt8500_chip->base + vt8500_chip->regs->dir);
- val |= BIT(offset);
- writel_relaxed(val, vt8500_chip->base + vt8500_chip->regs->dir);
-
- if (value) {
- val = readl_relaxed(vt8500_chip->base +
- vt8500_chip->regs->data_out);
- val |= BIT(offset);
- writel_relaxed(val, vt8500_chip->base +
- vt8500_chip->regs->data_out);
- }
- return 0;
-}
-
-static int vt8500_gpio_get_value(struct gpio_chip *chip, unsigned offset)
-{
- struct vt8500_gpio_chip *vt8500_chip = to_vt8500(chip);
-
- return (readl_relaxed(vt8500_chip->base + vt8500_chip->regs->data_in) >>
- offset) & 1;
-}
-
-static void vt8500_gpio_set_value(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- struct vt8500_gpio_chip *vt8500_chip = to_vt8500(chip);
-
- u32 val = readl_relaxed(vt8500_chip->base +
- vt8500_chip->regs->data_out);
- if (value)
- val |= BIT(offset);
- else
- val &= ~BIT(offset);
-
- writel_relaxed(val, vt8500_chip->base + vt8500_chip->regs->data_out);
-}
-
-static int vt8500_of_xlate(struct gpio_chip *gc,
- const struct of_phandle_args *gpiospec, u32 *flags)
-{
- /* bank is specified in gpiospec->args[0] */
- if (flags)
- *flags = gpiospec->args[2];
-
- return gpiospec->args[1];
-}
-
-static int vt8500_add_chips(struct platform_device *pdev, void __iomem *base,
- const struct vt8500_gpio_data *data)
-{
- struct vt8500_data *priv;
- struct vt8500_gpio_chip *vtchip;
- struct gpio_chip *chip;
- int i;
- int pin_cnt = 0;
-
- priv = devm_kzalloc(&pdev->dev, sizeof(struct vt8500_data), GFP_KERNEL);
- if (!priv) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
- return -ENOMEM;
- }
-
- priv->chip = devm_kzalloc(&pdev->dev,
- sizeof(struct vt8500_gpio_chip) * data->num_banks,
- GFP_KERNEL);
- if (!priv->chip) {
- dev_err(&pdev->dev, "failed to allocate chip memory\n");
- return -ENOMEM;
- }
-
- priv->iobase = base;
- priv->num_banks = data->num_banks;
- platform_set_drvdata(pdev, priv);
-
- vtchip = priv->chip;
-
- for (i = 0; i < data->num_banks; i++) {
- vtchip[i].base = base;
- vtchip[i].regs = &data->banks[i];
-
- chip = &vtchip[i].chip;
-
- chip->of_xlate = vt8500_of_xlate;
- chip->of_gpio_n_cells = 3;
- chip->of_node = pdev->dev.of_node;
-
- chip->request = vt8500_gpio_request;
- chip->free = vt8500_gpio_free;
- chip->direction_input = vt8500_gpio_direction_input;
- chip->direction_output = vt8500_gpio_direction_output;
- chip->get = vt8500_gpio_get_value;
- chip->set = vt8500_gpio_set_value;
- chip->can_sleep = 0;
- chip->base = pin_cnt;
- chip->ngpio = data->banks[i].ngpio;
-
- pin_cnt += data->banks[i].ngpio;
-
- gpiochip_add(chip);
- }
- return 0;
-}
-
-static struct of_device_id vt8500_gpio_dt_ids[] = {
- { .compatible = "via,vt8500-gpio", .data = &vt8500_data, },
- { .compatible = "wm,wm8505-gpio", .data = &wm8505_data, },
- { .compatible = "wm,wm8650-gpio", .data = &wm8650_data, },
- { /* Sentinel */ },
-};
-
-static int vt8500_gpio_probe(struct platform_device *pdev)
-{
- int ret;
- void __iomem *gpio_base;
- struct resource *res;
- const struct of_device_id *of_id =
- of_match_device(vt8500_gpio_dt_ids, &pdev->dev);
-
- if (!of_id) {
- dev_err(&pdev->dev, "No matching driver data\n");
- return -ENODEV;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Unable to get IO resource\n");
- return -ENODEV;
- }
-
- gpio_base = devm_request_and_ioremap(&pdev->dev, res);
- if (!gpio_base) {
- dev_err(&pdev->dev, "Unable to map GPIO registers\n");
- return -ENOMEM;
- }
-
- ret = vt8500_add_chips(pdev, gpio_base, of_id->data);
-
- return ret;
-}
-
-static int vt8500_gpio_remove(struct platform_device *pdev)
-{
- int i;
- int ret;
- struct vt8500_data *priv = platform_get_drvdata(pdev);
- struct vt8500_gpio_chip *vtchip = priv->chip;
-
- for (i = 0; i < priv->num_banks; i++) {
- ret = gpiochip_remove(&vtchip[i].chip);
- if (ret)
- dev_warn(&pdev->dev, "gpiochip_remove returned %d\n",
- ret);
- }
-
- return 0;
-}
-
-static struct platform_driver vt8500_gpio_driver = {
- .probe = vt8500_gpio_probe,
- .remove = vt8500_gpio_remove,
- .driver = {
- .name = "vt8500-gpio",
- .owner = THIS_MODULE,
- .of_match_table = vt8500_gpio_dt_ids,
- },
-};
-
-module_platform_driver(vt8500_gpio_driver);
-
-MODULE_DESCRIPTION("VT8500 GPIO Driver");
-MODULE_AUTHOR("Tony Prisk <linux@prisktech.co.nz>");
-MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(of, vt8500_gpio_dt_ids);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index a063eb0..5c1ef2b 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -17,6 +17,13 @@
#include <linux/acpi.h>
#include <linux/interrupt.h>
+struct acpi_gpio_evt_pin {
+ struct list_head node;
+ acpi_handle *evt_handle;
+ unsigned int pin;
+ unsigned int irq;
+};
+
static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
{
if (!gc->dev)
@@ -54,7 +61,6 @@ int acpi_get_gpio(char *path, int pin)
}
EXPORT_SYMBOL_GPL(acpi_get_gpio);
-
static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
{
acpi_handle handle = data;
@@ -64,6 +70,27 @@ static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
+static irqreturn_t acpi_gpio_irq_handler_evt(int irq, void *data)
+{
+ struct acpi_gpio_evt_pin *evt_pin = data;
+ struct acpi_object_list args;
+ union acpi_object arg;
+
+ arg.type = ACPI_TYPE_INTEGER;
+ arg.integer.value = evt_pin->pin;
+ args.count = 1;
+ args.pointer = &arg;
+
+ acpi_evaluate_object(evt_pin->evt_handle, NULL, &args, NULL);
+
+ return IRQ_HANDLED;
+}
+
+static void acpi_gpio_evt_dh(acpi_handle handle, void *data)
+{
+ /* The address of this function is used as a key. */
+}
+
/**
* acpi_gpiochip_request_interrupts() - Register isr for gpio chip ACPI events
* @chip: gpio chip
@@ -73,15 +100,13 @@ static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
* chip's interrupt handler. acpi_gpiochip_request_interrupts finds out which
 * gpio pins have acpi event methods and assigns interrupt handlers that call
* the acpi event methods for those pins.
- *
- * Interrupts are automatically freed on driver detach
*/
-
void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
{
struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
struct acpi_resource *res;
- acpi_handle handle, ev_handle;
+ acpi_handle handle, evt_handle;
+ struct list_head *evt_pins = NULL;
acpi_status status;
unsigned int pin;
int irq, ret;
@@ -98,13 +123,30 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
if (ACPI_FAILURE(status))
return;
- /* If a gpio interrupt has an acpi event handler method, then
- * set up an interrupt handler that calls the acpi event handler
- */
+ status = acpi_get_handle(handle, "_EVT", &evt_handle);
+ if (ACPI_SUCCESS(status)) {
+ evt_pins = kzalloc(sizeof(*evt_pins), GFP_KERNEL);
+ if (evt_pins) {
+ INIT_LIST_HEAD(evt_pins);
+ status = acpi_attach_data(handle, acpi_gpio_evt_dh,
+ evt_pins);
+ if (ACPI_FAILURE(status)) {
+ kfree(evt_pins);
+ evt_pins = NULL;
+ }
+ }
+ }
+ /*
+ * If a GPIO interrupt has an ACPI event handler method, or _EVT is
+ * present, set up an interrupt handler that calls the ACPI event
+ * handler.
+ */
for (res = buf.pointer;
res && (res->type != ACPI_RESOURCE_TYPE_END_TAG);
res = ACPI_NEXT_RESOURCE(res)) {
+ irq_handler_t handler = NULL;
+ void *data;
if (res->type != ACPI_RESOURCE_TYPE_GPIO ||
res->data.gpio.connection_type !=
@@ -115,23 +157,42 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
if (pin > chip->ngpio)
continue;
- sprintf(ev_name, "_%c%02X",
- res->data.gpio.triggering ? 'E' : 'L', pin);
-
- status = acpi_get_handle(handle, ev_name, &ev_handle);
- if (ACPI_FAILURE(status))
- continue;
-
irq = chip->to_irq(chip, pin);
if (irq < 0)
continue;
+ if (pin <= 255) {
+ acpi_handle ev_handle;
+
+ sprintf(ev_name, "_%c%02X",
+ res->data.gpio.triggering ? 'E' : 'L', pin);
+ status = acpi_get_handle(handle, ev_name, &ev_handle);
+ if (ACPI_SUCCESS(status)) {
+ handler = acpi_gpio_irq_handler;
+ data = ev_handle;
+ }
+ }
+ if (!handler && evt_pins) {
+ struct acpi_gpio_evt_pin *evt_pin;
+
+ evt_pin = kzalloc(sizeof(*evt_pin), GFP_KERNEL);
+ if (!evt_pin)
+ continue;
+
+ list_add_tail(&evt_pin->node, evt_pins);
+ evt_pin->evt_handle = evt_handle;
+ evt_pin->pin = pin;
+ evt_pin->irq = irq;
+ handler = acpi_gpio_irq_handler_evt;
+ data = evt_pin;
+ }
+ if (!handler)
+ continue;
+
/* Assume BIOS sets the triggering, so no flags */
- ret = devm_request_threaded_irq(chip->dev, irq, NULL,
- acpi_gpio_irq_handler,
- 0,
- "GPIO-signaled-ACPI-event",
- ev_handle);
+ ret = devm_request_threaded_irq(chip->dev, irq, NULL, handler,
+ 0, "GPIO-signaled-ACPI-event",
+ data);
if (ret)
dev_err(chip->dev,
"Failed to request IRQ %d ACPI event handler\n",
@@ -139,3 +200,119 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
}
}
EXPORT_SYMBOL(acpi_gpiochip_request_interrupts);
+
+struct acpi_gpio_lookup {
+ struct acpi_gpio_info info;
+ int index;
+ int gpio;
+ int n;
+};
+
+static int acpi_find_gpio(struct acpi_resource *ares, void *data)
+{
+ struct acpi_gpio_lookup *lookup = data;
+
+ if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
+ return 1;
+
+ if (lookup->n++ == lookup->index && lookup->gpio < 0) {
+ const struct acpi_resource_gpio *agpio = &ares->data.gpio;
+
+ lookup->gpio = acpi_get_gpio(agpio->resource_source.string_ptr,
+ agpio->pin_table[0]);
+ lookup->info.gpioint =
+ agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT;
+ }
+
+ return 1;
+}
+
+/**
+ * acpi_get_gpio_by_index() - get a GPIO number from device resources
+ * @dev: pointer to a device to get GPIO from
+ * @index: index of GpioIo/GpioInt resource (starting from %0)
+ * @info: info pointer to fill in (optional)
+ *
+ * The function goes through the ACPI resources for @dev and, based on @index,
+ * looks up a GpioIo/GpioInt resource, translates it to the Linux GPIO number,
+ * and returns it. @index matches GpioIo/GpioInt resources only, so if there
+ * are %3 GPIO resources in total, the index goes from %0 to %2.
+ *
+ * If the GPIO cannot be translated or there is an error, negative errno is
+ * returned.
+ *
+ * Note: if the GPIO resource has multiple entries in the pin list, this
+ * function only returns the first.
+ */
+int acpi_get_gpio_by_index(struct device *dev, int index,
+ struct acpi_gpio_info *info)
+{
+ struct acpi_gpio_lookup lookup;
+ struct list_head resource_list;
+ struct acpi_device *adev;
+ acpi_handle handle;
+ int ret;
+
+ if (!dev)
+ return -EINVAL;
+
+ handle = ACPI_HANDLE(dev);
+ if (!handle || acpi_bus_get_device(handle, &adev))
+ return -ENODEV;
+
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.index = index;
+ lookup.gpio = -ENODEV;
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list, acpi_find_gpio,
+ &lookup);
+ if (ret < 0)
+ return ret;
+
+ acpi_dev_free_resource_list(&resource_list);
+
+ if (lookup.gpio >= 0 && info)
+ *info = lookup.info;
+
+ return lookup.gpio;
+}
+EXPORT_SYMBOL_GPL(acpi_get_gpio_by_index);
+
+/**
+ * acpi_gpiochip_free_interrupts() - Free GPIO _EVT ACPI event interrupts.
+ * @chip: gpio chip
+ *
+ * Free interrupts associated with the _EVT method for the given GPIO chip.
+ *
+ * The remaining ACPI event interrupts associated with the chip are freed
+ * automatically.
+ */
+void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
+{
+ acpi_handle handle;
+ acpi_status status;
+ struct list_head *evt_pins;
+ struct acpi_gpio_evt_pin *evt_pin, *ep;
+
+ if (!chip->dev || !chip->to_irq)
+ return;
+
+ handle = ACPI_HANDLE(chip->dev);
+ if (!handle)
+ return;
+
+ status = acpi_get_data(handle, acpi_gpio_evt_dh, (void **)&evt_pins);
+ if (ACPI_FAILURE(status))
+ return;
+
+ list_for_each_entry_safe_reverse(evt_pin, ep, evt_pins, node) {
+ devm_free_irq(chip->dev, evt_pin->irq, evt_pin);
+ list_del(&evt_pin->node);
+ kfree(evt_pin);
+ }
+
+ acpi_detach_data(handle, acpi_gpio_evt_dh);
+ kfree(evt_pins);
+}
+EXPORT_SYMBOL(acpi_gpiochip_free_interrupts);
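The kernel-doc above describes two consumer-facing interfaces added by this hunk: acpi_get_gpio_by_index() resolves a GpioIo/GpioInt entry from a device's ACPI resources into a Linux GPIO number, while acpi_gpiochip_request_interrupts() and acpi_gpiochip_free_interrupts() pair up around a GPIO chip's lifetime to wire ACPI event methods (and now _EVT) to interrupts. The sketch below is not part of this patch; it is a hypothetical platform driver (foo_probe and the "foo-irq" label are made up) showing what a consumer of the lookup helper might look like.

/*
 * Hypothetical consumer sketch (not part of this patch): a platform driver
 * that resolves its first GpioIo/GpioInt resource via acpi_get_gpio_by_index()
 * and requests it as an input. Names such as foo_probe are illustrative only.
 */
#include <linux/acpi_gpio.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct acpi_gpio_info info;
	int gpio, ret;

	/* Index 0: first GpioIo/GpioInt entry in _CRS for this device. */
	gpio = acpi_get_gpio_by_index(&pdev->dev, 0, &info);
	if (gpio < 0)
		return gpio;

	ret = devm_gpio_request_one(&pdev->dev, gpio, GPIOF_IN, "foo-irq");
	if (ret)
		return ret;

	dev_info(&pdev->dev, "using GPIO %d (gpioint=%d)\n", gpio, info.gpioint);
	return 0;
}

On the provider side, a gpiochip driver would call acpi_gpiochip_request_interrupts(chip) after registering the chip and acpi_gpiochip_free_interrupts(chip) before removing it, so the _EVT bookkeeping introduced above is torn down again.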
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 465f4ca..665f953 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -61,7 +61,7 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
* in flags for the GPIO.
*/
int of_get_named_gpio_flags(struct device_node *np, const char *propname,
- int index, enum of_gpio_flags *flags)
+ int index, enum of_gpio_flags *flags)
{
/* Return -EPROBE_DEFER to support probe() functions to be called
* later when the GPIO actually becomes available
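The comment in this hunk notes that of_get_named_gpio_flags() returns -EPROBE_DEFER so that probe() functions can be retried once the GPIO controller is actually available. A minimal, hypothetical consumer sketch (the "reset-gpios" property and bar_probe are illustrative, not taken from this patch) that propagates the deferral:

/*
 * Illustrative sketch, not part of this patch: resolve a named DT GPIO and
 * defer probing if its controller has not been registered yet.
 */
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>

static int bar_probe(struct platform_device *pdev)
{
	enum of_gpio_flags flags;
	int gpio;

	gpio = of_get_named_gpio_flags(pdev->dev.of_node, "reset-gpios", 0, &flags);
	if (gpio == -EPROBE_DEFER)
		return gpio;	/* controller not probed yet; core retries later */
	if (!gpio_is_valid(gpio))
		return -ENODEV;

	/* Deassert reset: for an active-low line that means driving it high. */
	return gpio_request_one(gpio, (flags & OF_GPIO_ACTIVE_LOW) ?
				GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
				"bar-reset");
}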
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index 30879df..d8a22c2 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1 +1,2 @@
obj-y += drm/ vga/
+obj-$(CONFIG_TEGRA_HOST1X) += host1x/
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 1e82882..b16c50e 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -215,8 +215,8 @@ source "drivers/gpu/drm/cirrus/Kconfig"
source "drivers/gpu/drm/shmobile/Kconfig"
-source "drivers/gpu/drm/tegra/Kconfig"
-
source "drivers/gpu/drm/omapdrm/Kconfig"
source "drivers/gpu/drm/tilcdc/Kconfig"
+
+source "drivers/gpu/drm/qxl/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 0d59b24..1c9f243 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -49,7 +49,7 @@ obj-$(CONFIG_DRM_GMA500) += gma500/
obj-$(CONFIG_DRM_UDL) += udl/
obj-$(CONFIG_DRM_AST) += ast/
obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
-obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-$(CONFIG_DRM_OMAP) += omapdrm/
obj-$(CONFIG_DRM_TILCDC) += tilcdc/
+obj-$(CONFIG_DRM_QXL) += qxl/
obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 5284292..02e52d5 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -241,6 +241,8 @@ struct ast_fbdev {
void *sysram;
int size;
struct ttm_bo_kmap_obj mapping;
+ int x1, y1, x2, y2; /* dirty rect */
+ spinlock_t dirty_lock;
};
#define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 34931fe..fbc0823 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -53,16 +53,52 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8;
int ret;
bool unmap = false;
+ bool store_for_later = false;
+ int x2, y2;
+ unsigned long flags;
obj = afbdev->afb.obj;
bo = gem_to_ast_bo(obj);
+ /*
+ * Try to reserve the BO; if we fail with -EBUSY
+ * then the BO is being moved and we should
+ * store up the damage until later.
+ */
ret = ast_bo_reserve(bo, true);
if (ret) {
- DRM_ERROR("failed to reserve fb bo\n");
+ if (ret != -EBUSY)
+ return;
+
+ store_for_later = true;
+ }
+
+ x2 = x + width - 1;
+ y2 = y + height - 1;
+ spin_lock_irqsave(&afbdev->dirty_lock, flags);
+
+ if (afbdev->y1 < y)
+ y = afbdev->y1;
+ if (afbdev->y2 > y2)
+ y2 = afbdev->y2;
+ if (afbdev->x1 < x)
+ x = afbdev->x1;
+ if (afbdev->x2 > x2)
+ x2 = afbdev->x2;
+
+ if (store_for_later) {
+ afbdev->x1 = x;
+ afbdev->x2 = x2;
+ afbdev->y1 = y;
+ afbdev->y2 = y2;
+ spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
return;
}
+ afbdev->x1 = afbdev->y1 = INT_MAX;
+ afbdev->x2 = afbdev->y2 = 0;
+ spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+
if (!bo->kmap.virtual) {
ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
if (ret) {
@@ -72,10 +108,10 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
}
unmap = true;
}
- for (i = y; i < y + height; i++) {
+ for (i = y; i <= y2; i++) {
/* assume equal stride for now */
src_offset = dst_offset = i * afbdev->afb.base.pitches[0] + (x * bpp);
- memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
+ memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, (x2 - x + 1) * bpp);
}
if (unmap)
@@ -292,6 +328,7 @@ int ast_fbdev_init(struct drm_device *dev)
ast->fbdev = afbdev;
afbdev->helper.funcs = &ast_fb_helper_funcs;
+ spin_lock_init(&afbdev->dirty_lock);
ret = drm_fb_helper_init(dev, &afbdev->helper,
1, 1);
if (ret) {
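The new ast_dirty_update() logic (mirrored in the cirrus change below) reserves the BO and, on -EBUSY, merges the new damage into a rectangle stored under dirty_lock instead of flushing immediately; the stored rectangle is reset to empty (x1/y1 = INT_MAX, x2/y2 = 0) once a flush actually happens. Below is a standalone sketch of that accumulate-or-flush step, simplified from the pattern above and assuming the caller has already derived store_for_later from the reserve result.

/*
 * Simplified sketch of the deferred dirty-rectangle accumulation used by the
 * ast/cirrus fbdev updates: merge the incoming damage with any stored damage,
 * then either keep the union for later (BO busy) or reset the store and flush.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>

struct dirty_rect {
	spinlock_t lock;
	int x1, y1, x2, y2;	/* stored damage; x1/y1 = INT_MAX when empty */
};

static bool dirty_rect_merge(struct dirty_rect *d, bool store_for_later,
			     int *x, int *y, int *x2, int *y2)
{
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);

	/* Grow the incoming rectangle by any previously stored damage. */
	if (d->y1 < *y)
		*y = d->y1;
	if (d->y2 > *y2)
		*y2 = d->y2;
	if (d->x1 < *x)
		*x = d->x1;
	if (d->x2 > *x2)
		*x2 = d->x2;

	if (store_for_later) {
		/* BO is busy: remember the union, nothing to flush now. */
		d->x1 = *x;
		d->x2 = *x2;
		d->y1 = *y;
		d->y2 = *y2;
		spin_unlock_irqrestore(&d->lock, flags);
		return false;
	}

	/* Flushing: mark the stored rectangle empty again. */
	d->x1 = d->y1 = INT_MAX;
	d->x2 = d->y2 = 0;
	spin_unlock_irqrestore(&d->lock, flags);
	return true;
}

Returning false tells the caller to skip the memcpy_toio() flush for this update; the accumulated damage is written out on the next call that can reserve the BO.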
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 3602731..09da339 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -316,7 +316,7 @@ int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
if (ret) {
- if (ret != -ERESTARTSYS)
+ if (ret != -ERESTARTSYS && ret != -EBUSY)
DRM_ERROR("reserve failed %p\n", bo);
return ret;
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 6e0cc72..7ca0595 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -154,6 +154,8 @@ struct cirrus_fbdev {
struct list_head fbdev_list;
void *sysram;
int size;
+ int x1, y1, x2, y2; /* dirty rect */
+ spinlock_t dirty_lock;
};
struct cirrus_bo {
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index e25afcc..3541b56 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -27,16 +27,51 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
int ret;
bool unmap = false;
+ bool store_for_later = false;
+ int x2, y2;
+ unsigned long flags;
obj = afbdev->gfb.obj;
bo = gem_to_cirrus_bo(obj);
+ /*
+ * Try to reserve the BO; if we fail with -EBUSY
+ * then the BO is being moved and we should
+ * store up the damage until later.
+ */
ret = cirrus_bo_reserve(bo, true);
if (ret) {
- DRM_ERROR("failed to reserve fb bo\n");
+ if (ret != -EBUSY)
+ return;
+ store_for_later = true;
+ }
+
+ x2 = x + width - 1;
+ y2 = y + height - 1;
+ spin_lock_irqsave(&afbdev->dirty_lock, flags);
+
+ if (afbdev->y1 < y)
+ y = afbdev->y1;
+ if (afbdev->y2 > y2)
+ y2 = afbdev->y2;
+ if (afbdev->x1 < x)
+ x = afbdev->x1;
+ if (afbdev->x2 > x2)
+ x2 = afbdev->x2;
+
+ if (store_for_later) {
+ afbdev->x1 = x;
+ afbdev->x2 = x2;
+ afbdev->y1 = y;
+ afbdev->y2 = y2;
+ spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
return;
}
+ afbdev->x1 = afbdev->y1 = INT_MAX;
+ afbdev->x2 = afbdev->y2 = 0;
+ spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+
if (!bo->kmap.virtual) {
ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
if (ret) {
@@ -268,6 +303,7 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
cdev->mode_info.gfbdev = gfbdev;
gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
+ spin_lock_init(&gfbdev->dirty_lock);
ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 1413a26..2ed8cfc 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -321,7 +321,7 @@ int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
if (ret) {
- if (ret != -ERESTARTSYS)
+ if (ret != -ERESTARTSYS && ret != -EBUSY)
DRM_ERROR("reserve failed %p\n", bo);
return ret;
}
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index a575cb2..bb8f580 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -105,12 +105,11 @@ drm_clflush_sg(struct sg_table *st)
{
#if defined(CONFIG_X86)
if (cpu_has_clflush) {
- struct scatterlist *sg;
- int i;
+ struct sg_page_iter sg_iter;
mb();
- for_each_sg(st->sgl, sg, st->nents, i)
- drm_clflush_page(sg_page(sg));
+ for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+ drm_clflush_page(sg_page_iter_page(&sg_iter));
mb();
return;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index dd64a06..3a8f7e6d 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -178,9 +178,6 @@ static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
{ DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
};
-DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
- drm_dirty_info_enum_list)
-
struct drm_conn_prop_enum_list {
int type;
char *name;
@@ -412,7 +409,7 @@ struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
mutex_lock(&dev->mode_config.fb_lock);
fb = __drm_framebuffer_lookup(dev, id);
if (fb)
- kref_get(&fb->refcount);
+ drm_framebuffer_reference(fb);
mutex_unlock(&dev->mode_config.fb_lock);
return fb;
@@ -706,7 +703,6 @@ int drm_connector_init(struct drm_device *dev,
connector->connector_type = connector_type;
connector->connector_type_id =
++drm_connector_enum_list[connector_type].count; /* TODO */
- INIT_LIST_HEAD(&connector->user_modes);
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
connector->edid_blob_ptr = NULL;
@@ -747,9 +743,6 @@ void drm_connector_cleanup(struct drm_connector *connector)
list_for_each_entry_safe(mode, t, &connector->modes, head)
drm_mode_remove(connector, mode);
- list_for_each_entry_safe(mode, t, &connector->user_modes, head)
- drm_mode_remove(connector, mode);
-
drm_mode_object_put(dev, &connector->base);
list_del(&connector->head);
dev->mode_config.num_connector--;
@@ -1120,45 +1113,7 @@ int drm_mode_create_dirty_info_property(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
-/**
- * drm_mode_config_init - initialize DRM mode_configuration structure
- * @dev: DRM device
- *
- * Initialize @dev's mode_config structure, used for tracking the graphics
- * configuration of @dev.
- *
- * Since this initializes the modeset locks, no locking is possible. Which is no
- * problem, since this should happen single threaded at init time. It is the
- * driver's problem to ensure this guarantee.
- *
- */
-void drm_mode_config_init(struct drm_device *dev)
-{
- mutex_init(&dev->mode_config.mutex);
- mutex_init(&dev->mode_config.idr_mutex);
- mutex_init(&dev->mode_config.fb_lock);
- INIT_LIST_HEAD(&dev->mode_config.fb_list);
- INIT_LIST_HEAD(&dev->mode_config.crtc_list);
- INIT_LIST_HEAD(&dev->mode_config.connector_list);
- INIT_LIST_HEAD(&dev->mode_config.encoder_list);
- INIT_LIST_HEAD(&dev->mode_config.property_list);
- INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
- INIT_LIST_HEAD(&dev->mode_config.plane_list);
- idr_init(&dev->mode_config.crtc_idr);
-
- drm_modeset_lock_all(dev);
- drm_mode_create_standard_connector_properties(dev);
- drm_modeset_unlock_all(dev);
-
- /* Just to be sure */
- dev->mode_config.num_fb = 0;
- dev->mode_config.num_connector = 0;
- dev->mode_config.num_crtc = 0;
- dev->mode_config.num_encoder = 0;
-}
-EXPORT_SYMBOL(drm_mode_config_init);
-
-int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
+static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
{
uint32_t total_objects = 0;
@@ -1203,69 +1158,6 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
/**
- * drm_mode_config_cleanup - free up DRM mode_config info
- * @dev: DRM device
- *
- * Free up all the connectors and CRTCs associated with this DRM device, then
- * free up the framebuffers and associated buffer objects.
- *
- * Note that since this /should/ happen single-threaded at driver/device
- * teardown time, no locking is required. It's the driver's job to ensure that
- * this guarantee actually holds true.
- *
- * FIXME: cleanup any dangling user buffer objects too
- */
-void drm_mode_config_cleanup(struct drm_device *dev)
-{
- struct drm_connector *connector, *ot;
- struct drm_crtc *crtc, *ct;
- struct drm_encoder *encoder, *enct;
- struct drm_framebuffer *fb, *fbt;
- struct drm_property *property, *pt;
- struct drm_plane *plane, *plt;
-
- list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
- head) {
- encoder->funcs->destroy(encoder);
- }
-
- list_for_each_entry_safe(connector, ot,
- &dev->mode_config.connector_list, head) {
- connector->funcs->destroy(connector);
- }
-
- list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
- head) {
- drm_property_destroy(dev, property);
- }
-
- /*
- * Single-threaded teardown context, so it's not required to grab the
- * fb_lock to protect against concurrent fb_list access. Contrary, it
- * would actually deadlock with the drm_framebuffer_cleanup function.
- *
- * Also, if there are any framebuffers left, that's a driver leak now,
- * so politely WARN about this.
- */
- WARN_ON(!list_empty(&dev->mode_config.fb_list));
- list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
- drm_framebuffer_remove(fb);
- }
-
- list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
- head) {
- plane->funcs->destroy(plane);
- }
-
- list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
- crtc->funcs->destroy(crtc);
- }
-
- idr_destroy(&dev->mode_config.crtc_idr);
-}
-EXPORT_SYMBOL(drm_mode_config_cleanup);
-
-/**
* drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
* @out: drm_mode_modeinfo struct to return to the user
* @in: drm_display_mode to use
@@ -2717,192 +2609,6 @@ void drm_fb_release(struct drm_file *priv)
mutex_unlock(&priv->fbs_lock);
}
-/**
- * drm_mode_attachmode - add a mode to the user mode list
- * @dev: DRM device
- * @connector: connector to add the mode to
- * @mode: mode to add
- *
- * Add @mode to @connector's user mode list.
- */
-static void drm_mode_attachmode(struct drm_device *dev,
- struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- list_add_tail(&mode->head, &connector->user_modes);
-}
-
-int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
- const struct drm_display_mode *mode)
-{
- struct drm_connector *connector;
- int ret = 0;
- struct drm_display_mode *dup_mode, *next;
- LIST_HEAD(list);
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (!connector->encoder)
- continue;
- if (connector->encoder->crtc == crtc) {
- dup_mode = drm_mode_duplicate(dev, mode);
- if (!dup_mode) {
- ret = -ENOMEM;
- goto out;
- }
- list_add_tail(&dup_mode->head, &list);
- }
- }
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (!connector->encoder)
- continue;
- if (connector->encoder->crtc == crtc)
- list_move_tail(list.next, &connector->user_modes);
- }
-
- WARN_ON(!list_empty(&list));
-
- out:
- list_for_each_entry_safe(dup_mode, next, &list, head)
- drm_mode_destroy(dev, dup_mode);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_mode_attachmode_crtc);
-
-static int drm_mode_detachmode(struct drm_device *dev,
- struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- int found = 0;
- int ret = 0;
- struct drm_display_mode *match_mode, *t;
-
- list_for_each_entry_safe(match_mode, t, &connector->user_modes, head) {
- if (drm_mode_equal(match_mode, mode)) {
- list_del(&match_mode->head);
- drm_mode_destroy(dev, match_mode);
- found = 1;
- break;
- }
- }
-
- if (!found)
- ret = -EINVAL;
-
- return ret;
-}
-
-int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode)
-{
- struct drm_connector *connector;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_mode_detachmode(dev, connector, mode);
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_detachmode_crtc);
-
-/**
- * drm_fb_attachmode - Attach a user mode to a connector
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * This attaches a user-specified mode to a connector.
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_attachmode_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_mode_cmd *mode_cmd = data;
- struct drm_connector *connector;
- struct drm_display_mode *mode;
- struct drm_mode_object *obj;
- struct drm_mode_modeinfo *umode = &mode_cmd->mode;
- int ret;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- drm_modeset_lock_all(dev);
-
- obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- connector = obj_to_connector(obj);
-
- mode = drm_mode_create(dev);
- if (!mode) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = drm_crtc_convert_umode(mode, umode);
- if (ret) {
- DRM_DEBUG_KMS("Invalid mode\n");
- drm_mode_destroy(dev, mode);
- goto out;
- }
-
- drm_mode_attachmode(dev, connector, mode);
-out:
- drm_modeset_unlock_all(dev);
- return ret;
-}
-
-
-/**
- * drm_fb_detachmode - Detach a user-specified mode from a connector
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_detachmode_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_object *obj;
- struct drm_mode_mode_cmd *mode_cmd = data;
- struct drm_connector *connector;
- struct drm_display_mode mode;
- struct drm_mode_modeinfo *umode = &mode_cmd->mode;
- int ret;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- drm_modeset_lock_all(dev);
-
- obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- connector = obj_to_connector(obj);
-
- ret = drm_crtc_convert_umode(&mode, umode);
- if (ret) {
- DRM_DEBUG_KMS("Invalid mode\n");
- goto out;
- }
-
- ret = drm_mode_detachmode(dev, connector, &mode);
-out:
- drm_modeset_unlock_all(dev);
- return ret;
-}
-
struct drm_property *drm_property_create(struct drm_device *dev, int flags,
const char *name, int num_values)
{
@@ -3739,6 +3445,12 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
goto out;
}
+ if (crtc->fb->pixel_format != fb->pixel_format) {
+ DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
ret = -ENOMEM;
spin_lock_irqsave(&dev->event_lock, flags);
@@ -4064,3 +3776,110 @@ int drm_format_vert_chroma_subsampling(uint32_t format)
}
}
EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
+
+/**
+ * drm_mode_config_init - initialize DRM mode_configuration structure
+ * @dev: DRM device
+ *
+ * Initialize @dev's mode_config structure, used for tracking the graphics
+ * configuration of @dev.
+ *
+ * Since this initializes the modeset locks, no locking is possible. Which is no
+ * problem, since this should happen single threaded at init time. It is the
+ * driver's problem to ensure this guarantee.
+ *
+ */
+void drm_mode_config_init(struct drm_device *dev)
+{
+ mutex_init(&dev->mode_config.mutex);
+ mutex_init(&dev->mode_config.idr_mutex);
+ mutex_init(&dev->mode_config.fb_lock);
+ INIT_LIST_HEAD(&dev->mode_config.fb_list);
+ INIT_LIST_HEAD(&dev->mode_config.crtc_list);
+ INIT_LIST_HEAD(&dev->mode_config.connector_list);
+ INIT_LIST_HEAD(&dev->mode_config.encoder_list);
+ INIT_LIST_HEAD(&dev->mode_config.property_list);
+ INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+ INIT_LIST_HEAD(&dev->mode_config.plane_list);
+ idr_init(&dev->mode_config.crtc_idr);
+
+ drm_modeset_lock_all(dev);
+ drm_mode_create_standard_connector_properties(dev);
+ drm_modeset_unlock_all(dev);
+
+ /* Just to be sure */
+ dev->mode_config.num_fb = 0;
+ dev->mode_config.num_connector = 0;
+ dev->mode_config.num_crtc = 0;
+ dev->mode_config.num_encoder = 0;
+}
+EXPORT_SYMBOL(drm_mode_config_init);
+
+/**
+ * drm_mode_config_cleanup - free up DRM mode_config info
+ * @dev: DRM device
+ *
+ * Free up all the connectors and CRTCs associated with this DRM device, then
+ * free up the framebuffers and associated buffer objects.
+ *
+ * Note that since this /should/ happen single-threaded at driver/device
+ * teardown time, no locking is required. It's the driver's job to ensure that
+ * this guarantee actually holds true.
+ *
+ * FIXME: cleanup any dangling user buffer objects too
+ */
+void drm_mode_config_cleanup(struct drm_device *dev)
+{
+ struct drm_connector *connector, *ot;
+ struct drm_crtc *crtc, *ct;
+ struct drm_encoder *encoder, *enct;
+ struct drm_framebuffer *fb, *fbt;
+ struct drm_property *property, *pt;
+ struct drm_property_blob *blob, *bt;
+ struct drm_plane *plane, *plt;
+
+ list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
+ head) {
+ encoder->funcs->destroy(encoder);
+ }
+
+ list_for_each_entry_safe(connector, ot,
+ &dev->mode_config.connector_list, head) {
+ connector->funcs->destroy(connector);
+ }
+
+ list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
+ head) {
+ drm_property_destroy(dev, property);
+ }
+
+ list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
+ head) {
+ drm_property_destroy_blob(dev, blob);
+ }
+
+ /*
+ * Single-threaded teardown context, so it's not required to grab the
+ * fb_lock to protect against concurrent fb_list access. Contrary, it
+ * would actually deadlock with the drm_framebuffer_cleanup function.
+ *
+ * Also, if there are any framebuffers left, that's a driver leak now,
+ * so politely WARN about this.
+ */
+ WARN_ON(!list_empty(&dev->mode_config.fb_list));
+ list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+ drm_framebuffer_remove(fb);
+ }
+
+ list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
+ head) {
+ plane->funcs->destroy(plane);
+ }
+
+ list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+ crtc->funcs->destroy(crtc);
+ }
+
+ idr_destroy(&dev->mode_config.crtc_idr);
+}
+EXPORT_SYMBOL(drm_mode_config_cleanup);
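drm_mode_config_init() and drm_mode_config_cleanup() are only moved within the file here, but their kernel-doc spells out the contract: both run single-threaded, at driver load and teardown respectively, and it is the driver's job to guarantee that. A hypothetical load/unload pairing (foo_driver_load, foo_driver_unload and the 8192x8192 limits are illustrative, not from this patch):

/*
 * Illustrative driver sketch showing the init/cleanup pairing described in
 * the kernel-doc above. Not part of this patch.
 */
#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static int foo_driver_load(struct drm_device *dev, unsigned long flags)
{
	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 8192;
	dev->mode_config.max_height = 8192;

	/* ... register CRTCs, encoders and connectors here ... */
	return 0;
}

static int foo_driver_unload(struct drm_device *dev)
{
	/* ... disable outputs and free driver objects first ... */
	drm_mode_config_cleanup(dev);
	return 0;
}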
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 7b2d378..e974f93 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -648,6 +648,9 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
} else if (set->fb->bits_per_pixel !=
set->crtc->fb->bits_per_pixel) {
mode_changed = true;
+ } else if (set->fb->pixel_format !=
+ set->crtc->fb->pixel_format) {
+ mode_changed = true;
} else
fb_changed = true;
}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 25f91cd..8d4f290 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -60,7 +60,7 @@ static int drm_version(struct drm_device *dev, void *data,
[DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0}
/** Ioctl table */
-static struct drm_ioctl_desc drm_ioctls[] = {
+static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
@@ -150,8 +150,8 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
@@ -375,7 +375,7 @@ long drm_ioctl(struct file *filp,
{
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev;
- struct drm_ioctl_desc *ioctl;
+ const struct drm_ioctl_desc *ioctl;
drm_ioctl_t *func;
unsigned int nr = DRM_IOCTL_NR(cmd);
int retcode = -EINVAL;
@@ -408,6 +408,7 @@ long drm_ioctl(struct file *filp,
usize = asize = _IOC_SIZE(cmd);
if (drv_size > asize)
asize = drv_size;
+ cmd = ioctl->cmd_drv;
}
else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
ioctl = &drm_ioctls[nr];
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index e2acfdb..9e62bbe 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -587,284 +587,348 @@ static const struct drm_display_mode edid_cea_modes[] = {
/* 1 - 640x480@60Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 60, },
/* 2 - 720x480@60Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 60, },
/* 3 - 720x480@60Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 60, },
/* 4 - 1280x720@60Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, },
/* 5 - 1920x1080i@60Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 60, },
/* 6 - 1440x480i@60Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 60, },
/* 7 - 1440x480i@60Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 60, },
/* 8 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK) },
+ DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 60, },
/* 9 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK) },
+ DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 60, },
/* 10 - 2880x480i@60Hz */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 60, },
/* 11 - 2880x480i@60Hz */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 60, },
/* 12 - 2880x240@60Hz */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 60, },
/* 13 - 2880x240@60Hz */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 60, },
/* 14 - 1440x480@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 60, },
/* 15 - 1440x480@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 60, },
/* 16 - 1920x1080@60Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, },
/* 17 - 720x576@50Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 50, },
/* 18 - 720x576@50Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 50, },
/* 19 - 1280x720@50Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, },
/* 20 - 1920x1080i@50Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50, },
/* 21 - 1440x576i@50Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 50, },
/* 22 - 1440x576i@50Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 50, },
/* 23 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK) },
+ DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 50, },
/* 24 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK) },
+ DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 50, },
/* 25 - 2880x576i@50Hz */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50, },
/* 26 - 2880x576i@50Hz */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50, },
/* 27 - 2880x288@50Hz */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 50, },
/* 28 - 2880x288@50Hz */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 50, },
/* 29 - 1440x576@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 50, },
/* 30 - 1440x576@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 50, },
/* 31 - 1920x1080@50Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, },
/* 32 - 1920x1080@24Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, },
/* 33 - 1920x1080@25Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, },
/* 34 - 1920x1080@30Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, },
/* 35 - 2880x480@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 60, },
/* 36 - 2880x480@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 60, },
/* 37 - 2880x576@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 50, },
/* 38 - 2880x576@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 50, },
/* 39 - 1920x1080i@50Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50, },
/* 40 - 1920x1080i@100Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 100, },
/* 41 - 1280x720@100Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, },
/* 42 - 720x576@100Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 100, },
/* 43 - 720x576@100Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 100, },
/* 44 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK) },
+ DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 100, },
/* 45 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK) },
+ DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 100, },
/* 46 - 1920x1080i@120Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 120, },
/* 47 - 1280x720@120Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, },
/* 48 - 720x480@120Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 120, },
/* 49 - 720x480@120Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 120, },
/* 50 - 1440x480i@120Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 120, },
/* 51 - 1440x480i@120Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 120, },
/* 52 - 720x576@200Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 200, },
/* 53 - 720x576@200Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 200, },
/* 54 - 1440x576i@200Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 200, },
/* 55 - 1440x576i@200Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 200, },
/* 56 - 720x480@240Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 240, },
/* 57 - 720x480@240Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 240, },
/* 58 - 1440x480i@240 */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 240, },
/* 59 - 1440x480i@240 */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 240, },
/* 60 - 1280x720@24Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, },
/* 61 - 1280x720@25Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, },
/* 62 - 1280x720@30Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, },
/* 63 - 1920x1080@120Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, },
/* 64 - 1920x1080@100Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, },
};
/*** DDC fetch and block validation ***/
@@ -2266,13 +2330,34 @@ EXPORT_SYMBOL(drm_find_cea_extension);
*/
u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
{
- struct drm_display_mode *cea_mode;
u8 mode;
+ if (!to_match->clock)
+ return 0;
+
for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) {
- cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode];
+ const struct drm_display_mode *cea_mode = &edid_cea_modes[mode];
+ unsigned int clock1, clock2;
+
+ clock1 = clock2 = cea_mode->clock;
- if (drm_mode_equal(to_match, cea_mode))
+ /* Check both 60Hz and 59.94Hz */
+ if (cea_mode->vrefresh % 6 == 0) {
+ /*
+ * edid_cea_modes contains the 59.94Hz
+ * variant for 240 and 480 line modes,
+ * and the 60Hz variant otherwise.
+ */
+ if (cea_mode->vdisplay == 240 ||
+ cea_mode->vdisplay == 480)
+ clock1 = clock1 * 1001 / 1000;
+ else
+ clock2 = DIV_ROUND_UP(clock2 * 1000, 1001);
+ }
+
+ if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
+ KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
+ drm_mode_equal_no_clocks(to_match, cea_mode))
return mode + 1;
}
return 0;
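A minimal worked example of the clock tolerance applied above (values computed from the formulas, not taken from the patch): for a 480-line entry such as 720x480, the table stores the 59.94Hz clock of 27000 kHz, so clock1 becomes the 60Hz variant; for other entries such as 1080p, the table stores the 60Hz clock of 148500 kHz, so clock2 becomes the 59.94Hz variant.

#include <linux/kernel.h>	/* DIV_ROUND_UP */

static void cea_clock_variants_example(void)
{
	unsigned int clock1, clock2;

	/* 480-line entry: table holds the 59.94Hz clock, derive the 60Hz one */
	clock1 = 27000 * 1001 / 1000;			/* 27027 kHz */
	clock2 = 27000;					/* 27000 kHz */

	/* other entries: table holds the 60Hz clock, derive the ~59.94Hz one */
	clock1 = 148500;				/* 148500 kHz */
	clock2 = DIV_ROUND_UP(148500 * 1000, 1001);	/* 148352 kHz */

	(void)clock1;
	(void)clock2;
}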
@@ -2294,6 +2379,7 @@ do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
newmode = drm_mode_duplicate(dev,
&edid_cea_modes[cea_mode]);
if (newmode) {
+ newmode->vrefresh = 0;
drm_mode_probed_add(connector, newmode);
modes++;
}
@@ -2511,6 +2597,65 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
EXPORT_SYMBOL(drm_edid_to_eld);
/**
+ * drm_edid_to_sad - extracts SADs from EDID
+ * @edid: EDID to parse
+ * @sads: pointer that will be set to the extracted SADs
+ *
+ * Looks for CEA EDID block and extracts SADs (Short Audio Descriptors) from it.
+ * Note: returned pointer needs to be kfreed
+ *
+ * Return number of found SADs or negative number on error.
+ */
+int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
+{
+ int count = 0;
+ int i, start, end, dbl;
+ u8 *cea;
+
+ cea = drm_find_cea_extension(edid);
+ if (!cea) {
+ DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
+ return -ENOENT;
+ }
+
+ if (cea_revision(cea) < 3) {
+ DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
+ return -ENOTSUPP;
+ }
+
+ if (cea_db_offsets(cea, &start, &end)) {
+ DRM_DEBUG_KMS("SAD: invalid data block offsets\n");
+ return -EPROTO;
+ }
+
+ for_each_cea_db(cea, i, start, end) {
+ u8 *db = &cea[i];
+
+ if (cea_db_tag(db) == AUDIO_BLOCK) {
+ int j;
+ dbl = cea_db_payload_len(db);
+
+ count = dbl / 3; /* SAD is 3B */
+ *sads = kcalloc(count, sizeof(**sads), GFP_KERNEL);
+ if (!*sads)
+ return -ENOMEM;
+ for (j = 0; j < count; j++) {
+ u8 *sad = &db[1 + j * 3];
+
+ (*sads)[j].format = (sad[0] & 0x78) >> 3;
+ (*sads)[j].channels = sad[0] & 0x7;
+ (*sads)[j].freq = sad[1] & 0x7F;
+ (*sads)[j].byte2 = sad[2];
+ }
+ break;
+ }
+ }
+
+ return count;
+}
+EXPORT_SYMBOL(drm_edid_to_sad);
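A minimal caller sketch for the new helper, assuming the prototype and struct cea_sad from this series are available (illustrative only, not part of the patch); the returned array is owned by the caller and must be kfreed, and the channels field stores the channel count minus one.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <drm/drm_edid.h>

static void print_sads_example(struct edid *edid)
{
	struct cea_sad *sads;
	int i, count;

	count = drm_edid_to_sad(edid, &sads);
	if (count <= 0)
		return;		/* no audio block found, or error */

	for (i = 0; i < count; i++)
		pr_info("SAD %d: format %u, up to %u channels\n",
			i, sads[i].format, sads[i].channels + 1);

	kfree(sads);		/* caller owns the returned array */
}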
+
+/**
 * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in milliseconds
* @connector: connector associated with the HDMI/DP sink
* @mode: the display mode
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 38d3943..fa445dd 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -31,10 +31,11 @@ module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644);
MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
"from built-in data or /lib/firmware instead. ");
-#define GENERIC_EDIDS 4
+#define GENERIC_EDIDS 5
static char *generic_edid_name[GENERIC_EDIDS] = {
"edid/1024x768.bin",
"edid/1280x1024.bin",
+ "edid/1600x1200.bin",
"edid/1680x1050.bin",
"edid/1920x1080.bin",
};
@@ -79,6 +80,24 @@ static u8 generic_edid[GENERIC_EDIDS][128] = {
{
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x16, 0x01, 0x03, 0x6d, 0x37, 0x29, 0x78,
+ 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
+ 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xa9, 0x40,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x48, 0x3f,
+ 0x40, 0x30, 0x62, 0xb0, 0x32, 0x40, 0x40, 0xc0,
+ 0x13, 0x00, 0x2b, 0xa0, 0x21, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
+ 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
+ 0x3d, 0x4a, 0x4c, 0x11, 0x00, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
+ 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x55,
+ 0x58, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0x9d,
+ },
+ {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x05, 0x16, 0x01, 0x03, 0x6d, 0x2b, 0x1b, 0x78,
0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xb3, 0x00,
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 892ff9f..b78cbe7 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1398,7 +1398,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
struct drm_mode_set *modeset;
bool *enabled;
int width, height;
- int i, ret;
+ int i;
DRM_DEBUG_KMS("\n");
@@ -1419,16 +1419,23 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
drm_enable_connectors(fb_helper, enabled);
- ret = drm_target_cloned(fb_helper, modes, enabled, width, height);
- if (!ret) {
- ret = drm_target_preferred(fb_helper, modes, enabled, width, height);
- if (!ret)
+ if (!(fb_helper->funcs->initial_config &&
+ fb_helper->funcs->initial_config(fb_helper, crtcs, modes,
+ enabled, width, height))) {
+ memset(modes, 0, dev->mode_config.num_connector*sizeof(modes[0]));
+ memset(crtcs, 0, dev->mode_config.num_connector*sizeof(crtcs[0]));
+
+ if (!drm_target_cloned(fb_helper,
+ modes, enabled, width, height) &&
+ !drm_target_preferred(fb_helper,
+ modes, enabled, width, height))
DRM_ERROR("Unable to find initial modes\n");
- }
- DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
+ DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n",
+ width, height);
- drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
+ drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
+ }
/* need to set the modesets up here for use later */
/* fill out the connector<->crtc mappings into the modesets */
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index af779ae..cf919e3 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -205,11 +205,11 @@ static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
if (obj->import_attach) {
- drm_prime_remove_imported_buf_handle(&filp->prime,
+ drm_prime_remove_buf_handle(&filp->prime,
obj->import_attach->dmabuf);
}
if (obj->export_dma_buf) {
- drm_prime_remove_imported_buf_handle(&filp->prime,
+ drm_prime_remove_buf_handle(&filp->prime,
obj->export_dma_buf);
}
}
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index f83f071..faa79df 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -848,6 +848,26 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ
} else if (mode1->clock != mode2->clock)
return false;
+ return drm_mode_equal_no_clocks(mode1, mode2);
+}
+EXPORT_SYMBOL(drm_mode_equal);
+
+/**
+ * drm_mode_equal_no_clocks - test modes for equality
+ * @mode1: first mode
+ * @mode2: second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Check to see if @mode1 and @mode2 are equivalent, but
+ * don't check the pixel clocks.
+ *
+ * RETURNS:
+ * True if the modes are equal, false otherwise.
+ */
+bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
+{
if (mode1->hdisplay == mode2->hdisplay &&
mode1->hsync_start == mode2->hsync_start &&
mode1->hsync_end == mode2->hsync_end &&
@@ -863,7 +883,7 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ
return false;
}
-EXPORT_SYMBOL(drm_mode_equal);
+EXPORT_SYMBOL(drm_mode_equal_no_clocks);
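A hedged sketch (assumed helper name, not part of the patch) of how the two comparison helpers divide the work: the clock check stays in drm_mode_equal(), while callers that tolerate clock differences, such as the CEA match above, compare the remaining timings with drm_mode_equal_no_clocks().

#include <drm/drm_crtc.h>

static bool timings_match_any_clock(const struct drm_display_mode *a,
				    const struct drm_display_mode *b)
{
	/* exact match, clocks included */
	if (drm_mode_equal(a, b))
		return true;

	/* same timings, clocks may differ (e.g. 59.94Hz vs 60Hz variants) */
	return drm_mode_equal_no_clocks(a, b);
}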
/**
* drm_mode_validate_size - make sure modes adhere to size constraints
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index bd719e9..14194b6 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -152,7 +152,7 @@ static const char *drm_pci_get_name(struct drm_device *dev)
return pdriver->name;
}
-int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
+static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
int len, ret;
struct pci_driver *pdriver = dev->driver->kdriver.pci;
@@ -194,9 +194,9 @@ err:
return ret;
}
-int drm_pci_set_unique(struct drm_device *dev,
- struct drm_master *master,
- struct drm_unique *u)
+static int drm_pci_set_unique(struct drm_device *dev,
+ struct drm_master *master,
+ struct drm_unique *u)
{
int domain, bus, slot, func, ret;
const char *bus_name;
@@ -266,7 +266,7 @@ static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
return 0;
}
-int drm_pci_agp_init(struct drm_device *dev)
+static int drm_pci_agp_init(struct drm_device *dev)
{
if (drm_core_has_AGP(dev)) {
if (drm_pci_device_is_agp(dev))
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 366910d..dcde352 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -62,6 +62,7 @@ struct drm_prime_member {
struct dma_buf *dma_buf;
uint32_t handle;
};
+static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
@@ -200,7 +201,8 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
{
struct drm_gem_object *obj;
void *buf;
- int ret;
+ int ret = 0;
+ struct dma_buf *dmabuf;
obj = drm_gem_object_lookup(dev, file_priv, handle);
if (!obj)
@@ -209,43 +211,44 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
mutex_lock(&file_priv->prime.lock);
/* re-export the original imported object */
if (obj->import_attach) {
- get_dma_buf(obj->import_attach->dmabuf);
- *prime_fd = dma_buf_fd(obj->import_attach->dmabuf, flags);
- drm_gem_object_unreference_unlocked(obj);
- mutex_unlock(&file_priv->prime.lock);
- return 0;
+ dmabuf = obj->import_attach->dmabuf;
+ goto out_have_obj;
}
if (obj->export_dma_buf) {
- get_dma_buf(obj->export_dma_buf);
- *prime_fd = dma_buf_fd(obj->export_dma_buf, flags);
- drm_gem_object_unreference_unlocked(obj);
- } else {
- buf = dev->driver->gem_prime_export(dev, obj, flags);
- if (IS_ERR(buf)) {
- /* normally the created dma-buf takes ownership of the ref,
- * but if that fails then drop the ref
- */
- drm_gem_object_unreference_unlocked(obj);
- mutex_unlock(&file_priv->prime.lock);
- return PTR_ERR(buf);
- }
- obj->export_dma_buf = buf;
- *prime_fd = dma_buf_fd(buf, flags);
+ dmabuf = obj->export_dma_buf;
+ goto out_have_obj;
+ }
+
+ buf = dev->driver->gem_prime_export(dev, obj, flags);
+ if (IS_ERR(buf)) {
+ /* normally the created dma-buf takes ownership of the ref,
+ * but if that fails then drop the ref
+ */
+ ret = PTR_ERR(buf);
+ goto out;
}
+ obj->export_dma_buf = buf;
+
/* if we've exported this buffer then cheat and add it to the import list
* so we get the correct handle back
*/
- ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
- obj->export_dma_buf, handle);
- if (ret) {
- drm_gem_object_unreference_unlocked(obj);
- mutex_unlock(&file_priv->prime.lock);
- return ret;
- }
+ ret = drm_prime_add_buf_handle(&file_priv->prime,
+ obj->export_dma_buf, handle);
+ if (ret)
+ goto out;
+ *prime_fd = dma_buf_fd(buf, flags);
mutex_unlock(&file_priv->prime.lock);
return 0;
+
+out_have_obj:
+ get_dma_buf(dmabuf);
+ *prime_fd = dma_buf_fd(dmabuf, flags);
+out:
+ drm_gem_object_unreference_unlocked(obj);
+ mutex_unlock(&file_priv->prime.lock);
+ return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
@@ -268,7 +271,6 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
* refcount on gem itself instead of f_count of dmabuf.
*/
drm_gem_object_reference(obj);
- dma_buf_put(dma_buf);
return obj;
}
}
@@ -277,6 +279,8 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
if (IS_ERR(attach))
return ERR_PTR(PTR_ERR(attach));
+ get_dma_buf(dma_buf);
+
sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
if (IS_ERR_OR_NULL(sgt)) {
ret = PTR_ERR(sgt);
@@ -297,6 +301,8 @@ fail_unmap:
dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);
@@ -314,7 +320,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
mutex_lock(&file_priv->prime.lock);
- ret = drm_prime_lookup_imported_buf_handle(&file_priv->prime,
+ ret = drm_prime_lookup_buf_handle(&file_priv->prime,
dma_buf, handle);
if (!ret) {
ret = 0;
@@ -333,12 +339,15 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
if (ret)
goto out_put;
- ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
+ ret = drm_prime_add_buf_handle(&file_priv->prime,
dma_buf, *handle);
if (ret)
goto fail;
mutex_unlock(&file_priv->prime.lock);
+
+ dma_buf_put(dma_buf);
+
return 0;
fail:
@@ -401,21 +410,17 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
{
struct sg_table *sg = NULL;
- struct scatterlist *iter;
- int i;
int ret;
sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!sg)
goto out;
- ret = sg_alloc_table(sg, nr_pages, GFP_KERNEL);
+ ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
+ nr_pages << PAGE_SHIFT, GFP_KERNEL);
if (ret)
goto out;
- for_each_sg(sg->sgl, iter, nr_pages, i)
- sg_set_page(iter, pages[i], PAGE_SIZE, 0);
-
return sg;
out:
kfree(sg);
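With sg_alloc_table_from_pages(), physically contiguous pages can be merged into a single entry, so the table may hold fewer entries than pages; a minimal sketch of walking such a table (assumed function name, illustrative only):

#include <linux/kernel.h>
#include <linux/scatterlist.h>

static void dump_sg_entries(struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	/* nents may be smaller than the original page count */
	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		pr_debug("sg entry %d: %u bytes\n", i, sg->length);
}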
@@ -483,15 +488,12 @@ EXPORT_SYMBOL(drm_prime_init_file_private);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
- struct drm_prime_member *member, *safe;
- list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
- list_del(&member->entry);
- kfree(member);
- }
+ /* by now drm_gem_release should've made sure the list is empty */
+ WARN_ON(!list_empty(&prime_fpriv->head));
}
EXPORT_SYMBOL(drm_prime_destroy_file_private);
-int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
+static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
{
struct drm_prime_member *member;
@@ -499,14 +501,14 @@ int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv
if (!member)
return -ENOMEM;
+ get_dma_buf(dma_buf);
member->dma_buf = dma_buf;
member->handle = handle;
list_add(&member->entry, &prime_fpriv->head);
return 0;
}
-EXPORT_SYMBOL(drm_prime_add_imported_buf_handle);
-int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
+int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
{
struct drm_prime_member *member;
@@ -518,19 +520,20 @@ int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fp
}
return -ENOENT;
}
-EXPORT_SYMBOL(drm_prime_lookup_imported_buf_handle);
+EXPORT_SYMBOL(drm_prime_lookup_buf_handle);
-void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
{
struct drm_prime_member *member, *safe;
mutex_lock(&prime_fpriv->lock);
list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
if (member->dma_buf == dma_buf) {
+ dma_buf_put(dma_buf);
list_del(&member->entry);
kfree(member);
}
}
mutex_unlock(&prime_fpriv->lock);
}
-EXPORT_SYMBOL(drm_prime_remove_imported_buf_handle);
+EXPORT_SYMBOL(drm_prime_remove_buf_handle);
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index ff5456b..d7f2324 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -49,7 +49,7 @@
/**
* Proc file list.
*/
-static struct drm_info_list drm_proc_list[] = {
+static const struct drm_info_list drm_proc_list[] = {
{"name", drm_name_info, 0},
{"vm", drm_vm_info, 0},
{"clients", drm_clients_info, 0},
@@ -63,7 +63,7 @@ static struct drm_info_list drm_proc_list[] = {
static int drm_proc_open(struct inode *inode, struct file *file)
{
- struct drm_info_node* node = PDE(inode)->data;
+ struct drm_info_node* node = PDE_DATA(inode);
return single_open(file, node->info_ent->show, node);
}
@@ -89,13 +89,13 @@ static const struct file_operations drm_proc_fops = {
* Create a given set of proc files represented by an array of
 * drm_proc_lists in the given root directory.
*/
-static int drm_proc_create_files(struct drm_info_list *files, int count,
+static int drm_proc_create_files(const struct drm_info_list *files, int count,
struct proc_dir_entry *root, struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
struct proc_dir_entry *ent;
struct drm_info_node *tmp;
- int i, ret;
+ int i;
for (i = 0; i < count; i++) {
u32 features = files[i].driver_features;
@@ -105,10 +105,9 @@ static int drm_proc_create_files(struct drm_info_list *files, int count,
continue;
tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
- if (tmp == NULL) {
- ret = -1;
- goto fail;
- }
+ if (!tmp)
+ return -1;
+
tmp->minor = minor;
tmp->info_ent = &files[i];
list_add(&tmp->list, &minor->proc_nodes.list);
@@ -116,28 +115,20 @@ static int drm_proc_create_files(struct drm_info_list *files, int count,
ent = proc_create_data(files[i].name, S_IRUGO, root,
&drm_proc_fops, tmp);
if (!ent) {
- DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
- root->name, files[i].name);
+ DRM_ERROR("Cannot create /proc/dri/%u/%s\n",
+ minor->index, files[i].name);
list_del(&tmp->list);
kfree(tmp);
- ret = -1;
- goto fail;
+ return -1;
}
-
}
return 0;
-
-fail:
- for (i = 0; i < count; i++)
- remove_proc_entry(drm_proc_list[i].name, minor->proc_root);
- return ret;
}
/**
* Initialize the DRI proc filesystem for a device
*
* \param dev DRM device
- * \param minor device minor number
* \param root DRI proc dir entry.
* \param dev_root resulting DRI device proc dir entry.
* \return root entry pointer on success, or NULL on failure.
@@ -146,14 +137,13 @@ fail:
* "/proc/dri/%minor%/", and each entry in proc_list as
* "/proc/dri/%minor%/%name%".
*/
-int drm_proc_init(struct drm_minor *minor, int minor_id,
- struct proc_dir_entry *root)
+int drm_proc_init(struct drm_minor *minor, struct proc_dir_entry *root)
{
- char name[64];
+ char name[12];
int ret;
INIT_LIST_HEAD(&minor->proc_nodes.list);
- sprintf(name, "%d", minor_id);
+ sprintf(name, "%u", minor->index);
minor->proc_root = proc_mkdir(name, root);
if (!minor->proc_root) {
DRM_ERROR("Cannot create /proc/dri/%s\n", name);
@@ -163,7 +153,7 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
minor->proc_root, minor);
if (ret) {
- remove_proc_entry(name, root);
+ remove_proc_subtree(name, root);
minor->proc_root = NULL;
DRM_ERROR("Failed to create core drm proc files\n");
return ret;
@@ -172,7 +162,7 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
return 0;
}
-static int drm_proc_remove_files(struct drm_info_list *files, int count,
+static int drm_proc_remove_files(const struct drm_info_list *files, int count,
struct drm_minor *minor)
{
struct list_head *pos, *q;
@@ -213,8 +203,7 @@ int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
sprintf(name, "%d", minor->index);
- remove_proc_entry(name, root);
-
+ remove_proc_subtree(name, root);
return 0;
}
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 7d30802..16f3ec5 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -352,7 +352,7 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
idr_replace(&drm_minors_idr, new_minor, minor_id);
if (type == DRM_MINOR_LEGACY) {
- ret = drm_proc_init(new_minor, minor_id, drm_proc_root);
+ ret = drm_proc_init(new_minor, drm_proc_root);
if (ret) {
DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
goto err_mem;
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index db7bd29..1d4f7c9 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -422,6 +422,7 @@ void drm_vm_open_locked(struct drm_device *dev,
list_add(&vma_entry->head, &dev->vmalist);
}
}
+EXPORT_SYMBOL_GPL(drm_vm_open_locked);
static void drm_vm_open(struct vm_area_struct *vma)
{
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 046bcda..772c62a 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -24,7 +24,9 @@ config DRM_EXYNOS_DMABUF
config DRM_EXYNOS_FIMD
bool "Exynos DRM FIMD"
- depends on DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
+ depends on OF && DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
+ select FB_MODE_HELPERS
+ select VIDEOMODE_HELPERS
help
Choose this option if you want to use Exynos FIMD for DRM.
@@ -54,7 +56,7 @@ config DRM_EXYNOS_IPP
config DRM_EXYNOS_FIMC
bool "Exynos DRM FIMC"
- depends on DRM_EXYNOS_IPP
+ depends on DRM_EXYNOS_IPP && MFD_SYSCON && OF
help
Choose this option if you want to use Exynos FIMC for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 4c5b685..8bcc13a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -124,7 +124,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
}
count = drm_add_edid_modes(connector, edid);
- if (count < 0) {
+ if (!count) {
DRM_ERROR("Add edid modes failed %d\n", count);
goto out;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index ba0a3aa..ff7f2a8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -235,7 +235,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
* refcount on gem itself instead of f_count of dmabuf.
*/
drm_gem_object_reference(obj);
- dma_buf_put(dma_buf);
return obj;
}
}
@@ -244,6 +243,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
if (IS_ERR(attach))
return ERR_PTR(-EINVAL);
+ get_dma_buf(dma_buf);
sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
if (IS_ERR_OR_NULL(sgt)) {
@@ -298,6 +298,8 @@ err_unmap_attach:
dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 3da5c2d..ba6d995 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -380,6 +380,10 @@ static int __init exynos_drm_init(void)
ret = platform_driver_register(&ipp_driver);
if (ret < 0)
goto out_ipp;
+
+ ret = exynos_platform_device_ipp_register();
+ if (ret < 0)
+ goto out_ipp_dev;
#endif
ret = platform_driver_register(&exynos_drm_platform_driver);
@@ -388,7 +392,7 @@ static int __init exynos_drm_init(void)
exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
NULL, 0);
- if (IS_ERR_OR_NULL(exynos_drm_pdev)) {
+ if (IS_ERR(exynos_drm_pdev)) {
ret = PTR_ERR(exynos_drm_pdev);
goto out;
}
@@ -400,6 +404,8 @@ out:
out_drm:
#ifdef CONFIG_DRM_EXYNOS_IPP
+ exynos_platform_device_ipp_unregister();
+out_ipp_dev:
platform_driver_unregister(&ipp_driver);
out_ipp:
#endif
@@ -456,6 +462,7 @@ static void __exit exynos_drm_exit(void)
platform_driver_unregister(&exynos_drm_platform_driver);
#ifdef CONFIG_DRM_EXYNOS_IPP
+ exynos_platform_device_ipp_unregister();
platform_driver_unregister(&ipp_driver);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 4606fac..680a7c1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -322,13 +322,23 @@ void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
* this function registers exynos drm hdmi platform device. It ensures only one
* instance of the device is created.
*/
-extern int exynos_platform_device_hdmi_register(void);
+int exynos_platform_device_hdmi_register(void);
/*
* this function unregisters exynos drm hdmi platform device if it exists.
*/
void exynos_platform_device_hdmi_unregister(void);
+/*
+ * this function registers exynos drm ipp platform device.
+ */
+int exynos_platform_device_ipp_register(void);
+
+/*
+ * this function unregisters exynos drm ipp platform device if it exists.
+ */
+void exynos_platform_device_ipp_unregister(void);
+
extern struct platform_driver fimd_driver;
extern struct platform_driver hdmi_driver;
extern struct platform_driver mixer_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 411f69b7..773f583 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -12,11 +12,12 @@
*
*/
#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
-#include <plat/map-base.h>
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
@@ -76,6 +77,27 @@ enum fimc_wb {
FIMC_WB_B,
};
+enum {
+ FIMC_CLK_LCLK,
+ FIMC_CLK_GATE,
+ FIMC_CLK_WB_A,
+ FIMC_CLK_WB_B,
+ FIMC_CLK_MUX,
+ FIMC_CLK_PARENT,
+ FIMC_CLKS_MAX
+};
+
+static const char * const fimc_clock_names[] = {
+ [FIMC_CLK_LCLK] = "sclk_fimc",
+ [FIMC_CLK_GATE] = "fimc",
+ [FIMC_CLK_WB_A] = "pxl_async0",
+ [FIMC_CLK_WB_B] = "pxl_async1",
+ [FIMC_CLK_MUX] = "mux",
+ [FIMC_CLK_PARENT] = "parent",
+};
+
+#define FIMC_DEFAULT_LCLK_FREQUENCY 133000000UL
+
/*
* A structure of scaler.
*
@@ -119,28 +141,16 @@ struct fimc_capability {
};
/*
- * A structure of fimc driver data.
- *
- * @parent_clk: name of parent clock.
- */
-struct fimc_driverdata {
- char *parent_clk;
-};
-
-/*
* A structure of fimc context.
*
* @ippdrv: prepare initialization using ippdrv.
* @regs_res: register resources.
* @regs: memory mapped io registers.
* @lock: locking of operations.
- * @sclk_fimc_clk: fimc source clock.
- * @fimc_clk: fimc clock.
- * @wb_clk: writeback a clock.
- * @wb_b_clk: writeback b clock.
+ * @clocks: fimc clocks.
+ * @clk_frequency: LCLK clock frequency.
+ * @sysreg: handle to SYSREG block regmap.
 * @sc: scaler information.
- * @odr: ordering of YUV.
- * @ver: fimc version.
 * @pol: polarity of writeback.
* @id: fimc id.
* @irq: irq number.
@@ -151,12 +161,10 @@ struct fimc_context {
struct resource *regs_res;
void __iomem *regs;
struct mutex lock;
- struct clk *sclk_fimc_clk;
- struct clk *fimc_clk;
- struct clk *wb_clk;
- struct clk *wb_b_clk;
+ struct clk *clocks[FIMC_CLKS_MAX];
+ u32 clk_frequency;
+ struct regmap *sysreg;
struct fimc_scaler sc;
- struct fimc_driverdata *ddata;
struct exynos_drm_ipp_pol pol;
int id;
int irq;
@@ -200,17 +208,13 @@ static void fimc_sw_reset(struct fimc_context *ctx)
fimc_write(0x0, EXYNOS_CIFCNTSEQ);
}
-static void fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
+static int fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
{
- u32 camblk_cfg;
-
DRM_DEBUG_KMS("%s\n", __func__);
- camblk_cfg = readl(SYSREG_CAMERA_BLK);
- camblk_cfg &= ~(SYSREG_FIMD0WB_DEST_MASK);
- camblk_cfg |= ctx->id << (SYSREG_FIMD0WB_DEST_SHIFT);
-
- writel(camblk_cfg, SYSREG_CAMERA_BLK);
+ return regmap_update_bits(ctx->sysreg, SYSREG_CAMERA_BLK,
+ SYSREG_FIMD0WB_DEST_MASK,
+ ctx->id << SYSREG_FIMD0WB_DEST_SHIFT);
}
static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
@@ -1301,14 +1305,12 @@ static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
if (enable) {
- clk_enable(ctx->sclk_fimc_clk);
- clk_enable(ctx->fimc_clk);
- clk_enable(ctx->wb_clk);
+ clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
+ clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
ctx->suspended = false;
} else {
- clk_disable(ctx->sclk_fimc_clk);
- clk_disable(ctx->fimc_clk);
- clk_disable(ctx->wb_clk);
+ clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
+ clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
ctx->suspended = true;
}
@@ -1613,7 +1615,11 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
fimc_handle_lastend(ctx, true);
/* setup FIMD */
- fimc_set_camblk_fimd0_wb(ctx);
+ ret = fimc_set_camblk_fimd0_wb(ctx);
+ if (ret < 0) {
+ dev_err(dev, "camblk setup failed.\n");
+ return ret;
+ }
set_wb.enable = 1;
set_wb.refresh = property->refresh_rate;
@@ -1713,76 +1719,118 @@ static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
fimc_write(cfg, EXYNOS_CIGCTRL);
}
+static void fimc_put_clocks(struct fimc_context *ctx)
+{
+ int i;
+
+ for (i = 0; i < FIMC_CLKS_MAX; i++) {
+ if (IS_ERR(ctx->clocks[i]))
+ continue;
+ clk_put(ctx->clocks[i]);
+ ctx->clocks[i] = ERR_PTR(-EINVAL);
+ }
+}
+
+static int fimc_setup_clocks(struct fimc_context *ctx)
+{
+ struct device *fimc_dev = ctx->ippdrv.dev;
+ struct device *dev;
+ int ret, i;
+
+ for (i = 0; i < FIMC_CLKS_MAX; i++)
+ ctx->clocks[i] = ERR_PTR(-EINVAL);
+
+ for (i = 0; i < FIMC_CLKS_MAX; i++) {
+ if (i == FIMC_CLK_WB_A || i == FIMC_CLK_WB_B)
+ dev = fimc_dev->parent;
+ else
+ dev = fimc_dev;
+
+ ctx->clocks[i] = clk_get(dev, fimc_clock_names[i]);
+ if (IS_ERR(ctx->clocks[i])) {
+ if (i >= FIMC_CLK_MUX)
+ break;
+ ret = PTR_ERR(ctx->clocks[i]);
+ dev_err(fimc_dev, "failed to get clock: %s\n",
+ fimc_clock_names[i]);
+ goto e_clk_free;
+ }
+ }
+
+ /* Optional FIMC LCLK parent clock setting */
+ if (!IS_ERR(ctx->clocks[FIMC_CLK_PARENT])) {
+ ret = clk_set_parent(ctx->clocks[FIMC_CLK_MUX],
+ ctx->clocks[FIMC_CLK_PARENT]);
+ if (ret < 0) {
+ dev_err(fimc_dev, "failed to set parent.\n");
+ goto e_clk_free;
+ }
+ }
+
+ ret = clk_set_rate(ctx->clocks[FIMC_CLK_LCLK], ctx->clk_frequency);
+ if (ret < 0)
+ goto e_clk_free;
+
+ ret = clk_prepare_enable(ctx->clocks[FIMC_CLK_LCLK]);
+ if (!ret)
+ return ret;
+e_clk_free:
+ fimc_put_clocks(ctx);
+ return ret;
+}
+
+static int fimc_parse_dt(struct fimc_context *ctx)
+{
+ struct device_node *node = ctx->ippdrv.dev->of_node;
+
+ /* Handle only devices that support the LCD Writeback data path */
+ if (!of_property_read_bool(node, "samsung,lcd-wb"))
+ return -ENODEV;
+
+ if (of_property_read_u32(node, "clock-frequency",
+ &ctx->clk_frequency))
+ ctx->clk_frequency = FIMC_DEFAULT_LCLK_FREQUENCY;
+
+ ctx->id = of_alias_get_id(node, "fimc");
+
+ if (ctx->id < 0) {
+ dev_err(ctx->ippdrv.dev, "failed to get node alias id.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int fimc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimc_context *ctx;
- struct clk *parent_clk;
struct resource *res;
struct exynos_drm_ippdrv *ippdrv;
- struct exynos_drm_fimc_pdata *pdata;
- struct fimc_driverdata *ddata;
int ret;
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- dev_err(dev, "no platform data specified.\n");
- return -EINVAL;
+ if (!dev->of_node) {
+ dev_err(dev, "device tree node not found.\n");
+ return -ENODEV;
}
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
- ddata = (struct fimc_driverdata *)
- platform_get_device_id(pdev)->driver_data;
-
- /* clock control */
- ctx->sclk_fimc_clk = devm_clk_get(dev, "sclk_fimc");
- if (IS_ERR(ctx->sclk_fimc_clk)) {
- dev_err(dev, "failed to get src fimc clock.\n");
- return PTR_ERR(ctx->sclk_fimc_clk);
- }
- clk_enable(ctx->sclk_fimc_clk);
-
- ctx->fimc_clk = devm_clk_get(dev, "fimc");
- if (IS_ERR(ctx->fimc_clk)) {
- dev_err(dev, "failed to get fimc clock.\n");
- clk_disable(ctx->sclk_fimc_clk);
- return PTR_ERR(ctx->fimc_clk);
- }
-
- ctx->wb_clk = devm_clk_get(dev, "pxl_async0");
- if (IS_ERR(ctx->wb_clk)) {
- dev_err(dev, "failed to get writeback a clock.\n");
- clk_disable(ctx->sclk_fimc_clk);
- return PTR_ERR(ctx->wb_clk);
- }
-
- ctx->wb_b_clk = devm_clk_get(dev, "pxl_async1");
- if (IS_ERR(ctx->wb_b_clk)) {
- dev_err(dev, "failed to get writeback b clock.\n");
- clk_disable(ctx->sclk_fimc_clk);
- return PTR_ERR(ctx->wb_b_clk);
- }
+ ctx->ippdrv.dev = dev;
- parent_clk = devm_clk_get(dev, ddata->parent_clk);
-
- if (IS_ERR(parent_clk)) {
- dev_err(dev, "failed to get parent clock.\n");
- clk_disable(ctx->sclk_fimc_clk);
- return PTR_ERR(parent_clk);
- }
+ ret = fimc_parse_dt(ctx);
+ if (ret < 0)
+ return ret;
- if (clk_set_parent(ctx->sclk_fimc_clk, parent_clk)) {
- dev_err(dev, "failed to set parent.\n");
- clk_disable(ctx->sclk_fimc_clk);
- return -EINVAL;
+ ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "samsung,sysreg");
+ if (IS_ERR(ctx->sysreg)) {
+ dev_err(dev, "syscon regmap lookup failed.\n");
+ return PTR_ERR(ctx->sysreg);
}
- devm_clk_put(dev, parent_clk);
- clk_set_rate(ctx->sclk_fimc_clk, pdata->clk_rate);
-
/* resource memory */
ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
@@ -1804,13 +1852,11 @@ static int fimc_probe(struct platform_device *pdev)
return ret;
}
- /* context initailization */
- ctx->id = pdev->id;
- ctx->pol = pdata->pol;
- ctx->ddata = ddata;
+ ret = fimc_setup_clocks(ctx);
+ if (ret < 0)
+ goto err_free_irq;
ippdrv = &ctx->ippdrv;
- ippdrv->dev = dev;
ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops;
ippdrv->check_property = fimc_ippdrv_check_property;
@@ -1820,7 +1866,7 @@ static int fimc_probe(struct platform_device *pdev)
ret = fimc_init_prop_list(ippdrv);
if (ret < 0) {
dev_err(dev, "failed to init property list.\n");
- goto err_get_irq;
+ goto err_put_clk;
}
DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
@@ -1835,17 +1881,18 @@ static int fimc_probe(struct platform_device *pdev)
ret = exynos_drm_ippdrv_register(ippdrv);
if (ret < 0) {
dev_err(dev, "failed to register drm fimc device.\n");
- goto err_ippdrv_register;
+ goto err_pm_dis;
}
dev_info(&pdev->dev, "drm fimc registered successfully.\n");
return 0;
-err_ippdrv_register:
- devm_kfree(dev, ippdrv->prop_list);
+err_pm_dis:
pm_runtime_disable(dev);
-err_get_irq:
+err_put_clk:
+ fimc_put_clocks(ctx);
+err_free_irq:
free_irq(ctx->irq, ctx);
return ret;
@@ -1857,10 +1904,10 @@ static int fimc_remove(struct platform_device *pdev)
struct fimc_context *ctx = get_fimc_context(dev);
struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
- devm_kfree(dev, ippdrv->prop_list);
exynos_drm_ippdrv_unregister(ippdrv);
mutex_destroy(&ctx->lock);
+ fimc_put_clocks(ctx);
pm_runtime_set_suspended(dev);
pm_runtime_disable(dev);
@@ -1915,36 +1962,22 @@ static int fimc_runtime_resume(struct device *dev)
}
#endif
-static struct fimc_driverdata exynos4210_fimc_data = {
- .parent_clk = "mout_mpll",
-};
-
-static struct fimc_driverdata exynos4410_fimc_data = {
- .parent_clk = "mout_mpll_user",
-};
-
-static struct platform_device_id fimc_driver_ids[] = {
- {
- .name = "exynos4210-fimc",
- .driver_data = (unsigned long)&exynos4210_fimc_data,
- }, {
- .name = "exynos4412-fimc",
- .driver_data = (unsigned long)&exynos4410_fimc_data,
- },
- {},
-};
-MODULE_DEVICE_TABLE(platform, fimc_driver_ids);
-
static const struct dev_pm_ops fimc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
};
+static const struct of_device_id fimc_of_match[] = {
+ { .compatible = "samsung,exynos4210-fimc" },
+ { .compatible = "samsung,exynos4212-fimc" },
+ { },
+};
+
struct platform_driver fimc_driver = {
.probe = fimc_probe,
.remove = fimc_remove,
- .id_table = fimc_driver_ids,
.driver = {
+ .of_match_table = fimc_of_match,
.name = "exynos-drm-fimc",
.owner = THIS_MODULE,
.pm = &fimc_pm_ops,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 98cc147..746b282 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -20,6 +20,7 @@
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
+#include <video/of_display_timing.h>
#include <video/samsung_fimd.h>
#include <drm/exynos_drm.h>
@@ -800,18 +801,18 @@ static int fimd_clock(struct fimd_context *ctx, bool enable)
if (enable) {
int ret;
- ret = clk_enable(ctx->bus_clk);
+ ret = clk_prepare_enable(ctx->bus_clk);
if (ret < 0)
return ret;
- ret = clk_enable(ctx->lcd_clk);
+ ret = clk_prepare_enable(ctx->lcd_clk);
if (ret < 0) {
- clk_disable(ctx->bus_clk);
+ clk_disable_unprepare(ctx->bus_clk);
return ret;
}
} else {
- clk_disable(ctx->lcd_clk);
- clk_disable(ctx->bus_clk);
+ clk_disable_unprepare(ctx->lcd_clk);
+ clk_disable_unprepare(ctx->bus_clk);
}
return 0;
@@ -884,10 +885,25 @@ static int fimd_probe(struct platform_device *pdev)
DRM_DEBUG_KMS("%s\n", __FILE__);
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- dev_err(dev, "no platform data specified\n");
- return -EINVAL;
+ if (pdev->dev.of_node) {
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ DRM_ERROR("memory allocation for pdata failed\n");
+ return -ENOMEM;
+ }
+
+ ret = of_get_fb_videomode(dev->of_node, &pdata->panel.timing,
+ OF_USE_NATIVE_MODE);
+ if (ret) {
+ DRM_ERROR("failed: of_get_fb_videomode() : %d\n", ret);
+ return ret;
+ }
+ } else {
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ DRM_ERROR("no platform data specified\n");
+ return -EINVAL;
+ }
}
panel = &pdata->panel;
@@ -918,7 +934,7 @@ static int fimd_probe(struct platform_device *pdev)
if (IS_ERR(ctx->regs))
return PTR_ERR(ctx->regs);
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "vsync");
if (!res) {
dev_err(dev, "irq request failed.\n");
return -ENXIO;
@@ -980,9 +996,6 @@ static int fimd_remove(struct platform_device *pdev)
if (ctx->suspended)
goto out;
- clk_disable(ctx->lcd_clk);
- clk_disable(ctx->bus_clk);
-
pm_runtime_set_suspended(dev);
pm_runtime_put_sync(dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 0e6fe00..cf4543f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -682,7 +682,8 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = args->pitch * args->height;
- exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
+ exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG |
+ EXYNOS_BO_WC, args->size);
if (IS_ERR(exynos_gem_obj))
return PTR_ERR(exynos_gem_obj);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 7c27df0..ba2f0f1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -51,21 +51,27 @@ struct drm_hdmi_context {
int exynos_platform_device_hdmi_register(void)
{
+ struct platform_device *pdev;
+
if (exynos_drm_hdmi_pdev)
return -EEXIST;
- exynos_drm_hdmi_pdev = platform_device_register_simple(
+ pdev = platform_device_register_simple(
"exynos-drm-hdmi", -1, NULL, 0);
- if (IS_ERR_OR_NULL(exynos_drm_hdmi_pdev))
- return PTR_ERR(exynos_drm_hdmi_pdev);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ exynos_drm_hdmi_pdev = pdev;
return 0;
}
void exynos_platform_device_hdmi_unregister(void)
{
- if (exynos_drm_hdmi_pdev)
+ if (exynos_drm_hdmi_pdev) {
platform_device_unregister(exynos_drm_hdmi_pdev);
+ exynos_drm_hdmi_pdev = NULL;
+ }
}
void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx)
@@ -205,13 +211,45 @@ static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+ struct drm_display_mode *m;
+ int mode_ok;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (hdmi_ops && hdmi_ops->mode_fixup)
- hdmi_ops->mode_fixup(ctx->hdmi_ctx->ctx, connector, mode,
- adjusted_mode);
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ mode_ok = drm_hdmi_check_timing(subdrv_dev, adjusted_mode);
+
+ /* just return if user desired mode exists. */
+ if (mode_ok == 0)
+ return;
+
+ /*
+ * otherwise, find the most suitable mode among modes and change it
+ * to adjusted_mode.
+ */
+ list_for_each_entry(m, &connector->modes, head) {
+ mode_ok = drm_hdmi_check_timing(subdrv_dev, m);
+
+ if (mode_ok == 0) {
+ struct drm_mode_object base;
+ struct list_head head;
+
+ DRM_INFO("desired mode doesn't exist so\n");
+ DRM_INFO("use the most suitable mode among modes.\n");
+
+ DRM_DEBUG_KMS("Adjusted Mode: [%d]x[%d] [%d]Hz\n",
+ m->hdisplay, m->vdisplay, m->vrefresh);
+
+ /* preserve display mode header while copying. */
+ head = adjusted_mode->head;
+ base = adjusted_mode->base;
+ memcpy(adjusted_mode, m, sizeof(*m));
+ adjusted_mode->head = head;
+ adjusted_mode->base = base;
+ break;
+ }
+ }
}
static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode)
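The memcpy() in the fallback path above deliberately saves and restores adjusted_mode's list linkage and base object; a minimal standalone sketch of that pattern (assumed function name, illustrative only, not part of the patch):

#include <linux/string.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static void copy_mode_keep_links(struct drm_display_mode *dst,
				 const struct drm_display_mode *src)
{
	struct list_head head = dst->head;
	struct drm_mode_object base = dst->base;

	memcpy(dst, src, sizeof(*dst));	/* copy timings, flags, name */
	dst->head = head;		/* dst stays on its original mode list */
	dst->base = base;		/* dst keeps its own DRM object id */
}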
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index b7faa36..6b70944 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -36,9 +36,6 @@ struct exynos_hdmi_ops {
int (*power_on)(void *ctx, int mode);
/* manager */
- void (*mode_fixup)(void *ctx, struct drm_connector *connector,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
void (*mode_set)(void *ctx, void *mode);
void (*get_max_resol)(void *ctx, unsigned int *width,
unsigned int *height);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 1adce07..29d2ad3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -47,6 +47,9 @@
#define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev))
#define ipp_is_m2m_cmd(c) (c == IPP_CMD_M2M)
+/* platform device pointer for ipp device. */
+static struct platform_device *exynos_drm_ipp_pdev;
+
/*
* A structure of event.
*
@@ -102,6 +105,30 @@ static LIST_HEAD(exynos_drm_ippdrv_list);
static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
+int exynos_platform_device_ipp_register(void)
+{
+ struct platform_device *pdev;
+
+ if (exynos_drm_ipp_pdev)
+ return -EEXIST;
+
+ pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ exynos_drm_ipp_pdev = pdev;
+
+ return 0;
+}
+
+void exynos_platform_device_ipp_unregister(void)
+{
+ if (exynos_drm_ipp_pdev) {
+ platform_device_unregister(exynos_drm_ipp_pdev);
+ exynos_drm_ipp_pdev = NULL;
+ }
+}
+
int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
DRM_DEBUG_KMS("%s\n", __func__);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index a40b9fb..947f09f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -674,7 +674,7 @@ static int rotator_probe(struct platform_device *pdev)
}
rot->clock = devm_clk_get(dev, "rotator");
- if (IS_ERR_OR_NULL(rot->clock)) {
+ if (IS_ERR(rot->clock)) {
dev_err(dev, "failed to get clock\n");
ret = PTR_ERR(rot->clock);
goto err_clk_get;
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 2c5f266..bbfc384 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -108,7 +108,20 @@ struct hdmi_tg_regs {
u8 tg_3d[1];
};
-struct hdmi_core_regs {
+struct hdmi_v13_core_regs {
+ u8 h_blank[2];
+ u8 v_blank[3];
+ u8 h_v_line[3];
+ u8 vsync_pol[1];
+ u8 int_pro_mode[1];
+ u8 v_blank_f[3];
+ u8 h_sync_gen[3];
+ u8 v_sync_gen1[3];
+ u8 v_sync_gen2[3];
+ u8 v_sync_gen3[3];
+};
+
+struct hdmi_v14_core_regs {
u8 h_blank[2];
u8 v2_blank[2];
u8 v1_blank[2];
@@ -147,11 +160,23 @@ struct hdmi_core_regs {
u8 vact_space_6[2];
};
+struct hdmi_v13_conf {
+ struct hdmi_v13_core_regs core;
+ struct hdmi_tg_regs tg;
+};
+
struct hdmi_v14_conf {
- int pixel_clock;
- struct hdmi_core_regs core;
+ struct hdmi_v14_core_regs core;
struct hdmi_tg_regs tg;
+};
+
+struct hdmi_conf_regs {
+ int pixel_clock;
int cea_video_id;
+ union {
+ struct hdmi_v13_conf v13_conf;
+ struct hdmi_v14_conf v14_conf;
+ } conf;
};
struct hdmi_context {
@@ -169,9 +194,8 @@ struct hdmi_context {
struct i2c_client *ddc_port;
struct i2c_client *hdmiphy_port;
- /* current hdmiphy conf index */
- int cur_conf;
- struct hdmi_v14_conf mode_conf;
+ /* current hdmiphy conf regs */
+ struct hdmi_conf_regs mode_conf;
struct hdmi_resources res;
@@ -180,292 +204,60 @@ struct hdmi_context {
enum hdmi_type type;
};
-/* HDMI Version 1.3 */
-static const u8 hdmiphy_v13_conf27[32] = {
- 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
- 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
- 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
- 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
-};
-
-static const u8 hdmiphy_v13_conf27_027[32] = {
- 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
- 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
- 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
- 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
-};
-
-static const u8 hdmiphy_v13_conf74_175[32] = {
- 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
- 0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
- 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
- 0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00,
-};
-
-static const u8 hdmiphy_v13_conf74_25[32] = {
- 0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
- 0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
- 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0,
- 0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00,
-};
-
-static const u8 hdmiphy_v13_conf148_5[32] = {
- 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
- 0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
- 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
- 0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00,
-};
-
-struct hdmi_v13_tg_regs {
- u8 cmd;
- u8 h_fsz_l;
- u8 h_fsz_h;
- u8 hact_st_l;
- u8 hact_st_h;
- u8 hact_sz_l;
- u8 hact_sz_h;
- u8 v_fsz_l;
- u8 v_fsz_h;
- u8 vsync_l;
- u8 vsync_h;
- u8 vsync2_l;
- u8 vsync2_h;
- u8 vact_st_l;
- u8 vact_st_h;
- u8 vact_sz_l;
- u8 vact_sz_h;
- u8 field_chg_l;
- u8 field_chg_h;
- u8 vact_st2_l;
- u8 vact_st2_h;
- u8 vsync_top_hdmi_l;
- u8 vsync_top_hdmi_h;
- u8 vsync_bot_hdmi_l;
- u8 vsync_bot_hdmi_h;
- u8 field_top_hdmi_l;
- u8 field_top_hdmi_h;
- u8 field_bot_hdmi_l;
- u8 field_bot_hdmi_h;
-};
-
-struct hdmi_v13_core_regs {
- u8 h_blank[2];
- u8 v_blank[3];
- u8 h_v_line[3];
- u8 vsync_pol[1];
- u8 int_pro_mode[1];
- u8 v_blank_f[3];
- u8 h_sync_gen[3];
- u8 v_sync_gen1[3];
- u8 v_sync_gen2[3];
- u8 v_sync_gen3[3];
-};
-
-struct hdmi_v13_preset_conf {
- struct hdmi_v13_core_regs core;
- struct hdmi_v13_tg_regs tg;
-};
-
-struct hdmi_v13_conf {
- int width;
- int height;
- int vrefresh;
- bool interlace;
- int cea_video_id;
- const u8 *hdmiphy_data;
- const struct hdmi_v13_preset_conf *conf;
-};
-
-static const struct hdmi_v13_preset_conf hdmi_v13_conf_480p = {
- .core = {
- .h_blank = {0x8a, 0x00},
- .v_blank = {0x0d, 0x6a, 0x01},
- .h_v_line = {0x0d, 0xa2, 0x35},
- .vsync_pol = {0x01},
- .int_pro_mode = {0x00},
- .v_blank_f = {0x00, 0x00, 0x00},
- .h_sync_gen = {0x0e, 0x30, 0x11},
- .v_sync_gen1 = {0x0f, 0x90, 0x00},
- /* other don't care */
- },
- .tg = {
- 0x00, /* cmd */
- 0x5a, 0x03, /* h_fsz */
- 0x8a, 0x00, 0xd0, 0x02, /* hact */
- 0x0d, 0x02, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x2d, 0x00, 0xe0, 0x01, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x49, 0x02, /* vact_st2 */
- 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- },
-};
-
-static const struct hdmi_v13_preset_conf hdmi_v13_conf_720p60 = {
- .core = {
- .h_blank = {0x72, 0x01},
- .v_blank = {0xee, 0xf2, 0x00},
- .h_v_line = {0xee, 0x22, 0x67},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x00},
- .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
- .h_sync_gen = {0x6c, 0x50, 0x02},
- .v_sync_gen1 = {0x0a, 0x50, 0x00},
- .v_sync_gen2 = {0x01, 0x10, 0x00},
- .v_sync_gen3 = {0x01, 0x10, 0x00},
- /* other don't care */
- },
- .tg = {
- 0x00, /* cmd */
- 0x72, 0x06, /* h_fsz */
- 0x71, 0x01, 0x01, 0x05, /* hact */
- 0xee, 0x02, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x1e, 0x00, 0xd0, 0x02, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x49, 0x02, /* vact_st2 */
- 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- },
-};
-
-static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080i50 = {
- .core = {
- .h_blank = {0xd0, 0x02},
- .v_blank = {0x32, 0xB2, 0x00},
- .h_v_line = {0x65, 0x04, 0xa5},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x01},
- .v_blank_f = {0x49, 0x2A, 0x23},
- .h_sync_gen = {0x0E, 0xEA, 0x08},
- .v_sync_gen1 = {0x07, 0x20, 0x00},
- .v_sync_gen2 = {0x39, 0x42, 0x23},
- .v_sync_gen3 = {0x38, 0x87, 0x73},
- /* other don't care */
- },
- .tg = {
- 0x00, /* cmd */
- 0x50, 0x0A, /* h_fsz */
- 0xCF, 0x02, 0x81, 0x07, /* hact */
- 0x65, 0x04, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x16, 0x00, 0x1c, 0x02, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x49, 0x02, /* vact_st2 */
- 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- },
+struct hdmiphy_config {
+ int pixel_clock;
+ u8 conf[32];
};
-static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p50 = {
- .core = {
- .h_blank = {0xd0, 0x02},
- .v_blank = {0x65, 0x6c, 0x01},
- .h_v_line = {0x65, 0x04, 0xa5},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x00},
- .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
- .h_sync_gen = {0x0e, 0xea, 0x08},
- .v_sync_gen1 = {0x09, 0x40, 0x00},
- .v_sync_gen2 = {0x01, 0x10, 0x00},
- .v_sync_gen3 = {0x01, 0x10, 0x00},
- /* other don't care */
- },
- .tg = {
- 0x00, /* cmd */
- 0x50, 0x0A, /* h_fsz */
- 0xCF, 0x02, 0x81, 0x07, /* hact */
- 0x65, 0x04, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x2d, 0x00, 0x38, 0x04, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x48, 0x02, /* vact_st2 */
- 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+/* list of phy config settings */
+static const struct hdmiphy_config hdmiphy_v13_configs[] = {
+ {
+ .pixel_clock = 27000000,
+ .conf = {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+ 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
+ },
},
-};
-
-static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080i60 = {
- .core = {
- .h_blank = {0x18, 0x01},
- .v_blank = {0x32, 0xB2, 0x00},
- .h_v_line = {0x65, 0x84, 0x89},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x01},
- .v_blank_f = {0x49, 0x2A, 0x23},
- .h_sync_gen = {0x56, 0x08, 0x02},
- .v_sync_gen1 = {0x07, 0x20, 0x00},
- .v_sync_gen2 = {0x39, 0x42, 0x23},
- .v_sync_gen3 = {0xa4, 0x44, 0x4a},
- /* other don't care */
+ {
+ .pixel_clock = 27027000,
+ .conf = {
+ 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
+ 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
+ },
},
- .tg = {
- 0x00, /* cmd */
- 0x98, 0x08, /* h_fsz */
- 0x17, 0x01, 0x81, 0x07, /* hact */
- 0x65, 0x04, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x16, 0x00, 0x1c, 0x02, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x49, 0x02, /* vact_st2 */
- 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ {
+ .pixel_clock = 74176000,
+ .conf = {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
+ 0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00,
+ },
},
-};
-
-static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = {
- .core = {
- .h_blank = {0x18, 0x01},
- .v_blank = {0x65, 0x6c, 0x01},
- .h_v_line = {0x65, 0x84, 0x89},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x00},
- .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
- .h_sync_gen = {0x56, 0x08, 0x02},
- .v_sync_gen1 = {0x09, 0x40, 0x00},
- .v_sync_gen2 = {0x01, 0x10, 0x00},
- .v_sync_gen3 = {0x01, 0x10, 0x00},
- /* other don't care */
+ {
+ .pixel_clock = 74250000,
+ .conf = {
+ 0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
+ 0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0,
+ 0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00,
+ },
},
- .tg = {
- 0x00, /* cmd */
- 0x98, 0x08, /* h_fsz */
- 0x17, 0x01, 0x81, 0x07, /* hact */
- 0x65, 0x04, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x2d, 0x00, 0x38, 0x04, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x48, 0x02, /* vact_st2 */
- 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ {
+ .pixel_clock = 148500000,
+ .conf = {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
+ 0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00,
+ },
},
};
-static const struct hdmi_v13_conf hdmi_v13_confs[] = {
- { 1280, 720, 60, false, 4, hdmiphy_v13_conf74_25,
- &hdmi_v13_conf_720p60 },
- { 1280, 720, 50, false, 19, hdmiphy_v13_conf74_25,
- &hdmi_v13_conf_720p60 },
- { 720, 480, 60, false, 3, hdmiphy_v13_conf27_027,
- &hdmi_v13_conf_480p },
- { 1920, 1080, 50, true, 20, hdmiphy_v13_conf74_25,
- &hdmi_v13_conf_1080i50 },
- { 1920, 1080, 50, false, 31, hdmiphy_v13_conf148_5,
- &hdmi_v13_conf_1080p50 },
- { 1920, 1080, 60, true, 5, hdmiphy_v13_conf74_25,
- &hdmi_v13_conf_1080i60 },
- { 1920, 1080, 60, false, 16, hdmiphy_v13_conf148_5,
- &hdmi_v13_conf_1080p60 },
-};
-
-/* HDMI Version 1.4 */
-struct hdmiphy_config {
- int pixel_clock;
- u8 conf[32];
-};
-
-/* list of all required phy config settings */
static const struct hdmiphy_config hdmiphy_v14_configs[] = {
{
.pixel_clock = 25200000,
@@ -873,22 +665,6 @@ static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
hdmi_v14_regs_dump(hdata, prefix);
}
-static int hdmi_v13_conf_index(struct drm_display_mode *mode)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hdmi_v13_confs); ++i)
- if (hdmi_v13_confs[i].width == mode->hdisplay &&
- hdmi_v13_confs[i].height == mode->vdisplay &&
- hdmi_v13_confs[i].vrefresh == mode->vrefresh &&
- hdmi_v13_confs[i].interlace ==
- ((mode->flags & DRM_MODE_FLAG_INTERLACE) ?
- true : false))
- return i;
-
- return -EINVAL;
-}
-
static u8 hdmi_chksum(struct hdmi_context *hdata,
u32 start, u8 len, u32 hdr_sum)
{
@@ -943,11 +719,7 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio |
AVI_SAME_AS_PIC_ASPECT_RATIO);
- if (hdata->type == HDMI_TYPE13)
- vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id;
- else
- vic = hdata->mode_conf.cea_video_id;
-
+ vic = hdata->mode_conf.cea_video_id;
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
@@ -1000,63 +772,34 @@ static struct edid *hdmi_get_edid(void *ctx, struct drm_connector *connector)
return raw_edid;
}
-static int hdmi_v13_check_timing(struct fb_videomode *check_timing)
+static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
{
- int i;
+ const struct hdmiphy_config *confs;
+ int count, i;
- DRM_DEBUG_KMS("valid mode : xres=%d, yres=%d, refresh=%d, intl=%d\n",
- check_timing->xres, check_timing->yres,
- check_timing->refresh, (check_timing->vmode &
- FB_VMODE_INTERLACED) ? true : false);
-
- for (i = 0; i < ARRAY_SIZE(hdmi_v13_confs); ++i)
- if (hdmi_v13_confs[i].width == check_timing->xres &&
- hdmi_v13_confs[i].height == check_timing->yres &&
- hdmi_v13_confs[i].vrefresh == check_timing->refresh &&
- hdmi_v13_confs[i].interlace ==
- ((check_timing->vmode & FB_VMODE_INTERLACED) ?
- true : false))
- return 0;
-
- /* TODO */
-
- return -EINVAL;
-}
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-static int hdmi_v14_find_phy_conf(int pixel_clock)
-{
- int i;
+ if (hdata->type == HDMI_TYPE13) {
+ confs = hdmiphy_v13_configs;
+ count = ARRAY_SIZE(hdmiphy_v13_configs);
+ } else if (hdata->type == HDMI_TYPE14) {
+ confs = hdmiphy_v14_configs;
+ count = ARRAY_SIZE(hdmiphy_v14_configs);
+ } else
+ return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(hdmiphy_v14_configs); i++) {
- if (hdmiphy_v14_configs[i].pixel_clock == pixel_clock)
+ for (i = 0; i < count; i++)
+ if (confs[i].pixel_clock == pixel_clock)
return i;
- }
DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock);
return -EINVAL;
}
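hdmi_find_phy_conf() above collapses the two per-version helpers into one linear search: the table and its length are picked by IP type, then the index of the entry whose pixel_clock matches is returned. A small standalone sketch of that pattern; the table contents and helper names below are illustrative, not the driver's real data.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Illustrative entries only; the real tables also carry 32-byte PHY data. */
struct phy_conf { unsigned int pixel_clock; };

static const struct phy_conf v13_confs[] = { { 27027000 }, { 74250000 }, { 148500000 } };
static const struct phy_conf v14_confs[] = { { 25200000 }, { 74250000 }, { 148500000 } };

/* Pick the table by IP version, then linear-search on pixel clock. */
static int find_phy_conf(int is_v13, unsigned int pixel_clock)
{
        const struct phy_conf *confs = is_v13 ? v13_confs : v14_confs;
        int count = is_v13 ? ARRAY_SIZE(v13_confs) : ARRAY_SIZE(v14_confs);
        int i;

        for (i = 0; i < count; i++)
                if (confs[i].pixel_clock == pixel_clock)
                        return i;
        return -1;
}

int main(void)
{
        printf("%d\n", find_phy_conf(1, 74250000));     /* prints 1 */
        return 0;
}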
-static int hdmi_v14_check_timing(struct fb_videomode *check_timing)
-{
- int i;
-
- DRM_DEBUG_KMS("mode: xres=%d, yres=%d, refresh=%d, clock=%d, intl=%d\n",
- check_timing->xres, check_timing->yres,
- check_timing->refresh, check_timing->pixclock,
- (check_timing->vmode & FB_VMODE_INTERLACED) ?
- true : false);
-
- for (i = 0; i < ARRAY_SIZE(hdmiphy_v14_configs); i++)
- if (hdmiphy_v14_configs[i].pixel_clock ==
- check_timing->pixclock)
- return 0;
-
- return -EINVAL;
-}
-
static int hdmi_check_timing(void *ctx, struct fb_videomode *timing)
{
struct hdmi_context *hdata = ctx;
+ int ret;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
@@ -1064,10 +807,10 @@ static int hdmi_check_timing(void *ctx, struct fb_videomode *timing)
timing->yres, timing->refresh,
timing->vmode);
- if (hdata->type == HDMI_TYPE13)
- return hdmi_v13_check_timing(timing);
- else
- return hdmi_v14_check_timing(timing);
+ ret = hdmi_find_phy_conf(hdata, timing->pixclock);
+ if (ret < 0)
+ return ret;
+ return 0;
}
static void hdmi_set_acr(u32 freq, u8 *acr)
@@ -1301,10 +1044,9 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
{
- const struct hdmi_v13_preset_conf *conf =
- hdmi_v13_confs[hdata->cur_conf].conf;
- const struct hdmi_v13_core_regs *core = &conf->core;
- const struct hdmi_v13_tg_regs *tg = &conf->tg;
+ const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg;
+ const struct hdmi_v13_core_regs *core =
+ &hdata->mode_conf.conf.v13_conf.core;
int tries;
/* setting core registers */
@@ -1334,34 +1076,34 @@ static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
/* Timing generator registers */
- hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz_l);
- hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz_h);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st_l);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st_h);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz_l);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz_h);
- hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz_l);
- hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz_h);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg_l);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]);
/* waiting for HDMIPHY's PLL to get to steady state */
for (tries = 100; tries; --tries) {
@@ -1391,8 +1133,9 @@ static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
static void hdmi_v14_timing_apply(struct hdmi_context *hdata)
{
- struct hdmi_core_regs *core = &hdata->mode_conf.core;
- struct hdmi_tg_regs *tg = &hdata->mode_conf.tg;
+ const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg;
+ const struct hdmi_v14_core_regs *core =
+ &hdata->mode_conf.conf.v14_conf.core;
int tries;
/* setting core registers */
@@ -1624,17 +1367,16 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
}
/* pixel clock */
- if (hdata->type == HDMI_TYPE13) {
- hdmiphy_data = hdmi_v13_confs[hdata->cur_conf].hdmiphy_data;
- } else {
- i = hdmi_v14_find_phy_conf(hdata->mode_conf.pixel_clock);
- if (i < 0) {
- DRM_ERROR("failed to find hdmiphy conf\n");
- return;
- }
+ i = hdmi_find_phy_conf(hdata, hdata->mode_conf.pixel_clock);
+ if (i < 0) {
+ DRM_ERROR("failed to find hdmiphy conf\n");
+ return;
+ }
+ if (hdata->type == HDMI_TYPE13)
+ hdmiphy_data = hdmiphy_v13_configs[i].conf;
+ else
hdmiphy_data = hdmiphy_v14_configs[i].conf;
- }
memcpy(buffer, hdmiphy_data, 32);
ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32);
@@ -1687,75 +1429,121 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
hdmi_regs_dump(hdata, "start");
}
-static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void hdmi_set_reg(u8 *reg_pair, int num_bytes, u32 value)
{
- struct drm_display_mode *m;
- struct hdmi_context *hdata = ctx;
- int index;
+ int i;
+ BUG_ON(num_bytes > 4);
+ for (i = 0; i < num_bytes; i++)
+ reg_pair[i] = (value >> (8 * i)) & 0xff;
+}
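hdmi_set_reg() packs a value into up to four consecutive byte registers, least-significant byte first. A minimal standalone check of the packing, using the 1080p60 horizontal blank as input (htotal 2200 and hdisplay 1920 are assumed CEA-861 values, not taken from the patch):

#include <assert.h>
#include <stdint.h>

/* Same packing as hdmi_set_reg(): LSB of 'value' goes into reg_pair[0]. */
static void set_reg(uint8_t *reg_pair, int num_bytes, uint32_t value)
{
        int i;

        assert(num_bytes <= 4);
        for (i = 0; i < num_bytes; i++)
                reg_pair[i] = (value >> (8 * i)) & 0xff;
}

int main(void)
{
        uint8_t h_blank[2];

        set_reg(h_blank, 2, 2200 - 1920);       /* 280 == 0x0118 */
        assert(h_blank[0] == 0x18 && h_blank[1] == 0x01);
        return 0;
}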
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+static void hdmi_v13_mode_set(struct hdmi_context *hdata,
+ struct drm_display_mode *m)
+{
+ struct hdmi_v13_core_regs *core = &hdata->mode_conf.conf.v13_conf.core;
+ struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg;
+ unsigned int val;
- drm_mode_set_crtcinfo(adjusted_mode, 0);
+ hdata->mode_conf.cea_video_id =
+ drm_match_cea_mode((struct drm_display_mode *)m);
+ hdata->mode_conf.pixel_clock = m->clock * 1000;
- if (hdata->type == HDMI_TYPE13)
- index = hdmi_v13_conf_index(adjusted_mode);
- else
- index = hdmi_v14_find_phy_conf(adjusted_mode->clock * 1000);
+ hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
+ hdmi_set_reg(core->h_v_line, 3, (m->htotal << 12) | m->vtotal);
- /* just return if user desired mode exists. */
- if (index >= 0)
- return;
+ val = (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
+ hdmi_set_reg(core->vsync_pol, 1, val);
+
+ val = (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0;
+ hdmi_set_reg(core->int_pro_mode, 1, val);
+
+ val = (m->hsync_start - m->hdisplay - 2);
+ val |= ((m->hsync_end - m->hdisplay - 2) << 10);
+ val |= ((m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0)<<20;
+ hdmi_set_reg(core->h_sync_gen, 3, val);
/*
- * otherwise, find the most suitable mode among modes and change it
- * to adjusted_mode.
+ * Quirk requirement for exynos HDMI IP design: hsync_start and
+ * hsync_end above are programmed 2 pixels less than the values
+ * calculated from the mode.
*/
- list_for_each_entry(m, &connector->modes, head) {
- if (hdata->type == HDMI_TYPE13)
- index = hdmi_v13_conf_index(m);
- else
- index = hdmi_v14_find_phy_conf(m->clock * 1000);
-
- if (index >= 0) {
- struct drm_mode_object base;
- struct list_head head;
-
- DRM_INFO("desired mode doesn't exist so\n");
- DRM_INFO("use the most suitable mode among modes.\n");
-
- DRM_DEBUG_KMS("Adjusted Mode: [%d]x[%d] [%d]Hz\n",
- m->hdisplay, m->vdisplay, m->vrefresh);
-
- /* preserve display mode header while copying. */
- head = adjusted_mode->head;
- base = adjusted_mode->base;
- memcpy(adjusted_mode, m, sizeof(*m));
- adjusted_mode->head = head;
- adjusted_mode->base = base;
- break;
- }
+
+ /* Following values & calculations differ for different type of modes */
+ if (m->flags & DRM_MODE_FLAG_INTERLACE) {
+ /* Interlaced Mode */
+ val = ((m->vsync_end - m->vdisplay) / 2);
+ val |= ((m->vsync_start - m->vdisplay) / 2) << 12;
+ hdmi_set_reg(core->v_sync_gen1, 3, val);
+
+ val = m->vtotal / 2;
+ val |= ((m->vtotal - m->vdisplay) / 2) << 11;
+ hdmi_set_reg(core->v_blank, 3, val);
+
+ val = (m->vtotal +
+ ((m->vsync_end - m->vsync_start) * 4) + 5) / 2;
+ val |= m->vtotal << 11;
+ hdmi_set_reg(core->v_blank_f, 3, val);
+
+ val = ((m->vtotal / 2) + 7);
+ val |= ((m->vtotal / 2) + 2) << 12;
+ hdmi_set_reg(core->v_sync_gen2, 3, val);
+
+ val = ((m->htotal / 2) + (m->hsync_start - m->hdisplay));
+ val |= ((m->htotal / 2) +
+ (m->hsync_start - m->hdisplay)) << 12;
+ hdmi_set_reg(core->v_sync_gen3, 3, val);
+
+ hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
+ hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
+
+ hdmi_set_reg(tg->vact_st2, 2, 0x249); /* Reset value + 1 */
+ } else {
+ /* Progressive Mode */
+
+ val = m->vtotal;
+ val |= (m->vtotal - m->vdisplay) << 11;
+ hdmi_set_reg(core->v_blank, 3, val);
+
+ hdmi_set_reg(core->v_blank_f, 3, 0);
+
+ val = (m->vsync_end - m->vdisplay);
+ val |= ((m->vsync_start - m->vdisplay) << 12);
+ hdmi_set_reg(core->v_sync_gen1, 3, val);
+
+ hdmi_set_reg(core->v_sync_gen2, 3, 0x1001); /* Reset value */
+ hdmi_set_reg(core->v_sync_gen3, 3, 0x1001); /* Reset value */
+ hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay);
+ hdmi_set_reg(tg->vact_sz, 2, m->vdisplay);
+ hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
}
-}
-static void hdmi_set_reg(u8 *reg_pair, int num_bytes, u32 value)
-{
- int i;
- BUG_ON(num_bytes > 4);
- for (i = 0; i < num_bytes; i++)
- reg_pair[i] = (value >> (8 * i)) & 0xff;
+ /* Timing generator registers */
+ hdmi_set_reg(tg->cmd, 1, 0x0);
+ hdmi_set_reg(tg->h_fsz, 2, m->htotal);
+ hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay);
+ hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
+ hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
+ hdmi_set_reg(tg->vsync, 2, 0x1);
+ hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
+ hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
+ hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
+ hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
+ hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
+ hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
+ hdmi_set_reg(tg->tg_3d, 1, 0x0); /* Not used */
}
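The progressive branch of hdmi_v13_mode_set() derives the registers that the removed hdmi_v13_conf_1080p60 table used to hard-code. Assuming the standard CEA-861 1080p60 timings (htotal 2200, vtotal 1125, vdisplay 1080), the computed v_blank and h_v_line bytes reproduce the old {0x65, 0x6c, 0x01} and {0x65, 0x84, 0x89} entries; a quick standalone check:

#include <assert.h>
#include <stdint.h>

static void set_reg(uint8_t *r, int n, uint32_t v)
{
        int i;

        for (i = 0; i < n; i++)
                r[i] = (v >> (8 * i)) & 0xff;
}

int main(void)
{
        unsigned int htotal = 2200, vtotal = 1125, vdisplay = 1080;
        uint8_t v_blank[3], h_v_line[3];

        /* Progressive-mode formulas from hdmi_v13_mode_set(). */
        set_reg(v_blank, 3, vtotal | ((vtotal - vdisplay) << 11));
        set_reg(h_v_line, 3, (htotal << 12) | vtotal);

        assert(v_blank[0] == 0x65 && v_blank[1] == 0x6c && v_blank[2] == 0x01);
        assert(h_v_line[0] == 0x65 && h_v_line[1] == 0x84 && h_v_line[2] == 0x89);
        return 0;
}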
static void hdmi_v14_mode_set(struct hdmi_context *hdata,
struct drm_display_mode *m)
{
- struct hdmi_core_regs *core = &hdata->mode_conf.core;
- struct hdmi_tg_regs *tg = &hdata->mode_conf.tg;
-
- hdata->mode_conf.cea_video_id = drm_match_cea_mode(m);
+ struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg;
+ struct hdmi_v14_core_regs *core =
+ &hdata->mode_conf.conf.v14_conf.core;
+ hdata->mode_conf.cea_video_id =
+ drm_match_cea_mode((struct drm_display_mode *)m);
hdata->mode_conf.pixel_clock = m->clock * 1000;
+
hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
hdmi_set_reg(core->v_line, 2, m->vtotal);
hdmi_set_reg(core->h_line, 2, m->htotal);
@@ -1852,25 +1640,22 @@ static void hdmi_v14_mode_set(struct hdmi_context *hdata,
hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
hdmi_set_reg(tg->tg_3d, 1, 0x0);
-
}
static void hdmi_mode_set(void *ctx, void *mode)
{
struct hdmi_context *hdata = ctx;
- int conf_idx;
+ struct drm_display_mode *m = mode;
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+ DRM_DEBUG_KMS("[%s]: xres=%d, yres=%d, refresh=%d, intl=%s\n",
+ __func__, m->hdisplay, m->vdisplay,
+ m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ?
+ "INTERLACED" : "PROGERESSIVE");
- if (hdata->type == HDMI_TYPE13) {
- conf_idx = hdmi_v13_conf_index(mode);
- if (conf_idx >= 0)
- hdata->cur_conf = conf_idx;
- else
- DRM_DEBUG_KMS("not supported mode\n");
- } else {
+ if (hdata->type == HDMI_TYPE13)
+ hdmi_v13_mode_set(hdata, mode);
+ else
hdmi_v14_mode_set(hdata, mode);
- }
}
static void hdmi_get_max_resol(void *ctx, unsigned int *width,
@@ -1983,7 +1768,6 @@ static struct exynos_hdmi_ops hdmi_ops = {
.check_timing = hdmi_check_timing,
/* manager */
- .mode_fixup = hdmi_mode_fixup,
.mode_set = hdmi_mode_set,
.get_max_resol = hdmi_get_max_resol,
.commit = hdmi_commit,
@@ -2023,27 +1807,27 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
/* get clocks, power */
res->hdmi = devm_clk_get(dev, "hdmi");
- if (IS_ERR_OR_NULL(res->hdmi)) {
+ if (IS_ERR(res->hdmi)) {
DRM_ERROR("failed to get clock 'hdmi'\n");
goto fail;
}
res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
- if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
+ if (IS_ERR(res->sclk_hdmi)) {
DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
goto fail;
}
res->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
- if (IS_ERR_OR_NULL(res->sclk_pixel)) {
+ if (IS_ERR(res->sclk_pixel)) {
DRM_ERROR("failed to get clock 'sclk_pixel'\n");
goto fail;
}
res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
- if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
+ if (IS_ERR(res->sclk_hdmiphy)) {
DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
goto fail;
}
res->hdmiphy = devm_clk_get(dev, "hdmiphy");
- if (IS_ERR_OR_NULL(res->hdmiphy)) {
+ if (IS_ERR(res->hdmiphy)) {
DRM_ERROR("failed to get clock 'hdmiphy'\n");
goto fail;
}
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 2f4f72f..ec3e376 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -643,12 +643,14 @@ static void mixer_win_reset(struct mixer_context *ctx)
/* setting graphical layers */
val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
val |= MXR_GRP_CFG_WIN_BLEND_EN;
- val |= MXR_GRP_CFG_BLEND_PRE_MUL;
- val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
- /* the same configuration for both layers */
+ /* Don't blend layer 0 onto the mixer background */
mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
+
+ /* Blend layer 1 into layer 0 */
+ val |= MXR_GRP_CFG_BLEND_PRE_MUL;
+ val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
/* setting video layers */
@@ -820,7 +822,6 @@ static void mixer_win_disable(void *ctx, int win)
static int mixer_check_timing(void *ctx, struct fb_videomode *timing)
{
- struct mixer_context *mixer_ctx = ctx;
u32 w, h;
w = timing->xres;
@@ -831,9 +832,6 @@ static int mixer_check_timing(void *ctx, struct fb_videomode *timing)
timing->refresh, (timing->vmode &
FB_VMODE_INTERLACED) ? true : false);
- if (mixer_ctx->mxr_ver == MXR_VER_0_0_0_16)
- return 0;
-
if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) ||
(w >= 1024 && w <= 1280 && h >= 576 && h <= 720) ||
(w >= 1664 && w <= 1920 && h >= 936 && h <= 1080))
@@ -1047,13 +1045,13 @@ static int mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
spin_lock_init(&mixer_res->reg_slock);
mixer_res->mixer = devm_clk_get(dev, "mixer");
- if (IS_ERR_OR_NULL(mixer_res->mixer)) {
+ if (IS_ERR(mixer_res->mixer)) {
dev_err(dev, "failed to get clock 'mixer'\n");
return -ENODEV;
}
mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
- if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
+ if (IS_ERR(mixer_res->sclk_hdmi)) {
dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
return -ENODEV;
}
@@ -1096,17 +1094,17 @@ static int vp_resources_init(struct exynos_drm_hdmi_context *ctx,
struct resource *res;
mixer_res->vp = devm_clk_get(dev, "vp");
- if (IS_ERR_OR_NULL(mixer_res->vp)) {
+ if (IS_ERR(mixer_res->vp)) {
dev_err(dev, "failed to get clock 'vp'\n");
return -ENODEV;
}
mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
- if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
+ if (IS_ERR(mixer_res->sclk_mixer)) {
dev_err(dev, "failed to get clock 'sclk_mixer'\n");
return -ENODEV;
}
mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
- if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
+ if (IS_ERR(mixer_res->sclk_dac)) {
dev_err(dev, "failed to get clock 'sclk_dac'\n");
return -ENODEV;
}
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
index b4f9ca1..3049613 100644
--- a/drivers/gpu/drm/exynos/regs-fimc.h
+++ b/drivers/gpu/drm/exynos/regs-fimc.h
@@ -661,9 +661,8 @@
#define EXYNOS_CLKSRC_SCLK (1 << 1)
/* SYSREG for FIMC writeback */
-#define SYSREG_CAMERA_BLK (S3C_VA_SYS + 0x0218)
-#define SYSREG_ISP_BLK (S3C_VA_SYS + 0x020c)
-#define SYSREG_FIMD0WB_DEST_MASK (0x3 << 23)
-#define SYSREG_FIMD0WB_DEST_SHIFT 23
+#define SYSREG_CAMERA_BLK (0x0218)
+#define SYSREG_FIMD0WB_DEST_MASK (0x3 << 23)
+#define SYSREG_FIMD0WB_DEST_SHIFT 23
#endif /* EXYNOS_REGS_FIMC_H */
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 1188f0f..1f6e2df 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -2,10 +2,15 @@ config DRM_GMA500
tristate "Intel GMA5/600 KMS Framebuffer"
depends on DRM && PCI && X86
select FB_CFB_COPYAREA
- select FB_CFB_FILLRECT
- select FB_CFB_IMAGEBLIT
- select DRM_KMS_HELPER
- select DRM_TTM
+ select FB_CFB_FILLRECT
+ select FB_CFB_IMAGEBLIT
+ select DRM_KMS_HELPER
+ select DRM_TTM
+ # GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
+ select ACPI_VIDEO if ACPI
+ select BACKLIGHT_CLASS_DEVICE if ACPI
+ select VIDEO_OUTPUT_CONTROL if ACPI
+ select INPUT if ACPI
help
Say yes for an experimental 2D KMS framebuffer driver for the
Intel GMA500 ('Poulsbo') and other Intel IMG based graphics
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 8c17534..7b8386f 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -276,6 +276,7 @@ void cdv_intel_crt_init(struct drm_device *dev,
goto failed_connector;
connector = &psb_intel_connector->base;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
drm_connector_init(dev, connector,
&cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index e223b50..464153d 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -319,6 +319,7 @@ void cdv_hdmi_init(struct drm_device *dev,
goto err_priv;
connector = &psb_intel_connector->base;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
encoder = &psb_intel_encoder->base;
drm_connector_init(dev, connector,
&cdv_hdmi_connector_funcs,
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 2590cac..1534e22 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -431,7 +431,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
fbdev->psb_fb_helper.fbdev = info;
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
- strcpy(info->fix.id, "psbfb");
+ strcpy(info->fix.id, "psbdrmfb");
info->flags = FBINFO_DEFAULT;
if (dev_priv->ops->accel_2d && pitch_lines > 8) /* 2D engine */
@@ -772,8 +772,8 @@ void psb_modeset_init(struct drm_device *dev)
for (i = 0; i < dev_priv->num_pipe; i++)
psb_intel_crtc_init(dev, i, mode_dev);
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
+ dev->mode_config.max_width = 4096;
+ dev->mode_config.max_height = 4096;
psb_setup_outputs(dev);
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 054e26e..1f82183 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -80,7 +80,8 @@ static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
* the GTT. This is protected via the gtt mutex which the caller
* must hold.
*/
-static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
+static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
+ int resume)
{
u32 __iomem *gtt_slot;
u32 pte;
@@ -97,8 +98,10 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
gtt_slot = psb_gtt_entry(dev, r);
pages = r->pages;
- /* Make sure changes are visible to the GPU */
- set_pages_array_wc(pages, r->npage);
+ if (!resume) {
+ /* Make sure changes are visible to the GPU */
+ set_pages_array_wc(pages, r->npage);
+ }
/* Write our page entries into the GTT itself */
for (i = r->roll; i < r->npage; i++) {
@@ -269,7 +272,7 @@ int psb_gtt_pin(struct gtt_range *gt)
ret = psb_gtt_attach_pages(gt);
if (ret < 0)
goto out;
- ret = psb_gtt_insert(dev, gt);
+ ret = psb_gtt_insert(dev, gt, 0);
if (ret < 0) {
psb_gtt_detach_pages(gt);
goto out;
@@ -421,9 +424,11 @@ int psb_gtt_init(struct drm_device *dev, int resume)
int ret = 0;
uint32_t pte;
- mutex_init(&dev_priv->gtt_mutex);
+ if (!resume) {
+ mutex_init(&dev_priv->gtt_mutex);
+ psb_gtt_alloc(dev);
+ }
- psb_gtt_alloc(dev);
pg = &dev_priv->gtt;
/* Enable the GTT */
@@ -505,7 +510,8 @@ int psb_gtt_init(struct drm_device *dev, int resume)
/*
* Map the GTT and the stolen memory area
*/
- dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
+ if (!resume)
+ dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
gtt_pages << PAGE_SHIFT);
if (!dev_priv->gtt_map) {
dev_err(dev->dev, "Failure to map gtt.\n");
@@ -513,7 +519,9 @@ int psb_gtt_init(struct drm_device *dev, int resume)
goto out_err;
}
- dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
+ if (!resume)
+ dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base,
+ stolen_size);
if (!dev_priv->vram_addr) {
dev_err(dev->dev, "Failure to map stolen base.\n");
ret = -ENOMEM;
@@ -549,3 +557,31 @@ out_err:
psb_gtt_takedown(dev);
return ret;
}
+
+int psb_gtt_restore(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct resource *r = dev_priv->gtt_mem->child;
+ struct gtt_range *range;
+ unsigned int restored = 0, total = 0, size = 0;
+
+ /* On resume, the gtt_mutex is already initialized */
+ mutex_lock(&dev_priv->gtt_mutex);
+ psb_gtt_init(dev, 1);
+
+ while (r != NULL) {
+ range = container_of(r, struct gtt_range, resource);
+ if (range->pages) {
+ psb_gtt_insert(dev, range, 1);
+ size += range->resource.end - range->resource.start;
+ restored++;
+ }
+ r = r->sibling;
+ total++;
+ }
+ mutex_unlock(&dev_priv->gtt_mutex);
+ DRM_DEBUG_DRIVER("Restored %u of %u gtt ranges (%u KB)", restored,
+ total, (size / 1024));
+
+ return 0;
+}
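psb_gtt_restore() walks the sibling chain under dev_priv->gtt_mem and uses container_of() to get back from each struct resource to the gtt_range that embeds it, reinserting every range that still has backing pages (the resume flag skips the cache-attribute work already done at first pin). A hedged userspace sketch of the embedded-resource recovery; the struct layouts are simplified stand-ins, not the DRM definitions:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct resource { struct resource *sibling; };
struct gtt_range { int npage; struct resource resource; };

int main(void)
{
        struct gtt_range range = { .npage = 4 };
        struct resource *r = &range.resource;

        /* Recover the enclosing gtt_range from its embedded resource,
         * as psb_gtt_restore() does for each child of gtt_mem. */
        struct gtt_range *back = container_of(r, struct gtt_range, resource);

        printf("npage=%d\n", back->npage);      /* npage=4 */
        return 0;
}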
diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h
index aa17423..6191d10 100644
--- a/drivers/gpu/drm/gma500/gtt.h
+++ b/drivers/gpu/drm/gma500/gtt.h
@@ -60,5 +60,5 @@ extern int psb_gtt_pin(struct gtt_range *gt);
extern void psb_gtt_unpin(struct gtt_range *gt);
extern void psb_gtt_roll(struct drm_device *dev,
struct gtt_range *gt, int roll);
-
+extern int psb_gtt_restore(struct drm_device *dev);
#endif
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index 403fffb..d349734 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -218,12 +218,11 @@ static void parse_backlight_data(struct drm_psb_private *dev_priv,
bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT);
vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type;
- lvds_bl = kzalloc(sizeof(*vbt_lvds_bl), GFP_KERNEL);
+ lvds_bl = kmemdup(vbt_lvds_bl, sizeof(*vbt_lvds_bl), GFP_KERNEL);
if (!lvds_bl) {
dev_err(dev_priv->dev->dev, "out of memory for backlight data\n");
return;
}
- memcpy(lvds_bl, vbt_lvds_bl, sizeof(*vbt_lvds_bl));
dev_priv->lvds_bl = lvds_bl;
}
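The kzalloc()+memcpy() pair becomes a single kmemdup(), which allocates and copies in one call and keeps the same NULL-on-failure contract. A hedged kernel-style sketch of the substitution; dup_blob_old()/dup_blob_new() are illustrative helpers, not functions from this driver:

#include <linux/slab.h>
#include <linux/string.h>

/* Before: allocate, check, then copy. */
static void *dup_blob_old(const void *src, size_t len)
{
        void *p = kzalloc(len, GFP_KERNEL);

        if (!p)
                return NULL;
        memcpy(p, src, len);
        return p;
}

/* After: kmemdup() does both and still returns NULL on failure. */
static void *dup_blob_new(const void *src, size_t len)
{
        return kmemdup(src, len, GFP_KERNEL);
}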
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index c6267c9..978ae4b 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -19,8 +19,8 @@
*
*/
-#ifndef _I830_BIOS_H_
-#define _I830_BIOS_H_
+#ifndef _INTEL_BIOS_H_
+#define _INTEL_BIOS_H_
#include <drm/drmP.h>
#include <drm/drm_dp_helper.h>
@@ -618,4 +618,4 @@ extern void psb_intel_destroy_bios(struct drm_device *dev);
#define PORT_IDPC 8
#define PORT_IDPD 9
-#endif /* _I830_BIOS_H_ */
+#endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 2d4ab48..3abf831 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -92,8 +92,8 @@ void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config, int pipe)
{
struct mdfld_dsi_pkg_sender *sender =
mdfld_dsi_get_pkg_sender(dsi_config);
- struct drm_device *dev = sender->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_device *dev;
+ struct drm_psb_private *dev_priv;
u32 gen_ctrl_val;
if (!sender) {
@@ -101,6 +101,9 @@ void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config, int pipe)
return;
}
+ dev = sender->dev;
+ dev_priv = dev->dev_private;
+
/* Set default display backlight value to 85% (0xd8)*/
mdfld_dsi_send_mcs_short(sender, write_display_brightness, 0xd8, 1,
true);
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
index 88627e3..1eb86c7 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -319,8 +319,7 @@ void oaktrail_hdmi_i2c_exit(struct pci_dev *dev)
struct hdmi_i2c_dev *i2c_dev;
hdmi_dev = pci_get_drvdata(dev);
- if (i2c_del_adapter(&oaktrail_hdmi_i2c_adapter))
- DRM_DEBUG_DRIVER("Failed to delete hdmi-i2c adapter\n");
+ i2c_del_adapter(&oaktrail_hdmi_i2c_adapter);
i2c_dev = hdmi_dev->i2c_dev;
kfree(i2c_dev);
diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
index 889b854..b6b135f 100644
--- a/drivers/gpu/drm/gma500/power.c
+++ b/drivers/gpu/drm/gma500/power.c
@@ -110,6 +110,8 @@ static void gma_resume_display(struct pci_dev *pdev)
PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
pci_write_config_word(pdev, PSB_GMCH_CTRL,
dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
+
+ psb_gtt_restore(dev); /* Rebuild our GTT mappings */
dev_priv->ops->restore_regs(dev);
}
@@ -313,3 +315,18 @@ int psb_runtime_idle(struct device *dev)
else
return 1;
}
+
+int gma_power_thaw(struct device *_dev)
+{
+ return gma_power_resume(_dev);
+}
+
+int gma_power_freeze(struct device *_dev)
+{
+ return gma_power_suspend(_dev);
+}
+
+int gma_power_restore(struct device *_dev)
+{
+ return gma_power_resume(_dev);
+}
diff --git a/drivers/gpu/drm/gma500/power.h b/drivers/gpu/drm/gma500/power.h
index 1969d2e..56d8708 100644
--- a/drivers/gpu/drm/gma500/power.h
+++ b/drivers/gpu/drm/gma500/power.h
@@ -41,6 +41,9 @@ void gma_power_uninit(struct drm_device *dev);
*/
int gma_power_suspend(struct device *dev);
int gma_power_resume(struct device *dev);
+int gma_power_thaw(struct device *dev);
+int gma_power_freeze(struct device *dev);
+int gma_power_restore(struct device *_dev);
/*
* These are the functions the driver should use to wrap all hw access
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 111e3df..bddea58 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -601,6 +601,9 @@ static void psb_remove(struct pci_dev *pdev)
static const struct dev_pm_ops psb_pm_ops = {
.resume = gma_power_resume,
.suspend = gma_power_suspend,
+ .thaw = gma_power_thaw,
+ .freeze = gma_power_freeze,
+ .restore = gma_power_restore,
.runtime_suspend = psb_runtime_suspend,
.runtime_resume = psb_runtime_resume,
.runtime_idle = psb_runtime_idle,
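The new entries route the hibernation transitions through the same code as suspend-to-RAM: freeze maps onto the suspend path, while thaw and restore map onto the resume path that now rebuilds the GTT via psb_gtt_restore(). A hedged sketch of the same wiring for a hypothetical driver (all foo_* names are illustrative):

#include <linux/pm.h>

static int foo_power_suspend(struct device *dev) { return 0; }
static int foo_power_resume(struct device *dev)  { return 0; }

/* Hibernation callbacks reuse the suspend/resume implementations. */
static int foo_power_freeze(struct device *dev)  { return foo_power_suspend(dev); }
static int foo_power_thaw(struct device *dev)    { return foo_power_resume(dev); }
static int foo_power_restore(struct device *dev) { return foo_power_resume(dev); }

static const struct dev_pm_ops foo_pm_ops = {
        .suspend = foo_power_suspend,
        .resume  = foo_power_resume,
        .freeze  = foo_power_freeze,
        .thaw    = foo_power_thaw,
        .restore = foo_power_restore,
};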
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index a7fd6c4..6053b8a 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -876,7 +876,6 @@ extern const struct psb_ops cdv_chip_ops;
#define PSB_D_MSVDX (1 << 9)
#define PSB_D_TOPAZ (1 << 10)
-extern int drm_psb_no_fb;
extern int drm_idle_check_interval;
/*
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 9edb190..6e8f42b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -50,119 +50,41 @@ struct psb_intel_p2_t {
int p2_slow, p2_fast;
};
-#define INTEL_P2_NUM 2
-
struct psb_intel_limit_t {
struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
struct psb_intel_p2_t p2;
};
-#define I8XX_DOT_MIN 25000
-#define I8XX_DOT_MAX 350000
-#define I8XX_VCO_MIN 930000
-#define I8XX_VCO_MAX 1400000
-#define I8XX_N_MIN 3
-#define I8XX_N_MAX 16
-#define I8XX_M_MIN 96
-#define I8XX_M_MAX 140
-#define I8XX_M1_MIN 18
-#define I8XX_M1_MAX 26
-#define I8XX_M2_MIN 6
-#define I8XX_M2_MAX 16
-#define I8XX_P_MIN 4
-#define I8XX_P_MAX 128
-#define I8XX_P1_MIN 2
-#define I8XX_P1_MAX 33
-#define I8XX_P1_LVDS_MIN 1
-#define I8XX_P1_LVDS_MAX 6
-#define I8XX_P2_SLOW 4
-#define I8XX_P2_FAST 2
-#define I8XX_P2_LVDS_SLOW 14
-#define I8XX_P2_LVDS_FAST 14 /* No fast option */
-#define I8XX_P2_SLOW_LIMIT 165000
-
-#define I9XX_DOT_MIN 20000
-#define I9XX_DOT_MAX 400000
-#define I9XX_VCO_MIN 1400000
-#define I9XX_VCO_MAX 2800000
-#define I9XX_N_MIN 1
-#define I9XX_N_MAX 6
-#define I9XX_M_MIN 70
-#define I9XX_M_MAX 120
-#define I9XX_M1_MIN 8
-#define I9XX_M1_MAX 18
-#define I9XX_M2_MIN 3
-#define I9XX_M2_MAX 7
-#define I9XX_P_SDVO_DAC_MIN 5
-#define I9XX_P_SDVO_DAC_MAX 80
-#define I9XX_P_LVDS_MIN 7
-#define I9XX_P_LVDS_MAX 98
-#define I9XX_P1_MIN 1
-#define I9XX_P1_MAX 8
-#define I9XX_P2_SDVO_DAC_SLOW 10
-#define I9XX_P2_SDVO_DAC_FAST 5
-#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
-#define I9XX_P2_LVDS_SLOW 14
-#define I9XX_P2_LVDS_FAST 7
-#define I9XX_P2_LVDS_SLOW_LIMIT 112000
-
-#define INTEL_LIMIT_I8XX_DVO_DAC 0
-#define INTEL_LIMIT_I8XX_LVDS 1
-#define INTEL_LIMIT_I9XX_SDVO_DAC 2
-#define INTEL_LIMIT_I9XX_LVDS 3
+#define INTEL_LIMIT_I9XX_SDVO_DAC 0
+#define INTEL_LIMIT_I9XX_LVDS 1
static const struct psb_intel_limit_t psb_intel_limits[] = {
- { /* INTEL_LIMIT_I8XX_DVO_DAC */
- .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
- .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
- .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
- .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
- .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
- .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
- .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
- .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX},
- .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
- .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST},
- },
- { /* INTEL_LIMIT_I8XX_LVDS */
- .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
- .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
- .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
- .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
- .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
- .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
- .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
- .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX},
- .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
- .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST},
- },
{ /* INTEL_LIMIT_I9XX_SDVO_DAC */
- .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
- .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
- .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
- .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
- .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
- .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
- .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX},
- .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
- .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
- .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast =
- I9XX_P2_SDVO_DAC_FAST},
+ .dot = {.min = 20000, .max = 400000},
+ .vco = {.min = 1400000, .max = 2800000},
+ .n = {.min = 1, .max = 6},
+ .m = {.min = 70, .max = 120},
+ .m1 = {.min = 8, .max = 18},
+ .m2 = {.min = 3, .max = 7},
+ .p = {.min = 5, .max = 80},
+ .p1 = {.min = 1, .max = 8},
+ .p2 = {.dot_limit = 200000,
+ .p2_slow = 10, .p2_fast = 5},
},
{ /* INTEL_LIMIT_I9XX_LVDS */
- .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
- .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
- .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
- .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
- .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
- .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
- .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX},
- .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
+ .dot = {.min = 20000, .max = 400000},
+ .vco = {.min = 1400000, .max = 2800000},
+ .n = {.min = 1, .max = 6},
+ .m = {.min = 70, .max = 120},
+ .m1 = {.min = 8, .max = 18},
+ .m2 = {.min = 3, .max = 7},
+ .p = {.min = 7, .max = 98},
+ .p1 = {.min = 1, .max = 8},
/* The single-channel range is 25-112 MHz, and dual-channel
* is 80-224 MHz. Prefer single channel as much as possible.
*/
- .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
- .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST},
+ .p2 = {.dot_limit = 112000,
+ .p2_slow = 14, .p2_fast = 7},
},
};
@@ -177,9 +99,7 @@ static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc)
return limit;
}
-/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
-
-static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
+static void psb_intel_clock(int refclk, struct psb_intel_clock_t *clock)
{
clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
clock->p = clock->p1 * clock->p2;
@@ -187,22 +107,6 @@ static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
clock->dot = clock->vco / clock->p;
}
-/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
-
-static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock)
-{
- clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
- clock->p = clock->p1 * clock->p2;
- clock->vco = refclk * clock->m / (clock->n + 2);
- clock->dot = clock->vco / clock->p;
-}
-
-static void psb_intel_clock(struct drm_device *dev, int refclk,
- struct psb_intel_clock_t *clock)
-{
- return i9xx_clock(refclk, clock);
-}
-
/**
* Returns whether any output on the specified pipe is of the specified type
*/
@@ -308,7 +212,7 @@ static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
clock.p1++) {
int this_err;
- psb_intel_clock(dev, refclk, &clock);
+ psb_intel_clock(refclk, &clock);
if (!psb_intel_PLL_is_valid
(crtc, &clock))
@@ -1068,7 +972,7 @@ static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
return 0;
}
-void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
u16 *green, u16 *blue, uint32_t type, uint32_t size)
{
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
@@ -1149,9 +1053,9 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev,
if ((dpll & PLL_REF_INPUT_MASK) ==
PLLB_REF_INPUT_SPREADSPECTRUMIN) {
/* XXX: might not be 66MHz */
- i8xx_clock(66000, &clock);
+ psb_intel_clock(66000, &clock);
} else
- i8xx_clock(48000, &clock);
+ psb_intel_clock(48000, &clock);
} else {
if (dpll & PLL_P1_DIVIDE_BY_TWO)
clock.p1 = 2;
@@ -1166,7 +1070,7 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev,
else
clock.p2 = 2;
- i8xx_clock(48000, &clock);
+ psb_intel_clock(48000, &clock);
}
/* XXX: It would be nice to validate the clocks, but we can't reuse
@@ -1225,7 +1129,7 @@ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
return mode;
}
-void psb_intel_crtc_destroy(struct drm_crtc *crtc)
+static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
{
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct gtt_range *gt;
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.h b/drivers/gpu/drm/gma500/psb_intel_display.h
index 535b49a..3724b97 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.h
+++ b/drivers/gpu/drm/gma500/psb_intel_display.h
@@ -21,8 +21,5 @@
#define _INTEL_DISPLAY_H_
bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
-void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
- u16 *green, u16 *blue, uint32_t type, uint32_t size);
-void psb_intel_crtc_destroy(struct drm_crtc *crtc);
#endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 90f2d11..4dcae42 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -32,9 +32,6 @@
/* maximum connectors per crtcs in the mode set */
#define INTELFB_CONN_LIMIT 4
-#define INTEL_I2C_BUS_DVO 1
-#define INTEL_I2C_BUS_SDVO 2
-
/* Intel Pipe Clone Bit */
#define INTEL_HDMIB_CLONE_BIT 1
#define INTEL_HDMIC_CLONE_BIT 2
@@ -68,11 +65,6 @@
#define INTEL_OUTPUT_DISPLAYPORT 9
#define INTEL_OUTPUT_EDP 10
-#define INTEL_DVO_CHIP_NONE 0
-#define INTEL_DVO_CHIP_LVDS 1
-#define INTEL_DVO_CHIP_TMDS 2
-#define INTEL_DVO_CHIP_TVOUT 4
-
#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
index d914719..0be30e4 100644
--- a/drivers/gpu/drm/gma500/psb_intel_reg.h
+++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -493,7 +493,6 @@
#define PIPEACONF_DISABLE 0
#define PIPEACONF_DOUBLE_WIDE (1 << 30)
#define PIPECONF_ACTIVE (1 << 30)
-#define I965_PIPECONF_ACTIVE (1 << 30)
#define PIPECONF_DSIPLL_LOCK (1 << 29)
#define PIPEACONF_SINGLE_WIDE 0
#define PIPEACONF_PIPE_UNLOCKED 0
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index a4cc777a..19e3660 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -134,6 +134,9 @@ struct psb_intel_sdvo {
/* Input timings for adjusted_mode */
struct psb_intel_sdvo_dtd input_dtd;
+
+ /* Saved SDVO output states */
+ uint32_t saveSDVO; /* Can be SDVOB or SDVOC depending on sdvo_reg */
};
struct psb_intel_sdvo_connector {
@@ -1830,6 +1833,34 @@ done:
#undef CHECK_PROPERTY
}
+static void psb_intel_sdvo_save(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct psb_intel_sdvo *sdvo =
+ to_psb_intel_sdvo(&psb_intel_encoder->base);
+
+ sdvo->saveSDVO = REG_READ(sdvo->sdvo_reg);
+}
+
+static void psb_intel_sdvo_restore(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_encoder *encoder =
+ &psb_intel_attached_encoder(connector)->base;
+ struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(encoder);
+ struct drm_crtc *crtc = encoder->crtc;
+
+ REG_WRITE(sdvo->sdvo_reg, sdvo->saveSDVO);
+
+ /* Force a full mode set on the crtc. We're supposed to have the
+ mode_config lock already. */
+ if (connector->status == connector_status_connected)
+ drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
+ NULL);
+}
+
static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
.dpms = psb_intel_sdvo_dpms,
.mode_fixup = psb_intel_sdvo_mode_fixup,
@@ -1840,6 +1871,8 @@ static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
.dpms = drm_helper_connector_dpms,
+ .save = psb_intel_sdvo_save,
+ .restore = psb_intel_sdvo_restore,
.detect = psb_intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = psb_intel_sdvo_set_property,
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 8652cdf..029eccf 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -211,7 +211,7 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
- if (vdc_stat & _PSB_PIPE_EVENT_FLAG)
+ if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE))
dsp_int = 1;
/* FIXME: Handle Medfield
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
index 603045b..debb7f1 100644
--- a/drivers/gpu/drm/gma500/psb_irq.h
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -21,8 +21,8 @@
*
**************************************************************************/
-#ifndef _SYSIRQ_H_
-#define _SYSIRQ_H_
+#ifndef _PSB_IRQ_H_
+#define _PSB_IRQ_H_
#include <drm/drmP.h>
@@ -44,4 +44,4 @@ u32 psb_get_vblank_counter(struct drm_device *dev, int pipe);
int mdfld_enable_te(struct drm_device *dev, int pipe);
void mdfld_disable_te(struct drm_device *dev, int pipe);
-#endif /* _SYSIRQ_H_ */
+#endif /* _PSB_IRQ_H_ */
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 7299ea4..e913d32 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -772,6 +772,23 @@ static int i915_error_state(struct seq_file *m, void *unused)
}
}
}
+
+ obj = error->ring[i].ctx;
+ if (obj) {
+ seq_printf(m, "%s --- HW Context = 0x%08x\n",
+ dev_priv->ring[i].name,
+ obj->gtt_offset);
+ offset = 0;
+ for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
+ seq_printf(m, "[%04x] %08x %08x %08x %08x\n",
+ offset,
+ obj->pages[0][elt],
+ obj->pages[0][elt+1],
+ obj->pages[0][elt+2],
+ obj->pages[0][elt+3]);
+ offset += 16;
+ }
+ }
}
if (error->overlay)
@@ -849,76 +866,42 @@ static const struct file_operations i915_error_state_fops = {
.release = i915_error_state_release,
};
-static ssize_t
-i915_next_seqno_read(struct file *filp,
- char __user *ubuf,
- size_t max,
- loff_t *ppos)
+static int
+i915_next_seqno_get(void *data, u64 *val)
{
- struct drm_device *dev = filp->private_data;
+ struct drm_device *dev = data;
drm_i915_private_t *dev_priv = dev->dev_private;
- char buf[80];
- int len;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- len = snprintf(buf, sizeof(buf),
- "next_seqno : 0x%x\n",
- dev_priv->next_seqno);
-
+ *val = dev_priv->next_seqno;
mutex_unlock(&dev->struct_mutex);
- if (len > sizeof(buf))
- len = sizeof(buf);
-
- return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+ return 0;
}
-static ssize_t
-i915_next_seqno_write(struct file *filp,
- const char __user *ubuf,
- size_t cnt,
- loff_t *ppos)
-{
- struct drm_device *dev = filp->private_data;
- char buf[20];
- u32 val = 1;
+static int
+i915_next_seqno_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
int ret;
- if (cnt > 0) {
- if (cnt > sizeof(buf) - 1)
- return -EINVAL;
-
- if (copy_from_user(buf, ubuf, cnt))
- return -EFAULT;
- buf[cnt] = 0;
-
- ret = kstrtouint(buf, 0, &val);
- if (ret < 0)
- return ret;
- }
-
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
ret = i915_gem_set_seqno(dev, val);
-
mutex_unlock(&dev->struct_mutex);
- return ret ?: cnt;
+ return ret;
}
-static const struct file_operations i915_next_seqno_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = i915_next_seqno_read,
- .write = i915_next_seqno_write,
- .llseek = default_llseek,
-};
+DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
+ i915_next_seqno_get, i915_next_seqno_set,
+ "0x%llx\n");
static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
@@ -1023,6 +1006,9 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
max_freq = rp_state_cap & 0xff;
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
max_freq * GT_FREQUENCY_MULTIPLIER);
+
+ seq_printf(m, "Max overclocked frequency: %dMHz\n",
+ dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
} else {
seq_printf(m, "no P-state info available\n");
}
@@ -1371,7 +1357,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
if (ret)
return ret;
- seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
+ seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
for (gpu_freq = dev_priv->rps.min_delay;
gpu_freq <= dev_priv->rps.max_delay;
@@ -1380,7 +1366,10 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
sandybridge_pcode_read(dev_priv,
GEN6_PCODE_READ_MIN_FREQ_TABLE,
&ia_freq);
- seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
+ seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
+ gpu_freq * GT_FREQUENCY_MULTIPLIER,
+ ((ia_freq >> 0) & 0xff) * 100,
+ ((ia_freq >> 8) & 0xff) * 100);
}
mutex_unlock(&dev_priv->rps.hw_lock);
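The widened table output decodes two byte fields from the pcode reply: bits 7:0 carry the effective CPU frequency and bits 15:8 the effective ring frequency, each in units of 100 MHz, exactly as the seq_printf() above prints them. A standalone check of that decoding with a made-up reply value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t ia_freq = 0x1210;      /* illustrative pcode reply */

        unsigned int cpu_mhz  = ((ia_freq >> 0) & 0xff) * 100; /* 0x10 -> 1600 */
        unsigned int ring_mhz = ((ia_freq >> 8) & 0xff) * 100; /* 0x12 -> 1800 */

        printf("cpu=%u MHz ring=%u MHz\n", cpu_mhz, ring_mhz);
        return 0;
}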
@@ -1680,105 +1669,51 @@ static int i915_dpio_info(struct seq_file *m, void *data)
return 0;
}
-static ssize_t
-i915_wedged_read(struct file *filp,
- char __user *ubuf,
- size_t max,
- loff_t *ppos)
+static int
+i915_wedged_get(void *data, u64 *val)
{
- struct drm_device *dev = filp->private_data;
+ struct drm_device *dev = data;
drm_i915_private_t *dev_priv = dev->dev_private;
- char buf[80];
- int len;
- len = snprintf(buf, sizeof(buf),
- "wedged : %d\n",
- atomic_read(&dev_priv->gpu_error.reset_counter));
+ *val = atomic_read(&dev_priv->gpu_error.reset_counter);
- if (len > sizeof(buf))
- len = sizeof(buf);
-
- return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+ return 0;
}
-static ssize_t
-i915_wedged_write(struct file *filp,
- const char __user *ubuf,
- size_t cnt,
- loff_t *ppos)
+static int
+i915_wedged_set(void *data, u64 val)
{
- struct drm_device *dev = filp->private_data;
- char buf[20];
- int val = 1;
-
- if (cnt > 0) {
- if (cnt > sizeof(buf) - 1)
- return -EINVAL;
+ struct drm_device *dev = data;
- if (copy_from_user(buf, ubuf, cnt))
- return -EFAULT;
- buf[cnt] = 0;
-
- val = simple_strtoul(buf, NULL, 0);
- }
-
- DRM_INFO("Manually setting wedged to %d\n", val);
+ DRM_INFO("Manually setting wedged to %llu\n", val);
i915_handle_error(dev, val);
- return cnt;
+ return 0;
}
-static const struct file_operations i915_wedged_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = i915_wedged_read,
- .write = i915_wedged_write,
- .llseek = default_llseek,
-};
+DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
+ i915_wedged_get, i915_wedged_set,
+ "%llu\n");
-static ssize_t
-i915_ring_stop_read(struct file *filp,
- char __user *ubuf,
- size_t max,
- loff_t *ppos)
+static int
+i915_ring_stop_get(void *data, u64 *val)
{
- struct drm_device *dev = filp->private_data;
+ struct drm_device *dev = data;
drm_i915_private_t *dev_priv = dev->dev_private;
- char buf[20];
- int len;
- len = snprintf(buf, sizeof(buf),
- "0x%08x\n", dev_priv->gpu_error.stop_rings);
+ *val = dev_priv->gpu_error.stop_rings;
- if (len > sizeof(buf))
- len = sizeof(buf);
-
- return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+ return 0;
}
-static ssize_t
-i915_ring_stop_write(struct file *filp,
- const char __user *ubuf,
- size_t cnt,
- loff_t *ppos)
+static int
+i915_ring_stop_set(void *data, u64 val)
{
- struct drm_device *dev = filp->private_data;
+ struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
- char buf[20];
- int val = 0, ret;
-
- if (cnt > 0) {
- if (cnt > sizeof(buf) - 1)
- return -EINVAL;
-
- if (copy_from_user(buf, ubuf, cnt))
- return -EFAULT;
- buf[cnt] = 0;
-
- val = simple_strtoul(buf, NULL, 0);
- }
+ int ret;
- DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);
+ DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
@@ -1787,16 +1722,12 @@ i915_ring_stop_write(struct file *filp,
dev_priv->gpu_error.stop_rings = val;
mutex_unlock(&dev->struct_mutex);
- return cnt;
+ return 0;
}
-static const struct file_operations i915_ring_stop_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = i915_ring_stop_read,
- .write = i915_ring_stop_write,
- .llseek = default_llseek,
-};
+DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
+ i915_ring_stop_get, i915_ring_stop_set,
+ "0x%08llx\n");
#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
@@ -1806,46 +1737,23 @@ static const struct file_operations i915_ring_stop_fops = {
DROP_BOUND | \
DROP_RETIRE | \
DROP_ACTIVE)
-static ssize_t
-i915_drop_caches_read(struct file *filp,
- char __user *ubuf,
- size_t max,
- loff_t *ppos)
+static int
+i915_drop_caches_get(void *data, u64 *val)
{
- char buf[20];
- int len;
+ *val = DROP_ALL;
- len = snprintf(buf, sizeof(buf), "0x%08x\n", DROP_ALL);
- if (len > sizeof(buf))
- len = sizeof(buf);
-
- return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+ return 0;
}
-static ssize_t
-i915_drop_caches_write(struct file *filp,
- const char __user *ubuf,
- size_t cnt,
- loff_t *ppos)
+static int
+i915_drop_caches_set(void *data, u64 val)
{
- struct drm_device *dev = filp->private_data;
+ struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj, *next;
- char buf[20];
- int val = 0, ret;
-
- if (cnt > 0) {
- if (cnt > sizeof(buf) - 1)
- return -EINVAL;
-
- if (copy_from_user(buf, ubuf, cnt))
- return -EFAULT;
- buf[cnt] = 0;
-
- val = simple_strtoul(buf, NULL, 0);
- }
+ int ret;
- DRM_DEBUG_DRIVER("Dropping caches: 0x%08x\n", val);
+ DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
/* No need to check and wait for gpu resets, only libdrm auto-restarts
* on ioctls on -EAGAIN. */
@@ -1883,27 +1791,19 @@ i915_drop_caches_write(struct file *filp,
unlock:
mutex_unlock(&dev->struct_mutex);
- return ret ?: cnt;
+ return ret;
}
-static const struct file_operations i915_drop_caches_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = i915_drop_caches_read,
- .write = i915_drop_caches_write,
- .llseek = default_llseek,
-};
+DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
+ i915_drop_caches_get, i915_drop_caches_set,
+ "0x%08llx\n");
-static ssize_t
-i915_max_freq_read(struct file *filp,
- char __user *ubuf,
- size_t max,
- loff_t *ppos)
+static int
+i915_max_freq_get(void *data, u64 *val)
{
- struct drm_device *dev = filp->private_data;
+ struct drm_device *dev = data;
drm_i915_private_t *dev_priv = dev->dev_private;
- char buf[80];
- int len, ret;
+ int ret;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
@@ -1912,42 +1812,23 @@ i915_max_freq_read(struct file *filp,
if (ret)
return ret;
- len = snprintf(buf, sizeof(buf),
- "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
+ *val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
- if (len > sizeof(buf))
- len = sizeof(buf);
-
- return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+ return 0;
}
-static ssize_t
-i915_max_freq_write(struct file *filp,
- const char __user *ubuf,
- size_t cnt,
- loff_t *ppos)
+static int
+i915_max_freq_set(void *data, u64 val)
{
- struct drm_device *dev = filp->private_data;
+ struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
- char buf[20];
- int val = 1, ret;
+ int ret;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
- if (cnt > 0) {
- if (cnt > sizeof(buf) - 1)
- return -EINVAL;
-
- if (copy_from_user(buf, ubuf, cnt))
- return -EFAULT;
- buf[cnt] = 0;
-
- val = simple_strtoul(buf, NULL, 0);
- }
-
- DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
+ DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
@@ -1956,30 +1837,24 @@ i915_max_freq_write(struct file *filp,
/*
* Turbo will still be enabled, but won't go above the set value.
*/
- dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
-
- gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
+ do_div(val, GT_FREQUENCY_MULTIPLIER);
+ dev_priv->rps.max_delay = val;
+ gen6_set_rps(dev, val);
mutex_unlock(&dev_priv->rps.hw_lock);
- return cnt;
+ return 0;
}
-static const struct file_operations i915_max_freq_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = i915_max_freq_read,
- .write = i915_max_freq_write,
- .llseek = default_llseek,
-};
+DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
+ i915_max_freq_get, i915_max_freq_set,
+ "%llu\n");
-static ssize_t
-i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
- loff_t *ppos)
+static int
+i915_min_freq_get(void *data, u64 *val)
{
- struct drm_device *dev = filp->private_data;
+ struct drm_device *dev = data;
drm_i915_private_t *dev_priv = dev->dev_private;
- char buf[80];
- int len, ret;
+ int ret;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
@@ -1988,40 +1863,23 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
if (ret)
return ret;
- len = snprintf(buf, sizeof(buf),
- "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
+ *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
- if (len > sizeof(buf))
- len = sizeof(buf);
-
- return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+ return 0;
}
-static ssize_t
-i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
- loff_t *ppos)
+static int
+i915_min_freq_set(void *data, u64 val)
{
- struct drm_device *dev = filp->private_data;
+ struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
- char buf[20];
- int val = 1, ret;
+ int ret;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
- if (cnt > 0) {
- if (cnt > sizeof(buf) - 1)
- return -EINVAL;
-
- if (copy_from_user(buf, ubuf, cnt))
- return -EFAULT;
- buf[cnt] = 0;
-
- val = simple_strtoul(buf, NULL, 0);
- }
-
- DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
+ DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
@@ -2030,33 +1888,25 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
/*
* Turbo will still be enabled, but won't go below the set value.
*/
- dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
-
- gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
+ do_div(val, GT_FREQUENCY_MULTIPLIER);
+ dev_priv->rps.min_delay = val;
+ gen6_set_rps(dev, val);
mutex_unlock(&dev_priv->rps.hw_lock);
- return cnt;
+ return 0;
}
-static const struct file_operations i915_min_freq_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = i915_min_freq_read,
- .write = i915_min_freq_write,
- .llseek = default_llseek,
-};
+DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
+ i915_min_freq_get, i915_min_freq_set,
+ "%llu\n");
-static ssize_t
-i915_cache_sharing_read(struct file *filp,
- char __user *ubuf,
- size_t max,
- loff_t *ppos)
+static int
+i915_cache_sharing_get(void *data, u64 *val)
{
- struct drm_device *dev = filp->private_data;
+ struct drm_device *dev = data;
drm_i915_private_t *dev_priv = dev->dev_private;
- char buf[80];
u32 snpcr;
- int len, ret;
+ int ret;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
@@ -2068,46 +1918,25 @@ i915_cache_sharing_read(struct file *filp,
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
mutex_unlock(&dev_priv->dev->struct_mutex);
- len = snprintf(buf, sizeof(buf),
- "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
- GEN6_MBC_SNPCR_SHIFT);
+ *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
- if (len > sizeof(buf))
- len = sizeof(buf);
-
- return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+ return 0;
}
-static ssize_t
-i915_cache_sharing_write(struct file *filp,
- const char __user *ubuf,
- size_t cnt,
- loff_t *ppos)
+static int
+i915_cache_sharing_set(void *data, u64 val)
{
- struct drm_device *dev = filp->private_data;
+ struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
- char buf[20];
u32 snpcr;
- int val = 1;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
- if (cnt > 0) {
- if (cnt > sizeof(buf) - 1)
- return -EINVAL;
-
- if (copy_from_user(buf, ubuf, cnt))
- return -EFAULT;
- buf[cnt] = 0;
-
- val = simple_strtoul(buf, NULL, 0);
- }
-
- if (val < 0 || val > 3)
+ if (val > 3)
return -EINVAL;
- DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val);
+ DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
/* Update the cache sharing policy here as well */
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
@@ -2115,16 +1944,12 @@ i915_cache_sharing_write(struct file *filp,
snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
- return cnt;
+ return 0;
}
-static const struct file_operations i915_cache_sharing_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = i915_cache_sharing_read,
- .write = i915_cache_sharing_write,
- .llseek = default_llseek,
-};
+DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
+ i915_cache_sharing_get, i915_cache_sharing_set,
+ "%llu\n");
/* As the drm_debugfs_init() routines are called before dev->dev_private is
* allocated we need to hook into the minor for release. */
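
The three debugfs files above (max_freq, min_freq, cache_sharing) are converted from hand-rolled read/write file_operations to plain u64 get/set callbacks wrapped by DEFINE_SIMPLE_ATTRIBUTE(), which takes care of the user-space buffer copying, parsing and formatting. A minimal sketch of the same pattern follows — the demo_* names and the backing variable are hypothetical, not part of this patch:

#include <linux/debugfs.h>
#include <linux/fs.h>

static u64 demo_value;	/* hypothetical backing state */

static int demo_get(void *data, u64 *val)
{
	*val = demo_value;
	return 0;
}

static int demo_set(void *data, u64 val)
{
	if (val > 3)	/* reject out-of-range input, like the snpcr setter above */
		return -EINVAL;
	demo_value = val;
	return 0;
}

/* Generates a struct file_operations named demo_fops with open/read/write
 * and llseek filled in, printing the value with "%llu\n". */
DEFINE_SIMPLE_ATTRIBUTE(demo_fops, demo_get, demo_set, "%llu\n");

/* Registration would then look like:
 *	debugfs_create_file("demo", S_IRUSR | S_IWUSR, parent_dentry, NULL, &demo_fops);
 */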
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 4fa6beb..3b315ba 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1322,6 +1322,10 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = 1;
+ if (INTEL_INFO(dev)->num_pipes == 0) {
+ dev_priv->mm.suspended = 0;
+ return 0;
+ }
ret = intel_fbdev_init(dev);
if (ret)
@@ -1453,6 +1457,22 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
}
/**
+ * intel_early_sanitize_regs - clean up BIOS state
+ * @dev: DRM device
+ *
+ * This function must be called before we do any I915_READ or I915_WRITE. Its
+ * purpose is to clean up any state left by the BIOS that may affect us when
+ * reading and/or writing registers.
+ */
+static void intel_early_sanitize_regs(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_HASWELL(dev))
+ I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+}
+
+/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
* @flags: startup flags
@@ -1498,6 +1518,28 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}
+ mmio_bar = IS_GEN2(dev) ? 1 : 0;
+ /* Before gen4, the registers and the GTT are behind different BARs.
+ * However, from gen4 onwards, the registers and the GTT are shared
+ * in the same BAR, so we want to restrict this ioremap to avoid
+ * clobbering the GTT, which we want to map with ioremap_wc instead. Fortunately,
+ * the register BAR remains the same size for all the earlier
+ * generations up to Ironlake.
+ */
+ if (info->gen < 5)
+ mmio_size = 512*1024;
+ else
+ mmio_size = 2*1024*1024;
+
+ dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
+ if (!dev_priv->regs) {
+ DRM_ERROR("failed to map registers\n");
+ ret = -EIO;
+ goto put_bridge;
+ }
+
+ intel_early_sanitize_regs(dev);
+
ret = i915_gem_gtt_init(dev);
if (ret)
goto put_bridge;
@@ -1522,26 +1564,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
- mmio_bar = IS_GEN2(dev) ? 1 : 0;
- /* Before gen4, the registers and the GTT are behind different BARs.
- * However, from gen4 onwards, the registers and the GTT are shared
- * in the same BAR, so we want to restrict this ioremap from
- * clobbering the GTT which we want ioremap_wc instead. Fortunately,
- * the register BAR remains the same size for all the earlier
- * generations up to Ironlake.
- */
- if (info->gen < 5)
- mmio_size = 512*1024;
- else
- mmio_size = 2*1024*1024;
-
- dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
- if (!dev_priv->regs) {
- DRM_ERROR("failed to map registers\n");
- ret = -EIO;
- goto put_gmch;
- }
-
aperture_size = dev_priv->gtt.mappable_end;
dev_priv->gtt.mappable =
@@ -1612,16 +1634,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
mutex_init(&dev_priv->rps.hw_lock);
mutex_init(&dev_priv->modeset_restore_lock);
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
- dev_priv->num_pipe = 3;
- else if (IS_MOBILE(dev) || !IS_GEN2(dev))
- dev_priv->num_pipe = 2;
- else
- dev_priv->num_pipe = 1;
+ dev_priv->num_plane = 1;
+ if (IS_VALLEYVIEW(dev))
+ dev_priv->num_plane = 2;
- ret = drm_vblank_init(dev, dev_priv->num_pipe);
- if (ret)
- goto out_gem_unload;
+ if (INTEL_INFO(dev)->num_pipes) {
+ ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
+ if (ret)
+ goto out_gem_unload;
+ }
/* Start out suspended */
dev_priv->mm.suspended = 1;
@@ -1636,9 +1657,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
i915_setup_sysfs(dev);
- /* Must be done after probing outputs */
- intel_opregion_init(dev);
- acpi_video_register();
+ if (INTEL_INFO(dev)->num_pipes) {
+ /* Must be done after probing outputs */
+ intel_opregion_init(dev);
+ acpi_video_register();
+ }
if (IS_GEN5(dev))
intel_gpu_ips_init(dev_priv);
@@ -1663,10 +1686,9 @@ out_mtrrfree:
dev_priv->mm.gtt_mtrr = -1;
}
io_mapping_free(dev_priv->gtt.mappable);
+ dev_priv->gtt.gtt_remove(dev);
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
-put_gmch:
- dev_priv->gtt.gtt_remove(dev);
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
free_priv:
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e9b5789..9ebe895 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -121,9 +121,7 @@ MODULE_PARM_DESC(i915_enable_ppgtt,
unsigned int i915_preliminary_hw_support __read_mostly = 0;
module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
- "Enable preliminary hardware support. "
- "Enable Haswell and ValleyView Support. "
- "(default: false)");
+ "Enable preliminary hardware support. (default: false)");
int i915_disable_power_well __read_mostly = 0;
module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
@@ -142,75 +140,85 @@ extern int intel_agp_enabled;
.subdevice = PCI_ANY_ID, \
.driver_data = (unsigned long) info }
+#define INTEL_QUANTA_VGA_DEVICE(info) { \
+ .class = PCI_BASE_CLASS_DISPLAY << 16, \
+ .class_mask = 0xff0000, \
+ .vendor = 0x8086, \
+ .device = 0x16a, \
+ .subvendor = 0x152d, \
+ .subdevice = 0x8990, \
+ .driver_data = (unsigned long) info }
+
+
static const struct intel_device_info intel_i830_info = {
- .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
+ .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_845g_info = {
- .gen = 2,
+ .gen = 2, .num_pipes = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i85x_info = {
- .gen = 2, .is_i85x = 1, .is_mobile = 1,
+ .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
.cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i865g_info = {
- .gen = 2,
+ .gen = 2, .num_pipes = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i915g_info = {
- .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
+ .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i915gm_info = {
- .gen = 3, .is_mobile = 1,
+ .gen = 3, .is_mobile = 1, .num_pipes = 2,
.cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.supports_tv = 1,
};
static const struct intel_device_info intel_i945g_info = {
- .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
+ .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i945gm_info = {
- .gen = 3, .is_i945gm = 1, .is_mobile = 1,
+ .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
.has_hotplug = 1, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.supports_tv = 1,
};
static const struct intel_device_info intel_i965g_info = {
- .gen = 4, .is_broadwater = 1,
+ .gen = 4, .is_broadwater = 1, .num_pipes = 2,
.has_hotplug = 1,
.has_overlay = 1,
};
static const struct intel_device_info intel_i965gm_info = {
- .gen = 4, .is_crestline = 1,
+ .gen = 4, .is_crestline = 1, .num_pipes = 2,
.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
.has_overlay = 1,
.supports_tv = 1,
};
static const struct intel_device_info intel_g33_info = {
- .gen = 3, .is_g33 = 1,
+ .gen = 3, .is_g33 = 1, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_overlay = 1,
};
static const struct intel_device_info intel_g45_info = {
- .gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
+ .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
.has_pipe_cxsr = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
};
static const struct intel_device_info intel_gm45_info = {
- .gen = 4, .is_g4x = 1,
+ .gen = 4, .is_g4x = 1, .num_pipes = 2,
.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
.has_pipe_cxsr = 1, .has_hotplug = 1,
.supports_tv = 1,
@@ -218,26 +226,26 @@ static const struct intel_device_info intel_gm45_info = {
};
static const struct intel_device_info intel_pineview_info = {
- .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
+ .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_overlay = 1,
};
static const struct intel_device_info intel_ironlake_d_info = {
- .gen = 5,
+ .gen = 5, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
};
static const struct intel_device_info intel_ironlake_m_info = {
- .gen = 5, .is_mobile = 1,
+ .gen = 5, .is_mobile = 1, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 1,
.has_bsd_ring = 1,
};
static const struct intel_device_info intel_sandybridge_d_info = {
- .gen = 6,
+ .gen = 6, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
@@ -246,7 +254,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
};
static const struct intel_device_info intel_sandybridge_m_info = {
- .gen = 6, .is_mobile = 1,
+ .gen = 6, .is_mobile = 1, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 1,
.has_bsd_ring = 1,
@@ -255,61 +263,57 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.has_force_wake = 1,
};
+#define GEN7_FEATURES \
+ .gen = 7, .num_pipes = 3, \
+ .need_gfx_hws = 1, .has_hotplug = 1, \
+ .has_bsd_ring = 1, \
+ .has_blt_ring = 1, \
+ .has_llc = 1, \
+ .has_force_wake = 1
+
static const struct intel_device_info intel_ivybridge_d_info = {
- .is_ivybridge = 1, .gen = 7,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .has_bsd_ring = 1,
- .has_blt_ring = 1,
- .has_llc = 1,
- .has_force_wake = 1,
+ GEN7_FEATURES,
+ .is_ivybridge = 1,
};
static const struct intel_device_info intel_ivybridge_m_info = {
- .is_ivybridge = 1, .gen = 7, .is_mobile = 1,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */
- .has_bsd_ring = 1,
- .has_blt_ring = 1,
- .has_llc = 1,
- .has_force_wake = 1,
+ GEN7_FEATURES,
+ .is_ivybridge = 1,
+ .is_mobile = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_q_info = {
+ GEN7_FEATURES,
+ .is_ivybridge = 1,
+ .num_pipes = 0, /* legal, last one wins */
};
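
The GEN7_FEATURES macro above expands to a list of designated initializers, and individual entries such as intel_ivybridge_q_info then override single fields after the macro (hence the "legal, last one wins" notes). C99 designated initialization allows a member to be initialized more than once in the same initializer; the last initializer for that member takes effect. A small stand-alone illustration, using a hypothetical struct rather than the driver's:

struct features {
	int gen;
	int num_pipes;
	int has_llc;
};

#define BASE_FEATURES .gen = 7, .num_pipes = 3, .has_llc = 1

/* .num_pipes appears twice; the later value (0) is the one that sticks,
 * so this compiles to { .gen = 7, .num_pipes = 0, .has_llc = 1 }. */
static const struct features quanta_like = {
	BASE_FEATURES,
	.num_pipes = 0,
};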
static const struct intel_device_info intel_valleyview_m_info = {
- .gen = 7, .is_mobile = 1,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .has_fbc = 0,
- .has_bsd_ring = 1,
- .has_blt_ring = 1,
+ GEN7_FEATURES,
+ .is_mobile = 1,
+ .num_pipes = 2,
.is_valleyview = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
+ .has_llc = 0, /* legal, last one wins */
};
static const struct intel_device_info intel_valleyview_d_info = {
- .gen = 7,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .has_fbc = 0,
- .has_bsd_ring = 1,
- .has_blt_ring = 1,
+ GEN7_FEATURES,
+ .num_pipes = 2,
.is_valleyview = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
+ .has_llc = 0, /* legal, last one wins */
};
static const struct intel_device_info intel_haswell_d_info = {
- .is_haswell = 1, .gen = 7,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .has_bsd_ring = 1,
- .has_blt_ring = 1,
- .has_llc = 1,
- .has_force_wake = 1,
+ GEN7_FEATURES,
+ .is_haswell = 1,
};
static const struct intel_device_info intel_haswell_m_info = {
- .is_haswell = 1, .gen = 7, .is_mobile = 1,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .has_bsd_ring = 1,
- .has_blt_ring = 1,
- .has_llc = 1,
- .has_force_wake = 1,
+ GEN7_FEATURES,
+ .is_haswell = 1,
+ .is_mobile = 1,
};
static const struct pci_device_id pciidlist[] = { /* aka */
@@ -356,6 +360,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
+ INTEL_QUANTA_VGA_DEVICE(&intel_ivybridge_q_info), /* Quanta transcode */
INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
@@ -394,6 +399,9 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
+ INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
+ INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info),
+ INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
{0, 0, 0}
@@ -408,6 +416,15 @@ void intel_detect_pch(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pch;
+ /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
+ * (which really amounts to a PCH but no South Display).
+ */
+ if (INTEL_INFO(dev)->num_pipes == 0) {
+ dev_priv->pch_type = PCH_NOP;
+ dev_priv->num_pch_pll = 0;
+ return;
+ }
+
/*
* The reason to probe ISA bridge instead of Dev31:Fun0 is to
* make graphics device passthrough work easy for VMM, that only
@@ -442,11 +459,13 @@ void intel_detect_pch(struct drm_device *dev)
dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev));
+ WARN_ON(IS_ULT(dev));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev));
+ WARN_ON(!IS_ULT(dev));
}
BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
}
@@ -474,6 +493,7 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
/* ignore lid events during suspend */
mutex_lock(&dev_priv->modeset_restore_lock);
@@ -497,10 +517,14 @@ static int i915_drm_freeze(struct drm_device *dev)
cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
- intel_modeset_disable(dev);
-
drm_irq_uninstall(dev);
dev_priv->enable_hotplug_processing = false;
+ /*
+ * Disable CRTCs directly since we want to preserve sw state
+ * for _thaw.
+ */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ dev_priv->display.crtc_disable(crtc);
}
i915_save_state(dev);
@@ -556,6 +580,24 @@ void intel_console_resume(struct work_struct *work)
console_unlock();
}
+static void intel_resume_hotplug(struct drm_device *dev)
+{
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+
+ mutex_lock(&mode_config->mutex);
+ DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
+ if (encoder->hot_plug)
+ encoder->hot_plug(encoder);
+
+ mutex_unlock(&mode_config->mutex);
+
+ /* Just fire off a uevent and let userspace tell us what to do */
+ drm_helper_hpd_irq_event(dev);
+}
+
static int __i915_drm_thaw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -578,7 +620,10 @@ static int __i915_drm_thaw(struct drm_device *dev)
drm_irq_install(dev);
intel_modeset_init_hw(dev);
- intel_modeset_setup_hw_state(dev, false);
+
+ drm_modeset_lock_all(dev);
+ intel_modeset_setup_hw_state(dev, true);
+ drm_modeset_unlock_all(dev);
/*
* ... but also need to make sure that hotplug processing
@@ -588,6 +633,8 @@ static int __i915_drm_thaw(struct drm_device *dev)
* */
intel_hpd_init(dev);
dev_priv->enable_hotplug_processing = true;
+ /* Config may have changed between suspend and resume */
+ intel_resume_hotplug(dev);
}
intel_opregion_init(dev);
@@ -732,6 +779,7 @@ static int ironlake_do_reset(struct drm_device *dev)
int ret;
gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+ gdrst &= ~GRDOM_MASK;
I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
@@ -740,6 +788,7 @@ static int ironlake_do_reset(struct drm_device *dev)
/* We can't reset render&media without also resetting display ... */
gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+ gdrst &= ~GRDOM_MASK;
I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
@@ -803,7 +852,7 @@ int intel_gpu_reset(struct drm_device *dev)
/* Also reset the gpu hangman. */
if (dev_priv->gpu_error.stop_rings) {
- DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
+ DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
dev_priv->gpu_error.stop_rings = 0;
if (ret == -ENODEV) {
DRM_ERROR("Reset not implemented, but ignoring "
@@ -882,7 +931,11 @@ int i915_reset(struct drm_device *dev)
ring->init(ring);
i915_gem_context_init(dev);
- i915_gem_init_ppgtt(dev);
+ if (dev_priv->mm.aliasing_ppgtt) {
+ ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
+ if (ret)
+ i915_gem_cleanup_aliasing_ppgtt(dev);
+ }
/*
* It would make sense to re-init all the other hw state, at
@@ -1147,6 +1200,27 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
I915_WRITE_NOTRACE(MI_MODE, 0);
}
+static void
+hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
+{
+ if (IS_HASWELL(dev_priv->dev) &&
+ (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+ DRM_ERROR("Unknown unclaimed register before writing to %x\n",
+ reg);
+ I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ }
+}
+
+static void
+hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
+{
+ if (IS_HASWELL(dev_priv->dev) &&
+ (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+ DRM_ERROR("Unclaimed write to %x\n", reg);
+ I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ }
+}
+
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
@@ -1183,18 +1257,12 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
} \
if (IS_GEN5(dev_priv->dev)) \
ilk_dummy_write(dev_priv); \
- if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
- DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
- I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
- } \
+ hsw_unclaimed_reg_clear(dev_priv, reg); \
write##y(val, dev_priv->regs + reg); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
- if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
- DRM_ERROR("Unclaimed write to %x\n", reg); \
- writel(ERR_INT_MMIO_UNCLAIMED, dev_priv->regs + GEN7_ERR_INT); \
- } \
+ hsw_unclaimed_reg_check(dev_priv, reg); \
}
__i915_write(8, b)
__i915_write(16, w)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 01769e2..d5dcf7f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -86,6 +86,19 @@ enum port {
};
#define port_name(p) ((p) + 'A')
+enum hpd_pin {
+ HPD_NONE = 0,
+ HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
+ HPD_TV = HPD_NONE, /* TV is known to be unreliable */
+ HPD_CRT,
+ HPD_SDVO_B,
+ HPD_SDVO_C,
+ HPD_PORT_B,
+ HPD_PORT_C,
+ HPD_PORT_D,
+ HPD_NUM_PINS
+};
+
#define I915_GEM_GPU_DOMAINS \
(I915_GEM_DOMAIN_RENDER | \
I915_GEM_DOMAIN_SAMPLER | \
@@ -93,7 +106,7 @@ enum port {
I915_GEM_DOMAIN_INSTRUCTION | \
I915_GEM_DOMAIN_VERTEX)
-#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
+#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
@@ -182,9 +195,9 @@ struct drm_i915_master_private {
struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
-#define I915_MAX_NUM_FENCES 16
-/* 16 fences + sign bit for FENCE_REG_NONE */
-#define I915_MAX_NUM_FENCE_BITS 5
+#define I915_MAX_NUM_FENCES 32
+/* 32 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 6
struct drm_i915_fence_reg {
struct list_head lru_list;
@@ -243,7 +256,7 @@ struct drm_i915_error_state {
int page_count;
u32 gtt_offset;
u32 *pages[0];
- } *ringbuffer, *batchbuffer;
+ } *ringbuffer, *batchbuffer, *ctx;
struct drm_i915_error_request {
long jiffies;
u32 seqno;
@@ -271,6 +284,9 @@ struct drm_i915_error_state {
struct intel_display_error_state *display;
};
+struct intel_crtc_config;
+struct intel_crtc;
+
struct drm_i915_display_funcs {
bool (*fbc_enabled)(struct drm_device *dev);
void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
@@ -283,9 +299,11 @@ struct drm_i915_display_funcs {
void (*update_linetime_wm)(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
void (*modeset_global_resources)(struct drm_device *dev);
+ /* Returns the active state of the crtc, and if the crtc is active,
+ * fills out the pipe-config with the hw state. */
+ bool (*get_pipe_config)(struct intel_crtc *,
+ struct intel_crtc_config *);
int (*crtc_mode_set)(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb);
void (*crtc_enable)(struct drm_crtc *crtc);
@@ -341,6 +359,7 @@ struct drm_i915_gt_funcs {
struct intel_device_info {
u32 display_mmio_offset;
+ u8 num_pipes:3;
u8 gen;
u8 is_mobile:1;
u8 is_i85x:1;
@@ -430,6 +449,7 @@ struct i915_hw_ppgtt {
struct sg_table *st,
unsigned int pg_start,
enum i915_cache_level cache_level);
+ int (*enable)(struct drm_device *dev);
void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
};
@@ -460,6 +480,7 @@ enum intel_pch {
PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint PCH */
PCH_LPT, /* Lynxpoint PCH */
+ PCH_NOP,
};
enum intel_sbi_destination {
@@ -647,6 +668,7 @@ struct intel_gen6_power_mgmt {
u8 cur_delay;
u8 min_delay;
u8 max_delay;
+ u8 hw_max;
struct delayed_work delayed_resume_work;
@@ -905,16 +927,24 @@ typedef struct drm_i915_private {
struct mutex dpio_lock;
/** Cached value of IMR to avoid reads in updating the bitfield */
- u32 pipestat[2];
u32 irq_mask;
u32 gt_irq_mask;
- u32 hotplug_supported_mask;
struct work_struct hotplug_work;
bool enable_hotplug_processing;
+ struct {
+ unsigned long hpd_last_jiffies;
+ int hpd_cnt;
+ enum {
+ HPD_ENABLED = 0,
+ HPD_DISABLED = 1,
+ HPD_MARK_DISABLED = 2
+ } hpd_mark;
+ } hpd_stats[HPD_NUM_PINS];
+ struct timer_list hotplug_reenable_timer;
- int num_pipe;
int num_pch_pll;
+ int num_plane;
unsigned long cfb_size;
unsigned int cfb_fb;
@@ -928,9 +958,14 @@ typedef struct drm_i915_private {
struct intel_overlay *overlay;
unsigned int sprite_scaling_enabled;
+ /* backlight */
+ struct {
+ int level;
+ bool enabled;
+ struct backlight_device *device;
+ } backlight;
+
/* LVDS info */
- int backlight_level; /* restore backlight to this value */
- bool backlight_enabled;
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -941,6 +976,7 @@ typedef struct drm_i915_private {
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
+ unsigned int fdi_rx_polarity_inverted:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
struct {
@@ -1032,8 +1068,6 @@ typedef struct drm_i915_private {
*/
struct work_struct console_resume_work;
- struct backlight_device *backlight;
-
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
@@ -1340,6 +1374,7 @@ struct drm_i915_file_private {
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
#define HAS_DDI(dev) (IS_HASWELL(dev))
+#define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -1352,6 +1387,7 @@ struct drm_i915_file_private {
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
@@ -1529,17 +1565,12 @@ void i915_gem_lastclose(struct drm_device *dev);
int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
- struct scatterlist *sg = obj->pages->sgl;
- int nents = obj->pages->nents;
- while (nents > SG_MAX_SINGLE_ALLOC) {
- if (n < SG_MAX_SINGLE_ALLOC - 1)
- break;
-
- sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
- n -= SG_MAX_SINGLE_ALLOC - 1;
- nents -= SG_MAX_SINGLE_ALLOC - 1;
- }
- return sg_page(sg+n);
+ struct sg_page_iter sg_iter;
+
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
+ return sg_page_iter_page(&sg_iter);
+
+ return NULL;
}
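
The rewritten i915_gem_object_get_page() above leans on the generic scatterlist page iterator instead of manually following chained sg tables: for_each_sg_page() takes the starting page offset as its last argument and hides the chaining entirely. A minimal, self-contained sketch of the same API — count_sg_pages() is a hypothetical helper, assuming an already populated sg_table:

#include <linux/scatterlist.h>

/* Count the pages described by an sg_table; illustration only. */
static unsigned int count_sg_pages(struct sg_table *st)
{
	struct sg_page_iter sg_iter;
	unsigned int count = 0;

	/* Arguments: first scatterlist, iterator, number of entries to walk,
	 * and the page offset to start from (0 = the beginning). */
	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
		count++;

	return count;
}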
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
@@ -1624,7 +1655,6 @@ int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
void i915_gem_l3_remap(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
-void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
@@ -1718,6 +1748,11 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
+ u32 stolen_offset,
+ u32 gtt_offset,
+ u32 size);
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
/* i915_gem_tiling.c */
@@ -1848,6 +1883,8 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
+int valleyview_punit_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val);
+int valleyview_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
@@ -1901,4 +1938,9 @@ static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
return VGACNTRL;
}
+static inline void __user *to_user_ptr(u64 address)
+{
+ return (void __user *)(uintptr_t)address;
+}
+
#endif
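
The to_user_ptr() helper added above centralises the u64-to-user-pointer conversion used by the GEM ioctls in the following hunks; the intermediate uintptr_t cast is what avoids "cast to pointer from integer of different size" warnings on 32-bit kernels, where a direct cast from a 64-bit value would be flagged. A short, hypothetical usage sketch (demo_args and demo_copy_in are not part of the patch):

#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical ioctl argument block mirroring the GEM ones. */
struct demo_args {
	__u64 data_ptr;	/* user pointer carried in a 64-bit field */
	__u32 size;
};

static int demo_copy_in(void *kbuf, const struct demo_args *args)
{
	if (copy_from_user(kbuf, to_user_ptr(args->data_ptr), args->size))
		return -EFAULT;
	return 0;
}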
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0e207e6..6be940e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -411,10 +411,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
int obj_do_bit17_swizzling, page_do_bit17_swizzling;
int prefaulted = 0;
int needs_clflush = 0;
- struct scatterlist *sg;
- int i;
+ struct sg_page_iter sg_iter;
- user_data = (char __user *) (uintptr_t) args->data_ptr;
+ user_data = to_user_ptr(args->data_ptr);
remain = args->size;
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
@@ -441,11 +440,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
offset = args->offset;
- for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
- struct page *page;
-
- if (i < offset >> PAGE_SHIFT)
- continue;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+ offset >> PAGE_SHIFT) {
+ struct page *page = sg_page_iter_page(&sg_iter);
if (remain <= 0)
break;
@@ -460,7 +457,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset;
- page = sg_page(sg);
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0;
@@ -522,7 +518,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
return 0;
if (!access_ok(VERIFY_WRITE,
- (char __user *)(uintptr_t)args->data_ptr,
+ to_user_ptr(args->data_ptr),
args->size))
return -EFAULT;
@@ -613,7 +609,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
if (ret)
goto out_unpin;
- user_data = (char __user *) (uintptr_t) args->data_ptr;
+ user_data = to_user_ptr(args->data_ptr);
remain = args->size;
offset = obj->gtt_offset + args->offset;
@@ -732,10 +728,9 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
int hit_slowpath = 0;
int needs_clflush_after = 0;
int needs_clflush_before = 0;
- int i;
- struct scatterlist *sg;
+ struct sg_page_iter sg_iter;
- user_data = (char __user *) (uintptr_t) args->data_ptr;
+ user_data = to_user_ptr(args->data_ptr);
remain = args->size;
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
@@ -768,13 +763,11 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
offset = args->offset;
obj->dirty = 1;
- for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
- struct page *page;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+ offset >> PAGE_SHIFT) {
+ struct page *page = sg_page_iter_page(&sg_iter);
int partial_cacheline_write;
- if (i < offset >> PAGE_SHIFT)
- continue;
-
if (remain <= 0)
break;
@@ -796,7 +789,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
((shmem_page_offset | page_length)
& (boot_cpu_data.x86_clflush_size - 1));
- page = sg_page(sg);
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0;
@@ -867,11 +859,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
return 0;
if (!access_ok(VERIFY_READ,
- (char __user *)(uintptr_t)args->data_ptr,
+ to_user_ptr(args->data_ptr),
args->size))
return -EFAULT;
- ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
+ ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
args->size);
if (ret)
return -EFAULT;
@@ -1633,9 +1625,8 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
- int page_count = obj->base.size / PAGE_SIZE;
- struct scatterlist *sg;
- int ret, i;
+ struct sg_page_iter sg_iter;
+ int ret;
BUG_ON(obj->madv == __I915_MADV_PURGED);
@@ -1655,8 +1646,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
if (obj->madv == I915_MADV_DONTNEED)
obj->dirty = 0;
- for_each_sg(obj->pages->sgl, sg, page_count, i) {
- struct page *page = sg_page(sg);
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+ struct page *page = sg_page_iter_page(&sg_iter);
if (obj->dirty)
set_page_dirty(page);
@@ -1757,7 +1748,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
struct address_space *mapping;
struct sg_table *st;
struct scatterlist *sg;
+ struct sg_page_iter sg_iter;
struct page *page;
+ unsigned long last_pfn = 0; /* suppress gcc warning */
gfp_t gfp;
/* Assert that the object is not currently in any GPU domain. As it
@@ -1787,7 +1780,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
gfp = mapping_gfp_mask(mapping);
gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
gfp &= ~(__GFP_IO | __GFP_WAIT);
- for_each_sg(st->sgl, sg, page_count, i) {
+ sg = st->sgl;
+ st->nents = 0;
+ for (i = 0; i < page_count; i++) {
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
if (IS_ERR(page)) {
i915_gem_purge(dev_priv, page_count);
@@ -1810,9 +1805,18 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
gfp &= ~(__GFP_IO | __GFP_WAIT);
}
- sg_set_page(sg, page, PAGE_SIZE, 0);
+ if (!i || page_to_pfn(page) != last_pfn + 1) {
+ if (i)
+ sg = sg_next(sg);
+ st->nents++;
+ sg_set_page(sg, page, PAGE_SIZE, 0);
+ } else {
+ sg->length += PAGE_SIZE;
+ }
+ last_pfn = page_to_pfn(page);
}
+ sg_mark_end(sg);
obj->pages = st;
if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -1821,8 +1825,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
return 0;
err_pages:
- for_each_sg(st->sgl, sg, i, page_count)
- page_cache_release(sg_page(sg));
+ sg_mark_end(sg);
+ for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+ page_cache_release(sg_page_iter_page(&sg_iter));
sg_free_table(st);
kfree(st);
return PTR_ERR(page);
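
The reworked allocation loop above no longer emits one scatterlist entry per page: when the next page's pfn directly follows the previous one, the current entry is simply grown by PAGE_SIZE, st->nents counts only the entries actually used, and sg_mark_end() terminates the shortened list. A simplified, self-contained sketch of that coalescing idea — build_coalesced_sg() is a hypothetical helper with error handling kept minimal:

#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Build an sg_table over 'count' pages, merging physically contiguous runs. */
static int build_coalesced_sg(struct sg_table *st, struct page **pages,
			      unsigned int count)
{
	struct scatterlist *sg;
	unsigned long last_pfn = 0;
	unsigned int i;

	/* Worst case is one entry per page. */
	if (sg_alloc_table(st, count, GFP_KERNEL))
		return -ENOMEM;

	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < count; i++) {
		if (!i || page_to_pfn(pages[i]) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);
			st->nents++;
			sg_set_page(sg, pages[i], PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;	/* extend the current run */
		}
		last_pfn = page_to_pfn(pages[i]);
	}
	sg_mark_end(sg);
	return 0;
}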
@@ -2123,11 +2128,11 @@ static void i915_gem_reset_fences(struct drm_device *dev)
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
- i915_gem_write_fence(dev, i, NULL);
-
if (reg->obj)
i915_gem_object_fence_lost(reg->obj);
+ i915_gem_write_fence(dev, i, NULL);
+
reg->pin_count = 0;
reg->obj = NULL;
INIT_LIST_HEAD(&reg->lru_list);
@@ -2678,17 +2683,35 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
return fence - dev_priv->fence_regs;
}
+static void i915_gem_write_fence__ipi(void *data)
+{
+ wbinvd();
+}
+
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- int reg = fence_number(dev_priv, fence);
-
- i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int fence_reg = fence_number(dev_priv, fence);
+
+ /* In order to fully serialize access to the fenced region and
+ * the update to the fence register we need to take extreme
+ * measures on SNB+. In theory, the write to the fence register
+ * flushes all memory transactions before, and coupled with the
+ * mb() placed around the register write we serialise all memory
+ * operations with respect to the changes in the tiler. Yet, on
+ * SNB+ we need to take a step further and emit an explicit wbinvd()
+ * on each processor in order to manually flush all memory
+ * transactions before updating the fence register.
+ */
+ if (HAS_LLC(obj->base.dev))
+ on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
+ i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
if (enable) {
- obj->fence_reg = reg;
+ obj->fence_reg = fence_reg;
fence->obj = obj;
list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
} else {
@@ -2717,6 +2740,7 @@ int
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_fence_reg *fence;
int ret;
ret = i915_gem_object_wait_fence(obj);
@@ -2726,10 +2750,10 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
if (obj->fence_reg == I915_FENCE_REG_NONE)
return 0;
- i915_gem_object_update_fence(obj,
- &dev_priv->fence_regs[obj->fence_reg],
- false);
+ fence = &dev_priv->fence_regs[obj->fence_reg];
+
i915_gem_object_fence_lost(obj);
+ i915_gem_object_update_fence(obj, fence, false);
return 0;
}
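
The comment added to i915_gem_object_update_fence() above explains why, on LLC platforms, every CPU has to write back and invalidate its caches before the fence register is rewritten. The mechanism used for that is on_each_cpu(), which runs an IPI callback on all online CPUs and, with the last argument set to 1, waits for them all to finish. A stripped-down sketch of the pattern — x86-only, with hypothetical function names:

#include <linux/smp.h>
#include <asm/special_insns.h>	/* wbinvd() */

static void flush_all_caches_ipi(void *unused)
{
	wbinvd();	/* write back and invalidate this CPU's caches */
}

static void flush_all_caches(void)
{
	/* Arguments: callback, opaque info pointer, wait-for-completion flag. */
	on_each_cpu(flush_all_caches_ipi, NULL, 1);
}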
@@ -3986,6 +4010,12 @@ i915_gem_init_hw(struct drm_device *dev)
if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
+ if (HAS_PCH_NOP(dev)) {
+ u32 temp = I915_READ(GEN7_MSG_CTL);
+ temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
+ I915_WRITE(GEN7_MSG_CTL, temp);
+ }
+
i915_gem_l3_remap(dev);
i915_gem_init_swizzling(dev);
@@ -3999,7 +4029,13 @@ i915_gem_init_hw(struct drm_device *dev)
* contexts before PPGTT.
*/
i915_gem_context_init(dev);
- i915_gem_init_ppgtt(dev);
+ if (dev_priv->mm.aliasing_ppgtt) {
+ ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
+ if (ret) {
+ i915_gem_cleanup_aliasing_ppgtt(dev);
+ DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
+ }
+ }
return 0;
}
@@ -4010,7 +4046,16 @@ int i915_gem_init(struct drm_device *dev)
int ret;
mutex_lock(&dev->struct_mutex);
+
+ if (IS_VALLEYVIEW(dev)) {
+ /* VLVA0 (potential hack), BIOS isn't actually waking us */
+ I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
+ if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
+ DRM_DEBUG_DRIVER("allow wake ack timed out\n");
+ }
+
i915_gem_init_global_gtt(dev);
+
ret = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
if (ret) {
@@ -4145,7 +4190,9 @@ i915_gem_load(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
dev_priv->fence_reg_start = 3;
- if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
+ dev_priv->num_fence_regs = 32;
+ else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
dev_priv->num_fence_regs = 16;
else
dev_priv->num_fence_regs = 8;
@@ -4327,7 +4374,7 @@ i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_file *file_priv)
{
void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
- char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
+ char __user *user_data = to_user_ptr(args->data_ptr);
if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
unsigned long unwritten;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 94d873a..a1e8ecb 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -152,6 +152,13 @@ create_hw_context(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}
+ if (INTEL_INFO(dev)->gen >= 7) {
+ ret = i915_gem_object_set_cache_level(ctx->obj,
+ I915_CACHE_LLC_MLC);
+ if (ret)
+ goto err_out;
+ }
+
/* The ring associated with the context object is handled by the normal
* object tracking code. We give an initial ring value simply to pass an
* assertion in the context switch code.
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 6a5af68..dc53a52 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -62,7 +62,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
src = obj->pages->sgl;
dst = st->sgl;
for (i = 0; i < obj->pages->nents; i++) {
- sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
+ sg_set_page(dst, sg_page(src), src->length, 0);
dst = sg_next(dst);
src = sg_next(src);
}
@@ -105,7 +105,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_i915_gem_object *obj = dma_buf->priv;
struct drm_device *dev = obj->base.dev;
- struct scatterlist *sg;
+ struct sg_page_iter sg_iter;
struct page **pages;
int ret, i;
@@ -124,14 +124,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
ret = -ENOMEM;
- pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *));
+ pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
if (pages == NULL)
goto error;
- for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i)
- pages[i] = sg_page(sg);
+ i = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
+ pages[i++] = sg_page_iter_page(&sg_iter);
- obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL);
+ obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
drm_free_large(pages);
if (!obj->dma_buf_vmapping)
@@ -271,7 +272,6 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
* refcount on gem itself instead of f_count of dmabuf.
*/
drm_gem_object_reference(&obj->base);
- dma_buf_put(dma_buf);
return &obj->base;
}
}
@@ -281,6 +281,8 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
if (IS_ERR(attach))
return ERR_CAST(attach);
+ get_dma_buf(dma_buf);
+
obj = i915_gem_object_alloc(dev);
if (obj == NULL) {
ret = -ENOMEM;
@@ -300,5 +302,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
fail_detach:
dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 9a48e1a..117ce38 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -305,7 +305,7 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
int remain, ret;
- user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+ user_relocs = to_user_ptr(entry->relocs_ptr);
remain = entry->relocation_count;
while (remain) {
@@ -359,8 +359,7 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
}
static int
-i915_gem_execbuffer_relocate(struct drm_device *dev,
- struct eb_objects *eb)
+i915_gem_execbuffer_relocate(struct eb_objects *eb)
{
struct drm_i915_gem_object *obj;
int ret = 0;
@@ -475,7 +474,6 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
- struct drm_file *file,
struct list_head *objects,
bool *need_relocs)
{
@@ -618,7 +616,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
u64 invalid_offset = (u64)-1;
int j;
- user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
+ user_relocs = to_user_ptr(exec[i].relocs_ptr);
if (copy_from_user(reloc+total, user_relocs,
exec[i].relocation_count * sizeof(*reloc))) {
@@ -663,7 +661,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
goto err;
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
if (ret)
goto err;
@@ -736,7 +734,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
for (i = 0; i < count; i++) {
- char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
+ char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
int length; /* limited by fault_in_pages_readable() */
if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
@@ -752,7 +750,11 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
length = exec[i].relocation_count *
sizeof(struct drm_i915_gem_relocation_entry);
- /* we may also need to update the presumed offsets */
+ /*
+ * We must check that the entire relocation array is safe
+ * to read, but since we may need to update the presumed
+ * offsets during execution, check for full write access.
+ */
if (!access_ok(VERIFY_WRITE, ptr, length))
return -EFAULT;
@@ -949,9 +951,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
if (copy_from_user(cliprects,
- (struct drm_clip_rect __user *)(uintptr_t)
- args->cliprects_ptr,
- sizeof(*cliprects)*args->num_cliprects)) {
+ to_user_ptr(args->cliprects_ptr),
+ sizeof(*cliprects)*args->num_cliprects)) {
ret = -EFAULT;
goto pre_mutex_err;
}
@@ -986,13 +987,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
if (ret)
goto err;
/* The objects are in their final locations, apply the relocations. */
if (need_relocs)
- ret = i915_gem_execbuffer_relocate(dev, eb);
+ ret = i915_gem_execbuffer_relocate(eb);
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
@@ -1115,7 +1116,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
return -ENOMEM;
}
ret = copy_from_user(exec_list,
- (void __user *)(uintptr_t)args->buffers_ptr,
+ to_user_ptr(args->buffers_ptr),
sizeof(*exec_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1154,7 +1155,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
for (i = 0; i < args->buffer_count; i++)
exec_list[i].offset = exec2_list[i].offset;
/* ... and back out to userspace */
- ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
+ ret = copy_to_user(to_user_ptr(args->buffers_ptr),
exec_list,
sizeof(*exec_list) * args->buffer_count);
if (ret) {
@@ -1195,8 +1196,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -ENOMEM;
}
ret = copy_from_user(exec2_list,
- (struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
+ to_user_ptr(args->buffers_ptr),
sizeof(*exec2_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1208,7 +1208,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
- ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
+ ret = copy_to_user(to_user_ptr(args->buffers_ptr),
exec2_list,
sizeof(*exec2_list) * args->buffer_count);
if (ret) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 926a1e2..dca614d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -28,7 +28,7 @@
#include "i915_trace.h"
#include "intel_drv.h"
-typedef uint32_t gtt_pte_t;
+typedef uint32_t gen6_gtt_pte_t;
/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
@@ -44,11 +44,11 @@ typedef uint32_t gtt_pte_t;
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-static inline gtt_pte_t gen6_pte_encode(struct drm_device *dev,
- dma_addr_t addr,
- enum i915_cache_level level)
+static inline gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
+ dma_addr_t addr,
+ enum i915_cache_level level)
{
- gtt_pte_t pte = GEN6_PTE_VALID;
+ gen6_gtt_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -72,18 +72,85 @@ static inline gtt_pte_t gen6_pte_encode(struct drm_device *dev,
BUG();
}
-
return pte;
}
+static int gen6_ppgtt_enable(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t pd_offset;
+ struct intel_ring_buffer *ring;
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ gen6_gtt_pte_t __iomem *pd_addr;
+ uint32_t pd_entry;
+ int i;
+
+ pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
+ ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ dma_addr_t pt_addr;
+
+ pt_addr = ppgtt->pt_dma_addr[i];
+ pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
+ pd_entry |= GEN6_PDE_VALID;
+
+ writel(pd_entry, pd_addr + i);
+ }
+ readl(pd_addr);
+
+ pd_offset = ppgtt->pd_offset;
+ pd_offset /= 64; /* in cachelines, */
+ pd_offset <<= 16;
+
+ if (INTEL_INFO(dev)->gen == 6) {
+ uint32_t ecochk, gab_ctl, ecobits;
+
+ ecobits = I915_READ(GAC_ECO_BITS);
+ I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
+ ECOBITS_PPGTT_CACHE64B);
+
+ gab_ctl = I915_READ(GAB_CTL);
+ I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+ ecochk = I915_READ(GAM_ECOCHK);
+ I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
+ ECOCHK_PPGTT_CACHE64B);
+ I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ } else if (INTEL_INFO(dev)->gen >= 7) {
+ uint32_t ecochk, ecobits;
+
+ ecobits = I915_READ(GAC_ECO_BITS);
+ I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+
+ ecochk = I915_READ(GAM_ECOCHK);
+ if (IS_HASWELL(dev)) {
+ ecochk |= ECOCHK_PPGTT_WB_HSW;
+ } else {
+ ecochk |= ECOCHK_PPGTT_LLC_IVB;
+ ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
+ }
+ I915_WRITE(GAM_ECOCHK, ecochk);
+ /* GFX_MODE is per-ring on gen7+ */
+ }
+
+ for_each_ring(ring, dev_priv, i) {
+ if (INTEL_INFO(dev)->gen >= 7)
+ I915_WRITE(RING_MODE_GEN7(ring),
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+
+ I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
+ }
+ return 0;
+}
+
/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
unsigned first_entry,
unsigned num_entries)
{
- gtt_pte_t *pt_vaddr;
- gtt_pte_t scratch_pte;
- unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+ gen6_gtt_pte_t *pt_vaddr, scratch_pte;
+ unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
@@ -96,7 +163,7 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
if (last_pte > I915_PPGTT_PT_ENTRIES)
last_pte = I915_PPGTT_PT_ENTRIES;
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
+ pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
for (i = first_pte; i < last_pte; i++)
pt_vaddr[i] = scratch_pte;
@@ -105,7 +172,7 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
num_entries -= last_pte - first_pte;
first_pte = 0;
- act_pd++;
+ act_pt++;
}
}
@@ -114,43 +181,27 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
unsigned first_entry,
enum i915_cache_level cache_level)
{
- gtt_pte_t *pt_vaddr;
- unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
- unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
- unsigned i, j, m, segment_len;
- dma_addr_t page_addr;
- struct scatterlist *sg;
-
- /* init sg walking */
- sg = pages->sgl;
- i = 0;
- segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
- m = 0;
-
- while (i < pages->nents) {
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
-
- for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
- page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
- pt_vaddr[j] = gen6_pte_encode(ppgtt->dev, page_addr,
- cache_level);
-
- /* grab the next page */
- if (++m == segment_len) {
- if (++i == pages->nents)
- break;
-
- sg = sg_next(sg);
- segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
- m = 0;
- }
- }
+ gen6_gtt_pte_t *pt_vaddr;
+ unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
+ unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+ struct sg_page_iter sg_iter;
- kunmap_atomic(pt_vaddr);
+ pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+ for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
+ dma_addr_t page_addr;
- first_pte = 0;
- act_pd++;
+ page_addr = sg_page_iter_dma_address(&sg_iter);
+ pt_vaddr[act_pte] = gen6_pte_encode(ppgtt->dev, page_addr,
+ cache_level);
+ if (++act_pte == I915_PPGTT_PT_ENTRIES) {
+ kunmap_atomic(pt_vaddr);
+ act_pt++;
+ pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+ act_pte = 0;
+
+ }
}
+ kunmap_atomic(pt_vaddr);
}
static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
@@ -182,10 +233,10 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
* entries. For aliasing ppgtt support we just steal them at the end for
* now. */
- first_pd_entry_in_global_pt =
- gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES;
+ first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
+ ppgtt->enable = gen6_ppgtt_enable;
ppgtt->clear_range = gen6_ppgtt_clear_range;
ppgtt->insert_entries = gen6_ppgtt_insert_entries;
ppgtt->cleanup = gen6_ppgtt_cleanup;
@@ -219,12 +270,10 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->pt_dma_addr[i] = pt_addr;
}
- ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
-
ppgtt->clear_range(ppgtt, 0,
ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
- ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
+ ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
return 0;
@@ -256,8 +305,13 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
return -ENOMEM;
ppgtt->dev = dev;
+ ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
+
+ if (INTEL_INFO(dev)->gen < 8)
+ ret = gen6_ppgtt_init(ppgtt);
+ else
+ BUG();
- ret = gen6_ppgtt_init(ppgtt);
if (ret)
kfree(ppgtt);
else
@@ -275,6 +329,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
return;
ppgtt->cleanup(ppgtt);
+ dev_priv->mm.aliasing_ppgtt = NULL;
}
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
@@ -294,64 +349,6 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
obj->base.size >> PAGE_SHIFT);
}
-void i915_gem_init_ppgtt(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t pd_offset;
- struct intel_ring_buffer *ring;
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- gtt_pte_t __iomem *pd_addr;
- uint32_t pd_entry;
- int i;
-
- if (!dev_priv->mm.aliasing_ppgtt)
- return;
-
-
- pd_addr = (gtt_pte_t __iomem*)dev_priv->gtt.gsm + ppgtt->pd_offset/sizeof(gtt_pte_t);
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- dma_addr_t pt_addr;
-
- pt_addr = ppgtt->pt_dma_addr[i];
- pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
- pd_entry |= GEN6_PDE_VALID;
-
- writel(pd_entry, pd_addr + i);
- }
- readl(pd_addr);
-
- pd_offset = ppgtt->pd_offset;
- pd_offset /= 64; /* in cachelines, */
- pd_offset <<= 16;
-
- if (INTEL_INFO(dev)->gen == 6) {
- uint32_t ecochk, gab_ctl, ecobits;
-
- ecobits = I915_READ(GAC_ECO_BITS);
- I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
-
- gab_ctl = I915_READ(GAB_CTL);
- I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
-
- ecochk = I915_READ(GAM_ECOCHK);
- I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
- ECOCHK_PPGTT_CACHE64B);
- I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
- } else if (INTEL_INFO(dev)->gen >= 7) {
- I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
- /* GFX_MODE is per-ring on gen7+ */
- }
-
- for_each_ring(ring, dev_priv, i) {
- if (INTEL_INFO(dev)->gen >= 7)
- I915_WRITE(RING_MODE_GEN7(ring),
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-
- I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
- I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
- }
-}
-
extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled.
@@ -432,21 +429,16 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
enum i915_cache_level level)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct scatterlist *sg = st->sgl;
- gtt_pte_t __iomem *gtt_entries =
- (gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
- int unused, i = 0;
- unsigned int len, m = 0;
+ gen6_gtt_pte_t __iomem *gtt_entries =
+ (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+ int i = 0;
+ struct sg_page_iter sg_iter;
dma_addr_t addr;
- for_each_sg(st->sgl, sg, st->nents, unused) {
- len = sg_dma_len(sg) >> PAGE_SHIFT;
- for (m = 0; m < len; m++) {
- addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
- iowrite32(gen6_pte_encode(dev, addr, level),
- &gtt_entries[i]);
- i++;
- }
+ for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
+ addr = sg_page_iter_dma_address(&sg_iter);
+ iowrite32(gen6_pte_encode(dev, addr, level), &gtt_entries[i]);
+ i++;
}
/* XXX: This serves as a posting read to make sure that the PTE has
@@ -472,8 +464,8 @@ static void gen6_ggtt_clear_range(struct drm_device *dev,
unsigned int num_entries)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- gtt_pte_t scratch_pte;
- gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
+ gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
+ (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
int i;
@@ -647,9 +639,12 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
int ret;
- /* PPGTT pdes are stolen from global gtt ptes, so shrink the
- * aperture accordingly when using aliasing ppgtt. */
- gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+
+ if (INTEL_INFO(dev)->gen <= 7) {
+ /* PPGTT pdes are stolen from global gtt ptes, so shrink the
+ * aperture accordingly when using aliasing ppgtt. */
+ gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+ }
i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
@@ -752,15 +747,17 @@ static int gen6_gmch_probe(struct drm_device *dev,
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
- if (IS_GEN7(dev))
+ if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev))
*stolen = gen7_get_stolen_size(snb_gmch_ctl);
else
*stolen = gen6_get_stolen_size(snb_gmch_ctl);
- *gtt_total = (gtt_size / sizeof(gtt_pte_t)) << PAGE_SHIFT;
+ *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
+
+ /* For Modern GENs the PTEs and register space are split in the BAR */
+ gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
+ (pci_resource_len(dev->pdev, 0) / 2);
- /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
- gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
if (!dev_priv->gtt.gsm) {
DRM_ERROR("Failed to map the gtt page table\n");
@@ -817,7 +814,6 @@ int i915_gem_gtt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_gtt *gtt = &dev_priv->gtt;
- unsigned long gtt_size;
int ret;
if (INTEL_INFO(dev)->gen <= 5) {
@@ -835,8 +831,6 @@ int i915_gem_gtt_init(struct drm_device *dev)
if (ret)
return ret;
- gtt_size = (dev_priv->gtt.total >> PAGE_SHIFT) * sizeof(gtt_pte_t);
-
/* GMADR is the PCI mmio aperture into the global GTT. */
DRM_INFO("Memory usable by graphics device = %zdM\n",
dev_priv->gtt.total >> 20);
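
Note on the scatterlist changes above: the gen6_ggtt_insert_entries() hunk (and the bit-17 swizzle hunks further down in i915_gem_tiling.c) make the same mechanical conversion, replacing a hand-rolled for_each_sg() loop plus a per-page inner loop with the sg_page_iter helpers, which walk one page at a time regardless of how the sg entries are coalesced. A minimal side-by-side sketch of the two patterns (illustrative only, not part of the patch; the helpers are the stock <linux/scatterlist.h> API):

static void walk_pages_old(struct sg_table *st)
{
	struct scatterlist *sg;
	unsigned int len, m;
	dma_addr_t addr;
	int unused;

	for_each_sg(st->sgl, sg, st->nents, unused) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;	/* pages in this sg entry */
		for (m = 0; m < len; m++) {
			addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			/* ... use one page's DMA address ... */
		}
	}
}

static void walk_pages_new(struct sg_table *st)
{
	struct sg_page_iter sg_iter;
	dma_addr_t addr;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_page_iter_dma_address(&sg_iter);	/* one page per iteration */
		/* ... use one page's DMA address ... */
	}
}
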
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 69d97cb..130d1db 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -312,6 +312,71 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
return NULL;
}
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
+ u32 stolen_offset,
+ u32 gtt_offset,
+ u32 size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ struct drm_mm_node *stolen;
+
+ if (dev_priv->mm.stolen_base == 0)
+ return NULL;
+
+ DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
+ stolen_offset, gtt_offset, size);
+
+ /* KISS and expect everything to be page-aligned */
+ BUG_ON(stolen_offset & 4095);
+ BUG_ON(gtt_offset & 4095);
+ BUG_ON(size & 4095);
+
+ if (WARN_ON(size == 0))
+ return NULL;
+
+ stolen = drm_mm_create_block(&dev_priv->mm.stolen,
+ stolen_offset, size,
+ false);
+ if (stolen == NULL) {
+ DRM_DEBUG_KMS("failed to allocate stolen space\n");
+ return NULL;
+ }
+
+ obj = _i915_gem_object_create_stolen(dev, stolen);
+ if (obj == NULL) {
+ DRM_DEBUG_KMS("failed to allocate stolen object\n");
+ drm_mm_put_block(stolen);
+ return NULL;
+ }
+
+ /* To simplify the initialisation sequence between KMS and GTT,
+ * we allow construction of the stolen object prior to
+ * setting up the GTT space. The actual reservation will occur
+ * later.
+ */
+ if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
+ obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
+ gtt_offset, size,
+ false);
+ if (obj->gtt_space == NULL) {
+ DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
+ drm_gem_object_unreference(&obj->base);
+ return NULL;
+ }
+ } else
+ obj->gtt_space = I915_GTT_RESERVED;
+
+ obj->gtt_offset = gtt_offset;
+ obj->has_global_gtt_mapping = 1;
+
+ list_add_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
+ list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+ return obj;
+}
+
void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
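
The new i915_gem_object_create_stolen_for_preallocated() above wraps an already-reserved chunk of stolen memory in a GEM object at a fixed GTT offset; the caller is not part of this patch, but presumably the point is to let the driver take over a region that firmware has already programmed (for example the boot framebuffer). All of stolen_offset, gtt_offset and size must be page aligned, per the BUG_ONs. A hypothetical call might look like this (offsets and size are made up for illustration):

/* Hypothetical caller -- the real offsets and size would come from
 * firmware/register state, not from this patch. */
struct drm_i915_gem_object *obj;

obj = i915_gem_object_create_stolen_for_preallocated(dev,
						      0x0,		/* stolen_offset */
						      0x0,		/* gtt_offset */
						      256 * 4096);	/* size: 1 MiB */
if (obj == NULL) {
	/* region already in use, or no stolen memory:
	 * fall back to a normally allocated object */
}
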
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index abcba2f..537545b 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -217,9 +217,12 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
tile_width = 512;
/* check maximum stride & object size */
- if (INTEL_INFO(dev)->gen >= 4) {
- /* i965 stores the end address of the gtt mapping in the fence
- * reg, so dont bother to check the size */
+ /* i965+ stores the end address of the gtt mapping in the fence
+ * reg, so don't bother to check the size */
+ if (INTEL_INFO(dev)->gen >= 7) {
+ if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
+ return false;
+ } else if (INTEL_INFO(dev)->gen >= 4) {
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
} else {
@@ -235,6 +238,9 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
}
}
+ if (stride < tile_width)
+ return false;
+
/* 965+ just needs multiples of tile width */
if (INTEL_INFO(dev)->gen >= 4) {
if (stride & (tile_width - 1))
@@ -243,9 +249,6 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
}
/* Pre-965 needs power of two tile widths */
- if (stride < tile_width)
- return false;
-
if (stride & (stride - 1))
return false;
@@ -473,28 +476,29 @@ i915_gem_swizzle_page(struct page *page)
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
- struct scatterlist *sg;
- int page_count = obj->base.size >> PAGE_SHIFT;
+ struct sg_page_iter sg_iter;
int i;
if (obj->bit_17 == NULL)
return;
- for_each_sg(obj->pages->sgl, sg, page_count, i) {
- struct page *page = sg_page(sg);
+ i = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+ struct page *page = sg_page_iter_page(&sg_iter);
char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) !=
(test_bit(i, obj->bit_17) != 0)) {
i915_gem_swizzle_page(page);
set_page_dirty(page);
}
+ i++;
}
}
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
- struct scatterlist *sg;
+ struct sg_page_iter sg_iter;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
@@ -508,11 +512,12 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
}
}
- for_each_sg(obj->pages->sgl, sg, page_count, i) {
- struct page *page = sg_page(sg);
- if (page_to_phys(page) & (1 << 17))
+ i = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+ if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
__set_bit(i, obj->bit_17);
else
__clear_bit(i, obj->bit_17);
+ i++;
}
}
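
Two things change in i915_tiling_ok() above: the stride < tile_width sanity check now runs for every generation (it used to sit only in the pre-965 branch), and gen7+ gets its own maximum-pitch check against GEN7_FENCE_MAX_PITCH_VAL (0x0800, added in the i915_reg.h hunk below). Since the check divides the stride by 128, the fence pitch field is evidently programmed in 128-byte units, which gives the following worked limit (illustrative sketch, not driver code):

/* Largest fenceable stride on gen7+:
 * 0x0800 units * 128 bytes/unit = 262144 bytes = 256 KiB. */
static bool gen7_stride_fenceable(int stride)
{
	return stride / 128 <= 0x0800;	/* GEN7_FENCE_MAX_PITCH_VAL */
}
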
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3c7bb04..0aa2ef0 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -36,6 +36,61 @@
#include "i915_trace.h"
#include "intel_drv.h"
+static const u32 hpd_ibx[] = {
+ [HPD_CRT] = SDE_CRT_HOTPLUG,
+ [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
+ [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
+ [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
+ [HPD_PORT_D] = SDE_PORTD_HOTPLUG
+};
+
+static const u32 hpd_cpt[] = {
+ [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
+ [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
+ [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
+ [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
+ [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
+};
+
+static const u32 hpd_mask_i915[] = {
+ [HPD_CRT] = CRT_HOTPLUG_INT_EN,
+ [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
+ [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
+ [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
+ [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
+ [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
+};
+
+static const u32 hpd_status_gen4[] = {
+ [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
+ [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
+ [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
+ [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
+ [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
+ [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
+};
+
+static const u32 hpd_status_i965[] = {
+ [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
+ [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965,
+ [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965,
+ [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
+ [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
+ [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
+};
+
+static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
+ [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
+ [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
+ [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
+ [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
+ [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
+ [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
+};
+
+static void ibx_hpd_irq_setup(struct drm_device *dev);
+static void i915_hpd_irq_setup(struct drm_device *dev);
+
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
@@ -47,7 +102,7 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
}
}
-static inline void
+static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->irq_mask & mask) != mask) {
@@ -60,26 +115,30 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
- if ((dev_priv->pipestat[pipe] & mask) != mask) {
- u32 reg = PIPESTAT(pipe);
+ u32 reg = PIPESTAT(pipe);
+ u32 pipestat = I915_READ(reg) & 0x7fff0000;
- dev_priv->pipestat[pipe] |= mask;
- /* Enable the interrupt, clear any pending status */
- I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
- POSTING_READ(reg);
- }
+ if ((pipestat & mask) == mask)
+ return;
+
+ /* Enable the interrupt, clear any pending status */
+ pipestat |= mask | (mask >> 16);
+ I915_WRITE(reg, pipestat);
+ POSTING_READ(reg);
}
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
- if ((dev_priv->pipestat[pipe] & mask) != 0) {
- u32 reg = PIPESTAT(pipe);
+ u32 reg = PIPESTAT(pipe);
+ u32 pipestat = I915_READ(reg) & 0x7fff0000;
- dev_priv->pipestat[pipe] &= ~mask;
- I915_WRITE(reg, dev_priv->pipestat[pipe]);
- POSTING_READ(reg);
- }
+ if ((pipestat & mask) == 0)
+ return;
+
+ pipestat &= ~mask;
+ I915_WRITE(reg, pipestat);
+ POSTING_READ(reg);
}
/**
@@ -250,10 +309,9 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
struct timeval *vblank_time,
unsigned flags)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
- if (pipe < 0 || pipe >= dev_priv->num_pipe) {
+ if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
DRM_ERROR("Invalid crtc %d\n", pipe);
return -EINVAL;
}
@@ -279,13 +337,19 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
/*
* Handle hotplug events outside the interrupt handler proper.
*/
+#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
+
static void i915_hotplug_work_func(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
hotplug_work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct intel_encoder *encoder;
+ struct intel_connector *intel_connector;
+ struct intel_encoder *intel_encoder;
+ struct drm_connector *connector;
+ unsigned long irqflags;
+ bool hpd_disabled = false;
/* HPD irq before everything is fully set up. */
if (!dev_priv->enable_hotplug_processing)
@@ -294,9 +358,36 @@ static void i915_hotplug_work_func(struct work_struct *work)
mutex_lock(&mode_config->mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
- if (encoder->hot_plug)
- encoder->hot_plug(encoder);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ intel_connector = to_intel_connector(connector);
+ intel_encoder = intel_connector->encoder;
+ if (intel_encoder->hpd_pin > HPD_NONE &&
+ dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
+ connector->polled == DRM_CONNECTOR_POLL_HPD) {
+ DRM_INFO("HPD interrupt storm detected on connector %s: "
+ "switching from hotplug detection to polling\n",
+ drm_get_connector_name(connector));
+ dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT
+ | DRM_CONNECTOR_POLL_DISCONNECT;
+ hpd_disabled = true;
+ }
+ }
+ /* if there were no outputs to poll, poll was disabled,
+ * therefore make sure it's enabled when disabling HPD on
+ * some connectors */
+ if (hpd_disabled) {
+ drm_kms_helper_poll_enable(dev);
+ mod_timer(&dev_priv->hotplug_reenable_timer,
+ jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
+ }
+
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+ if (intel_encoder->hot_plug)
+ intel_encoder->hot_plug(intel_encoder);
mutex_unlock(&mode_config->mutex);
@@ -525,6 +616,45 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
queue_work(dev_priv->wq, &dev_priv->rps.work);
}
+#define HPD_STORM_DETECT_PERIOD 1000
+#define HPD_STORM_THRESHOLD 5
+
+static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
+ u32 hotplug_trigger,
+ const u32 *hpd)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+ int i;
+ bool ret = false;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+
+ for (i = 1; i < HPD_NUM_PINS; i++) {
+
+ if (!(hpd[i] & hotplug_trigger) ||
+ dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
+ continue;
+
+ if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
+ dev_priv->hpd_stats[i].hpd_last_jiffies
+ + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
+ dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
+ dev_priv->hpd_stats[i].hpd_cnt = 0;
+ } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
+ dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
+ DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
+ ret = true;
+ } else {
+ dev_priv->hpd_stats[i].hpd_cnt++;
+ }
+ }
+
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ return ret;
+}
+
static void gmbus_irq_handler(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -593,13 +723,16 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
/* Consume port. Then clear IIR or we'll miss events */
if (iir & I915_DISPLAY_PORT_INTERRUPT) {
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+ u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
- if (hotplug_status & dev_priv->hotplug_supported_mask)
+ if (hotplug_trigger) {
+ if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
+ i915_hpd_irq_setup(dev);
queue_work(dev_priv->wq,
&dev_priv->hotplug_work);
-
+ }
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
}
@@ -623,10 +756,13 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
+ u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
- if (pch_iir & SDE_HOTPLUG_MASK)
+ if (hotplug_trigger) {
+ if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx))
+ ibx_hpd_irq_setup(dev);
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
-
+ }
if (pch_iir & SDE_AUDIO_POWER_MASK)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -669,10 +805,13 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
+ u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
- if (pch_iir & SDE_HOTPLUG_MASK_CPT)
+ if (hotplug_trigger) {
+ if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt))
+ ibx_hpd_irq_setup(dev);
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
-
+ }
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -701,7 +840,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
+ u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
int i;
@@ -716,9 +855,11 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
* able to process them after we restore SDEIER (as soon as we restore
* it, we'll get an interrupt if SDEIIR still has something to process
* due to its back queue). */
- sde_ier = I915_READ(SDEIER);
- I915_WRITE(SDEIER, 0);
- POSTING_READ(SDEIER);
+ if (!HAS_PCH_NOP(dev)) {
+ sde_ier = I915_READ(SDEIER);
+ I915_WRITE(SDEIER, 0);
+ POSTING_READ(SDEIER);
+ }
gt_iir = I915_READ(GTIIR);
if (gt_iir) {
@@ -745,7 +886,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
}
/* check event from PCH */
- if (de_iir & DE_PCH_EVENT_IVB) {
+ if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
u32 pch_iir = I915_READ(SDEIIR);
cpt_irq_handler(dev, pch_iir);
@@ -768,8 +909,10 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
- I915_WRITE(SDEIER, sde_ier);
- POSTING_READ(SDEIER);
+ if (!HAS_PCH_NOP(dev)) {
+ I915_WRITE(SDEIER, sde_ier);
+ POSTING_READ(SDEIER);
+ }
return ret;
}
@@ -937,6 +1080,8 @@ static void i915_error_work_func(struct work_struct *work)
for_each_ring(ring, dev_priv, i)
wake_up_all(&ring->irq_queue);
+ intel_display_handle_reset(dev);
+
wake_up_all(&dev_priv->gpu_error.reset_queue);
}
}
@@ -972,24 +1117,23 @@ static void i915_get_extra_instdone(struct drm_device *dev,
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
-i915_error_object_create(struct drm_i915_private *dev_priv,
- struct drm_i915_gem_object *src)
+i915_error_object_create_sized(struct drm_i915_private *dev_priv,
+ struct drm_i915_gem_object *src,
+ const int num_pages)
{
struct drm_i915_error_object *dst;
- int i, count;
+ int i;
u32 reloc_offset;
if (src == NULL || src->pages == NULL)
return NULL;
- count = src->base.size / PAGE_SIZE;
-
- dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
+ dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
if (dst == NULL)
return NULL;
reloc_offset = src->gtt_offset;
- for (i = 0; i < count; i++) {
+ for (i = 0; i < num_pages; i++) {
unsigned long flags;
void *d;
@@ -1039,7 +1183,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
reloc_offset += PAGE_SIZE;
}
- dst->page_count = count;
+ dst->page_count = num_pages;
dst->gtt_offset = src->gtt_offset;
return dst;
@@ -1050,6 +1194,9 @@ unwind:
kfree(dst);
return NULL;
}
+#define i915_error_object_create(dev_priv, src) \
+ i915_error_object_create_sized((dev_priv), (src), \
+ (src)->base.size>>PAGE_SHIFT)
static void
i915_error_object_free(struct drm_i915_error_object *obj)
@@ -1148,7 +1295,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
- for (i = 0; i < 16; i++)
+ for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
break;
case 5:
@@ -1256,6 +1403,26 @@ static void i915_record_ring_state(struct drm_device *dev,
error->cpu_ring_tail[ring->id] = ring->tail;
}
+
+static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
+ struct drm_i915_error_state *error,
+ struct drm_i915_error_ring *ering)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_gem_object *obj;
+
+ /* Currently render ring is the only HW context user */
+ if (ring->id != RCS || !error->ccid)
+ return;
+
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+ if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
+ ering->ctx = i915_error_object_create_sized(dev_priv,
+ obj, 1);
+ }
+ }
+}
+
static void i915_gem_record_rings(struct drm_device *dev,
struct drm_i915_error_state *error)
{
@@ -1273,6 +1440,9 @@ static void i915_gem_record_rings(struct drm_device *dev,
error->ring[i].ringbuffer =
i915_error_object_create(dev_priv, ring->obj);
+
+ i915_gem_record_active_context(ring, error, &error->ring[i]);
+
count = 0;
list_for_each_entry(request, &ring->request_list, list)
count++;
@@ -1328,14 +1498,15 @@ static void i915_capture_error_state(struct drm_device *dev)
return;
}
- DRM_INFO("capturing error event; look for more information in"
+ DRM_INFO("capturing error event; look for more information in "
"/sys/kernel/debug/dri/%d/i915_error_state\n",
dev->primary->index);
kref_init(&error->ref);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
- error->ccid = I915_READ(CCID);
+ if (HAS_HW_CONTEXTS(dev))
+ error->ccid = I915_READ(CCID);
if (HAS_PCH_SPLIT(dev))
error->ier = I915_READ(DEIER) | I915_READ(GTIER);
@@ -1356,8 +1527,9 @@ static void i915_capture_error_state(struct drm_device *dev)
else if (INTEL_INFO(dev)->gen == 6)
error->forcewake = I915_READ(FORCEWAKE);
- for_each_pipe(pipe)
- error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
+ if (!HAS_PCH_SPLIT(dev))
+ for_each_pipe(pipe)
+ error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
if (INTEL_INFO(dev)->gen >= 6) {
error->error = I915_READ(ERROR_GEN6);
@@ -1567,7 +1739,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
}
-static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
+static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
@@ -1777,6 +1949,37 @@ static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
return false;
}
+static bool semaphore_passed(struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ u32 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
+ struct intel_ring_buffer *signaller;
+ u32 cmd, ipehr, acthd_min;
+
+ ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
+ if ((ipehr & ~(0x3 << 16)) !=
+ (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
+ return false;
+
+ /* ACTHD is likely pointing to the dword after the actual command,
+ * so scan backwards until we find the MBOX.
+ */
+ acthd_min = max((int)acthd - 3 * 4, 0);
+ do {
+ cmd = ioread32(ring->virtual_start + acthd);
+ if (cmd == ipehr)
+ break;
+
+ acthd -= 4;
+ if (acthd < acthd_min)
+ return false;
+ } while (1);
+
+ signaller = &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
+ return i915_seqno_passed(signaller->get_seqno(signaller, false),
+ ioread32(ring->virtual_start+acthd+4)+1);
+}
+
static bool kick_ring(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
@@ -1788,6 +1991,15 @@ static bool kick_ring(struct intel_ring_buffer *ring)
I915_WRITE_CTL(ring, tmp);
return true;
}
+
+ if (INTEL_INFO(dev)->gen >= 6 &&
+ tmp & RING_WAIT_SEMAPHORE &&
+ semaphore_passed(ring)) {
+ DRM_ERROR("Kicking stuck semaphore on %s\n",
+ ring->name);
+ I915_WRITE_CTL(ring, tmp);
+ return true;
+ }
return false;
}
@@ -1901,9 +2113,18 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
I915_WRITE(GTIER, 0x0);
POSTING_READ(GTIER);
+ if (HAS_PCH_NOP(dev))
+ return;
+
/* south display irq */
I915_WRITE(SDEIMR, 0xffffffff);
- I915_WRITE(SDEIER, 0x0);
+ /*
+ * SDEIER is also touched by the interrupt handler to work around missed
+ * PCH interrupts. Hence we can't update it after the interrupt handler
+ * is enabled - instead we unconditionally enable all PCH interrupt
+ * sources here, but then only unmask them as needed with SDEIMR.
+ */
+ I915_WRITE(SDEIER, 0xffffffff);
POSTING_READ(SDEIER);
}
@@ -1939,18 +2160,34 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
POSTING_READ(VLV_IER);
}
-/*
- * Enable digital hotplug on the PCH, and configure the DP short pulse
- * duration to 2ms (which is the minimum in the Display Port spec)
- *
- * This register is the same on all known PCH chips.
- */
-
-static void ibx_enable_hotplug(struct drm_device *dev)
+static void ibx_hpd_irq_setup(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 hotplug;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *intel_encoder;
+ u32 mask = ~I915_READ(SDEIMR);
+ u32 hotplug;
+
+ if (HAS_PCH_IBX(dev)) {
+ mask &= ~SDE_HOTPLUG_MASK;
+ list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+ if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+ mask |= hpd_ibx[intel_encoder->hpd_pin];
+ } else {
+ mask &= ~SDE_HOTPLUG_MASK_CPT;
+ list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+ if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+ mask |= hpd_cpt[intel_encoder->hpd_pin];
+ }
+ I915_WRITE(SDEIMR, ~mask);
+
+ /*
+ * Enable digital hotplug on the PCH, and configure the DP short pulse
+ * duration to 2ms (which is the minimum in the Display Port spec)
+ *
+ * This register is the same on all known PCH chips.
+ */
hotplug = I915_READ(PCH_PORT_HOTPLUG);
hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
@@ -1965,20 +2202,15 @@ static void ibx_irq_postinstall(struct drm_device *dev)
u32 mask;
if (HAS_PCH_IBX(dev))
- mask = SDE_HOTPLUG_MASK |
- SDE_GMBUS |
- SDE_AUX_MASK;
+ mask = SDE_GMBUS | SDE_AUX_MASK;
else
- mask = SDE_HOTPLUG_MASK_CPT |
- SDE_GMBUS_CPT |
- SDE_AUX_MASK_CPT;
+ mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
+
+ if (HAS_PCH_NOP(dev))
+ return;
I915_WRITE(SDEIIR, I915_READ(SDEIIR));
I915_WRITE(SDEIMR, ~mask);
- I915_WRITE(SDEIER, mask);
- POSTING_READ(SDEIER);
-
- ibx_enable_hotplug(dev);
}
static int ironlake_irq_postinstall(struct drm_device *dev)
@@ -2089,9 +2321,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
- dev_priv->pipestat[0] = 0;
- dev_priv->pipestat[1] = 0;
-
/* Hack for broken MSIs on VLV */
pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
pci_read_config_word(dev->pdev, 0x98, &msid);
@@ -2135,30 +2364,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
return 0;
}
-static void valleyview_hpd_irq_setup(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
-
- /* Note HDMI and DP share bits */
- if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
- hotplug_en |= PORTB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
- hotplug_en |= PORTC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
- hotplug_en |= PORTD_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
- hotplug_en |= SDVOC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
- hotplug_en |= SDVOB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
- hotplug_en |= CRT_HOTPLUG_INT_EN;
- hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
- }
-
- I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
-}
-
static void valleyview_irq_uninstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2167,6 +2372,8 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
if (!dev_priv)
return;
+ del_timer_sync(&dev_priv->hotplug_reenable_timer);
+
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);
@@ -2188,6 +2395,8 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
if (!dev_priv)
return;
+ del_timer_sync(&dev_priv->hotplug_reenable_timer);
+
I915_WRITE(HWSTAM, 0xffffffff);
I915_WRITE(DEIMR, 0xffffffff);
@@ -2198,6 +2407,9 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
I915_WRITE(GTIER, 0x0);
I915_WRITE(GTIIR, I915_READ(GTIIR));
+ if (HAS_PCH_NOP(dev))
+ return;
+
I915_WRITE(SDEIMR, 0xffffffff);
I915_WRITE(SDEIER, 0x0);
I915_WRITE(SDEIIR, I915_READ(SDEIIR));
@@ -2221,9 +2433,6 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- dev_priv->pipestat[0] = 0;
- dev_priv->pipestat[1] = 0;
-
I915_WRITE16(EMR,
~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -2246,6 +2455,37 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
return 0;
}
+/*
+ * Returns true when a page flip has completed.
+ */
+static bool i8xx_handle_vblank(struct drm_device *dev,
+ int pipe, u16 iir)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
+
+ if (!drm_handle_vblank(dev, pipe))
+ return false;
+
+ if ((iir & flip_pending) == 0)
+ return false;
+
+ intel_prepare_page_flip(dev, pipe);
+
+ /* We detect FlipDone by looking for the change in PendingFlip from '1'
+ * to '0' on the following vblank, i.e. IIR has the Pendingflip
+ * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
+ * the flip is completed (no longer pending). Since this doesn't raise
+ * an interrupt per se, we watch for the change at vblank.
+ */
+ if (I915_READ16(ISR) & flip_pending)
+ return false;
+
+ intel_finish_page_flip(dev, pipe);
+
+ return true;
+}
+
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -2301,22 +2541,12 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
notify_ring(dev, &dev_priv->ring[RCS]);
if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
- drm_handle_vblank(dev, 0)) {
- if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
- intel_prepare_page_flip(dev, 0);
- intel_finish_page_flip(dev, 0);
- flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
- }
- }
+ i8xx_handle_vblank(dev, 0, iir))
+ flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
- drm_handle_vblank(dev, 1)) {
- if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
- intel_prepare_page_flip(dev, 1);
- intel_finish_page_flip(dev, 1);
- flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
- }
- }
+ i8xx_handle_vblank(dev, 1, iir))
+ flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
iir = new_iir;
}
@@ -2364,9 +2594,6 @@ static int i915_irq_postinstall(struct drm_device *dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask;
- dev_priv->pipestat[0] = 0;
- dev_priv->pipestat[1] = 0;
-
I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
/* Unmask the interrupts that we always want on. */
@@ -2404,33 +2631,35 @@ static int i915_irq_postinstall(struct drm_device *dev)
return 0;
}
-static void i915_hpd_irq_setup(struct drm_device *dev)
+/*
+ * Returns true when a page flip has completed.
+ */
+static bool i915_handle_vblank(struct drm_device *dev,
+ int plane, int pipe, u32 iir)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 hotplug_en;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
- if (I915_HAS_HOTPLUG(dev)) {
- hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+ if (!drm_handle_vblank(dev, pipe))
+ return false;
- if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
- hotplug_en |= PORTB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
- hotplug_en |= PORTC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
- hotplug_en |= PORTD_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
- hotplug_en |= SDVOC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
- hotplug_en |= SDVOB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
- hotplug_en |= CRT_HOTPLUG_INT_EN;
- hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
- }
+ if ((iir & flip_pending) == 0)
+ return false;
- /* Ignore TV since it's buggy */
+ intel_prepare_page_flip(dev, plane);
- I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
- }
+ /* We detect FlipDone by looking for the change in PendingFlip from '1'
+ * to '0' on the following vblank, i.e. IIR has the Pendingflip
+ * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
+ * the flip is completed (no longer pending). Since this doesn't raise
+ * an interrupt per se, we watch for the change at vblank.
+ */
+ if (I915_READ(ISR) & flip_pending)
+ return false;
+
+ intel_finish_page_flip(dev, pipe);
+
+ return true;
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
@@ -2442,10 +2671,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
u32 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
- u32 flip[2] = {
- I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
- };
int pipe, ret = IRQ_NONE;
atomic_inc(&dev_priv->irq_received);
@@ -2486,13 +2711,16 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
if ((I915_HAS_HOTPLUG(dev)) &&
(iir & I915_DISPLAY_PORT_INTERRUPT)) {
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+ u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
- if (hotplug_status & dev_priv->hotplug_supported_mask)
+ if (hotplug_trigger) {
+ if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
+ i915_hpd_irq_setup(dev);
queue_work(dev_priv->wq,
&dev_priv->hotplug_work);
-
+ }
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
POSTING_READ(PORT_HOTPLUG_STAT);
}
@@ -2507,14 +2735,10 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
int plane = pipe;
if (IS_MOBILE(dev))
plane = !plane;
+
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
- drm_handle_vblank(dev, pipe)) {
- if (iir & flip[plane]) {
- intel_prepare_page_flip(dev, plane);
- intel_finish_page_flip(dev, pipe);
- flip_mask &= ~flip[plane];
- }
- }
+ i915_handle_vblank(dev, plane, pipe, iir))
+ flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
@@ -2552,6 +2776,8 @@ static void i915_irq_uninstall(struct drm_device * dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
+ del_timer_sync(&dev_priv->hotplug_reenable_timer);
+
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -2603,13 +2829,13 @@ static int i965_irq_postinstall(struct drm_device *dev)
I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
enable_mask = ~dev_priv->irq_mask;
+ enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
enable_mask |= I915_USER_INTERRUPT;
if (IS_G4X(dev))
enable_mask |= I915_BSD_USER_INTERRUPT;
- dev_priv->pipestat[0] = 0;
- dev_priv->pipestat[1] = 0;
i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
/*
@@ -2639,45 +2865,33 @@ static int i965_irq_postinstall(struct drm_device *dev)
return 0;
}
-static void i965_hpd_irq_setup(struct drm_device *dev)
+static void i915_hpd_irq_setup(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *intel_encoder;
u32 hotplug_en;
- /* Note HDMI and DP share hotplug bits */
- hotplug_en = 0;
- if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
- hotplug_en |= PORTB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
- hotplug_en |= PORTC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
- hotplug_en |= PORTD_HOTPLUG_INT_EN;
- if (IS_G4X(dev)) {
- if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
- hotplug_en |= SDVOC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
- hotplug_en |= SDVOB_HOTPLUG_INT_EN;
- } else {
- if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
- hotplug_en |= SDVOC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
- hotplug_en |= SDVOB_HOTPLUG_INT_EN;
- }
- if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
- hotplug_en |= CRT_HOTPLUG_INT_EN;
-
+ if (I915_HAS_HOTPLUG(dev)) {
+ hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+ hotplug_en &= ~HOTPLUG_INT_EN_MASK;
+ /* Note HDMI and DP share hotplug bits */
+ /* enable bits are the same for all generations */
+ list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+ if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+ hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
/* Programming the CRT detection parameters tends
to generate a spurious hotplug event about three
seconds later. So just do it once.
- */
+ */
if (IS_G4X(dev))
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+ hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
- }
-
- /* Ignore TV since it's buggy */
- I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ /* Ignore TV since it's buggy */
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ }
}
static irqreturn_t i965_irq_handler(int irq, void *arg)
@@ -2689,6 +2903,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
unsigned long irqflags;
int irq_received;
int ret = IRQ_NONE, pipe;
+ u32 flip_mask =
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
atomic_inc(&dev_priv->irq_received);
@@ -2697,7 +2914,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
for (;;) {
bool blc_event = false;
- irq_received = iir != 0;
+ irq_received = (iir & ~flip_mask) != 0;
/* Can't rely on pipestat interrupt bit in iir as it might
* have been cleared after the pipestat interrupt was received.
@@ -2733,18 +2950,24 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
/* Consume port. Then clear IIR or we'll miss events */
if (iir & I915_DISPLAY_PORT_INTERRUPT) {
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+ u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
+ HOTPLUG_INT_STATUS_G4X :
+ HOTPLUG_INT_STATUS_I965);
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
- if (hotplug_status & dev_priv->hotplug_supported_mask)
+ if (hotplug_trigger) {
+ if (hotplug_irq_storm_detect(dev, hotplug_trigger,
+ IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i965))
+ i915_hpd_irq_setup(dev);
queue_work(dev_priv->wq,
&dev_priv->hotplug_work);
-
+ }
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
}
- I915_WRITE(IIR, iir);
+ I915_WRITE(IIR, iir & ~flip_mask);
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
@@ -2752,18 +2975,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (iir & I915_BSD_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS]);
- if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
- intel_prepare_page_flip(dev, 0);
-
- if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
- intel_prepare_page_flip(dev, 1);
-
for_each_pipe(pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
- drm_handle_vblank(dev, pipe)) {
- i915_pageflip_stall_check(dev, pipe);
- intel_finish_page_flip(dev, pipe);
- }
+ i915_handle_vblank(dev, pipe, pipe, iir))
+ flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
@@ -2807,6 +3022,8 @@ static void i965_irq_uninstall(struct drm_device * dev)
if (!dev_priv)
return;
+ del_timer_sync(&dev_priv->hotplug_reenable_timer);
+
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -2822,6 +3039,41 @@ static void i965_irq_uninstall(struct drm_device * dev)
I915_WRITE(IIR, I915_READ(IIR));
}
+static void i915_reenable_hotplug_timer_func(unsigned long data)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ unsigned long irqflags;
+ int i;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
+ struct drm_connector *connector;
+
+ if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
+ continue;
+
+ dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ if (intel_connector->encoder->hpd_pin == i) {
+ if (connector->polled != intel_connector->polled)
+ DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
+ drm_get_connector_name(connector));
+ connector->polled = intel_connector->polled;
+ if (!connector->polled)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ }
+ }
+ }
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
void intel_irq_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2834,6 +3086,8 @@ void intel_irq_init(struct drm_device *dev)
setup_timer(&dev_priv->gpu_error.hangcheck_timer,
i915_hangcheck_elapsed,
(unsigned long) dev);
+ setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
+ (unsigned long) dev_priv);
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
@@ -2857,7 +3111,7 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->irq_uninstall = valleyview_irq_uninstall;
dev->driver->enable_vblank = valleyview_enable_vblank;
dev->driver->disable_vblank = valleyview_disable_vblank;
- dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup;
+ dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
/* Share pre & uninstall handlers with ILK/SNB */
dev->driver->irq_handler = ivybridge_irq_handler;
@@ -2866,6 +3120,7 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->irq_uninstall = ironlake_irq_uninstall;
dev->driver->enable_vblank = ivybridge_enable_vblank;
dev->driver->disable_vblank = ivybridge_disable_vblank;
+ dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
} else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2873,6 +3128,7 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->irq_uninstall = ironlake_irq_uninstall;
dev->driver->enable_vblank = ironlake_enable_vblank;
dev->driver->disable_vblank = ironlake_disable_vblank;
+ dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
} else {
if (INTEL_INFO(dev)->gen == 2) {
dev->driver->irq_preinstall = i8xx_irq_preinstall;
@@ -2890,7 +3146,7 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->irq_postinstall = i965_irq_postinstall;
dev->driver->irq_uninstall = i965_irq_uninstall;
dev->driver->irq_handler = i965_irq_handler;
- dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup;
+ dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
}
dev->driver->enable_vblank = i915_enable_vblank;
dev->driver->disable_vblank = i915_disable_vblank;
@@ -2900,7 +3156,20 @@ void intel_irq_init(struct drm_device *dev)
void intel_hpd_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+ int i;
+ for (i = 1; i < HPD_NUM_PINS; i++) {
+ dev_priv->hpd_stats[i].hpd_cnt = 0;
+ dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
+ }
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ connector->polled = intel_connector->polled;
+ if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ }
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
}
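
The hotplug rework in i915_irq.c follows one flow: hotplug_irq_storm_detect() counts interrupts per HPD pin, and a pin that fires more than HPD_STORM_THRESHOLD (5) times within HPD_STORM_DETECT_PERIOD (1000 ms) is marked HPD_MARK_DISABLED and masked by the platform's hpd_irq_setup hook; i915_hotplug_work_func() then switches the affected connector to polling and arms hotplug_reenable_timer, which restores interrupt-driven detection after I915_REENABLE_HOTPLUG_DELAY (2 minutes). A condensed sketch of the per-pin accounting (constants and field names taken from the patch; this is a restatement, not new driver code):

/* One HPD interrupt just fired for pin i. */
if (!time_in_range(jiffies, stats[i].hpd_last_jiffies,
		   stats[i].hpd_last_jiffies + msecs_to_jiffies(1000))) {
	stats[i].hpd_last_jiffies = jiffies;	/* window expired: restart count */
	stats[i].hpd_cnt = 0;
} else if (stats[i].hpd_cnt > 5) {
	stats[i].hpd_mark = HPD_MARK_DISABLED;	/* storm: fall back to polling */
} else {
	stats[i].hpd_cnt++;
}
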
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 848992f..83f9c26 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -91,6 +91,7 @@
#define GRDOM_FULL (0<<2)
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
+#define GRDOM_MASK (3<<2)
#define GRDOM_RESET_ENABLE (1<<0)
#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */
@@ -121,10 +122,17 @@
#define GAM_ECOCHK 0x4090
#define ECOCHK_SNB_BIT (1<<10)
+#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6)
#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
+#define ECOCHK_PPGTT_GFDT_IVB (0x1<<4)
+#define ECOCHK_PPGTT_LLC_IVB (0x1<<3)
+#define ECOCHK_PPGTT_UC_HSW (0x1<<3)
+#define ECOCHK_PPGTT_WT_HSW (0x2<<3)
+#define ECOCHK_PPGTT_WB_HSW (0x3<<3)
#define GAC_ECO_BITS 0x14090
+#define ECOBITS_SNB_BIT (1<<13)
#define ECOBITS_PPGTT_CACHE64B (3<<8)
#define ECOBITS_PPGTT_CACHE4B (0<<8)
@@ -422,6 +430,7 @@
#define FENCE_REG_SANDYBRIDGE_0 0x100000
#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
+#define GEN7_FENCE_MAX_PITCH_VAL 0x0800
/* control register for cpu gtt access */
#define TILECTL 0x101000
@@ -522,6 +531,9 @@
#define GEN7_ERR_INT 0x44040
#define ERR_INT_MMIO_UNCLAIMED (1<<13)
+#define FPGA_DBG 0x42300
+#define FPGA_DBG_RM_NOCLAIM (1<<31)
+
#define DERRMR 0x44050
/* GM45+ chicken bits -- debug workaround bits that may be required
@@ -591,6 +603,7 @@
#define I915_USER_INTERRUPT (1<<1)
#define I915_ASLE_INTERRUPT (1<<0)
#define I915_BSD_USER_INTERRUPT (1<<25)
+#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
#define EIR 0x020b0
#define EMR 0x020b4
#define ESR 0x020b8
@@ -1197,6 +1210,9 @@
#define MCHBAR_MIRROR_BASE_SNB 0x140000
+/* Memory controller frequency in MCHBAR for Haswell (possibly SNB+) */
+#define DCLK 0x5e04
+
/** 915-945 and GM965 MCH register controlling DRAM channel access */
#define DCC 0x10200
#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
@@ -1637,6 +1653,12 @@
#define SDVOC_HOTPLUG_INT_EN (1 << 25)
#define TV_HOTPLUG_INT_EN (1 << 18)
#define CRT_HOTPLUG_INT_EN (1 << 9)
+#define HOTPLUG_INT_EN_MASK (PORTB_HOTPLUG_INT_EN | \
+ PORTC_HOTPLUG_INT_EN | \
+ PORTD_HOTPLUG_INT_EN | \
+ SDVOC_HOTPLUG_INT_EN | \
+ SDVOB_HOTPLUG_INT_EN | \
+ CRT_HOTPLUG_INT_EN)
#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
/* must use period 64 on GM45 according to docs */
@@ -1675,43 +1697,84 @@
#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2)
#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
#define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6)
-
-/* SDVO port control */
-#define SDVOB 0x61140
-#define SDVOC 0x61160
-#define SDVO_ENABLE (1 << 31)
-#define SDVO_PIPE_B_SELECT (1 << 30)
-#define SDVO_STALL_SELECT (1 << 29)
-#define SDVO_INTERRUPT_ENABLE (1 << 26)
+#define HOTPLUG_INT_STATUS_G4X (CRT_HOTPLUG_INT_STATUS | \
+ SDVOB_HOTPLUG_INT_STATUS_G4X | \
+ SDVOC_HOTPLUG_INT_STATUS_G4X | \
+ PORTB_HOTPLUG_INT_STATUS | \
+ PORTC_HOTPLUG_INT_STATUS | \
+ PORTD_HOTPLUG_INT_STATUS)
+
+#define HOTPLUG_INT_STATUS_I965 (CRT_HOTPLUG_INT_STATUS | \
+ SDVOB_HOTPLUG_INT_STATUS_I965 | \
+ SDVOC_HOTPLUG_INT_STATUS_I965 | \
+ PORTB_HOTPLUG_INT_STATUS | \
+ PORTC_HOTPLUG_INT_STATUS | \
+ PORTD_HOTPLUG_INT_STATUS)
+
+#define HOTPLUG_INT_STATUS_I915 (CRT_HOTPLUG_INT_STATUS | \
+ SDVOB_HOTPLUG_INT_STATUS_I915 | \
+ SDVOC_HOTPLUG_INT_STATUS_I915 | \
+ PORTB_HOTPLUG_INT_STATUS | \
+ PORTC_HOTPLUG_INT_STATUS | \
+ PORTD_HOTPLUG_INT_STATUS)
+
+/* SDVO and HDMI port control.
+ * The same register may be used for SDVO or HDMI */
+#define GEN3_SDVOB 0x61140
+#define GEN3_SDVOC 0x61160
+#define GEN4_HDMIB GEN3_SDVOB
+#define GEN4_HDMIC GEN3_SDVOC
+#define PCH_SDVOB 0xe1140
+#define PCH_HDMIB PCH_SDVOB
+#define PCH_HDMIC 0xe1150
+#define PCH_HDMID 0xe1160
+
+/* Gen 3 SDVO bits: */
+#define SDVO_ENABLE (1 << 31)
+#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
+#define SDVO_PIPE_SEL_MASK (1 << 30)
+#define SDVO_PIPE_B_SELECT (1 << 30)
+#define SDVO_STALL_SELECT (1 << 29)
+#define SDVO_INTERRUPT_ENABLE (1 << 26)
/**
* 915G/GM SDVO pixel multiplier.
- *
* Programmed value is multiplier - 1, up to 5x.
- *
* \sa DPLL_MD_UDI_MULTIPLIER_MASK
*/
-#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
+#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
#define SDVO_PORT_MULTIPLY_SHIFT 23
-#define SDVO_PHASE_SELECT_MASK (15 << 19)
-#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
-#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
-#define SDVOC_GANG_MODE (1 << 16)
-#define SDVO_ENCODING_SDVO (0x0 << 10)
-#define SDVO_ENCODING_HDMI (0x2 << 10)
-/** Requird for HDMI operation */
-#define SDVO_NULL_PACKETS_DURING_VSYNC (1 << 9)
-#define SDVO_COLOR_RANGE_16_235 (1 << 8)
-#define SDVO_BORDER_ENABLE (1 << 7)
-#define SDVO_AUDIO_ENABLE (1 << 6)
-/** New with 965, default is to be set */
-#define SDVO_VSYNC_ACTIVE_HIGH (1 << 4)
-/** New with 965, default is to be set */
-#define SDVO_HSYNC_ACTIVE_HIGH (1 << 3)
-#define SDVOB_PCIE_CONCURRENCY (1 << 3)
-#define SDVO_DETECTED (1 << 2)
+#define SDVO_PHASE_SELECT_MASK (15 << 19)
+#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
+#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
+#define SDVOC_GANG_MODE (1 << 16) /* Port C only */
+#define SDVO_BORDER_ENABLE (1 << 7) /* SDVO only */
+#define SDVOB_PCIE_CONCURRENCY (1 << 3) /* Port B only */
+#define SDVO_DETECTED (1 << 2)
/* Bits to be preserved when writing */
-#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26))
-#define SDVOC_PRESERVE_MASK ((1 << 17) | (1 << 26))
+#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | \
+ SDVO_INTERRUPT_ENABLE)
+#define SDVOC_PRESERVE_MASK ((1 << 17) | SDVO_INTERRUPT_ENABLE)
+
+/* Gen 4 SDVO/HDMI bits: */
+#define SDVO_COLOR_FORMAT_8bpc (0 << 26)
+#define SDVO_ENCODING_SDVO (0 << 10)
+#define SDVO_ENCODING_HDMI (2 << 10)
+#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */
+#define HDMI_MODE_SELECT_DVI (0 << 9) /* HDMI only */
+#define HDMI_COLOR_RANGE_16_235 (1 << 8) /* HDMI only */
+#define SDVO_AUDIO_ENABLE (1 << 6)
+/* VSYNC/HSYNC bits new with 965, default is to be set */
+#define SDVO_VSYNC_ACTIVE_HIGH (1 << 4)
+#define SDVO_HSYNC_ACTIVE_HIGH (1 << 3)
+
+/* Gen 5 (IBX) SDVO/HDMI bits: */
+#define HDMI_COLOR_FORMAT_12bpc (3 << 26) /* HDMI only */
+#define SDVOB_HOTPLUG_ENABLE (1 << 23) /* SDVO only */
+
+/* Gen 6 (CPT) SDVO/HDMI bits: */
+#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
+#define SDVO_PIPE_SEL_MASK_CPT (3 << 29)
+
/* DVO port control */
#define DVOA 0x61120
@@ -1898,7 +1961,7 @@
#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)
/* Backlight control */
-#define BLC_PWM_CTL2 0x61250 /* 965+ only */
+#define BLC_PWM_CTL2 (dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */
#define BLM_PWM_ENABLE (1 << 31)
#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
#define BLM_PIPE_SELECT (1 << 29)
@@ -1917,7 +1980,7 @@
#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
#define BLM_PHASE_IN_INCR_SHIFT (0)
#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
-#define BLC_PWM_CTL 0x61254
+#define BLC_PWM_CTL (dev_priv->info->display_mmio_offset + 0x61254)
/*
* This is the most significant 15 bits of the number of backlight cycles in a
* complete cycle of the modulated backlight control.
@@ -1939,7 +2002,7 @@
#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
-#define BLC_HIST_CTL 0x61260
+#define BLC_HIST_CTL (dev_priv->info->display_mmio_offset + 0x61260)
/* New registers for PCH-split platforms. Safe where new bits show up, the
 * register layout matches with gen4 BLC_PWM_CTL[12]. */
@@ -2589,14 +2652,14 @@
#define _PIPEB_GMCH_DATA_M 0x71050
/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
-#define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25)
-#define PIPE_GMCH_DATA_M_TU_SIZE_SHIFT 25
+#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
+#define TU_SIZE_MASK (0x3f << 25)
-#define PIPE_GMCH_DATA_M_MASK (0xffffff)
+#define DATA_LINK_M_N_MASK (0xffffff)
+#define DATA_LINK_N_MAX (0x800000)
#define _PIPEA_GMCH_DATA_N 0x70054
#define _PIPEB_GMCH_DATA_N 0x71054
-#define PIPE_GMCH_DATA_N_MASK (0xffffff)
/*
* Computing Link M and N values for the Display Port link
@@ -2611,11 +2674,9 @@
#define _PIPEA_DP_LINK_M 0x70060
#define _PIPEB_DP_LINK_M 0x71060
-#define PIPEA_DP_LINK_M_MASK (0xffffff)
#define _PIPEA_DP_LINK_N 0x70064
#define _PIPEB_DP_LINK_N 0x71064
-#define PIPEA_DP_LINK_N_MASK (0xffffff)
#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M)
#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N)
@@ -2776,6 +2837,8 @@
#define DSPFW_HPLL_CURSOR_SHIFT 16
#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
#define DSPFW_HPLL_SR_MASK (0x1ff)
+#define DSPFW4 (dev_priv->info->display_mmio_offset + 0x70070)
+#define DSPFW7 (dev_priv->info->display_mmio_offset + 0x7007c)
/* drain latency register values*/
#define DRAIN_LATENCY_PRECISION_32 32
@@ -3233,6 +3296,63 @@
#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
+#define _SPACNTR 0x72180
+#define SP_ENABLE (1<<31)
+#define SP_GEAMMA_ENABLE (1<<30)
+#define SP_PIXFORMAT_MASK (0xf<<26)
+#define SP_FORMAT_YUV422 (0<<26)
+#define SP_FORMAT_BGR565 (5<<26)
+#define SP_FORMAT_BGRX8888 (6<<26)
+#define SP_FORMAT_BGRA8888 (7<<26)
+#define SP_FORMAT_RGBX1010102 (8<<26)
+#define SP_FORMAT_RGBA1010102 (9<<26)
+#define SP_FORMAT_RGBX8888 (0xe<<26)
+#define SP_FORMAT_RGBA8888 (0xf<<26)
+#define SP_SOURCE_KEY (1<<22)
+#define SP_YUV_BYTE_ORDER_MASK (3<<16)
+#define SP_YUV_ORDER_YUYV (0<<16)
+#define SP_YUV_ORDER_UYVY (1<<16)
+#define SP_YUV_ORDER_YVYU (2<<16)
+#define SP_YUV_ORDER_VYUY (3<<16)
+#define SP_TILED (1<<10)
+#define _SPALINOFF 0x72184
+#define _SPASTRIDE 0x72188
+#define _SPAPOS 0x7218c
+#define _SPASIZE 0x72190
+#define _SPAKEYMINVAL 0x72194
+#define _SPAKEYMSK 0x72198
+#define _SPASURF 0x7219c
+#define _SPAKEYMAXVAL 0x721a0
+#define _SPATILEOFF 0x721a4
+#define _SPACONSTALPHA 0x721a8
+#define _SPAGAMC 0x721f4
+
+#define _SPBCNTR 0x72280
+#define _SPBLINOFF 0x72284
+#define _SPBSTRIDE 0x72288
+#define _SPBPOS 0x7228c
+#define _SPBSIZE 0x72290
+#define _SPBKEYMINVAL 0x72294
+#define _SPBKEYMSK 0x72298
+#define _SPBSURF 0x7229c
+#define _SPBKEYMAXVAL 0x722a0
+#define _SPBTILEOFF 0x722a4
+#define _SPBCONSTALPHA 0x722a8
+#define _SPBGAMC 0x722f4
+
+#define SPCNTR(pipe, plane) _PIPE(pipe * 2 + plane, _SPACNTR, _SPBCNTR)
+#define SPLINOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPALINOFF, _SPBLINOFF)
+#define SPSTRIDE(pipe, plane) _PIPE(pipe * 2 + plane, _SPASTRIDE, _SPBSTRIDE)
+#define SPPOS(pipe, plane) _PIPE(pipe * 2 + plane, _SPAPOS, _SPBPOS)
+#define SPSIZE(pipe, plane) _PIPE(pipe * 2 + plane, _SPASIZE, _SPBSIZE)
+#define SPKEYMINVAL(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMINVAL, _SPBKEYMINVAL)
+#define SPKEYMSK(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMSK, _SPBKEYMSK)
+#define SPSURF(pipe, plane) _PIPE(pipe * 2 + plane, _SPASURF, _SPBSURF)
+#define SPKEYMAXVAL(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMAXVAL, _SPBKEYMAXVAL)
+#define SPTILEOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPATILEOFF, _SPBTILEOFF)
+#define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA)
+#define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC)
+
/* VBIOS regs */
#define VGACNTRL 0x71400
# define VGA_DISP_DISABLE (1 << 31)
@@ -3282,8 +3402,6 @@
#define _PIPEA_DATA_M1 (dev_priv->info->display_mmio_offset + 0x60030)
-#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
-#define TU_SIZE_MASK 0x7e000000
#define PIPE_DATA_M1_OFFSET 0
#define _PIPEA_DATA_N1 (dev_priv->info->display_mmio_offset + 0x60034)
#define PIPE_DATA_N1_OFFSET 0
@@ -3456,6 +3574,9 @@
#define DISP_ARB_CTL 0x45000
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
#define DISP_FBC_WM_DIS (1<<15)
+#define GEN7_MSG_CTL 0x45010
+#define WAIT_FOR_PCH_RESET_ACK (1<<1)
+#define WAIT_FOR_PCH_FLR_ACK (1<<0)
/* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
@@ -3508,7 +3629,11 @@
#define SDE_PORTC_HOTPLUG (1 << 9)
#define SDE_PORTB_HOTPLUG (1 << 8)
#define SDE_SDVOB_HOTPLUG (1 << 6)
-#define SDE_HOTPLUG_MASK (0xf << 8)
+#define SDE_HOTPLUG_MASK (SDE_CRT_HOTPLUG | \
+ SDE_SDVOB_HOTPLUG | \
+ SDE_PORTB_HOTPLUG | \
+ SDE_PORTC_HOTPLUG | \
+ SDE_PORTD_HOTPLUG)
#define SDE_TRANSB_CRC_DONE (1 << 5)
#define SDE_TRANSB_CRC_ERR (1 << 4)
#define SDE_TRANSB_FIFO_UNDER (1 << 3)
@@ -3531,7 +3656,9 @@
#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
#define SDE_CRT_HOTPLUG_CPT (1 << 19)
+#define SDE_SDVOB_HOTPLUG_CPT (1 << 18)
#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \
+ SDE_SDVOB_HOTPLUG_CPT | \
SDE_PORTD_HOTPLUG_CPT | \
SDE_PORTC_HOTPLUG_CPT | \
SDE_PORTB_HOTPLUG_CPT)
@@ -3754,14 +3881,16 @@
#define HSW_VIDEO_DIP_VSC_ECC_B 0x61344
#define HSW_VIDEO_DIP_GCP_B 0x61210
-#define HSW_TVIDEO_DIP_CTL(pipe) \
- _PIPE(pipe, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
-#define HSW_TVIDEO_DIP_AVI_DATA(pipe) \
- _PIPE(pipe, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
-#define HSW_TVIDEO_DIP_SPD_DATA(pipe) \
- _PIPE(pipe, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
-#define HSW_TVIDEO_DIP_GCP(pipe) \
- _PIPE(pipe, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
+#define HSW_TVIDEO_DIP_CTL(trans) \
+ _TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
+#define HSW_TVIDEO_DIP_AVI_DATA(trans) \
+ _TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
+#define HSW_TVIDEO_DIP_SPD_DATA(trans) \
+ _TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
+#define HSW_TVIDEO_DIP_GCP(trans) \
+ _TRANSCODER(trans, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
+#define HSW_TVIDEO_DIP_VSC_DATA(trans) \
+ _TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B)
#define _TRANS_HTOTAL_B 0xe1000
#define _TRANS_HBLANK_B 0xe1004
@@ -3826,8 +3955,11 @@
#define _TRANSA_CHICKEN2 0xf0064
#define _TRANSB_CHICKEN2 0xf1064
#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
-#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
-
+#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
+#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1<<29)
+#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3<<27)
+#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1<<26)
+#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1<<25)
#define SOUTH_CHICKEN1 0xc2000
#define FDIA_PHASE_SYNC_SHIFT_OVR 19
@@ -3976,34 +4108,6 @@
#define FDI_PLL_CTL_1 0xfe000
#define FDI_PLL_CTL_2 0xfe004
-/* or SDVOB */
-#define HDMIB 0xe1140
-#define PORT_ENABLE (1 << 31)
-#define TRANSCODER(pipe) ((pipe) << 30)
-#define TRANSCODER_CPT(pipe) ((pipe) << 29)
-#define TRANSCODER_MASK (1 << 30)
-#define TRANSCODER_MASK_CPT (3 << 29)
-#define COLOR_FORMAT_8bpc (0)
-#define COLOR_FORMAT_12bpc (3 << 26)
-#define SDVOB_HOTPLUG_ENABLE (1 << 23)
-#define SDVO_ENCODING (0)
-#define TMDS_ENCODING (2 << 10)
-#define NULL_PACKET_VSYNC_ENABLE (1 << 9)
-/* CPT */
-#define HDMI_MODE_SELECT (1 << 9)
-#define DVI_MODE_SELECT (0)
-#define SDVOB_BORDER_ENABLE (1 << 7)
-#define AUDIO_ENABLE (1 << 6)
-#define VSYNC_ACTIVE_HIGH (1 << 4)
-#define HSYNC_ACTIVE_HIGH (1 << 3)
-#define PORT_DETECTED (1 << 2)
-
-/* PCH SDVOB multiplex with HDMIB */
-#define PCH_SDVOB HDMIB
-
-#define HDMIC 0xe1150
-#define HDMID 0xe1160
-
#define PCH_LVDS 0xe1180
#define LVDS_DETECTED (1 << 1)
@@ -4020,6 +4124,15 @@
#define PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c)
#define PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310)
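+/* VLV has a panel power sequencer per pipe; pick the instance by pipe. */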
+#define VLV_PIPE_PP_STATUS(pipe) _PIPE(pipe, PIPEA_PP_STATUS, PIPEB_PP_STATUS)
+#define VLV_PIPE_PP_CONTROL(pipe) _PIPE(pipe, PIPEA_PP_CONTROL, PIPEB_PP_CONTROL)
+#define VLV_PIPE_PP_ON_DELAYS(pipe) \
+ _PIPE(pipe, PIPEA_PP_ON_DELAYS, PIPEB_PP_ON_DELAYS)
+#define VLV_PIPE_PP_OFF_DELAYS(pipe) \
+ _PIPE(pipe, PIPEA_PP_OFF_DELAYS, PIPEB_PP_OFF_DELAYS)
+#define VLV_PIPE_PP_DIVISOR(pipe) \
+ _PIPE(pipe, PIPEA_PP_DIVISOR, PIPEB_PP_DIVISOR)
+
#define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204
#define PANEL_UNLOCK_REGS (0xabcd << 16)
@@ -4149,8 +4262,12 @@
#define FORCEWAKE 0xA18C
#define FORCEWAKE_VLV 0x1300b0
#define FORCEWAKE_ACK_VLV 0x1300b4
+#define FORCEWAKE_MEDIA_VLV 0x1300b8
+#define FORCEWAKE_ACK_MEDIA_VLV 0x1300bc
#define FORCEWAKE_ACK_HSW 0x130044
#define FORCEWAKE_ACK 0x130090
+#define VLV_GTLC_WAKE_CTRL 0x130090
+#define VLV_GTLC_PW_STATUS 0x130094
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_KERNEL 0x1
#define FORCEWAKE_USER 0x2
@@ -4184,6 +4301,7 @@
#define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
+#define HSW_FREQUENCY(x) ((x)<<24)
#define GEN6_OFFSET(x) ((x)<<19)
#define GEN6_AGGRESSIVE_TURBO (0<<15)
#define GEN6_RC_VIDEO_FREQ 0xA00C
@@ -4274,6 +4392,21 @@
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
+#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
+
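+/* VLV IOSF sideband access: the doorbell request encodes the target port,
+ * opcode and byte enables, with the address and data in the registers below;
+ * IOSF_SB_BUSY flags a transaction still in flight. */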
+#define VLV_IOSF_DOORBELL_REQ 0x182100
+#define IOSF_DEVFN_SHIFT 24
+#define IOSF_OPCODE_SHIFT 16
+#define IOSF_PORT_SHIFT 8
+#define IOSF_BYTE_ENABLES_SHIFT 4
+#define IOSF_BAR_SHIFT 1
+#define IOSF_SB_BUSY (1<<0)
+#define IOSF_PORT_PUNIT 0x4
+#define VLV_IOSF_DATA 0x182104
+#define VLV_IOSF_ADDR 0x182108
+
+#define PUNIT_OPCODE_REG_READ 6
+#define PUNIT_OPCODE_REG_WRITE 7
#define GEN6_GT_CORE_STATUS 0x138060
#define GEN6_CORE_CPD_STATE_MASK (7<<4)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 2135f21..41f0fde 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -209,7 +209,8 @@ static void i915_save_display(struct drm_device *dev)
dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
- dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
} else {
dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
@@ -255,6 +256,7 @@ static void i915_save_display(struct drm_device *dev)
static void i915_restore_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 mask = 0xffffffff;
/* Display arbitration */
if (INTEL_INFO(dev)->gen <= 4)
@@ -267,10 +269,13 @@ static void i915_restore_display(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS);
- } else if (IS_MOBILE(dev) && !IS_I830(dev))
- I915_WRITE(LVDS, dev_priv->regfile.saveLVDS);
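+ /* Under KMS the modeset code owns the LVDS port enable bit, so mask it
+ * out of the restored value instead of re-enabling the panel here. */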
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ mask = ~LVDS_PORT_EN;
+
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask);
+ else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
+ I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask);
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 9462081..d5e1890 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -49,7 +49,7 @@ static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
- return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev));
+ return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}
static ssize_t
@@ -57,7 +57,7 @@ show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
- return snprintf(buf, PAGE_SIZE, "%u", rc6_residency);
+ return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}
static ssize_t
@@ -65,7 +65,7 @@ show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
- return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency);
+ return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}
static ssize_t
@@ -73,7 +73,7 @@ show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
- return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency);
+ return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}
static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
@@ -215,7 +215,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
- return snprintf(buf, PAGE_SIZE, "%d", ret);
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -229,7 +229,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
- return snprintf(buf, PAGE_SIZE, "%d", ret);
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
@@ -239,7 +239,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 val, rp_state_cap, hw_max, hw_min;
+ u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
ssize_t ret;
ret = kstrtou32(buf, 0, &val);
@@ -251,7 +251,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
mutex_lock(&dev_priv->rps.hw_lock);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- hw_max = (rp_state_cap & 0xff);
+ hw_max = dev_priv->rps.hw_max;
+ non_oc_max = (rp_state_cap & 0xff);
hw_min = ((rp_state_cap & 0xff0000) >> 16);
if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
@@ -259,6 +260,10 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
return -EINVAL;
}
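+ /* Anything above the RP0 limit from RP_STATE_CAP is an overclock request. */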
+ if (val > non_oc_max)
+ DRM_DEBUG("User requested overclocking to %d\n",
+ val * GT_FREQUENCY_MULTIPLIER);
+
if (dev_priv->rps.cur_delay > val)
gen6_set_rps(dev_priv->dev, val);
@@ -280,7 +285,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
- return snprintf(buf, PAGE_SIZE, "%d", ret);
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
static ssize_t gt_min_freq_mhz_store(struct device *kdev,
@@ -302,7 +307,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
mutex_lock(&dev_priv->rps.hw_lock);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- hw_max = (rp_state_cap & 0xff);
+ hw_max = dev_priv->rps.hw_max;
hw_min = ((rp_state_cap & 0xff0000) >> 16);
if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
@@ -355,7 +360,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
} else {
BUG();
}
- return snprintf(buf, PAGE_SIZE, "%d", val);
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
static const struct attribute *gen6_attrs[] = {
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 55ffba1..95070b2 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -351,12 +351,14 @@ parse_general_features(struct drm_i915_private *dev_priv,
dev_priv->lvds_ssc_freq =
intel_bios_ssc_frequency(dev, general->ssc_freq);
dev_priv->display_clock_mode = general->display_clock_mode;
- DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d\n",
+ dev_priv->fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+ DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
dev_priv->int_tv_support,
dev_priv->int_crt_support,
dev_priv->lvds_use_ssc,
dev_priv->lvds_ssc_freq,
- dev_priv->display_clock_mode);
+ dev_priv->display_clock_mode,
+ dev_priv->fdi_rx_polarity_inverted);
}
}
@@ -692,6 +694,9 @@ intel_parse_bios(struct drm_device *dev)
struct bdb_header *bdb = NULL;
u8 __iomem *bios = NULL;
+ if (HAS_PCH_NOP(dev))
+ return -ENODEV;
+
init_vbt_defaults(dev_priv);
/* XXX Should this validation be moved to intel_opregion.c? */
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 36e57f9..e088d6f 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -127,7 +127,9 @@ struct bdb_general_features {
/* bits 3 */
u8 disable_smooth_vision:1;
u8 single_dvi:1;
- u8 rsvd9:6; /* finish byte */
+ u8 rsvd9:1;
+ u8 fdi_rx_polarity_inverted:1;
+ u8 rsvd10:4; /* finish byte */
/* bits 4 */
u8 legacy_monitor_detect;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 1ce45a0..58b4a53 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -199,10 +199,14 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static bool intel_crt_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool intel_crt_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
{
+ struct drm_device *dev = encoder->base.dev;
+
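+ /* On PCH-split platforms the CRT DAC lives on the PCH, so the pipe
+ * always needs the PCH/FDI path for analog output. */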
+ if (HAS_PCH_SPLIT(dev))
+ pipe_config->has_pch_encoder = true;
+
return true;
}
@@ -676,7 +680,6 @@ static void intel_crt_reset(struct drm_connector *connector)
*/
static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
- .mode_fixup = intel_crt_mode_fixup,
.mode_set = intel_crt_mode_set,
};
@@ -768,8 +771,11 @@ void intel_crt_init(struct drm_device *dev)
else
crt->adpa_reg = ADPA;
+ crt->base.compute_config = intel_crt_compute_config;
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
+ if (I915_HAS_HOTPLUG(dev))
+ crt->base.hpd_pin = HPD_CRT;
if (HAS_DDI(dev))
crt->base.get_hw_state = intel_ddi_get_hw_state;
else
@@ -781,18 +787,14 @@ void intel_crt_init(struct drm_device *dev)
drm_sysfs_connector_add(connector);
- if (I915_HAS_HOTPLUG(dev))
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- else
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ if (!I915_HAS_HOTPLUG(dev))
+ intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
/*
* Configure the automatic hotplug detection stuff
*/
crt->force_hotplug_required = 0;
- dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
-
/*
* TODO: find a proper way to discover whether we need to set the
* polarity and link reversal bits or not, instead of relying on the
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 8d0bac3..26a0a57 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -898,6 +898,9 @@ bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
plls->spll_refcount++;
reg = SPLL_CTL;
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
+ } else {
+ DRM_ERROR("SPLL already in use\n");
+ return false;
}
WARN(I915_READ(reg) & SPLL_PLL_ENABLE,
@@ -921,14 +924,14 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
- enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
int type = intel_encoder->type;
uint32_t temp;
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
temp = TRANS_MSA_SYNC_CLK;
- switch (intel_crtc->bpp) {
+ switch (intel_crtc->config.pipe_bpp) {
case 18:
temp |= TRANS_MSA_6_BPC;
break;
@@ -942,22 +945,20 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
temp |= TRANS_MSA_12_BPC;
break;
default:
- temp |= TRANS_MSA_8_BPC;
- WARN(1, "%d bpp unsupported by DDI function\n",
- intel_crtc->bpp);
+ BUG();
}
I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
}
}
-void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
+void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
enum pipe pipe = intel_crtc->pipe;
- enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
uint32_t temp;
@@ -966,7 +967,7 @@ void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
temp = TRANS_DDI_FUNC_ENABLE;
temp |= TRANS_DDI_SELECT_PORT(port);
- switch (intel_crtc->bpp) {
+ switch (intel_crtc->config.pipe_bpp) {
case 18:
temp |= TRANS_DDI_BPC_6;
break;
@@ -980,8 +981,7 @@ void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
temp |= TRANS_DDI_BPC_12;
break;
default:
- WARN(1, "%d bpp unsupported by transcoder DDI function\n",
- intel_crtc->bpp);
+ BUG();
}
if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
@@ -1150,14 +1150,14 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port);
- return true;
+ return false;
}
static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
uint32_t temp, ret;
- enum port port;
+ enum port port = I915_MAX_PORTS;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
int i;
@@ -1173,10 +1173,16 @@ static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
port = i;
}
- ret = I915_READ(PORT_CLK_SEL(port));
-
- DRM_DEBUG_KMS("Pipe %c connected to port %c using clock 0x%08x\n",
- pipe_name(pipe), port_name(port), ret);
+ if (port == I915_MAX_PORTS) {
+ WARN(1, "Pipe %c enabled on an unknown port\n",
+ pipe_name(pipe));
+ ret = PORT_CLK_SEL_NONE;
+ } else {
+ ret = I915_READ(PORT_CLK_SEL(port));
+ DRM_DEBUG_KMS("Pipe %c connected to port %c using clock "
+ "0x%08x\n", pipe_name(pipe), port_name(port),
+ ret);
+ }
return ret;
}
@@ -1217,7 +1223,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
- enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP)
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
@@ -1227,7 +1233,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
{
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
- enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP)
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
@@ -1341,15 +1347,15 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
+ tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
+ I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
ironlake_edp_backlight_off(intel_dp);
}
-
- tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
- tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
- I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
}
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
@@ -1467,19 +1473,17 @@ static void intel_ddi_destroy(struct drm_encoder *encoder)
intel_dp_encoder_destroy(encoder);
}
-static bool intel_ddi_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool intel_ddi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
- int type = intel_encoder->type;
+ int type = encoder->type;
- WARN(type == INTEL_OUTPUT_UNKNOWN, "mode_fixup() on unknown output!\n");
+ WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n");
if (type == INTEL_OUTPUT_HDMI)
- return intel_hdmi_mode_fixup(encoder, mode, adjusted_mode);
+ return intel_hdmi_compute_config(encoder, pipe_config);
else
- return intel_dp_mode_fixup(encoder, mode, adjusted_mode);
+ return intel_dp_compute_config(encoder, pipe_config);
}
static const struct drm_encoder_funcs intel_ddi_funcs = {
@@ -1487,7 +1491,6 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
};
static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
- .mode_fixup = intel_ddi_mode_fixup,
.mode_set = intel_ddi_mode_set,
};
@@ -1527,6 +1530,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
+ intel_encoder->compute_config = intel_ddi_compute_config;
intel_encoder->enable = intel_enable_ddi;
intel_encoder->pre_enable = intel_ddi_pre_enable;
intel_encoder->disable = intel_disable_ddi;
@@ -1537,9 +1541,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
DDI_BUF_PORT_REVERSAL;
if (hdmi_connector)
- intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port);
- else
- intel_dig_port->hdmi.sdvox_reg = 0;
+ intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b20d501..efe8299 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -71,8 +71,24 @@ typedef struct intel_limit intel_limit_t;
struct intel_limit {
intel_range_t dot, vco, n, m, m1, m2, p, p1;
intel_p2_t p2;
- bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
- int, int, intel_clock_t *, intel_clock_t *);
+ /**
+ * find_pll() - Find the best values for the PLL
+ * @limit: limits for the PLL
+ * @crtc: current CRTC
+ * @target: target frequency in kHz
+ * @refclk: reference clock frequency in kHz
+ * @match_clock: if provided, @best_clock P divider must
+ * match the P divider from @match_clock
+ * used for LVDS downclocking
+ * @best_clock: best PLL values found
+ *
+ * Returns true on success, false on failure.
+ */
+ bool (*find_pll)(const intel_limit_t *limit,
+ struct drm_crtc *crtc,
+ int target, int refclk,
+ intel_clock_t *match_clock,
+ intel_clock_t *best_clock);
};
/* FDI */
@@ -471,7 +487,6 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev)) {
- /* LVDS dual channel */
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
else
@@ -498,10 +513,8 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev))
- /* LVDS with dual channel */
limit = &intel_limits_g4x_dual_channel_lvds;
else
- /* LVDS with dual channel */
limit = &intel_limits_g4x_single_channel_lvds;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
@@ -879,7 +892,7 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- return intel_crtc->cpu_transcoder;
+ return intel_crtc->config.cpu_transcoder;
}
static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
@@ -1214,8 +1227,8 @@ void assert_pipe(struct drm_i915_private *dev_priv,
if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
state = true;
- if (IS_HASWELL(dev_priv->dev) && cpu_transcoder != TRANSCODER_EDP &&
- !(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_ENABLE)) {
+ if (!intel_using_power_well(dev_priv->dev) &&
+ cpu_transcoder != TRANSCODER_EDP) {
cur_state = false;
} else {
reg = PIPECONF(cpu_transcoder);
@@ -1254,7 +1267,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
int cur_pipe;
/* Planes are fixed to pipes on ILK+ */
- if (HAS_PCH_SPLIT(dev_priv->dev)) {
+ if (HAS_PCH_SPLIT(dev_priv->dev) || IS_VALLEYVIEW(dev_priv->dev)) {
reg = DSPCNTR(pipe);
val = I915_READ(reg);
WARN((val & DISPLAY_PLANE_ENABLE),
@@ -1275,6 +1288,25 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
}
}
+static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg, i;
+ u32 val;
+
+ if (!IS_VALLEYVIEW(dev_priv->dev))
+ return;
+
+ /* Need to check both planes against the pipe */
+ for (i = 0; i < dev_priv->num_plane; i++) {
+ reg = SPCNTR(pipe, i);
+ val = I915_READ(reg);
+ WARN((val & SP_ENABLE),
+ "sprite %d assertion failure, should be off on pipe %c but is still active\n",
+ pipe * 2 + i, pipe_name(pipe));
+ }
+}
+
static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -1327,14 +1359,14 @@ static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
enum pipe pipe, u32 val)
{
- if ((val & PORT_ENABLE) == 0)
+ if ((val & SDVO_ENABLE) == 0)
return false;
if (HAS_PCH_CPT(dev_priv->dev)) {
- if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
+ if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
return false;
} else {
- if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
+ if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
return false;
}
return true;
@@ -1392,7 +1424,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
"PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
- WARN(HAS_PCH_IBX(dev_priv->dev) && (val & PORT_ENABLE) == 0
+ WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
&& (val & SDVO_PIPE_B_SELECT),
"IBX PCH hdmi port still using transcoder B\n");
}
@@ -1419,9 +1451,9 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
"PCH LVDS enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
- assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
- assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
- assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
+ assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
+ assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
+ assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}
/**
@@ -1859,6 +1891,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
* or we might hang the display.
*/
assert_planes_disabled(dev_priv, pipe);
+ assert_sprites_disabled(dev_priv, pipe);
/* Don't disable pipe A or pipe A PLLs if needed */
if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
@@ -1937,6 +1970,15 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv,
intel_wait_for_vblank(dev_priv->dev, pipe);
}
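+/* True when scanout buffers need the extra VT-d alignment workaround:
+ * gen6+ with the IOMMU actively remapping graphics accesses. */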
+static bool need_vtd_wa(struct drm_device *dev)
+{
+#ifdef CONFIG_INTEL_IOMMU
+ if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
+ return true;
+#endif
+ return false;
+}
+
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -1960,13 +2002,23 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
alignment = 0;
break;
case I915_TILING_Y:
- /* FIXME: Is this true? */
- DRM_ERROR("Y tiled not allowed for scan out buffers\n");
+ /* Despite the check in framebuffer_init, userspace can still change
+ * the tiling after the fact; only pinned buffers can't change their
+ * tiling. */
+ DRM_DEBUG_DRIVER("Y tiled not allowed for scan out buffers\n");
return -EINVAL;
default:
BUG();
}
+ /* Note that the w/a also requires 64 PTE of padding following the
+ * bo. We currently fill all unused PTE with the shadow page and so
+ * we should always have valid PTE following the scanout preventing
+ * the VT-d warning.
+ */
+ if (need_vtd_wa(dev) && alignment < 256 * 1024)
+ alignment = 256 * 1024;
+
dev_priv->mm.interruptible = false;
ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
if (ret)
@@ -2083,8 +2135,7 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
dspcntr |= DISPPLANE_RGBX101010;
break;
default:
- DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
- return -EINVAL;
+ BUG();
}
if (INTEL_INFO(dev)->gen >= 4) {
@@ -2177,8 +2228,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
dspcntr |= DISPPLANE_RGBX101010;
break;
default:
- DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
- return -EINVAL;
+ BUG();
}
if (obj->tiling_mode != I915_TILING_NONE)
@@ -2229,6 +2279,44 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return dev_priv->display.update_plane(crtc, fb, x, y);
}
+void intel_display_handle_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+
+ /*
+ * Flips in the rings have been nuked by the reset,
+ * so complete all pending flips so that user space
+ * will get its events and not get stuck.
+ *
+ * Also update the base address of all primary
+ * planes to the last fb to make sure we're
+ * showing the correct fb after a reset.
+ *
+ * Need to make two loops over the crtcs so that we
+ * don't try to grab a crtc mutex before the
+ * pending_flip_queue really got woken up.
+ */
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum plane plane = intel_crtc->plane;
+
+ intel_prepare_page_flip(dev, plane);
+ intel_finish_page_flip_plane(dev, plane);
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ mutex_lock(&crtc->mutex);
+ if (intel_crtc->active)
+ dev_priv->display.update_plane(crtc, crtc->fb,
+ crtc->x, crtc->y);
+ mutex_unlock(&crtc->mutex);
+ }
+}
+
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
@@ -2295,10 +2383,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
- if(intel_crtc->plane > dev_priv->num_pipe) {
+ if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
intel_crtc->plane,
- dev_priv->num_pipe);
+ INTEL_INFO(dev)->num_pipes);
return -EINVAL;
}
@@ -2312,9 +2400,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
- if (crtc->fb)
- intel_finish_fb(crtc->fb);
-
ret = dev_priv->display.update_plane(crtc, fb, x, y);
if (ret) {
intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
@@ -2912,32 +2997,6 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
mutex_unlock(&dev->struct_mutex);
}
-static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct intel_encoder *intel_encoder;
-
- /*
- * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
- * must be driven by its own crtc; no sharing is possible.
- */
- for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
- switch (intel_encoder->type) {
- case INTEL_OUTPUT_EDP:
- if (!intel_encoder_is_pch_edp(&intel_encoder->base))
- return false;
- continue;
- }
- }
-
- return true;
-}
-
-static bool haswell_crtc_driving_pch(struct drm_crtc *crtc)
-{
- return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG);
-}
-
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
@@ -3144,7 +3203,7 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
assert_transcoder_disabled(dev_priv, TRANSCODER_A);
@@ -3273,7 +3332,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 temp;
- bool is_pch_port;
WARN_ON(!crtc->enabled);
@@ -3289,9 +3347,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
}
- is_pch_port = ironlake_crtc_driving_pch(crtc);
- if (is_pch_port) {
+ if (intel_crtc->config.has_pch_encoder) {
/* Note: FDI PLL enabling _must_ be done before we enable the
* cpu pipes, hence this is separate from all the other fdi/pch
* enabling. */
@@ -3328,10 +3385,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
*/
intel_crtc_load_lut(crtc);
- intel_enable_pipe(dev_priv, pipe, is_pch_port);
+ intel_enable_pipe(dev_priv, pipe,
+ intel_crtc->config.has_pch_encoder);
intel_enable_plane(dev_priv, plane, pipe);
- if (is_pch_port)
+ if (intel_crtc->config.has_pch_encoder)
ironlake_pch_enable(crtc);
mutex_lock(&dev->struct_mutex);
@@ -3365,7 +3423,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- bool is_pch_port;
WARN_ON(!crtc->enabled);
@@ -3375,9 +3432,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
intel_update_watermarks(dev);
- is_pch_port = haswell_crtc_driving_pch(crtc);
-
- if (is_pch_port)
+ if (intel_crtc->config.has_pch_encoder)
dev_priv->display.fdi_link_train(crtc);
for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -3406,12 +3461,13 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_crtc_load_lut(crtc);
intel_ddi_set_pipe_settings(crtc);
- intel_ddi_enable_pipe_func(crtc);
+ intel_ddi_enable_transcoder_func(crtc);
- intel_enable_pipe(dev_priv, pipe, is_pch_port);
+ intel_enable_pipe(dev_priv, pipe,
+ intel_crtc->config.has_pch_encoder);
intel_enable_plane(dev_priv, plane, pipe);
- if (is_pch_port)
+ if (intel_crtc->config.has_pch_encoder)
lpt_pch_enable(crtc);
mutex_lock(&dev->struct_mutex);
@@ -3522,14 +3578,11 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
- bool is_pch_port;
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
if (!intel_crtc->active)
return;
- is_pch_port = haswell_crtc_driving_pch(crtc);
-
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
@@ -3546,9 +3599,13 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
- /* Disable PF */
- I915_WRITE(PF_CTL(pipe), 0);
- I915_WRITE(PF_WIN_SZ(pipe), 0);
+ /* XXX: Once proper panel fitter state tracking is implemented with
+ * hardware state read/check support, we should switch to only disabling
+ * the panel fitter when we know it's in use. */
+ if (intel_using_power_well(dev)) {
+ I915_WRITE(PF_CTL(pipe), 0);
+ I915_WRITE(PF_WIN_SZ(pipe), 0);
+ }
intel_ddi_disable_pipe_clock(intel_crtc);
@@ -3556,7 +3613,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
if (encoder->post_disable)
encoder->post_disable(encoder);
- if (is_pch_port) {
+ if (intel_crtc->config.has_pch_encoder) {
lpt_disable_pch_transcoder(dev_priv);
intel_ddi_fdi_disable(crtc);
}
@@ -3581,7 +3638,7 @@ static void haswell_crtc_off(struct drm_crtc *crtc)
/* Stop saying we're using TRANSCODER_EDP because some other CRTC might
* start using it. */
- intel_crtc->cpu_transcoder = (enum transcoder) intel_crtc->pipe;
+ intel_crtc->config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
intel_ddi_put_crtc_pll(crtc);
}
@@ -3667,6 +3724,26 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
encoder->enable(encoder);
}
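+/* Turn the panel fitter off, but only if it is currently attached to our pipe. */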
+static void i9xx_pfit_disable(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
+ uint32_t pctl = I915_READ(PFIT_CONTROL);
+
+ assert_pipe_disabled(dev_priv, crtc->pipe);
+
+ if (INTEL_INFO(dev)->gen >= 4)
+ pipe = (pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT;
+ else
+ pipe = PIPE_B;
+
+ if (pipe == crtc->pipe) {
+ DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n", pctl);
+ I915_WRITE(PFIT_CONTROL, 0);
+ }
+}
+
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -3675,8 +3752,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- u32 pctl;
-
if (!intel_crtc->active)
return;
@@ -3696,11 +3771,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_disable_plane(dev_priv, plane, pipe);
intel_disable_pipe(dev_priv, pipe);
- /* Disable pannel fitter if it is on this pipe. */
- pctl = I915_READ(PFIT_CONTROL);
- if ((pctl & PFIT_ENABLE) &&
- ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe)
- I915_WRITE(PFIT_CONTROL, 0);
+ i9xx_pfit_disable(intel_crtc);
intel_disable_pll(dev_priv, pipe);
@@ -3906,22 +3977,23 @@ bool intel_connector_get_hw_state(struct intel_connector *connector)
return encoder->get_hw_state(encoder, &pipe);
}
-static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool intel_crtc_compute_config(struct drm_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->dev;
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
if (HAS_PCH_SPLIT(dev)) {
/* FDI link clock is fixed at 2.7G */
- if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
+ if (pipe_config->requested_mode.clock * 3
+ > IRONLAKE_FDI_FREQ * 4)
return false;
}
/* All interlaced capable intel hw wants timings in frames. Note though
* that intel_lvds_mode_fixup does some funny tricks with the crtc
* timings, so we need to be careful not to clobber these.*/
- if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
+ if (!pipe_config->timings_set)
drm_mode_set_crtcinfo(adjusted_mode, 0);
/* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
@@ -3931,6 +4003,14 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
adjusted_mode->hsync_start == adjusted_mode->hdisplay)
return false;
+ if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
+ pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
+ } else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
+ /* only a 8bpc pipe, with 6bpc dither through the panel fitter
+ * for lvds. */
+ pipe_config->pipe_bpp = 8*3;
+ }
+
return true;
}
@@ -4004,26 +4084,36 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
}
static void
-intel_reduce_ratio(uint32_t *num, uint32_t *den)
+intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
{
- while (*num > 0xffffff || *den > 0xffffff) {
+ while (*num > DATA_LINK_M_N_MASK ||
+ *den > DATA_LINK_M_N_MASK) {
*num >>= 1;
*den >>= 1;
}
}
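+/* Pick N as a power of two (capped at DATA_LINK_N_MAX), scale M to keep the
+ * ratio, then shift both down until they fit the M/N register fields. */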
+static void compute_m_n(unsigned int m, unsigned int n,
+ uint32_t *ret_m, uint32_t *ret_n)
+{
+ *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
+ *ret_m = div_u64((uint64_t) m * *ret_n, n);
+ intel_reduce_m_n_ratio(ret_m, ret_n);
+}
+
void
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n)
{
m_n->tu = 64;
- m_n->gmch_m = bits_per_pixel * pixel_clock;
- m_n->gmch_n = link_clock * nlanes * 8;
- intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
- m_n->link_m = pixel_clock;
- m_n->link_n = link_clock;
- intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
+
+ compute_m_n(bits_per_pixel * pixel_clock,
+ link_clock * nlanes * 8,
+ &m_n->gmch_m, &m_n->gmch_n);
+
+ compute_m_n(pixel_clock, link_clock,
+ &m_n->link_m, &m_n->link_n);
}
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
@@ -4034,142 +4124,6 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
-/**
- * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
- * @crtc: CRTC structure
- * @mode: requested mode
- *
- * A pipe may be connected to one or more outputs. Based on the depth of the
- * attached framebuffer, choose a good color depth to use on the pipe.
- *
- * If possible, match the pipe depth to the fb depth. In some cases, this
- * isn't ideal, because the connected output supports a lesser or restricted
- * set of depths. Resolve that here:
- * LVDS typically supports only 6bpc, so clamp down in that case
- * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
- * Displays may support a restricted set as well, check EDID and clamp as
- * appropriate.
- * DP may want to dither down to 6bpc to fit larger modes
- *
- * RETURNS:
- * Dithering requirement (i.e. false if display bpc and pipe bpc match,
- * true if they don't match).
- */
-static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- unsigned int *pipe_bpp,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_connector *connector;
- struct intel_encoder *intel_encoder;
- unsigned int display_bpc = UINT_MAX, bpc;
-
- /* Walk the encoders & connectors on this crtc, get min bpc */
- for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
-
- if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
- unsigned int lvds_bpc;
-
- if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
- LVDS_A3_POWER_UP)
- lvds_bpc = 8;
- else
- lvds_bpc = 6;
-
- if (lvds_bpc < display_bpc) {
- DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
- display_bpc = lvds_bpc;
- }
- continue;
- }
-
- /* Not one of the known troublemakers, check the EDID */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- head) {
- if (connector->encoder != &intel_encoder->base)
- continue;
-
- /* Don't use an invalid EDID bpc value */
- if (connector->display_info.bpc &&
- connector->display_info.bpc < display_bpc) {
- DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
- display_bpc = connector->display_info.bpc;
- }
- }
-
- if (intel_encoder->type == INTEL_OUTPUT_EDP) {
- /* Use VBT settings if we have an eDP panel */
- unsigned int edp_bpc = dev_priv->edp.bpp / 3;
-
- if (edp_bpc && edp_bpc < display_bpc) {
- DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
- display_bpc = edp_bpc;
- }
- continue;
- }
-
- /*
- * HDMI is either 12 or 8, so if the display lets 10bpc sneak
- * through, clamp it down. (Note: >12bpc will be caught below.)
- */
- if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
- if (display_bpc > 8 && display_bpc < 12) {
- DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
- display_bpc = 12;
- } else {
- DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
- display_bpc = 8;
- }
- }
- }
-
- if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
- DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
- display_bpc = 6;
- }
-
- /*
- * We could just drive the pipe at the highest bpc all the time and
- * enable dithering as needed, but that costs bandwidth. So choose
- * the minimum value that expresses the full color range of the fb but
- * also stays within the max display bpc discovered above.
- */
-
- switch (fb->depth) {
- case 8:
- bpc = 8; /* since we go through a colormap */
- break;
- case 15:
- case 16:
- bpc = 6; /* min is 18bpp */
- break;
- case 24:
- bpc = 8;
- break;
- case 30:
- bpc = 10;
- break;
- case 48:
- bpc = 12;
- break;
- default:
- DRM_DEBUG("unsupported depth, assuming 24 bits\n");
- bpc = min((unsigned int)8, display_bpc);
- break;
- }
-
- display_bpc = min(display_bpc, bpc);
-
- DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
- bpc, display_bpc);
-
- *pipe_bpp = display_bpc * 3;
-
- return display_bpc != bpc;
-}
-
static int vlv_get_refclk(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -4214,37 +4168,38 @@ static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
return refclk;
}
-static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
- intel_clock_t *clock)
+static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc *crtc)
{
+ unsigned dotclock = crtc->config.adjusted_mode.clock;
+ struct dpll *clock = &crtc->config.dpll;
+
/* SDVO TV has fixed PLL values depend on its clock range,
this mirrors vbios setting. */
- if (adjusted_mode->clock >= 100000
- && adjusted_mode->clock < 140500) {
+ if (dotclock >= 100000 && dotclock < 140500) {
clock->p1 = 2;
clock->p2 = 10;
clock->n = 3;
clock->m1 = 16;
clock->m2 = 8;
- } else if (adjusted_mode->clock >= 140500
- && adjusted_mode->clock <= 200000) {
+ } else if (dotclock >= 140500 && dotclock <= 200000) {
clock->p1 = 1;
clock->p2 = 10;
clock->n = 6;
clock->m1 = 12;
clock->m2 = 8;
}
+
+ crtc->config.clock_set = true;
}
-static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
- intel_clock_t *clock,
+static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
intel_clock_t *reduced_clock)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ int pipe = crtc->pipe;
u32 fp, fp2 = 0;
+ struct dpll *clock = &crtc->config.dpll;
if (IS_PINEVIEW(dev)) {
fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
@@ -4260,26 +4215,29 @@ static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
I915_WRITE(FP0(pipe), fp);
- intel_crtc->lowfreq_avail = false;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ crtc->lowfreq_avail = false;
+ if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
reduced_clock && i915_powersave) {
I915_WRITE(FP1(pipe), fp2);
- intel_crtc->lowfreq_avail = true;
+ crtc->lowfreq_avail = true;
} else {
I915_WRITE(FP1(pipe), fp);
}
}
-static void vlv_update_pll(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- intel_clock_t *clock, intel_clock_t *reduced_clock,
- int num_connectors)
+static void intel_dp_set_m_n(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
+ if (crtc->config.has_pch_encoder)
+ intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
+ else
+ intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
+}
+
+static void vlv_update_pll(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ int pipe = crtc->pipe;
u32 dpll, mdiv, pdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
bool is_sdvo;
@@ -4287,8 +4245,8 @@ static void vlv_update_pll(struct drm_crtc *crtc,
mutex_lock(&dev_priv->dpio_lock);
- is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
- intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+ is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
+ intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
dpll = DPLL_VGA_MODE_DIS;
dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
@@ -4298,11 +4256,11 @@ static void vlv_update_pll(struct drm_crtc *crtc,
I915_WRITE(DPLL(pipe), dpll);
POSTING_READ(DPLL(pipe));
- bestn = clock->n;
- bestm1 = clock->m1;
- bestm2 = clock->m2;
- bestp1 = clock->p1;
- bestp2 = clock->p2;
+ bestn = crtc->config.dpll.n;
+ bestm1 = crtc->config.dpll.m1;
+ bestm2 = crtc->config.dpll.m2;
+ bestp1 = crtc->config.dpll.p1;
+ bestp2 = crtc->config.dpll.p2;
/*
* In Valleyview PLL and program lane counter registers are exposed
@@ -4334,8 +4292,8 @@ static void vlv_update_pll(struct drm_crtc *crtc,
intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
- intel_dp_set_m_n(crtc, mode, adjusted_mode);
+ if (crtc->config.has_dp_encoder)
+ intel_dp_set_m_n(crtc);
I915_WRITE(DPLL(pipe), dpll);
@@ -4345,26 +4303,25 @@ static void vlv_update_pll(struct drm_crtc *crtc,
temp = 0;
if (is_sdvo) {
- temp = intel_mode_get_pixel_multiplier(adjusted_mode);
- if (temp > 1)
- temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
- else
- temp = 0;
+ temp = 0;
+ if (crtc->config.pixel_multiplier > 1) {
+ temp = (crtc->config.pixel_multiplier - 1)
+ << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ }
}
I915_WRITE(DPLL_MD(pipe), temp);
POSTING_READ(DPLL_MD(pipe));
/* Now program lane control registers */
- if(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)
- || intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
- {
+ if(intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)
+ || intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
temp = 0x1000C4;
if(pipe == 1)
temp |= (1 << 21);
intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
}
- if(intel_pipe_has_type(crtc,INTEL_OUTPUT_EDP))
- {
+
+ if(intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP)) {
temp = 0x1000C4;
if(pipe == 1)
temp |= (1 << 21);
@@ -4374,40 +4331,39 @@ static void vlv_update_pll(struct drm_crtc *crtc,
mutex_unlock(&dev_priv->dpio_lock);
}
-static void i9xx_update_pll(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- intel_clock_t *clock, intel_clock_t *reduced_clock,
+static void i9xx_update_pll(struct intel_crtc *crtc,
+ intel_clock_t *reduced_clock,
int num_connectors)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
- int pipe = intel_crtc->pipe;
+ int pipe = crtc->pipe;
u32 dpll;
bool is_sdvo;
+ struct dpll *clock = &crtc->config.dpll;
- i9xx_update_pll_dividers(crtc, clock, reduced_clock);
+ i9xx_update_pll_dividers(crtc, reduced_clock);
- is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
- intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+ is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
+ intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
dpll = DPLL_VGA_MODE_DIS;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
dpll |= DPLLB_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL;
+
if (is_sdvo) {
- int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
- if (pixel_multiplier > 1) {
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+ if ((crtc->config.pixel_multiplier > 1) &&
+ (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))) {
+ dpll |= (crtc->config.pixel_multiplier - 1)
+ << SDVO_MULTIPLIER_SHIFT_HIRES;
}
dpll |= DPLL_DVO_HIGH_SPEED;
}
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+ if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
dpll |= DPLL_DVO_HIGH_SPEED;
/* compute bitmask from p1 value */
@@ -4435,13 +4391,13 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
if (INTEL_INFO(dev)->gen >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
- if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
+ if (is_sdvo && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
dpll |= PLL_REF_INPUT_TVCLKINBC;
- else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
+ else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
/* XXX: just matching BIOS for now */
/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
dpll |= 3;
- else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
@@ -4452,12 +4408,12 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
POSTING_READ(DPLL(pipe));
udelay(150);
- for_each_encoder_on_crtc(dev, crtc, encoder)
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
- intel_dp_set_m_n(crtc, mode, adjusted_mode);
+ if (crtc->config.has_dp_encoder)
+ intel_dp_set_m_n(crtc);
I915_WRITE(DPLL(pipe), dpll);
@@ -4468,11 +4424,11 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
if (INTEL_INFO(dev)->gen >= 4) {
u32 temp = 0;
if (is_sdvo) {
- temp = intel_mode_get_pixel_multiplier(adjusted_mode);
- if (temp > 1)
- temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
- else
- temp = 0;
+ temp = 0;
+ if (crtc->config.pixel_multiplier > 1) {
+ temp = (crtc->config.pixel_multiplier - 1)
+ << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ }
}
I915_WRITE(DPLL_MD(pipe), temp);
} else {
@@ -4485,23 +4441,23 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
}
}
-static void i8xx_update_pll(struct drm_crtc *crtc,
+static void i8xx_update_pll(struct intel_crtc *crtc,
struct drm_display_mode *adjusted_mode,
- intel_clock_t *clock, intel_clock_t *reduced_clock,
+ intel_clock_t *reduced_clock,
int num_connectors)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
- int pipe = intel_crtc->pipe;
+ int pipe = crtc->pipe;
u32 dpll;
+ struct dpll *clock = &crtc->config.dpll;
- i9xx_update_pll_dividers(crtc, clock, reduced_clock);
+ i9xx_update_pll_dividers(crtc, reduced_clock);
dpll = DPLL_VGA_MODE_DIS;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
} else {
if (clock->p1 == 2)
@@ -4512,11 +4468,7 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
dpll |= PLL_P2_DIVIDE_BY_4;
}
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
- /* XXX: just matching BIOS for now */
- /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
- dpll |= 3;
- else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
@@ -4527,7 +4479,7 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
POSTING_READ(DPLL(pipe));
udelay(150);
- for_each_encoder_on_crtc(dev, crtc, encoder)
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
@@ -4552,7 +4504,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = intel_crtc->pipe;
- enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
uint32_t vsyncshift;
if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
@@ -4603,22 +4555,92 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
}
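+/* Program PIPECONF for the gmch/VLV path: double-wide, dithering and bpc,
+ * CxSR downclocking, interlace mode and (on VLV) the color range select. */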
+static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t pipeconf;
+
+ pipeconf = I915_READ(PIPECONF(intel_crtc->pipe));
+
+ if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
+ /* Enable pixel doubling when the dot clock is > 90% of the (display)
+ * core speed.
+ *
+ * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
+ * pipe == 0 check?
+ */
+ if (intel_crtc->config.requested_mode.clock >
+ dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
+ pipeconf |= PIPECONF_DOUBLE_WIDE;
+ else
+ pipeconf &= ~PIPECONF_DOUBLE_WIDE;
+ }
+
+ /* default to 8bpc */
+ pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN);
+ if (intel_crtc->config.has_dp_encoder) {
+ if (intel_crtc->config.dither) {
+ pipeconf |= PIPECONF_6BPC |
+ PIPECONF_DITHER_EN |
+ PIPECONF_DITHER_TYPE_SP;
+ }
+ }
+
+ if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(&intel_crtc->base,
+ INTEL_OUTPUT_EDP)) {
+ if (intel_crtc->config.dither) {
+ pipeconf |= PIPECONF_6BPC |
+ PIPECONF_ENABLE |
+ I965_PIPECONF_ACTIVE;
+ }
+ }
+
+ if (HAS_PIPE_CXSR(dev)) {
+ if (intel_crtc->lowfreq_avail) {
+ DRM_DEBUG_KMS("enabling CxSR downclocking\n");
+ pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
+ } else {
+ DRM_DEBUG_KMS("disabling CxSR downclocking\n");
+ pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
+ }
+ }
+
+ pipeconf &= ~PIPECONF_INTERLACE_MASK;
+ if (!IS_GEN2(dev) &&
+ intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
+ else
+ pipeconf |= PIPECONF_PROGRESSIVE;
+
+ if (IS_VALLEYVIEW(dev)) {
+ if (intel_crtc->config.limited_color_range)
+ pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
+ else
+ pipeconf &= ~PIPECONF_COLOR_RANGE_SELECT;
+ }
+
+ I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
+ POSTING_READ(PIPECONF(intel_crtc->pipe));
+}
+
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config.adjusted_mode;
+ struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
- u32 dspcntr, pipeconf;
+ u32 dspcntr;
bool ok, has_reduced_clock = false, is_sdvo = false;
- bool is_lvds = false, is_tv = false, is_dp = false;
+ bool is_lvds = false, is_tv = false;
struct intel_encoder *encoder;
const intel_limit_t *limit;
int ret;
@@ -4637,9 +4659,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
- case INTEL_OUTPUT_DISPLAYPORT:
- is_dp = true;
- break;
}
num_connectors++;
@@ -4676,86 +4695,42 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
&clock,
&reduced_clock);
}
+ /* Compat-code for transition, will disappear. */
+ if (!intel_crtc->config.clock_set) {
+ intel_crtc->config.dpll.n = clock.n;
+ intel_crtc->config.dpll.m1 = clock.m1;
+ intel_crtc->config.dpll.m2 = clock.m2;
+ intel_crtc->config.dpll.p1 = clock.p1;
+ intel_crtc->config.dpll.p2 = clock.p2;
+ }
if (is_sdvo && is_tv)
- i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
+ i9xx_adjust_sdvo_tv_clock(intel_crtc);
if (IS_GEN2(dev))
- i8xx_update_pll(crtc, adjusted_mode, &clock,
+ i8xx_update_pll(intel_crtc, adjusted_mode,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
else if (IS_VALLEYVIEW(dev))
- vlv_update_pll(crtc, mode, adjusted_mode, &clock,
- has_reduced_clock ? &reduced_clock : NULL,
- num_connectors);
+ vlv_update_pll(intel_crtc);
else
- i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
+ i9xx_update_pll(intel_crtc,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
- /* setup pipeconf */
- pipeconf = I915_READ(PIPECONF(pipe));
-
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
- if (pipe == 0)
- dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
- else
- dspcntr |= DISPPLANE_SEL_PIPE_B;
-
- if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
- /* Enable pixel doubling when the dot clock is > 90% of the (display)
- * core speed.
- *
- * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
- * pipe == 0 check?
- */
- if (mode->clock >
- dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
- pipeconf |= PIPECONF_DOUBLE_WIDE;
+ if (!IS_VALLEYVIEW(dev)) {
+ if (pipe == 0)
+ dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
else
- pipeconf &= ~PIPECONF_DOUBLE_WIDE;
- }
-
- /* default to 8bpc */
- pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN);
- if (is_dp) {
- if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
- pipeconf |= PIPECONF_6BPC |
- PIPECONF_DITHER_EN |
- PIPECONF_DITHER_TYPE_SP;
- }
- }
-
- if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
- if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
- pipeconf |= PIPECONF_6BPC |
- PIPECONF_ENABLE |
- I965_PIPECONF_ACTIVE;
- }
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
}
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
- if (HAS_PIPE_CXSR(dev)) {
- if (intel_crtc->lowfreq_avail) {
- DRM_DEBUG_KMS("enabling CxSR downclocking\n");
- pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
- } else {
- DRM_DEBUG_KMS("disabling CxSR downclocking\n");
- pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
- }
- }
-
- pipeconf &= ~PIPECONF_INTERLACE_MASK;
- if (!IS_GEN2(dev) &&
- adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
- pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
- else
- pipeconf |= PIPECONF_PROGRESSIVE;
-
intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
/* pipesrc and dspsize control the size that is scaled from,
@@ -4766,8 +4741,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
(mode->hdisplay - 1));
I915_WRITE(DSPPOS(plane), 0);
- I915_WRITE(PIPECONF(pipe), pipeconf);
- POSTING_READ(PIPECONF(pipe));
+ i9xx_set_pipeconf(intel_crtc);
+
intel_enable_pipe(dev_priv, pipe, false);
intel_wait_for_vblank(dev, pipe);
@@ -4782,12 +4757,26 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
return ret;
}
+static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = I915_READ(PIPECONF(crtc->pipe));
+ if (!(tmp & PIPECONF_ENABLE))
+ return false;
+
+ return true;
+}
+
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
- u32 temp;
+ u32 val, final;
bool has_lvds = false;
bool has_cpu_edp = false;
bool has_pch_edp = false;
@@ -4830,70 +4819,109 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
* PCH B stepping, previous chipset stepping should be
* ignoring this setting.
*/
- temp = I915_READ(PCH_DREF_CONTROL);
+ val = I915_READ(PCH_DREF_CONTROL);
+
+ /* As we must carefully and slowly disable/enable each source in turn,
+ * compute the final state we want first and check if we need to
+ * make any changes at all.
+ */
+ final = val;
+ final &= ~DREF_NONSPREAD_SOURCE_MASK;
+ if (has_ck505)
+ final |= DREF_NONSPREAD_CK505_ENABLE;
+ else
+ final |= DREF_NONSPREAD_SOURCE_ENABLE;
+
+ final &= ~DREF_SSC_SOURCE_MASK;
+ final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+ final &= ~DREF_SSC1_ENABLE;
+
+ if (has_panel) {
+ final |= DREF_SSC_SOURCE_ENABLE;
+
+ if (intel_panel_use_ssc(dev_priv) && can_ssc)
+ final |= DREF_SSC1_ENABLE;
+
+ if (has_cpu_edp) {
+ if (intel_panel_use_ssc(dev_priv) && can_ssc)
+ final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+ else
+ final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+ } else
+ final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+ } else {
+ final |= DREF_SSC_SOURCE_DISABLE;
+ final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+ }
+
+ if (final == val)
+ return;
+
/* Always enable nonspread source */
- temp &= ~DREF_NONSPREAD_SOURCE_MASK;
+ val &= ~DREF_NONSPREAD_SOURCE_MASK;
if (has_ck505)
- temp |= DREF_NONSPREAD_CK505_ENABLE;
+ val |= DREF_NONSPREAD_CK505_ENABLE;
else
- temp |= DREF_NONSPREAD_SOURCE_ENABLE;
+ val |= DREF_NONSPREAD_SOURCE_ENABLE;
if (has_panel) {
- temp &= ~DREF_SSC_SOURCE_MASK;
- temp |= DREF_SSC_SOURCE_ENABLE;
+ val &= ~DREF_SSC_SOURCE_MASK;
+ val |= DREF_SSC_SOURCE_ENABLE;
/* SSC must be turned on before enabling the CPU output */
if (intel_panel_use_ssc(dev_priv) && can_ssc) {
DRM_DEBUG_KMS("Using SSC on panel\n");
- temp |= DREF_SSC1_ENABLE;
+ val |= DREF_SSC1_ENABLE;
} else
- temp &= ~DREF_SSC1_ENABLE;
+ val &= ~DREF_SSC1_ENABLE;
/* Get SSC going before enabling the outputs */
- I915_WRITE(PCH_DREF_CONTROL, temp);
+ I915_WRITE(PCH_DREF_CONTROL, val);
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
- temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+ val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
/* Enable CPU source on CPU attached eDP */
if (has_cpu_edp) {
if (intel_panel_use_ssc(dev_priv) && can_ssc) {
DRM_DEBUG_KMS("Using SSC on eDP\n");
- temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+ val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
}
else
- temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+ val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
} else
- temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+ val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
- I915_WRITE(PCH_DREF_CONTROL, temp);
+ I915_WRITE(PCH_DREF_CONTROL, val);
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
} else {
DRM_DEBUG_KMS("Disabling SSC entirely\n");
- temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+ val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
/* Turn off CPU output */
- temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+ val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
- I915_WRITE(PCH_DREF_CONTROL, temp);
+ I915_WRITE(PCH_DREF_CONTROL, val);
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
/* Turn off the SSC source */
- temp &= ~DREF_SSC_SOURCE_MASK;
- temp |= DREF_SSC_SOURCE_DISABLE;
+ val &= ~DREF_SSC_SOURCE_MASK;
+ val |= DREF_SSC_SOURCE_DISABLE;
/* Turn off SSC1 */
- temp &= ~ DREF_SSC1_ENABLE;
+ val &= ~DREF_SSC1_ENABLE;
- I915_WRITE(PCH_DREF_CONTROL, temp);
+ I915_WRITE(PCH_DREF_CONTROL, val);
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
}
+
+ BUG_ON(val != final);
}
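(A minimal sketch of the compute-then-verify pattern ironlake_init_pch_refclk() now follows; read_dref() and compute_final() are hypothetical helper names used only for illustration, not the driver's API.)

	u32 val = read_dref();              /* current PCH_DREF_CONTROL value */
	u32 final = compute_final(val);     /* desired end state, computed up front */

	if (final == val)
		return;                     /* already correct: skip the slow sequence */

	/* ...otherwise step through the intermediate states, with the required
	 * udelay(200) settling time after each write... */

	BUG_ON(val != final);               /* the stepping must end exactly at 'final' */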
/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
@@ -4958,13 +4986,6 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
tmp |= (0x12 << 24);
intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
- if (!is_sdv) {
- tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY);
- tmp &= ~(0x3 << 6);
- tmp |= (1 << 6) | (1 << 0);
- intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY);
- }
-
if (is_sdv) {
tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
tmp |= 0x7FFF;
@@ -5118,7 +5139,7 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
val = I915_READ(PIPECONF(pipe));
val &= ~PIPECONF_BPC_MASK;
- switch (intel_crtc->bpp) {
+ switch (intel_crtc->config.pipe_bpp) {
case 18:
val |= PIPECONF_6BPC;
break;
@@ -5146,7 +5167,7 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
else
val |= PIPECONF_PROGRESSIVE;
- if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+ if (intel_crtc->config.limited_color_range)
val |= PIPECONF_COLOR_RANGE_SELECT;
else
val &= ~PIPECONF_COLOR_RANGE_SELECT;
@@ -5162,8 +5183,7 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
* is supported, but eventually this should handle various
* RGB<->YCbCr scenarios as well.
*/
-static void intel_set_pipe_csc(struct drm_crtc *crtc,
- const struct drm_display_mode *adjusted_mode)
+static void intel_set_pipe_csc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5178,7 +5198,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc,
* consideration.
*/
- if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+ if (intel_crtc->config.limited_color_range)
coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
/*
@@ -5202,7 +5222,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc,
if (INTEL_INFO(dev)->gen > 6) {
uint16_t postoff = 0;
- if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+ if (intel_crtc->config.limited_color_range)
postoff = (16 * (1 << 13) / 255) & 0x1fff;
I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
@@ -5213,7 +5233,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc,
} else {
uint32_t mode = CSC_MODE_YUV_TO_RGB;
- if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+ if (intel_crtc->config.limited_color_range)
mode |= CSC_BLACK_SCREEN_OFFSET;
I915_WRITE(PIPE_CSC_MODE(pipe), mode);
@@ -5226,7 +5246,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc,
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
uint32_t val;
val = I915_READ(PIPECONF(cpu_transcoder));
@@ -5303,7 +5323,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
}
if (is_sdvo && is_tv)
- i9xx_adjust_sdvo_tv_clock(adjusted_mode, clock);
+ i9xx_adjust_sdvo_tv_clock(to_intel_crtc(crtc));
return true;
}
@@ -5344,7 +5364,7 @@ static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
return false;
}
- if (dev_priv->num_pipe == 2)
+ if (INTEL_INFO(dev)->num_pipes == 2)
return true;
switch (intel_crtc->pipe) {
@@ -5401,87 +5421,87 @@ int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
return bps / (link_bw * 8) + 1;
}
-static void ironlake_set_m_n(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
+ struct intel_link_m_n *m_n)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
- struct intel_encoder *intel_encoder, *edp_encoder = NULL;
- struct intel_link_m_n m_n = {0};
- int target_clock, pixel_multiplier, lane, link_bw;
- bool is_dp = false, is_cpu_edp = false;
+ int pipe = crtc->pipe;
- for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
- switch (intel_encoder->type) {
- case INTEL_OUTPUT_DISPLAYPORT:
- is_dp = true;
- break;
- case INTEL_OUTPUT_EDP:
- is_dp = true;
- if (!intel_encoder_is_pch_edp(&intel_encoder->base))
- is_cpu_edp = true;
- edp_encoder = intel_encoder;
- break;
- }
- }
+ I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
+ I915_WRITE(TRANSDATA_N1(pipe), m_n->gmch_n);
+ I915_WRITE(TRANSDPLINK_M1(pipe), m_n->link_m);
+ I915_WRITE(TRANSDPLINK_N1(pipe), m_n->link_n);
+}
+
+void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
+ struct intel_link_m_n *m_n)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = crtc->pipe;
+ enum transcoder transcoder = crtc->config.cpu_transcoder;
- /* FDI link */
- pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
- lane = 0;
- /* CPU eDP doesn't require FDI link, so just set DP M/N
- according to current link config */
- if (is_cpu_edp) {
- intel_edp_link_config(edp_encoder, &lane, &link_bw);
+ if (INTEL_INFO(dev)->gen >= 5) {
+ I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
+ I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
+ I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
+ I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
} else {
- /* FDI is a binary signal running at ~2.7GHz, encoding
- * each output octet as 10 bits. The actual frequency
- * is stored as a divider into a 100MHz clock, and the
- * mode pixel clock is stored in units of 1KHz.
- * Hence the bw of each lane in terms of the mode signal
- * is:
- */
- link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
+ I915_WRITE(PIPE_GMCH_DATA_M(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
+ I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n->gmch_n);
+ I915_WRITE(PIPE_DP_LINK_M(pipe), m_n->link_m);
+ I915_WRITE(PIPE_DP_LINK_N(pipe), m_n->link_n);
}
+}
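(For reference, the M/N values written above are produced by intel_link_compute_m_n(); roughly, they encode the following ratios. The 1080p numbers below are an assumed example.)

	/* data M/N ~ (pipe_bpp * pixel_clock) / (link_clock * nlanes * 8)
	 * link M/N ~  pixel_clock / link_clock
	 * e.g. assumed 148500 kHz pixel clock, 24 bpp, 2 lanes at 270000 kHz:
	 *   data M/N ~ 3564000 / 4320000,  link M/N ~ 148500 / 270000
	 */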
+
+static void ironlake_fdi_set_m_n(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config.adjusted_mode;
+ struct intel_link_m_n m_n = {0};
+ int target_clock, lane, link_bw;
+
+ /* FDI is a binary signal running at ~2.7GHz, encoding
+ * each output octet as 10 bits. The actual frequency
+ * is stored as a divider into a 100MHz clock, and the
+ * mode pixel clock is stored in units of 1KHz.
+ * Hence the bw of each lane in terms of the mode signal
+ * is:
+ */
+ link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
- /* [e]DP over FDI requires target mode clock instead of link clock. */
- if (edp_encoder)
- target_clock = intel_edp_target_clock(edp_encoder, mode);
- else if (is_dp)
- target_clock = mode->clock;
+ if (intel_crtc->config.pixel_target_clock)
+ target_clock = intel_crtc->config.pixel_target_clock;
else
target_clock = adjusted_mode->clock;
- if (!lane)
- lane = ironlake_get_lanes_required(target_clock, link_bw,
- intel_crtc->bpp);
+ lane = ironlake_get_lanes_required(target_clock, link_bw,
+ intel_crtc->config.pipe_bpp);
intel_crtc->fdi_lanes = lane;
- if (pixel_multiplier > 1)
- link_bw *= pixel_multiplier;
- intel_link_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, &m_n);
+ if (intel_crtc->config.pixel_multiplier > 1)
+ link_bw *= intel_crtc->config.pixel_multiplier;
+ intel_link_compute_m_n(intel_crtc->config.pipe_bpp, lane, target_clock,
+ link_bw, &m_n);
- I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m);
- I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
- I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
- I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
+ intel_cpu_transcoder_set_m_n(intel_crtc, &m_n);
}
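(A back-of-the-envelope check of the FDI bandwidth maths above; the 2.7 GHz link and the 1080p/24 bpp mode are assumed examples, and any extra margin applied inside ironlake_get_lanes_required() is ignored.)

	/* 2.7 GHz FDI link, 8b/10b encoded:
	 *   link_bw = 27 * MHz(100)/KHz(1)/10 = 270000   (~2.16 Gbit/s of payload per lane)
	 * assumed mode: 148500 kHz pixel clock at 24 bpp:
	 *   bps   = 148500 * 24 = 3564000 kbit/s
	 *   lanes = 3564000 / (270000 * 8) + 1 = 2 FDI lanes
	 */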
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
- struct drm_display_mode *adjusted_mode,
- intel_clock_t *clock, u32 fp)
+ intel_clock_t *clock, u32 *fp,
+ intel_clock_t *reduced_clock, u32 *fp2)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
uint32_t dpll;
- int factor, pixel_multiplier, num_connectors = 0;
+ int factor, num_connectors = 0;
bool is_lvds = false, is_sdvo = false, is_tv = false;
- bool is_dp = false, is_cpu_edp = false;
for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
switch (intel_encoder->type) {
@@ -5497,14 +5517,6 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
- case INTEL_OUTPUT_DISPLAYPORT:
- is_dp = true;
- break;
- case INTEL_OUTPUT_EDP:
- is_dp = true;
- if (!intel_encoder_is_pch_edp(&intel_encoder->base))
- is_cpu_edp = true;
- break;
}
num_connectors++;
@@ -5515,13 +5527,16 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
if (is_lvds) {
if ((intel_panel_use_ssc(dev_priv) &&
dev_priv->lvds_ssc_freq == 100) ||
- intel_is_dual_link_lvds(dev))
+ (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
factor = 25;
} else if (is_sdvo && is_tv)
factor = 20;
if (clock->m < factor * clock->n)
- fp |= FP_CB_TUNE;
+ *fp |= FP_CB_TUNE;
+
+ if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
+ *fp2 |= FP_CB_TUNE;
dpll = 0;
@@ -5530,13 +5545,14 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
else
dpll |= DPLLB_MODE_DAC_SERIAL;
if (is_sdvo) {
- pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
- if (pixel_multiplier > 1) {
- dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
+ if (intel_crtc->config.pixel_multiplier > 1) {
+ dpll |= (intel_crtc->config.pixel_multiplier - 1)
+ << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
}
dpll |= DPLL_DVO_HIGH_SPEED;
}
- if (is_dp && !is_cpu_edp)
+ if (intel_crtc->config.has_dp_encoder &&
+ intel_crtc->config.has_pch_encoder)
dpll |= DPLL_DVO_HIGH_SPEED;
/* compute bitmask from p1 value */
@@ -5574,21 +5590,22 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
}
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config.adjusted_mode;
+ struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
int num_connectors = 0;
intel_clock_t clock, reduced_clock;
u32 dpll, fp = 0, fp2 = 0;
bool ok, has_reduced_clock = false;
- bool is_lvds = false, is_dp = false, is_cpu_edp = false;
+ bool is_lvds = false;
struct intel_encoder *encoder;
int ret;
bool dither, fdi_config_ok;
@@ -5598,14 +5615,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
- case INTEL_OUTPUT_DISPLAYPORT:
- is_dp = true;
- break;
- case INTEL_OUTPUT_EDP:
- is_dp = true;
- if (!intel_encoder_is_pch_edp(&encoder->base))
- is_cpu_edp = true;
- break;
}
num_connectors++;
@@ -5614,19 +5623,28 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
"Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
+ intel_crtc->config.cpu_transcoder = pipe;
+
ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
&has_reduced_clock, &reduced_clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
+ /* Compat-code for transition, will disappear. */
+ if (!intel_crtc->config.clock_set) {
+ intel_crtc->config.dpll.n = clock.n;
+ intel_crtc->config.dpll.m1 = clock.m1;
+ intel_crtc->config.dpll.m2 = clock.m2;
+ intel_crtc->config.dpll.p1 = clock.p1;
+ intel_crtc->config.dpll.p2 = clock.p2;
+ }
/* Ensure that the cursor is valid for the new mode before changing... */
intel_crtc_update_cursor(crtc, true);
/* determine panel color depth */
- dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
- adjusted_mode);
+ dither = intel_crtc->config.dither;
if (is_lvds && dev_priv->lvds_dither)
dither = true;
@@ -5635,13 +5653,14 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
reduced_clock.m2;
- dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp);
+ dpll = ironlake_compute_dpll(intel_crtc, &clock, &fp, &reduced_clock,
+ has_reduced_clock ? &fp2 : NULL);
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
- if (!is_cpu_edp) {
+ if (intel_crtc->config.has_pch_encoder) {
struct intel_pch_pll *pll;
pll = intel_get_pch_pll(intel_crtc, dpll, fp);
@@ -5653,8 +5672,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
} else
intel_put_pch_pll(intel_crtc);
- if (is_dp && !is_cpu_edp)
- intel_dp_set_m_n(crtc, mode, adjusted_mode);
+ if (intel_crtc->config.has_dp_encoder)
+ intel_dp_set_m_n(intel_crtc);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_pll_enable)
@@ -5689,7 +5708,9 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
/* Note, this also computes intel_crtc->fdi_lanes which is used below in
* ironlake_check_fdi_lanes. */
- ironlake_set_m_n(crtc, mode, adjusted_mode);
+ intel_crtc->fdi_lanes = 0;
+ if (intel_crtc->config.has_pch_encoder)
+ ironlake_fdi_set_m_n(crtc);
fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
@@ -5710,6 +5731,23 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
return fdi_config_ok ? ret : -EINVAL;
}
+static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = I915_READ(PIPECONF(crtc->pipe));
+ if (!(tmp & PIPECONF_ENABLE))
+ return false;
+
+ if (I915_READ(TRANSCONF(crtc->pipe)) & TRANS_ENABLE)
+ pipe_config->has_pch_encoder = true;
+
+ return true;
+}
+
static void haswell_modeset_global_resources(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5740,29 +5778,26 @@ static void haswell_modeset_global_resources(struct drm_device *dev)
}
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config.adjusted_mode;
+ struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
int num_connectors = 0;
- bool is_dp = false, is_cpu_edp = false;
+ bool is_cpu_edp = false;
struct intel_encoder *encoder;
int ret;
bool dither;
for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
- case INTEL_OUTPUT_DISPLAYPORT:
- is_dp = true;
- break;
case INTEL_OUTPUT_EDP:
- is_dp = true;
if (!intel_encoder_is_pch_edp(&encoder->base))
is_cpu_edp = true;
break;
@@ -5772,9 +5807,9 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
}
if (is_cpu_edp)
- intel_crtc->cpu_transcoder = TRANSCODER_EDP;
+ intel_crtc->config.cpu_transcoder = TRANSCODER_EDP;
else
- intel_crtc->cpu_transcoder = pipe;
+ intel_crtc->config.cpu_transcoder = pipe;
/* We are not sure yet this won't happen. */
WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
@@ -5783,7 +5818,7 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
num_connectors, pipe_name(pipe));
- WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) &
+ WARN_ON(I915_READ(PIPECONF(intel_crtc->config.cpu_transcoder)) &
(PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
@@ -5795,25 +5830,24 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
intel_crtc_update_cursor(crtc, true);
/* determine panel color depth */
- dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
- adjusted_mode);
+ dither = intel_crtc->config.dither;
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
- if (is_dp && !is_cpu_edp)
- intel_dp_set_m_n(crtc, mode, adjusted_mode);
+ if (intel_crtc->config.has_dp_encoder)
+ intel_dp_set_m_n(intel_crtc);
intel_crtc->lowfreq_avail = false;
intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
- if (!is_dp || is_cpu_edp)
- ironlake_set_m_n(crtc, mode, adjusted_mode);
+ if (intel_crtc->config.has_pch_encoder)
+ ironlake_fdi_set_m_n(crtc);
haswell_set_pipeconf(crtc, adjusted_mode, dither);
- intel_set_pipe_csc(crtc, adjusted_mode);
+ intel_set_pipe_csc(crtc);
/* Set up the display plane register */
I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
@@ -5828,9 +5862,32 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
return ret;
}
+static bool haswell_get_pipe_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = I915_READ(PIPECONF(crtc->config.cpu_transcoder));
+ if (!(tmp & PIPECONF_ENABLE))
+ return false;
+
+ /*
+	 * Haswell has only FDI/PCH transcoder A, which is connected to DDI E.
+	 * So just check whether this pipe is wired to DDI E and whether
+ * the PCH transcoder is on.
+ */
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(crtc->pipe));
+ if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
+ I915_READ(TRANSCONF(PIPE_A)) & TRANS_ENABLE)
+ pipe_config->has_pch_encoder = true;
+
+ return true;
+}
+
static int intel_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *fb)
{
@@ -5839,13 +5896,16 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_encoder_helper_funcs *encoder_funcs;
struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config.adjusted_mode;
+ struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
int pipe = intel_crtc->pipe;
int ret;
drm_vblank_pre_modeset(dev, pipe);
- ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
- x, y, fb);
+ ret = dev_priv->display.crtc_mode_set(crtc, x, y, fb);
+
drm_vblank_post_modeset(dev, pipe);
if (ret != 0)
@@ -5856,8 +5916,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
encoder->base.base.id,
drm_get_encoder_name(&encoder->base),
mode->base.id, mode->name);
- encoder_funcs = encoder->base.helper_private;
- encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
+ if (encoder->mode_set) {
+ encoder->mode_set(encoder);
+ } else {
+ encoder_funcs = encoder->base.helper_private;
+ encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
+ }
}
return 0;
@@ -6325,13 +6389,24 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
if (!dev_priv->info->cursor_needs_physical) {
+ unsigned alignment;
+
if (obj->tiling_mode) {
DRM_ERROR("cursor cannot be tiled\n");
ret = -EINVAL;
goto fail_locked;
}
- ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
+ /* Note that the w/a also requires 2 PTE of padding following
+ * the bo. We currently fill all unused PTE with the shadow
+ * page and so we should always have valid PTE following the
+ * cursor preventing the VT-d warning.
+ */
+ alignment = 0;
+ if (need_vtd_wa(dev))
+ alignment = 64*1024;
+
+ ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
if (ret) {
DRM_ERROR("failed to move cursor bo into the GTT\n");
goto fail_locked;
@@ -6436,20 +6511,6 @@ static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
intel_crtc_load_lut(crtc);
}
-/**
- * Get a pipe with a simple mode set on it for doing load-based monitor
- * detection.
- *
- * It will be up to the load-detect code to adjust the pipe as appropriate for
- * its requirements. The pipe will be connected to no other encoders.
- *
- * Currently this code will only succeed if there is a pipe with no encoders
- * configured for it. In the future, it could choose to temporarily disable
- * some outputs to free up a pipe for its use.
- *
- * \return crtc, or NULL if no pipes are available.
- */
-
/* VESA 640x480x72Hz mode to set on the pipe */
static struct drm_display_mode load_detect_mode = {
DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
@@ -6776,7 +6837,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
struct drm_display_mode *mode;
int htot = I915_READ(HTOTAL(cpu_transcoder));
int hsync = I915_READ(HSYNC(cpu_transcoder));
@@ -6954,7 +7015,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
- struct drm_i915_gem_object *obj;
unsigned long flags;
/* Ignore early vblank irqs */
@@ -6984,8 +7044,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
spin_unlock_irqrestore(&dev->event_lock, flags);
- obj = work->old_fb_obj;
-
wake_up_all(&dev_priv->pending_flip_queue);
queue_work(dev_priv->wq, &work->work);
@@ -7473,19 +7531,93 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
}
}
-static struct drm_display_mode *
-intel_modeset_adjusted_mode(struct drm_crtc *crtc,
- struct drm_display_mode *mode)
+static int
+pipe_config_set_bpp(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_connector *connector;
+ int bpp;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_C8:
+ bpp = 8*3; /* since we go through a colormap */
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_ARGB1555:
+ /* checked in intel_framebuffer_init already */
+ if (WARN_ON(INTEL_INFO(dev)->gen > 3))
+ return -EINVAL;
+ case DRM_FORMAT_RGB565:
+ bpp = 6*3; /* min is 18bpp */
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ /* checked in intel_framebuffer_init already */
+ if (WARN_ON(INTEL_INFO(dev)->gen < 4))
+ return -EINVAL;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ bpp = 8*3;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ /* checked in intel_framebuffer_init already */
+ if (WARN_ON(INTEL_INFO(dev)->gen < 4))
+ return -EINVAL;
+ bpp = 10*3;
+ break;
+ /* TODO: gen4+ supports 16 bpc floating point, too. */
+ default:
+ DRM_DEBUG_KMS("unsupported depth\n");
+ return -EINVAL;
+ }
+
+ pipe_config->pipe_bpp = bpp;
+
+ /* Clamp display bpp to EDID value */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+ if (connector->encoder && connector->encoder->crtc != crtc)
+ continue;
+
+ /* Don't use an invalid EDID bpc value */
+ if (connector->display_info.bpc &&
+ connector->display_info.bpc * 3 < bpp) {
+ DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
+ bpp, connector->display_info.bpc*3);
+ pipe_config->pipe_bpp = connector->display_info.bpc*3;
+ }
+ }
+
+ return bpp;
+}
+
+static struct intel_crtc_config *
+intel_modeset_pipe_config(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
- struct drm_display_mode *adjusted_mode;
struct drm_encoder_helper_funcs *encoder_funcs;
struct intel_encoder *encoder;
+ struct intel_crtc_config *pipe_config;
+ int plane_bpp;
- adjusted_mode = drm_mode_duplicate(dev, mode);
- if (!adjusted_mode)
+ pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
+ if (!pipe_config)
return ERR_PTR(-ENOMEM);
+ drm_mode_copy(&pipe_config->adjusted_mode, mode);
+ drm_mode_copy(&pipe_config->requested_mode, mode);
+
+ plane_bpp = pipe_config_set_bpp(crtc, fb, pipe_config);
+ if (plane_bpp < 0)
+ goto fail;
+
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
@@ -7495,23 +7627,38 @@ intel_modeset_adjusted_mode(struct drm_crtc *crtc,
if (&encoder->new_crtc->base != crtc)
continue;
+
+ if (encoder->compute_config) {
+ if (!(encoder->compute_config(encoder, pipe_config))) {
+ DRM_DEBUG_KMS("Encoder config failure\n");
+ goto fail;
+ }
+
+ continue;
+ }
+
encoder_funcs = encoder->base.helper_private;
- if (!(encoder_funcs->mode_fixup(&encoder->base, mode,
- adjusted_mode))) {
+ if (!(encoder_funcs->mode_fixup(&encoder->base,
+ &pipe_config->requested_mode,
+ &pipe_config->adjusted_mode))) {
DRM_DEBUG_KMS("Encoder fixup failed\n");
goto fail;
}
}
- if (!(intel_crtc_mode_fixup(crtc, mode, adjusted_mode))) {
+ if (!(intel_crtc_compute_config(crtc, pipe_config))) {
DRM_DEBUG_KMS("CRTC fixup failed\n");
goto fail;
}
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
- return adjusted_mode;
+ pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
+ DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
+ plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
+
+ return pipe_config;
fail:
- drm_mode_destroy(dev, adjusted_mode);
+ kfree(pipe_config);
return ERR_PTR(-EINVAL);
}
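(To make the bpp/dither plumbing above concrete; the 6 bpc EDID value is a made-up example.)

	/* assumed: fb->pixel_format == DRM_FORMAT_XRGB8888 -> plane_bpp = 24
	 * a connected panel's EDID reports bpc == 6        -> pipe_bpp clamped to 18
	 * pipe_bpp (18) != plane_bpp (24)                  -> pipe_config->dither = true
	 */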
@@ -7589,22 +7736,25 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
if (crtc->enabled)
*prepare_pipes |= 1 << intel_crtc->pipe;
- /* We only support modeset on one single crtc, hence we need to do that
- * only for the passed in crtc iff we change anything else than just
- * disable crtcs.
- *
- * This is actually not true, to be fully compatible with the old crtc
- * helper we automatically disable _any_ output (i.e. doesn't need to be
- * connected to the crtc we're modesetting on) if it's disconnected.
- * Which is a rather nutty api (since changed the output configuration
- * without userspace's explicit request can lead to confusion), but
- * alas. Hence we currently need to modeset on all pipes we prepare. */
+ /*
+ * For simplicity do a full modeset on any pipe where the output routing
+ * changed. We could be more clever, but that would require us to be
+ * more careful with calling the relevant encoder->mode_set functions.
+ */
if (*prepare_pipes)
*modeset_pipes = *prepare_pipes;
/* ... and mask these out. */
*modeset_pipes &= ~(*disable_pipes);
*prepare_pipes &= ~(*disable_pipes);
+
+ /*
+ * HACK: We don't (yet) fully support global modesets. intel_set_config
+	 * obeys this rule, but the modeset restore mode of
+ * intel_modeset_setup_hw_state does not.
+ */
+ *modeset_pipes &= 1 << intel_crtc->pipe;
+ *prepare_pipes &= 1 << intel_crtc->pipe;
}
static bool intel_crtc_in_use(struct drm_crtc *crtc)
@@ -7673,12 +7823,29 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
base.head) \
if (mask & (1 <<(intel_crtc)->pipe)) \
+static bool
+intel_pipe_config_compare(struct intel_crtc_config *current_config,
+ struct intel_crtc_config *pipe_config)
+{
+ if (current_config->has_pch_encoder != pipe_config->has_pch_encoder) {
+ DRM_ERROR("mismatch in has_pch_encoder "
+ "(expected %i, found %i)\n",
+ current_config->has_pch_encoder,
+ pipe_config->has_pch_encoder);
+ return false;
+ }
+
+ return true;
+}
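(The cross-checker above only compares has_pch_encoder for now; as more state is read out it would presumably gain per-field checks of the same shape. The pipe_bpp check below is a hypothetical illustration, not part of the patch.)

	if (current_config->pipe_bpp != pipe_config->pipe_bpp) {
		DRM_ERROR("mismatch in pipe_bpp (expected %i, found %i)\n",
			  current_config->pipe_bpp, pipe_config->pipe_bpp);
		return false;
	}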
+
void
intel_modeset_check_state(struct drm_device *dev)
{
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
+ struct intel_crtc_config pipe_config;
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
@@ -7767,17 +7934,27 @@ intel_modeset_check_state(struct drm_device *dev)
"crtc's computed enabled state doesn't match tracked enabled state "
"(expected %i, found %i)\n", enabled, crtc->base.enabled);
- assert_pipe(dev->dev_private, crtc->pipe, crtc->active);
+ memset(&pipe_config, 0, sizeof(pipe_config));
+ active = dev_priv->display.get_pipe_config(crtc,
+ &pipe_config);
+ WARN(crtc->active != active,
+ "crtc active state doesn't match with hw state "
+ "(expected %i, found %i)\n", crtc->active, active);
+
+ WARN(active &&
+ !intel_pipe_config_compare(&crtc->config, &pipe_config),
+ "pipe state doesn't match!\n");
}
}
-int intel_set_mode(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *fb)
+static int __intel_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_display_mode *adjusted_mode, *saved_mode, *saved_hwmode;
+ struct drm_display_mode *saved_mode, *saved_hwmode;
+ struct intel_crtc_config *pipe_config = NULL;
struct intel_crtc *intel_crtc;
unsigned disable_pipes, prepare_pipes, modeset_pipes;
int ret = 0;
@@ -7790,12 +7967,6 @@ int intel_set_mode(struct drm_crtc *crtc,
intel_modeset_affected_pipes(crtc, &modeset_pipes,
&prepare_pipes, &disable_pipes);
- DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
- modeset_pipes, prepare_pipes, disable_pipes);
-
- for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
- intel_crtc_disable(&intel_crtc->base);
-
*saved_hwmode = crtc->hwmode;
*saved_mode = crtc->mode;
@@ -7804,15 +7975,22 @@ int intel_set_mode(struct drm_crtc *crtc,
* Hence simply check whether any bit is set in modeset_pipes in all the
* pieces of code that are not yet converted to deal with multiple crtcs
* changing their mode at the same time. */
- adjusted_mode = NULL;
if (modeset_pipes) {
- adjusted_mode = intel_modeset_adjusted_mode(crtc, mode);
- if (IS_ERR(adjusted_mode)) {
- ret = PTR_ERR(adjusted_mode);
+ pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
+ if (IS_ERR(pipe_config)) {
+ ret = PTR_ERR(pipe_config);
+ pipe_config = NULL;
+
goto out;
}
}
+ DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
+ modeset_pipes, prepare_pipes, disable_pipes);
+
+ for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
+ intel_crtc_disable(&intel_crtc->base);
+
for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
if (intel_crtc->base.enabled)
dev_priv->display.crtc_disable(&intel_crtc->base);
@@ -7821,8 +7999,14 @@ int intel_set_mode(struct drm_crtc *crtc,
/* crtc->mode is already used by the ->mode_set callbacks, hence we need
* to set it here already despite that we pass it down the callchain.
*/
- if (modeset_pipes)
+ if (modeset_pipes) {
+ enum transcoder tmp = to_intel_crtc(crtc)->config.cpu_transcoder;
crtc->mode = *mode;
+ /* mode_set/enable/disable functions rely on a correct pipe
+ * config. */
+ to_intel_crtc(crtc)->config = *pipe_config;
+ to_intel_crtc(crtc)->config.cpu_transcoder = tmp;
+ }
/* Only after disabling all output pipelines that will be changed can we
* update the output configuration. */
@@ -7836,7 +8020,6 @@ int intel_set_mode(struct drm_crtc *crtc,
*/
for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
ret = intel_crtc_mode_set(&intel_crtc->base,
- mode, adjusted_mode,
x, y, fb);
if (ret)
goto done;
@@ -7848,7 +8031,7 @@ int intel_set_mode(struct drm_crtc *crtc,
if (modeset_pipes) {
/* Store real post-adjustment hardware mode. */
- crtc->hwmode = *adjusted_mode;
+ crtc->hwmode = pipe_config->adjusted_mode;
/* Calculate and store various constants which
* are later needed by vblank and swap-completion
@@ -7859,19 +8042,31 @@ int intel_set_mode(struct drm_crtc *crtc,
/* FIXME: add subpixel order */
done:
- drm_mode_destroy(dev, adjusted_mode);
if (ret && crtc->enabled) {
crtc->hwmode = *saved_hwmode;
crtc->mode = *saved_mode;
- } else {
- intel_modeset_check_state(dev);
}
out:
+ kfree(pipe_config);
kfree(saved_mode);
return ret;
}
+int intel_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb)
+{
+ int ret;
+
+ ret = __intel_set_mode(crtc, mode, x, y, fb);
+
+ if (ret == 0)
+ intel_modeset_check_state(crtc->dev);
+
+ return ret;
+}
+
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb);
@@ -7959,10 +8154,8 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
config->mode_changed = true;
} else if (set->fb == NULL) {
config->mode_changed = true;
- } else if (set->fb->depth != set->crtc->fb->depth) {
- config->mode_changed = true;
- } else if (set->fb->bits_per_pixel !=
- set->crtc->fb->bits_per_pixel) {
+ } else if (set->fb->pixel_format !=
+ set->crtc->fb->pixel_format) {
config->mode_changed = true;
} else
config->fb_changed = true;
@@ -8145,6 +8338,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
goto fail;
}
} else if (config->fb_changed) {
+ intel_crtc_wait_for_pending_flips(set->crtc);
+
ret = intel_pipe_set_base(set->crtc,
set->x, set->y, set->fb);
}
@@ -8221,7 +8416,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
/* Swap pipes & planes for FBC on pre-965 */
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
- intel_crtc->cpu_transcoder = pipe;
+ intel_crtc->config.cpu_transcoder = pipe;
if (IS_MOBILE(dev) && IS_GEN3(dev)) {
DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
intel_crtc->plane = !pipe;
@@ -8232,8 +8427,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
- intel_crtc->bpp = 24; /* default for pre-Ironlake */
-
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
}
@@ -8314,7 +8507,7 @@ static void intel_setup_outputs(struct drm_device *dev)
I915_WRITE(PFIT_CONTROL, 0);
}
- if (!(HAS_DDI(dev) && (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
+ if (!IS_ULT(dev))
intel_crt_init(dev);
if (HAS_DDI(dev)) {
@@ -8343,20 +8536,20 @@ static void intel_setup_outputs(struct drm_device *dev)
if (has_edp_a(dev))
intel_dp_init(dev, DP_A, PORT_A);
- if (I915_READ(HDMIB) & PORT_DETECTED) {
+ if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev, PCH_SDVOB, true);
if (!found)
- intel_hdmi_init(dev, HDMIB, PORT_B);
+ intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
intel_dp_init(dev, PCH_DP_B, PORT_B);
}
- if (I915_READ(HDMIC) & PORT_DETECTED)
- intel_hdmi_init(dev, HDMIC, PORT_C);
+ if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
+ intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
- if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
- intel_hdmi_init(dev, HDMID, PORT_D);
+ if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
+ intel_hdmi_init(dev, PCH_HDMID, PORT_D);
if (I915_READ(PCH_DP_C) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_C, PORT_C);
@@ -8368,24 +8561,21 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
- if (I915_READ(VLV_DISPLAY_BASE + SDVOB) & PORT_DETECTED) {
- intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOB, PORT_B);
+ if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
+ intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
+ PORT_B);
if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
}
-
- if (I915_READ(VLV_DISPLAY_BASE + SDVOC) & PORT_DETECTED)
- intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOC, PORT_C);
-
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
- if (I915_READ(SDVOB) & SDVO_DETECTED) {
+ if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOB\n");
- found = intel_sdvo_init(dev, SDVOB, true);
+ found = intel_sdvo_init(dev, GEN3_SDVOB, true);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
- intel_hdmi_init(dev, SDVOB, PORT_B);
+ intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
}
if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
@@ -8396,16 +8586,16 @@ static void intel_setup_outputs(struct drm_device *dev)
/* Before G4X SDVOC doesn't have its own detect register */
- if (I915_READ(SDVOB) & SDVO_DETECTED) {
+ if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOC\n");
- found = intel_sdvo_init(dev, SDVOC, false);
+ found = intel_sdvo_init(dev, GEN3_SDVOC, false);
}
- if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
+ if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
if (SUPPORTS_INTEGRATED_HDMI(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
- intel_hdmi_init(dev, SDVOC, PORT_C);
+ intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
}
if (SUPPORTS_INTEGRATED_DP(dev)) {
DRM_DEBUG_KMS("probing DP_C\n");
@@ -8572,20 +8762,22 @@ static void intel_init_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- /* We always want a DPMS function */
if (HAS_DDI(dev)) {
+ dev_priv->display.get_pipe_config = haswell_get_pipe_config;
dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
dev_priv->display.off = haswell_crtc_off;
dev_priv->display.update_plane = ironlake_update_plane;
} else if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_plane = ironlake_update_plane;
} else {
+ dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -8828,7 +9020,7 @@ void intel_modeset_init_hw(struct drm_device *dev)
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int i, ret;
+ int i, j, ret;
drm_mode_config_init(dev);
@@ -8844,6 +9036,9 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_pm(dev);
+ if (INTEL_INFO(dev)->num_pipes == 0)
+ return;
+
intel_init_display(dev);
if (IS_GEN2(dev)) {
@@ -8859,13 +9054,17 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
- dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
+ INTEL_INFO(dev)->num_pipes,
+ INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
- for (i = 0; i < dev_priv->num_pipe; i++) {
+ for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
intel_crtc_init(dev, i);
- ret = intel_plane_init(dev, i);
- if (ret)
- DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
+ for (j = 0; j < dev_priv->num_plane; j++) {
+ ret = intel_plane_init(dev, i, j);
+ if (ret)
+ DRM_DEBUG_KMS("pipe %d plane %d init failed: %d\n",
+ i, j, ret);
+ }
}
intel_cpu_pll_init(dev);
@@ -8918,10 +9117,11 @@ static void intel_enable_pipe_a(struct drm_device *dev)
static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg, val;
- if (dev_priv->num_pipe == 1)
+ if (INTEL_INFO(dev)->num_pipes == 1)
return true;
reg = DSPCNTR(!crtc->plane);
@@ -8941,7 +9141,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
u32 reg;
/* Clear any frame start delays used for debugging left by the BIOS */
- reg = PIPECONF(crtc->cpu_transcoder);
+ reg = PIPECONF(crtc->config.cpu_transcoder);
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
/* We need to sanitize the plane -> pipe mapping first because this will
@@ -9077,6 +9277,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
u32 tmp;
+ struct drm_plane *plane;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
@@ -9096,24 +9297,32 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
case TRANS_DDI_EDP_INPUT_C_ONOFF:
pipe = PIPE_C;
break;
+ default:
+ /* A bogus value has been programmed, disable
+ * the transcoder */
+ WARN(1, "Bogus eDP source %08x\n", tmp);
+ intel_ddi_disable_transcoder_func(dev_priv,
+ TRANSCODER_EDP);
+ goto setup_pipes;
}
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
- crtc->cpu_transcoder = TRANSCODER_EDP;
+ crtc->config.cpu_transcoder = TRANSCODER_EDP;
DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n",
pipe_name(pipe));
}
}
- for_each_pipe(pipe) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+setup_pipes:
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ enum transcoder tmp = crtc->config.cpu_transcoder;
+ memset(&crtc->config, 0, sizeof(crtc->config));
+ crtc->config.cpu_transcoder = tmp;
- tmp = I915_READ(PIPECONF(crtc->cpu_transcoder));
- if (tmp & PIPECONF_ENABLE)
- crtc->active = true;
- else
- crtc->active = false;
+ crtc->active = dev_priv->display.get_pipe_config(crtc,
+ &crtc->config);
crtc->base.enabled = crtc->active;
@@ -9172,9 +9381,19 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
}
if (force_restore) {
+ /*
+ * We need to use raw interfaces for restoring state to avoid
+ * checking (bogus) intermediate states.
+ */
for_each_pipe(pipe) {
- intel_crtc_restore_mode(dev_priv->pipe_to_crtc_mapping[pipe]);
+ struct drm_crtc *crtc =
+ dev_priv->pipe_to_crtc_mapping[pipe];
+
+ __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
+ crtc->fb);
}
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head)
+ intel_plane_restore(plane);
i915_redisable_vga(dev);
} else {
@@ -9236,6 +9455,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
/* flush any delayed tasks or pending work */
flush_scheduled_work();
+ /* destroy backlight, if any, before the connectors */
+ intel_panel_destroy_backlight(dev);
+
drm_mode_config_cleanup(dev);
intel_cleanup_overlay(dev);
@@ -9323,15 +9545,24 @@ intel_display_capture_error_state(struct drm_device *dev)
for_each_pipe(i) {
cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
- error->cursor[i].control = I915_READ(CURCNTR(i));
- error->cursor[i].position = I915_READ(CURPOS(i));
- error->cursor[i].base = I915_READ(CURBASE(i));
+ if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
+ error->cursor[i].control = I915_READ(CURCNTR(i));
+ error->cursor[i].position = I915_READ(CURPOS(i));
+ error->cursor[i].base = I915_READ(CURBASE(i));
+ } else {
+ error->cursor[i].control = I915_READ(CURCNTR_IVB(i));
+ error->cursor[i].position = I915_READ(CURPOS_IVB(i));
+ error->cursor[i].base = I915_READ(CURBASE_IVB(i));
+ }
error->plane[i].control = I915_READ(DSPCNTR(i));
error->plane[i].stride = I915_READ(DSPSTRIDE(i));
- error->plane[i].size = I915_READ(DSPSIZE(i));
- error->plane[i].pos = I915_READ(DSPPOS(i));
- error->plane[i].addr = I915_READ(DSPADDR(i));
+ if (INTEL_INFO(dev)->gen <= 3) {
+ error->plane[i].size = I915_READ(DSPSIZE(i));
+ error->plane[i].pos = I915_READ(DSPPOS(i));
+ }
+ if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+ error->plane[i].addr = I915_READ(DSPADDR(i));
if (INTEL_INFO(dev)->gen >= 4) {
error->plane[i].surface = I915_READ(DSPSURF(i));
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
@@ -9355,10 +9586,9 @@ intel_display_print_error_state(struct seq_file *m,
struct drm_device *dev,
struct intel_display_error_state *error)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
int i;
- seq_printf(m, "Num Pipes: %d\n", dev_priv->num_pipe);
+ seq_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
for_each_pipe(i) {
seq_printf(m, "Pipe [%d]:\n", i);
seq_printf(m, " CONF: %08x\n", error->pipe[i].conf);
@@ -9373,9 +9603,12 @@ intel_display_print_error_state(struct seq_file *m,
seq_printf(m, "Plane [%d]:\n", i);
seq_printf(m, " CNTR: %08x\n", error->plane[i].control);
seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
- seq_printf(m, " SIZE: %08x\n", error->plane[i].size);
- seq_printf(m, " POS: %08x\n", error->plane[i].pos);
- seq_printf(m, " ADDR: %08x\n", error->plane[i].addr);
+ if (INTEL_INFO(dev)->gen <= 3) {
+ seq_printf(m, " SIZE: %08x\n", error->plane[i].size);
+ seq_printf(m, " POS: %08x\n", error->plane[i].pos);
+ }
+ if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+ seq_printf(m, " ADDR: %08x\n", error->plane[i].addr);
if (INTEL_INFO(dev)->gen >= 4) {
seq_printf(m, " SURF: %08x\n", error->plane[i].surface);
seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index c3f5bd8..fb2fbc1 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -109,29 +109,6 @@ bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
static void intel_dp_link_down(struct intel_dp *intel_dp);
-void
-intel_edp_link_config(struct intel_encoder *intel_encoder,
- int *lane_num, int *link_bw)
-{
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
-
- *lane_num = intel_dp->lane_count;
- *link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
-}
-
-int
-intel_edp_target_clock(struct intel_encoder *intel_encoder,
- struct drm_display_mode *mode)
-{
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
- struct intel_connector *intel_connector = intel_dp->attached_connector;
-
- if (intel_connector->panel.fixed_mode)
- return intel_connector->panel.fixed_mode->clock;
- else
- return mode->clock;
-}
-
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
@@ -177,34 +154,6 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes)
return (max_link_clock * max_lanes * 8) / 10;
}
-static bool
-intel_dp_adjust_dithering(struct intel_dp *intel_dp,
- struct drm_display_mode *mode,
- bool adjust_mode)
-{
- int max_link_clock =
- drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
- int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
- int max_rate, mode_rate;
-
- mode_rate = intel_dp_link_required(mode->clock, 24);
- max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
-
- if (mode_rate > max_rate) {
- mode_rate = intel_dp_link_required(mode->clock, 18);
- if (mode_rate > max_rate)
- return false;
-
- if (adjust_mode)
- mode->private_flags
- |= INTEL_MODE_DP_FORCE_6BPC;
-
- return true;
- }
-
- return true;
-}
-
static int
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -212,6 +161,8 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+ int target_clock = mode->clock;
+ int max_rate, mode_rate, max_lanes, max_link_clock;
if (is_edp(intel_dp) && fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
@@ -219,9 +170,17 @@ intel_dp_mode_valid(struct drm_connector *connector,
if (mode->vdisplay > fixed_mode->vdisplay)
return MODE_PANEL;
+
+ target_clock = fixed_mode->clock;
}
- if (!intel_dp_adjust_dithering(intel_dp, mode, false))
+ max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
+ max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
+
+ max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+ mode_rate = intel_dp_link_required(target_clock, 18);
+
+ if (mode_rate > max_rate)
return MODE_CLOCK_HIGH;
if (mode->clock < 10000)
@@ -294,16 +253,20 @@ static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp_stat_reg;
- return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
+ pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
+ return (I915_READ(pp_stat_reg) & PP_ON) != 0;
}
static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp_ctrl_reg;
- return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
+ pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+ return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0;
}
static void
@@ -311,14 +274,19 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp_stat_reg, pp_ctrl_reg;
if (!is_edp(intel_dp))
return;
+
+ pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
+ pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
WARN(1, "eDP powered off while attempting aux channel communication.\n");
DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
- I915_READ(PCH_PP_STATUS),
- I915_READ(PCH_PP_CONTROL));
+ I915_READ(pp_stat_reg),
+ I915_READ(pp_ctrl_reg));
}
}
@@ -328,29 +296,10 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t ch_ctl = intel_dp->output_reg + 0x10;
+ uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
uint32_t status;
bool done;
- if (IS_HASWELL(dev)) {
- switch (intel_dig_port->port) {
- case PORT_A:
- ch_ctl = DPA_AUX_CH_CTL;
- break;
- case PORT_B:
- ch_ctl = PCH_DPB_AUX_CH_CTL;
- break;
- case PORT_C:
- ch_ctl = PCH_DPC_AUX_CH_CTL;
- break;
- case PORT_D:
- ch_ctl = PCH_DPD_AUX_CH_CTL;
- break;
- default:
- BUG();
- }
- }
-
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
if (has_aux_irq)
done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
@@ -370,11 +319,10 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *send, int send_bytes,
uint8_t *recv, int recv_size)
{
- uint32_t output_reg = intel_dp->output_reg;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t ch_ctl = output_reg + 0x10;
+ uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
uint32_t ch_data = ch_ctl + 4;
int i, ret, recv_bytes;
uint32_t status;
@@ -388,29 +336,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
*/
pm_qos_update_request(&dev_priv->pm_qos, 0);
- if (IS_HASWELL(dev)) {
- switch (intel_dig_port->port) {
- case PORT_A:
- ch_ctl = DPA_AUX_CH_CTL;
- ch_data = DPA_AUX_CH_DATA1;
- break;
- case PORT_B:
- ch_ctl = PCH_DPB_AUX_CH_CTL;
- ch_data = PCH_DPB_AUX_CH_DATA1;
- break;
- case PORT_C:
- ch_ctl = PCH_DPC_AUX_CH_CTL;
- ch_data = PCH_DPC_AUX_CH_DATA1;
- break;
- case PORT_D:
- ch_ctl = PCH_DPD_AUX_CH_CTL;
- ch_data = PCH_DPD_AUX_CH_DATA1;
- break;
- default:
- BUG();
- }
- }
-
intel_dp_check_edp(intel_dp);
/* The clock divider is based off the hrawclk,
* and would like to run at 2MHz. So, take the
@@ -428,10 +353,14 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
else
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
- } else if (HAS_PCH_SPLIT(dev))
+ } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
+ /* Workaround for non-ULT HSW */
+ aux_clock_divider = 74;
+ } else if (HAS_PCH_SPLIT(dev)) {
aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
- else
+ } else {
aux_clock_divider = intel_hrawclk(dev) / 2;
+ }
if (IS_GEN6(dev))
precharge = 3;
@@ -732,18 +661,26 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
}
bool
-intel_dp_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+intel_dp_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
{
- struct drm_device *dev = encoder->dev;
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *mode = &pipe_config->requested_mode;
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_connector *intel_connector = intel_dp->attached_connector;
int lane_count, clock;
int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
int bpp, mode_rate;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+ int target_clock, link_avail, link_clock;
+
+ if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && !is_cpu_edp(intel_dp))
+ pipe_config->has_pch_encoder = true;
+
+ pipe_config->has_dp_encoder = true;
if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
@@ -752,6 +689,8 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
intel_connector->panel.fitting_mode,
mode, adjusted_mode);
}
+ /* We need to take the panel's fixed mode into account. */
+ target_clock = adjusted_mode->clock;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return false;
@@ -760,11 +699,28 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
"max bw %02x pixel clock %iKHz\n",
max_lane_count, bws[max_clock], adjusted_mode->clock);
- if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true))
- return false;
+ /* Walk through all bpp values. Luckily they're all nicely spaced with 2
+ * bpc in between. */
+ bpp = min_t(int, 8*3, pipe_config->pipe_bpp);
+ for (; bpp >= 6*3; bpp -= 2*3) {
+ mode_rate = intel_dp_link_required(target_clock, bpp);
+
+ for (clock = 0; clock <= max_clock; clock++) {
+ for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+ link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
+ link_avail = intel_dp_max_data_rate(link_clock,
+ lane_count);
+
+ if (mode_rate <= link_avail) {
+ goto found;
+ }
+ }
+ }
+ }
- bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+ return false;
+found:
if (intel_dp->color_range_auto) {
/*
* See:
@@ -778,104 +734,38 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
}
if (intel_dp->color_range)
- adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
-
- mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
-
- for (clock = 0; clock <= max_clock; clock++) {
- for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
- int link_bw_clock =
- drm_dp_bw_code_to_link_rate(bws[clock]);
- int link_avail = intel_dp_max_data_rate(link_bw_clock,
- lane_count);
-
- if (mode_rate <= link_avail) {
- intel_dp->link_bw = bws[clock];
- intel_dp->lane_count = lane_count;
- adjusted_mode->clock = link_bw_clock;
- DRM_DEBUG_KMS("DP link bw %02x lane "
- "count %d clock %d bpp %d\n",
- intel_dp->link_bw, intel_dp->lane_count,
- adjusted_mode->clock, bpp);
- DRM_DEBUG_KMS("DP link bw required %i available %i\n",
- mode_rate, link_avail);
- return true;
- }
- }
- }
-
- return false;
-}
+ pipe_config->limited_color_range = true;
-void
-intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct drm_device *dev = crtc->dev;
- struct intel_encoder *intel_encoder;
- struct intel_dp *intel_dp;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int lane_count = 4;
- struct intel_link_m_n m_n;
- int pipe = intel_crtc->pipe;
- enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
- int target_clock;
+ intel_dp->link_bw = bws[clock];
+ intel_dp->lane_count = lane_count;
+ adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+ pipe_config->pixel_target_clock = target_clock;
- /*
- * Find the lane count in the intel_encoder private
- */
- for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
- intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
+ intel_dp->link_bw, intel_dp->lane_count,
+ adjusted_mode->clock, bpp);
+ DRM_DEBUG_KMS("DP link bw required %i available %i\n",
+ mode_rate, link_avail);
- if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
- intel_encoder->type == INTEL_OUTPUT_EDP)
- {
- lane_count = intel_dp->lane_count;
- break;
- }
- }
-
- target_clock = mode->clock;
- for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
- if (intel_encoder->type == INTEL_OUTPUT_EDP) {
- target_clock = intel_edp_target_clock(intel_encoder,
- mode);
- break;
- }
- }
+ intel_link_compute_m_n(bpp, lane_count,
+ target_clock, adjusted_mode->clock,
+ &pipe_config->dp_m_n);
/*
- * Compute the GMCH and Link ratios. The '3' here is
- * the number of bytes_per_pixel post-LUT, which we always
- * set up for 8-bits of R/G/B, or 3 bytes total.
+ * XXX: We have a strange regression where using the vbt edp bpp value
+ * for the link bw computation results in black screens; the panel only
+ * works when we do the computation at the usual 24bpp (but still
+ * requires us to use 18bpp). Until that's fully debugged, stay
+ * bug-for-bug compatible with the old code.
*/
- intel_link_compute_m_n(intel_crtc->bpp, lane_count,
- target_clock, adjusted_mode->clock, &m_n);
-
- if (IS_HASWELL(dev)) {
- I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
- TU_SIZE(m_n.tu) | m_n.gmch_m);
- I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
- I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
- I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
- } else if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
- I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
- I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
- I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
- } else if (IS_VALLEYVIEW(dev)) {
- I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
- I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
- I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
- I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
- } else {
- I915_WRITE(PIPE_GMCH_DATA_M(pipe),
- TU_SIZE(m_n.tu) | m_n.gmch_m);
- I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
- I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
- I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
+ if (is_edp(intel_dp) && dev_priv->edp.bpp) {
+ DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n",
+ bpp, dev_priv->edp.bpp);
+ bpp = min_t(int, bpp, dev_priv->edp.bpp);
}
+ pipe_config->pipe_bpp = bpp;
+
+ return true;
}
void intel_dp_init_link_config(struct intel_dp *intel_dp)
@@ -994,7 +884,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
- if (!HAS_PCH_SPLIT(dev))
+ if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
intel_dp->DP |= intel_dp->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1009,7 +899,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (intel_crtc->pipe == 1)
intel_dp->DP |= DP_PIPEB_SELECT;
- if (is_cpu_edp(intel_dp)) {
+ if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
/* don't miss out required setting for eDP */
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
@@ -1020,7 +910,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
}
- if (is_cpu_edp(intel_dp))
+ if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
}
@@ -1039,16 +929,20 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp_stat_reg, pp_ctrl_reg;
+
+ pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
+ pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
- mask, value,
- I915_READ(PCH_PP_STATUS),
- I915_READ(PCH_PP_CONTROL));
+ mask, value,
+ I915_READ(pp_stat_reg),
+ I915_READ(pp_ctrl_reg));
- if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
+ if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
DRM_ERROR("Panel status timeout: status %08x control %08x\n",
- I915_READ(PCH_PP_STATUS),
- I915_READ(PCH_PP_CONTROL));
+ I915_READ(pp_stat_reg),
+ I915_READ(pp_ctrl_reg));
}
}
@@ -1075,9 +969,15 @@ static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
* is locked
*/
-static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
+static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
- u32 control = I915_READ(PCH_PP_CONTROL);
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 control;
+ u32 pp_ctrl_reg;
+
+ pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+ control = I915_READ(pp_ctrl_reg);
control &= ~PANEL_UNLOCK_MASK;
control |= PANEL_UNLOCK_REGS;
@@ -1089,6 +989,7 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
+ u32 pp_stat_reg, pp_ctrl_reg;
if (!is_edp(intel_dp))
return;
@@ -1107,13 +1008,16 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
if (!ironlake_edp_have_panel_power(intel_dp))
ironlake_wait_panel_power_cycle(intel_dp);
- pp = ironlake_get_pp_control(dev_priv);
+ pp = ironlake_get_pp_control(intel_dp);
pp |= EDP_FORCE_VDD;
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
- DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
- I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
+ pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
+ pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
+ DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
/*
* If the panel wasn't on, delay before accessing aux channel
*/
@@ -1128,19 +1032,23 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
+ u32 pp_stat_reg, pp_ctrl_reg;
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
- pp = ironlake_get_pp_control(dev_priv);
+ pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
- /* Make sure sequencer is idle before allowing subsequent activity */
- DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
- I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
+ pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
+ pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
+ /* Make sure sequencer is idle before allowing subsequent activity */
+ DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
msleep(intel_dp->panel_power_down_delay);
}
}
@@ -1184,6 +1092,7 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
+ u32 pp_ctrl_reg;
if (!is_edp(intel_dp))
return;
@@ -1197,7 +1106,7 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
ironlake_wait_panel_power_cycle(intel_dp);
- pp = ironlake_get_pp_control(dev_priv);
+ pp = ironlake_get_pp_control(intel_dp);
if (IS_GEN5(dev)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
@@ -1209,8 +1118,10 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
if (!IS_GEN5(dev))
pp |= PANEL_POWER_RESET;
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
+ pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
ironlake_wait_panel_on(intel_dp);
@@ -1226,6 +1137,7 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
+ u32 pp_ctrl_reg;
if (!is_edp(intel_dp))
return;
@@ -1234,12 +1146,15 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
- pp = ironlake_get_pp_control(dev_priv);
+ pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
* panels get very unhappy and cease to work. */
pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
+
+ pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
intel_dp->want_panel_vdd = false;
@@ -1253,6 +1168,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
u32 pp;
+ u32 pp_ctrl_reg;
if (!is_edp(intel_dp))
return;
@@ -1265,10 +1181,13 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
* allowing it to appear.
*/
msleep(intel_dp->backlight_on_delay);
- pp = ironlake_get_pp_control(dev_priv);
+ pp = ironlake_get_pp_control(intel_dp);
pp |= EDP_BLC_ENABLE;
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
+
+ pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
intel_panel_enable_backlight(dev, pipe);
}
@@ -1278,6 +1197,7 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
+ u32 pp_ctrl_reg;
if (!is_edp(intel_dp))
return;
@@ -1285,10 +1205,13 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
intel_panel_disable_backlight(dev);
DRM_DEBUG_KMS("\n");
- pp = ironlake_get_pp_control(dev_priv);
+ pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE;
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
+
+ pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
msleep(intel_dp->backlight_off_delay);
}
@@ -1384,7 +1307,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & DP_PORT_EN))
return false;
- if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+ if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
*pipe = PORT_TO_PIPE_CPT(tmp);
} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
*pipe = PORT_TO_PIPE(tmp);
@@ -1441,10 +1364,12 @@ static void intel_disable_dp(struct intel_encoder *encoder)
static void intel_post_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
if (is_cpu_edp(intel_dp)) {
intel_dp_link_down(intel_dp);
- ironlake_edp_pll_off(intel_dp);
+ if (!IS_VALLEYVIEW(dev))
+ ironlake_edp_pll_off(intel_dp);
}
}
@@ -1470,8 +1395,9 @@ static void intel_enable_dp(struct intel_encoder *encoder)
static void intel_pre_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
- if (is_cpu_edp(intel_dp))
+ if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
ironlake_edp_pll_on(intel_dp);
}
@@ -1548,7 +1474,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- if (IS_HASWELL(dev)) {
+ if (HAS_DDI(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_9_5;
@@ -1756,7 +1682,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
uint32_t signal_levels, mask;
uint8_t train_set = intel_dp->train_set[0];
- if (IS_HASWELL(dev)) {
+ if (HAS_DDI(dev)) {
signal_levels = intel_hsw_signal_levels(train_set);
mask = DDI_BUF_EMP_MASK;
} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
@@ -1787,7 +1713,7 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
int ret;
uint32_t temp;
- if (IS_HASWELL(dev)) {
+ if (HAS_DDI(dev)) {
temp = I915_READ(DP_TP_CTL(port));
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
@@ -2311,6 +2237,16 @@ g4x_dp_detect(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
uint32_t bit;
+ /* Can't disconnect eDP, but you can close the lid... */
+ if (is_edp(intel_dp)) {
+ enum drm_connector_status status;
+
+ status = intel_panel_detect(dev);
+ if (status == connector_status_unknown)
+ status = connector_status_connected;
+ return status;
+ }
+
switch (intel_dig_port->port) {
case PORT_B:
bit = PORTB_HOTPLUG_LIVE_STATUS;
@@ -2492,6 +2428,9 @@ intel_dp_set_property(struct drm_connector *connector,
}
if (property == dev_priv->broadcast_rgb_property) {
+ bool old_auto = intel_dp->color_range_auto;
+ uint32_t old_range = intel_dp->color_range;
+
switch (val) {
case INTEL_BROADCAST_RGB_AUTO:
intel_dp->color_range_auto = true;
@@ -2507,6 +2446,11 @@ intel_dp_set_property(struct drm_connector *connector,
default:
return -EINVAL;
}
+
+ if (old_auto == intel_dp->color_range_auto &&
+ old_range == intel_dp->color_range)
+ return 0;
+
goto done;
}
@@ -2538,17 +2482,14 @@ done:
static void
intel_dp_destroy(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_connector *intel_connector = to_intel_connector(connector);
if (!IS_ERR_OR_NULL(intel_connector->edid))
kfree(intel_connector->edid);
- if (is_edp(intel_dp)) {
- intel_panel_destroy_backlight(dev);
+ if (is_edp(intel_dp))
intel_panel_fini(&intel_connector->panel);
- }
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
@@ -2573,7 +2514,6 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
}
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
- .mode_fixup = intel_dp_mode_fixup,
.mode_set = intel_dp_mode_set,
};
@@ -2669,15 +2609,28 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct edp_power_seq cur, vbt, spec, final;
u32 pp_on, pp_off, pp_div, pp;
+ int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ pp_control_reg = PCH_PP_CONTROL;
+ pp_on_reg = PCH_PP_ON_DELAYS;
+ pp_off_reg = PCH_PP_OFF_DELAYS;
+ pp_div_reg = PCH_PP_DIVISOR;
+ } else {
+ pp_control_reg = PIPEA_PP_CONTROL;
+ pp_on_reg = PIPEA_PP_ON_DELAYS;
+ pp_off_reg = PIPEA_PP_OFF_DELAYS;
+ pp_div_reg = PIPEA_PP_DIVISOR;
+ }
/* Workaround: Need to write PP_CONTROL with the unlock key as
* the very first thing. */
- pp = ironlake_get_pp_control(dev_priv);
- I915_WRITE(PCH_PP_CONTROL, pp);
+ pp = ironlake_get_pp_control(intel_dp);
+ I915_WRITE(pp_control_reg, pp);
- pp_on = I915_READ(PCH_PP_ON_DELAYS);
- pp_off = I915_READ(PCH_PP_OFF_DELAYS);
- pp_div = I915_READ(PCH_PP_DIVISOR);
+ pp_on = I915_READ(pp_on_reg);
+ pp_off = I915_READ(pp_off_reg);
+ pp_div = I915_READ(pp_div_reg);
/* Pull timing values out of registers */
cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
@@ -2752,7 +2705,22 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
struct edp_power_seq *seq)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_on, pp_off, pp_div;
+ u32 pp_on, pp_off, pp_div, port_sel = 0;
+ int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
+ int pp_on_reg, pp_off_reg, pp_div_reg;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ pp_on_reg = PCH_PP_ON_DELAYS;
+ pp_off_reg = PCH_PP_OFF_DELAYS;
+ pp_div_reg = PCH_PP_DIVISOR;
+ } else {
+ pp_on_reg = PIPEA_PP_ON_DELAYS;
+ pp_off_reg = PIPEA_PP_OFF_DELAYS;
+ pp_div_reg = PIPEA_PP_DIVISOR;
+ }
+
+ if (IS_VALLEYVIEW(dev))
+ port_sel = I915_READ(pp_on_reg) & 0xc0000000;
/* And finally store the new values in the power sequencer. */
pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
@@ -2761,8 +2729,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply match the Bspec
* formula. */
- pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
- << PP_REFERENCE_DIVIDER_SHIFT;
+ pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
<< PANEL_POWER_CYCLE_DELAY_SHIFT);
@@ -2770,19 +2737,21 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
* power sequencer any more. */
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
if (is_cpu_edp(intel_dp))
- pp_on |= PANEL_POWER_PORT_DP_A;
+ port_sel = PANEL_POWER_PORT_DP_A;
else
- pp_on |= PANEL_POWER_PORT_DP_D;
+ port_sel = PANEL_POWER_PORT_DP_D;
}
- I915_WRITE(PCH_PP_ON_DELAYS, pp_on);
- I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
- I915_WRITE(PCH_PP_DIVISOR, pp_div);
+ pp_on |= port_sel;
+
+ I915_WRITE(pp_on_reg, pp_on);
+ I915_WRITE(pp_off_reg, pp_off);
+ I915_WRITE(pp_div_reg, pp_div);
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
- I915_READ(PCH_PP_ON_DELAYS),
- I915_READ(PCH_PP_OFF_DELAYS),
- I915_READ(PCH_PP_DIVISOR));
+ I915_READ(pp_on_reg),
+ I915_READ(pp_off_reg),
+ I915_READ(pp_div_reg));
}
void
@@ -2829,7 +2798,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
- connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
@@ -2844,27 +2812,46 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
+ intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
+ if (HAS_DDI(dev)) {
+ switch (intel_dig_port->port) {
+ case PORT_A:
+ intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
+ break;
+ case PORT_B:
+ intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
+ break;
+ case PORT_C:
+ intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
+ break;
+ case PORT_D:
+ intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
+ break;
+ default:
+ BUG();
+ }
+ }
/* Set up the DDC bus. */
switch (port) {
case PORT_A:
+ intel_encoder->hpd_pin = HPD_PORT_A;
name = "DPDDC-A";
break;
case PORT_B:
- dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS;
+ intel_encoder->hpd_pin = HPD_PORT_B;
name = "DPDDC-B";
break;
case PORT_C:
- dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS;
+ intel_encoder->hpd_pin = HPD_PORT_C;
name = "DPDDC-C";
break;
case PORT_D:
- dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS;
+ intel_encoder->hpd_pin = HPD_PORT_D;
name = "DPDDC-D";
break;
default:
- WARN(1, "Invalid port %c\n", port_name(port));
- break;
+ BUG();
}
if (is_edp(intel_dp))
@@ -2974,6 +2961,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
+ intel_encoder->compute_config = intel_dp_compute_config;
intel_encoder->enable = intel_enable_dp;
intel_encoder->pre_enable = intel_pre_enable_dp;
intel_encoder->disable = intel_disable_dp;
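For context on the intel_dp.c changes above: the new compute_config loop walks bpp from 24 down to 18 in 2-bpc steps, then link clock and lane count, until the required data rate fits the 8b/10b channel capacity computed by intel_dp_max_data_rate(). The standalone sketch below replays that arithmetic outside the driver; the helper names, the 148.5 MHz example pixel clock and the exact rounding are illustrative assumptions, not the driver's implementation.

#include <stdio.h>

/* Assumed rounding: required data rate as pixel clock (kHz) * bpp / 8 */
static int dp_link_required(int pixel_clock_khz, int bpp)
{
	return (pixel_clock_khz * bpp + 7) / 8;
}

/* 8b/10b coding leaves 80% of the raw symbol rate for payload data */
static int dp_max_data_rate(int link_clock_khz, int lanes)
{
	return (link_clock_khz * lanes * 8) / 10;
}

int main(void)
{
	static const int link_clocks[] = { 162000, 270000 };	/* DP 1.62/2.7 GHz, in kHz */
	const int pixel_clock = 148500;				/* example: 1080p@60 */
	int bpp, i, lanes;

	/* Same search order as the patch: bpp outer, link bw middle, lanes inner. */
	for (bpp = 24; bpp >= 18; bpp -= 6) {
		for (i = 0; i < 2; i++) {
			for (lanes = 1; lanes <= 4; lanes <<= 1) {
				if (dp_link_required(pixel_clock, bpp) <=
				    dp_max_data_rate(link_clocks[i], lanes)) {
					printf("bpp %d link %d kHz lanes %d\n",
					       bpp, link_clocks[i], lanes);
					return 0;
				}
			}
		}
	}
	return 1;	/* mode does not fit any link configuration */
}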
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 07ebac6..b5b6d19 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -33,12 +33,21 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_dp_helper.h>
+/**
+ * _wait_for - magic (register) wait macro
+ *
+ * Does the right thing for modeset paths when run under kgdb or similar atomic
+ * contexts. Note that it's important that we check the condition again after
+ * having timed out, since the timeout could be due to preemption or similar and
+ * we've never had a chance to check the condition before the timeout.
+ */
#define _wait_for(COND, MS, W) ({ \
- unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
int ret__ = 0; \
while (!(COND)) { \
if (time_after(jiffies, timeout__)) { \
- ret__ = -ETIMEDOUT; \
+ if (!(COND)) \
+ ret__ = -ETIMEDOUT; \
break; \
} \
if (W && drm_can_sleep()) { \
@@ -50,21 +59,10 @@
ret__; \
})
-#define wait_for_atomic_us(COND, US) ({ \
- unsigned long timeout__ = jiffies + usecs_to_jiffies(US); \
- int ret__ = 0; \
- while (!(COND)) { \
- if (time_after(jiffies, timeout__)) { \
- ret__ = -ETIMEDOUT; \
- break; \
- } \
- cpu_relax(); \
- } \
- ret__; \
-})
-
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
+#define wait_for_atomic_us(COND, US) _wait_for((COND), \
+ DIV_ROUND_UP((US), 1000), 0)
#define KHz(x) (1000*x)
#define MHz(x) KHz(1000*x)
@@ -101,34 +99,6 @@
#define INTEL_DVO_CHIP_TMDS 2
#define INTEL_DVO_CHIP_TVOUT 4
-/* drm_display_mode->private_flags */
-#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
-#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
-#define INTEL_MODE_DP_FORCE_6BPC (0x10)
-/* This flag must be set by the encoder's mode_fixup if it changes the crtc
- * timings in the mode to prevent the crtc fixup from overwriting them.
- * Currently only lvds needs that. */
-#define INTEL_MODE_CRTC_TIMINGS_SET (0x20)
-/*
- * Set when limited 16-235 (as opposed to full 0-255) RGB color range is
- * to be used.
- */
-#define INTEL_MODE_LIMITED_COLOR_RANGE (0x40)
-
-static inline void
-intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
- int multiplier)
-{
- mode->clock *= multiplier;
- mode->private_flags |= multiplier;
-}
-
-static inline int
-intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
-{
- return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK) >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
-}
-
struct intel_framebuffer {
struct drm_framebuffer base;
struct drm_i915_gem_object *obj;
@@ -158,9 +128,12 @@ struct intel_encoder {
bool cloneable;
bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
+ bool (*compute_config)(struct intel_encoder *,
+ struct intel_crtc_config *);
void (*pre_pll_enable)(struct intel_encoder *);
void (*pre_enable)(struct intel_encoder *);
void (*enable)(struct intel_encoder *);
+ void (*mode_set)(struct intel_encoder *intel_encoder);
void (*disable)(struct intel_encoder *);
void (*post_disable)(struct intel_encoder *);
/* Read out the current hw state of this connector, returning true if
@@ -168,6 +141,7 @@ struct intel_encoder {
* it is connected to in the pipe parameter. */
bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
int crtc_mask;
+ enum hpd_pin hpd_pin;
};
struct intel_panel {
@@ -197,13 +171,65 @@ struct intel_connector {
/* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
struct edid *edid;
+
+ /* since POLL and HPD connectors may use the same HPD line, keep the native
+ state of connector->polled in case hotplug storm detection changes it */
+ u8 polled;
+};
+
+struct intel_crtc_config {
+ struct drm_display_mode requested_mode;
+ struct drm_display_mode adjusted_mode;
+ /* This flag must be set by the encoder's compute_config callback if it
+ * changes the crtc timings in the mode to prevent the crtc fixup from
+ * overwriting them. Currently only lvds needs that. */
+ bool timings_set;
+ /* Whether to set up the PCH/FDI. Note that we never allow sharing
+ * between pch encoders and cpu encoders. */
+ bool has_pch_encoder;
+
+ /* CPU Transcoder for the pipe. Currently this can only differ from the
+ * pipe on Haswell (where we have a special eDP transcoder). */
+ enum transcoder cpu_transcoder;
+
+ /*
+ * Use reduced/limited/broadcast rgb range, compressing from the full
+ * range fed into the crtcs.
+ */
+ bool limited_color_range;
+
+ /* DP has a bunch of special cases unfortunately, so mark the pipe
+ * accordingly. */
+ bool has_dp_encoder;
+ bool dither;
+
+ /* Controls for the clock computation, to override various stages. */
+ bool clock_set;
+
+ /* Settings for the intel dpll used on pretty much everything but
+ * haswell. */
+ struct dpll {
+ unsigned n;
+ unsigned m1, m2;
+ unsigned p1, p2;
+ } dpll;
+
+ int pipe_bpp;
+ struct intel_link_m_n dp_m_n;
+ /**
+ * This is currently used by DP and HDMI encoders since those can have a
+ * target pixel clock != the port link clock (which is currently stored
+ * in adjusted_mode->clock).
+ */
+ int pixel_target_clock;
+ /* Used by SDVO (and if we ever fix it, HDMI). */
+ unsigned pixel_multiplier;
};
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
enum plane plane;
- enum transcoder cpu_transcoder;
u8 lut_r[256], lut_g[256], lut_b[256];
/*
* Whether the crtc and the connected output pipeline is active. Implies
@@ -230,7 +256,8 @@ struct intel_crtc {
int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
bool cursor_visible;
- unsigned int bpp;
+
+ struct intel_crtc_config config;
/* We can share PLLs across outputs if the timings match */
struct intel_pch_pll *pch_pll;
@@ -242,11 +269,16 @@ struct intel_crtc {
struct intel_plane {
struct drm_plane base;
+ int plane;
enum pipe pipe;
struct drm_i915_gem_object *obj;
bool can_scale;
int max_downscale;
u32 lut_r[1024], lut_g[1024], lut_b[1024];
+ int crtc_x, crtc_y;
+ unsigned int crtc_w, crtc_h;
+ uint32_t src_x, src_y;
+ uint32_t src_w, src_h;
void (*update_plane)(struct drm_plane *plane,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
@@ -347,7 +379,7 @@ struct dip_infoframe {
} __attribute__((packed));
struct intel_hdmi {
- u32 sdvox_reg;
+ u32 hdmi_reg;
int ddc_bus;
uint32_t color_range;
bool color_range_auto;
@@ -366,6 +398,7 @@ struct intel_hdmi {
struct intel_dp {
uint32_t output_reg;
+ uint32_t aux_ch_ctl_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
@@ -443,13 +476,12 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev,
- int sdvox_reg, enum port port);
+ int hdmi_reg, enum port port);
extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
-extern bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
+extern bool intel_hdmi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config);
extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob);
@@ -464,18 +496,14 @@ extern void intel_dp_init(struct drm_device *dev, int output_reg,
enum port port);
extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
-void
-intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
-extern bool intel_dp_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
+extern bool intel_dp_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config);
extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
@@ -483,11 +511,8 @@ extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
-extern int intel_edp_target_clock(struct intel_encoder *,
- struct drm_display_mode *mode);
extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
-extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
+extern int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane);
@@ -531,6 +556,7 @@ extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
extern void intel_connector_dpms(struct drm_connector *, int mode);
extern bool intel_connector_get_hw_state(struct intel_connector *connector);
extern void intel_modeset_check_state(struct drm_device *dev);
+extern void intel_plane_restore(struct drm_plane *plane);
static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
@@ -636,6 +662,10 @@ extern void intel_init_clock_gating(struct drm_device *dev);
extern void intel_write_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
+extern void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
+ struct intel_link_m_n *m_n);
+extern void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
+ struct intel_link_m_n *m_n);
extern void intel_prepare_ddi(struct drm_device *dev);
extern void hsw_fdi_link_train(struct drm_crtc *crtc);
extern void intel_ddi_init(struct drm_device *dev, enum port port);
@@ -670,6 +700,7 @@ extern void intel_update_fbc(struct drm_device *dev);
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
extern void intel_gpu_ips_teardown(void);
+extern bool intel_using_power_well(struct drm_device *dev);
extern void intel_init_power_well(struct drm_device *dev);
extern void intel_set_power_well(struct drm_device *dev, bool enable);
extern void intel_enable_gt_powersave(struct drm_device *dev);
@@ -681,7 +712,7 @@ extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe);
extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
extern void intel_ddi_pll_init(struct drm_device *dev);
-extern void intel_ddi_enable_pipe_func(struct drm_crtc *crtc);
+extern void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder);
extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
@@ -695,4 +726,6 @@ extern bool
intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
+extern void intel_display_handle_reset(struct drm_device *dev);
+
#endif /* __INTEL_DRV_H__ */
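The _wait_for() rework in this header is easy to get wrong when reimplemented elsewhere: the timeout must be rounded up by one tick and the condition re-checked once more after the deadline passes, otherwise a poller that was preempted reports -ETIMEDOUT even though the condition became true in time. A minimal userspace sketch of that pattern follows; the names and the millisecond clock are stand-ins, not driver code.

#include <errno.h>
#include <stdbool.h>
#include <time.h>

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Poll cond(arg) until it is true or timeout_ms elapses. */
static int wait_for_cond(bool (*cond)(void *), void *arg, long timeout_ms)
{
	long deadline = now_ms() + timeout_ms + 1;	/* +1: round up, as the patch does */

	while (!cond(arg)) {
		if (now_ms() > deadline) {
			/* Re-check: we may merely have been preempted past the deadline. */
			if (cond(arg))
				break;
			return -ETIMEDOUT;
		}
		/* a real implementation would sleep or cpu_relax() here */
	}
	return 0;
}

static bool always_true(void *arg)
{
	(void)arg;
	return true;
}

int main(void)
{
	return wait_for_cond(always_true, NULL, 10) ? 1 : 0;
}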
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 00e70db..cc70b16 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -448,6 +448,7 @@ void intel_dvo_init(struct drm_device *dev)
const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
struct i2c_adapter *i2c;
int gpio;
+ bool dvoinit;
/* Allow the I2C driver info to specify the GPIO to be used in
* special cases, but otherwise default to what's defined
@@ -467,7 +468,17 @@ void intel_dvo_init(struct drm_device *dev)
i2c = intel_gmbus_get_adapter(dev_priv, gpio);
intel_dvo->dev = *dvo;
- if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
+
+ /* GMBUS NAK handling seems to be unstable, hence let the
+ * transmitter detection run in bit banging mode for now.
+ */
+ intel_gmbus_force_bit(i2c, true);
+
+ dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
+
+ intel_gmbus_force_bit(i2c, false);
+
+ if (!dvoinit)
continue;
intel_encoder->type = INTEL_OUTPUT_DVO;
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 981bdce..0e19e57 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -150,7 +150,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
}
info->screen_size = size;
-// memset(info->screen_base, 0, size);
+ /* This driver doesn't need a VT switch to restore the mode on resume */
+ info->skip_vt_switch = true;
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
@@ -227,7 +228,7 @@ int intel_fbdev_init(struct drm_device *dev)
ifbdev->helper.funcs = &intel_fb_helper_funcs;
ret = drm_fb_helper_init(dev, &ifbdev->helper,
- dev_priv->num_pipe,
+ INTEL_INFO(dev)->num_pipes,
INTELFB_CONN_LIMIT);
if (ret) {
kfree(ifbdev);
@@ -282,6 +283,9 @@ void intel_fb_restore_mode(struct drm_device *dev)
struct drm_mode_config *config = &dev->mode_config;
struct drm_plane *plane;
+ if (INTEL_INFO(dev)->num_pipes == 0)
+ return;
+
drm_modeset_lock_all(dev);
ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index fa8ec4a..a905793 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -50,7 +50,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
- WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits,
+ WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits,
"HDMI port enabled, expecting disabled\n");
}
@@ -120,13 +120,14 @@ static u32 hsw_infoframe_enable(struct dip_infoframe *frame)
}
}
-static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame, enum pipe pipe)
+static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame,
+ enum transcoder cpu_transcoder)
{
switch (frame->type) {
case DIP_TYPE_AVI:
- return HSW_TVIDEO_DIP_AVI_DATA(pipe);
+ return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder);
case DIP_TYPE_SPD:
- return HSW_TVIDEO_DIP_SPD_DATA(pipe);
+ return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder);
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
return 0;
@@ -293,8 +294,8 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
- u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->pipe);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
+ u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->config.cpu_transcoder);
unsigned int i, len = DIP_HEADER_SIZE + frame->len;
u32 val = I915_READ(ctl_reg);
@@ -332,6 +333,7 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
.ver = DIP_VERSION_AVI,
@@ -342,7 +344,7 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
if (intel_hdmi->rgb_quant_range_selectable) {
- if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+ if (intel_crtc->config.limited_color_range)
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
else
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
@@ -568,7 +570,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
u32 val = I915_READ(reg);
assert_hdmi_port_disabled(intel_hdmi);
@@ -597,40 +599,40 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- u32 sdvox;
+ u32 hdmi_val;
- sdvox = SDVO_ENCODING_HDMI;
- if (!HAS_PCH_SPLIT(dev))
- sdvox |= intel_hdmi->color_range;
+ hdmi_val = SDVO_ENCODING_HDMI;
+ if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
+ hdmi_val |= intel_hdmi->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
- sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
+ hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
- sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
+ hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
- if (intel_crtc->bpp > 24)
- sdvox |= COLOR_FORMAT_12bpc;
+ if (intel_crtc->config.pipe_bpp > 24)
+ hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
else
- sdvox |= COLOR_FORMAT_8bpc;
+ hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
/* Required on CPT */
if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
- sdvox |= HDMI_MODE_SELECT;
+ hdmi_val |= HDMI_MODE_SELECT_HDMI;
if (intel_hdmi->has_audio) {
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(intel_crtc->pipe));
- sdvox |= SDVO_AUDIO_ENABLE;
- sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC;
+ hdmi_val |= SDVO_AUDIO_ENABLE;
+ hdmi_val |= HDMI_MODE_SELECT_HDMI;
intel_write_eld(encoder, adjusted_mode);
}
if (HAS_PCH_CPT(dev))
- sdvox |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
- else if (intel_crtc->pipe == PIPE_B)
- sdvox |= SDVO_PIPE_B_SELECT;
+ hdmi_val |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
+ else
+ hdmi_val |= SDVO_PIPE_SEL(intel_crtc->pipe);
- I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
- POSTING_READ(intel_hdmi->sdvox_reg);
+ I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
+ POSTING_READ(intel_hdmi->hdmi_reg);
intel_hdmi->set_infoframes(encoder, adjusted_mode);
}
@@ -643,7 +645,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 tmp;
- tmp = I915_READ(intel_hdmi->sdvox_reg);
+ tmp = I915_READ(intel_hdmi->hdmi_reg);
if (!(tmp & SDVO_ENABLE))
return false;
@@ -660,6 +662,7 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
u32 enable_bits = SDVO_ENABLE;
@@ -667,38 +670,32 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
if (intel_hdmi->has_audio)
enable_bits |= SDVO_AUDIO_ENABLE;
- temp = I915_READ(intel_hdmi->sdvox_reg);
+ temp = I915_READ(intel_hdmi->hdmi_reg);
/* HW workaround for IBX, we need to move the port to transcoder A
- * before disabling it. */
- if (HAS_PCH_IBX(dev)) {
- struct drm_crtc *crtc = encoder->base.crtc;
- int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
-
- /* Restore the transcoder select bit. */
- if (pipe == PIPE_B)
- enable_bits |= SDVO_PIPE_B_SELECT;
- }
+ * before disabling it, so restore the transcoder select bit here. */
+ if (HAS_PCH_IBX(dev))
+ enable_bits |= SDVO_PIPE_SEL(intel_crtc->pipe);
/* HW workaround, need to toggle enable bit off and on for 12bpc, but
* we do this anyway which shows more stable in testing.
*/
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
- POSTING_READ(intel_hdmi->sdvox_reg);
+ I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
+ POSTING_READ(intel_hdmi->hdmi_reg);
}
temp |= enable_bits;
- I915_WRITE(intel_hdmi->sdvox_reg, temp);
- POSTING_READ(intel_hdmi->sdvox_reg);
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
/* HW workaround, need to write this twice for issue that may result
* in first write getting masked.
*/
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(intel_hdmi->sdvox_reg, temp);
- POSTING_READ(intel_hdmi->sdvox_reg);
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
}
}
@@ -710,7 +707,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
u32 temp;
u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
- temp = I915_READ(intel_hdmi->sdvox_reg);
+ temp = I915_READ(intel_hdmi->hdmi_reg);
/* HW workaround for IBX, we need to move the port to transcoder A
* before disabling it. */
@@ -720,12 +717,12 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
if (temp & SDVO_PIPE_B_SELECT) {
temp &= ~SDVO_PIPE_B_SELECT;
- I915_WRITE(intel_hdmi->sdvox_reg, temp);
- POSTING_READ(intel_hdmi->sdvox_reg);
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
/* Again we need to write this twice. */
- I915_WRITE(intel_hdmi->sdvox_reg, temp);
- POSTING_READ(intel_hdmi->sdvox_reg);
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
/* Transcoder selection bits only update
* effectively on vblank. */
@@ -740,21 +737,21 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
* we do this anyway which shows more stable in testing.
*/
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
- POSTING_READ(intel_hdmi->sdvox_reg);
+ I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
+ POSTING_READ(intel_hdmi->hdmi_reg);
}
temp &= ~enable_bits;
- I915_WRITE(intel_hdmi->sdvox_reg, temp);
- POSTING_READ(intel_hdmi->sdvox_reg);
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
/* HW workaround, need to write this twice for issue that may result
* in first write getting masked.
*/
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(intel_hdmi->sdvox_reg, temp);
- POSTING_READ(intel_hdmi->sdvox_reg);
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
}
}
@@ -772,23 +769,40 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+bool intel_hdmi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
if (intel_hdmi->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
if (intel_hdmi->has_hdmi_sink &&
drm_match_cea_mode(adjusted_mode) > 1)
- intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235;
+ intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
else
intel_hdmi->color_range = 0;
}
if (intel_hdmi->color_range)
- adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
+ pipe_config->limited_color_range = true;
+
+ if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev))
+ pipe_config->has_pch_encoder = true;
+
+ /*
+ * HDMI is either 12 or 8 bpc, so if the display lets 10bpc sneak
+ * through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi
+ * outputs.
+ */
+ if (pipe_config->pipe_bpp > 8*3 && HAS_PCH_SPLIT(dev)) {
+ DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
+ pipe_config->pipe_bpp = 12*3;
+ } else {
+ DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
+ pipe_config->pipe_bpp = 8*3;
+ }
return true;
}
@@ -906,6 +920,9 @@ intel_hdmi_set_property(struct drm_connector *connector,
}
if (property == dev_priv->broadcast_rgb_property) {
+ bool old_auto = intel_hdmi->color_range_auto;
+ uint32_t old_range = intel_hdmi->color_range;
+
switch (val) {
case INTEL_BROADCAST_RGB_AUTO:
intel_hdmi->color_range_auto = true;
@@ -916,11 +933,16 @@ intel_hdmi_set_property(struct drm_connector *connector,
break;
case INTEL_BROADCAST_RGB_LIMITED:
intel_hdmi->color_range_auto = false;
- intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235;
+ intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
break;
default:
return -EINVAL;
}
+
+ if (old_auto == intel_hdmi->color_range_auto &&
+ old_range == intel_hdmi->color_range)
+ return 0;
+
goto done;
}
@@ -941,7 +963,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
}
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
- .mode_fixup = intel_hdmi_mode_fixup,
.mode_set = intel_hdmi_mode_set,
};
@@ -985,36 +1006,36 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
- connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
switch (port) {
case PORT_B:
intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
- dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS;
+ intel_encoder->hpd_pin = HPD_PORT_B;
break;
case PORT_C:
intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
- dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS;
+ intel_encoder->hpd_pin = HPD_PORT_C;
break;
case PORT_D:
intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
- dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS;
+ intel_encoder->hpd_pin = HPD_PORT_D;
break;
case PORT_A:
+ intel_encoder->hpd_pin = HPD_PORT_A;
/* Internal port only for eDP. */
default:
BUG();
}
- if (!HAS_PCH_SPLIT(dev)) {
- intel_hdmi->write_infoframe = g4x_write_infoframe;
- intel_hdmi->set_infoframes = g4x_set_infoframes;
- } else if (IS_VALLEYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
intel_hdmi->set_infoframes = vlv_set_infoframes;
- } else if (IS_HASWELL(dev)) {
+ } else if (!HAS_PCH_SPLIT(dev)) {
+ intel_hdmi->write_infoframe = g4x_write_infoframe;
+ intel_hdmi->set_infoframes = g4x_set_infoframes;
+ } else if (HAS_DDI(dev)) {
intel_hdmi->write_infoframe = hsw_write_infoframe;
intel_hdmi->set_infoframes = hsw_set_infoframes;
} else if (HAS_PCH_IBX(dev)) {
@@ -1045,7 +1066,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
}
}
-void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
+void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
{
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
@@ -1069,6 +1090,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+ intel_encoder->compute_config = intel_hdmi_compute_config;
intel_encoder->enable = intel_enable_hdmi;
intel_encoder->disable = intel_disable_hdmi;
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
@@ -1078,7 +1100,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
intel_encoder->cloneable = false;
intel_dig_port->port = port;
- intel_dig_port->hdmi.sdvox_reg = sdvox_reg;
+ intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
intel_dig_port->dp.output_reg = 0;
intel_hdmi_init_connector(intel_dig_port, intel_connector);
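Both broadcast RGB property hunks (DP earlier in the patch, HDMI above) add the same guard: remember the old auto/range pair, apply the new value, and return early when nothing actually changed so no modeset is triggered. Below is a simplified sketch of that guard with stand-in types and constants rather than the driver's own.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_output {
	bool color_range_auto;
	uint32_t color_range;
};

enum { RGB_AUTO, RGB_FULL, RGB_LIMITED };
#define RANGE_16_235	0x1	/* stand-in for the limited-range register bit */

/* Returns true only when the new property value requires a modeset. */
static bool set_broadcast_rgb(struct fake_output *out, int val)
{
	bool old_auto = out->color_range_auto;
	uint32_t old_range = out->color_range;

	switch (val) {
	case RGB_AUTO:
		out->color_range_auto = true;
		break;
	case RGB_FULL:
		out->color_range_auto = false;
		out->color_range = 0;
		break;
	case RGB_LIMITED:
		out->color_range_auto = false;
		out->color_range = RANGE_16_235;
		break;
	default:
		return false;	/* invalid value, nothing applied */
	}

	/* Unchanged setting: skip the expensive modeset. */
	return old_auto != out->color_range_auto ||
	       old_range != out->color_range;
}

int main(void)
{
	struct fake_output out = { .color_range_auto = true };

	printf("needs modeset: %d\n", set_broadcast_rgb(&out, RGB_AUTO));	/* 0 */
	printf("needs modeset: %d\n", set_broadcast_rgb(&out, RGB_LIMITED));	/* 1 */
	return 0;
}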
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index ef4744e..5d24503 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -522,7 +522,9 @@ int intel_setup_gmbus(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_NOP(dev))
+ return 0;
+ else if (HAS_PCH_SPLIT(dev))
dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
else if (IS_VALLEYVIEW(dev))
dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 3d1d974..f36f1ba 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -261,8 +261,6 @@ centre_horizontally(struct drm_display_mode *mode,
mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
-
- mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET;
}
static void
@@ -284,8 +282,6 @@ centre_vertically(struct drm_display_mode *mode,
mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
-
- mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET;
}
static inline u32 panel_fitter_scaling(u32 source, u32 target)
@@ -301,17 +297,20 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
return (FACTOR * ratio + FACTOR/2) / FACTOR;
}
-static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
+ struct intel_crtc_config *pipe_config)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
+ struct intel_lvds_encoder *lvds_encoder =
+ to_lvds_encoder(&intel_encoder->base);
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *mode = &pipe_config->requested_mode;
struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
+ unsigned int lvds_bpp;
int pipe;
/* Should never happen!! */
@@ -323,6 +322,17 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
if (intel_encoder_check_is_cloned(&lvds_encoder->base))
return false;
+ if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) ==
+ LVDS_A3_POWER_UP)
+ lvds_bpp = 8*3;
+ else
+ lvds_bpp = 6*3;
+
+ if (lvds_bpp != pipe_config->pipe_bpp) {
+ DRM_DEBUG_KMS("forcing display bpp (was %d) to LVDS (%d)\n",
+ pipe_config->pipe_bpp, lvds_bpp);
+ pipe_config->pipe_bpp = lvds_bpp;
+ }
/*
* We have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
@@ -333,6 +343,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
adjusted_mode);
if (HAS_PCH_SPLIT(dev)) {
+ pipe_config->has_pch_encoder = true;
+
intel_pch_panel_fitting(dev,
intel_connector->panel.fitting_mode,
mode, adjusted_mode);
@@ -359,6 +371,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
I915_WRITE(BCLRPAT(pipe), 0);
drm_mode_set_crtcinfo(adjusted_mode, 0);
+ pipe_config->timings_set = true;
switch (intel_connector->panel.fitting_mode) {
case DRM_MODE_SCALE_CENTER:
@@ -618,7 +631,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
kfree(lvds_connector->base.edid);
- intel_panel_destroy_backlight(connector->dev);
intel_panel_fini(&lvds_connector->base.panel);
drm_sysfs_connector_remove(connector);
@@ -661,7 +673,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
}
static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
- .mode_fixup = intel_lvds_mode_fixup,
.mode_set = intel_lvds_mode_set,
};
@@ -850,6 +861,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Fujitsu Esprimo Q900",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
+ },
+ },
{ } /* terminating entry */
};
@@ -1019,12 +1038,15 @@ static bool intel_lvds_supported(struct drm_device *dev)
{
/* With the introduction of the PCH we gained a dedicated
* LVDS presence pin, use it. */
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
return true;
/* Otherwise LVDS was only attached to mobile products,
* except for the inglorious 830gm */
- return IS_MOBILE(dev) && !IS_I830(dev);
+ if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
+ return true;
+
+ return false;
}
/**
@@ -1102,6 +1124,7 @@ bool intel_lvds_init(struct drm_device *dev)
intel_encoder->enable = intel_enable_lvds;
intel_encoder->pre_enable = intel_pre_enable_lvds;
intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
+ intel_encoder->compute_config = intel_lvds_compute_config;
intel_encoder->disable = intel_disable_lvds;
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index bee8cb6..eb5e6e9 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -286,8 +286,11 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- dev_priv->backlight_level = level;
- if (dev_priv->backlight_enabled)
+ dev_priv->backlight.level = level;
+ if (dev_priv->backlight.device)
+ dev_priv->backlight.device->props.brightness = level;
+
+ if (dev_priv->backlight.enabled)
intel_panel_actually_set_backlight(dev, level);
}
@@ -295,7 +298,7 @@ void intel_panel_disable_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- dev_priv->backlight_enabled = false;
+ dev_priv->backlight.enabled = false;
intel_panel_actually_set_backlight(dev, 0);
if (INTEL_INFO(dev)->gen >= 4) {
@@ -318,8 +321,12 @@ void intel_panel_enable_backlight(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->backlight_level == 0)
- dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+ if (dev_priv->backlight.level == 0) {
+ dev_priv->backlight.level = intel_panel_get_max_backlight(dev);
+ if (dev_priv->backlight.device)
+ dev_priv->backlight.device->props.brightness =
+ dev_priv->backlight.level;
+ }
if (INTEL_INFO(dev)->gen >= 4) {
uint32_t reg, tmp;
@@ -335,7 +342,7 @@ void intel_panel_enable_backlight(struct drm_device *dev,
if (tmp & BLM_PWM_ENABLE)
goto set_level;
- if (dev_priv->num_pipe == 3)
+ if (INTEL_INFO(dev)->num_pipes == 3)
tmp &= ~BLM_PIPE_SELECT_IVB;
else
tmp &= ~BLM_PIPE_SELECT;
@@ -360,16 +367,16 @@ set_level:
* BLC_PWM_CPU_CTL may be cleared to zero automatically when these
* registers are set.
*/
- dev_priv->backlight_enabled = true;
- intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
+ dev_priv->backlight.enabled = true;
+ intel_panel_actually_set_backlight(dev, dev_priv->backlight.level);
}
static void intel_panel_init_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- dev_priv->backlight_level = intel_panel_get_backlight(dev);
- dev_priv->backlight_enabled = dev_priv->backlight_level != 0;
+ dev_priv->backlight.level = intel_panel_get_backlight(dev);
+ dev_priv->backlight.enabled = dev_priv->backlight.level != 0;
}
enum drm_connector_status
@@ -405,8 +412,7 @@ static int intel_panel_update_status(struct backlight_device *bd)
static int intel_panel_get_brightness(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
- struct drm_i915_private *dev_priv = dev->dev_private;
- return dev_priv->backlight_level;
+ return intel_panel_get_backlight(dev);
}
static const struct backlight_ops intel_panel_bl_ops = {
@@ -422,33 +428,38 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
intel_panel_init_backlight(dev);
+ if (WARN_ON(dev_priv->backlight.device))
+ return -ENODEV;
+
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
+ props.brightness = dev_priv->backlight.level;
props.max_brightness = _intel_panel_get_max_backlight(dev);
if (props.max_brightness == 0) {
DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n");
return -ENODEV;
}
- dev_priv->backlight =
+ dev_priv->backlight.device =
backlight_device_register("intel_backlight",
&connector->kdev, dev,
&intel_panel_bl_ops, &props);
- if (IS_ERR(dev_priv->backlight)) {
+ if (IS_ERR(dev_priv->backlight.device)) {
DRM_ERROR("Failed to register backlight: %ld\n",
- PTR_ERR(dev_priv->backlight));
- dev_priv->backlight = NULL;
+ PTR_ERR(dev_priv->backlight.device));
+ dev_priv->backlight.device = NULL;
return -ENODEV;
}
- dev_priv->backlight->props.brightness = intel_panel_get_backlight(dev);
return 0;
}
void intel_panel_destroy_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->backlight)
- backlight_device_unregister(dev_priv->backlight);
+ if (dev_priv->backlight.device) {
+ backlight_device_unregister(dev_priv->backlight.device);
+ dev_priv->backlight.device = NULL;
+ }
}
#else
int intel_panel_setup_backlight(struct drm_connector *connector)
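The intel_panel.c hunks above consolidate the backlight state into dev_priv->backlight.{level,enabled,device} and seed props.brightness before registration instead of patching it up afterwards. A minimal, hedged sketch of the underlying backlight-class registration pattern; my_update_status, my_get_brightness and my_register_backlight are illustrative names, not part of this patch:

#include <linux/backlight.h>
#include <linux/string.h>

static int my_update_status(struct backlight_device *bd)
{
    /* push bd->props.brightness out to the hardware here */
    return 0;
}

static int my_get_brightness(struct backlight_device *bd)
{
    /* read the current level back from the hardware */
    return bd->props.brightness;
}

static const struct backlight_ops my_bl_ops = {
    .update_status  = my_update_status,
    .get_brightness = my_get_brightness,
};

static struct backlight_device *my_register_backlight(struct device *dev,
                                                       void *priv,
                                                       int level, int max)
{
    struct backlight_properties props;

    memset(&props, 0, sizeof(props));
    props.type = BACKLIGHT_RAW;
    props.brightness = level;       /* seed sysfs with the current level */
    props.max_brightness = max;

    return backlight_device_register("my_backlight", dev, priv,
                                     &my_bl_ops, &props);
}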
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index adca007..de3b0dc 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2460,10 +2460,14 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
if (val == dev_priv->rps.cur_delay)
return;
- I915_WRITE(GEN6_RPNSWREQ,
- GEN6_FREQUENCY(val) |
- GEN6_OFFSET(0) |
- GEN6_AGGRESSIVE_TURBO);
+ if (IS_HASWELL(dev))
+ I915_WRITE(GEN6_RPNSWREQ,
+ HSW_FREQUENCY(val));
+ else
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN6_FREQUENCY(val) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
/* Make sure we continue to get interrupts
* until we hit the minimum or maximum frequencies.
@@ -2554,8 +2558,8 @@ static void gen6_enable_rps(struct drm_device *dev)
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
- /* In units of 100MHz */
- dev_priv->rps.max_delay = rp_state_cap & 0xff;
+ /* In units of 50MHz */
+ dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
dev_priv->rps.cur_delay = 0;
@@ -2601,12 +2605,19 @@ static void gen6_enable_rps(struct drm_device *dev)
GEN6_RC_CTL_EI_MODE(1) |
GEN6_RC_CTL_HW_ENABLE);
- I915_WRITE(GEN6_RPNSWREQ,
- GEN6_FREQUENCY(10) |
- GEN6_OFFSET(0) |
- GEN6_AGGRESSIVE_TURBO);
- I915_WRITE(GEN6_RC_VIDEO_FREQ,
- GEN6_FREQUENCY(12));
+ if (IS_HASWELL(dev)) {
+ I915_WRITE(GEN6_RPNSWREQ,
+ HSW_FREQUENCY(10));
+ I915_WRITE(GEN6_RC_VIDEO_FREQ,
+ HSW_FREQUENCY(12));
+ } else {
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN6_FREQUENCY(10) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+ I915_WRITE(GEN6_RC_VIDEO_FREQ,
+ GEN6_FREQUENCY(12));
+ }
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
@@ -2631,9 +2642,11 @@ static void gen6_enable_rps(struct drm_device *dev)
if (!ret) {
pcu_mbox = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
- if (ret && pcu_mbox & (1<<31)) { /* OC supported */
- dev_priv->rps.max_delay = pcu_mbox & 0xff;
- DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
+ if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
+ DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
+ (dev_priv->rps.max_delay & 0xff) * 50,
+ (pcu_mbox & 0xff) * 50);
+ dev_priv->rps.hw_max = pcu_mbox & 0xff;
}
} else {
DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
@@ -2671,8 +2684,8 @@ static void gen6_update_ring_freq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int min_freq = 15;
- int gpu_freq;
- unsigned int ia_freq, max_ia_freq;
+ unsigned int gpu_freq;
+ unsigned int max_ia_freq, min_ring_freq;
int scaling_factor = 180;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -2688,6 +2701,10 @@ static void gen6_update_ring_freq(struct drm_device *dev)
/* Convert from kHz to MHz */
max_ia_freq /= 1000;
+ min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK);
+ /* convert DDR frequency from units of 133.3MHz to bandwidth */
+ min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3;
+
/*
* For each potential GPU frequency, load a ring frequency we'd like
* to use for memory access. We do this by specifying the IA frequency
@@ -2696,21 +2713,32 @@ static void gen6_update_ring_freq(struct drm_device *dev)
for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
gpu_freq--) {
int diff = dev_priv->rps.max_delay - gpu_freq;
-
- /*
- * For GPU frequencies less than 750MHz, just use the lowest
- * ring freq.
- */
- if (gpu_freq < min_freq)
- ia_freq = 800;
- else
- ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
- ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
- ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT;
+ unsigned int ia_freq = 0, ring_freq = 0;
+
+ if (IS_HASWELL(dev)) {
+ ring_freq = (gpu_freq * 5 + 3) / 4;
+ ring_freq = max(min_ring_freq, ring_freq);
+ /* leave ia_freq as the default, chosen by cpufreq */
+ } else {
+ /* On older processors, there is no separate ring
+ * clock domain, so in order to boost the bandwidth
+ * of the ring, we need to upclock the CPU (ia_freq).
+ *
+ * For GPU frequencies less than 750MHz,
+ * just use the lowest ring freq.
+ */
+ if (gpu_freq < min_freq)
+ ia_freq = 800;
+ else
+ ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
+ ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
+ }
sandybridge_pcode_write(dev_priv,
GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
- ia_freq | gpu_freq);
+ ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
+ ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
+ gpu_freq);
}
}
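The gen6_update_ring_freq() hunk above derives the minimum ring frequency from the DDR clock (DCLK, in units of 133.3MHz) and, on Haswell, asks for a ring clock of roughly 1.25x the GPU clock instead of upclocking the CPU. A standalone arithmetic sketch of that table calculation, in plain userspace C with illustrative input values only:

#include <stdio.h>

static unsigned int hsw_ring_freq(unsigned int gpu_freq,
                                  unsigned int min_ring_freq)
{
    /* ring clock request of roughly 1.25x the GPU clock, rounded up */
    unsigned int ring_freq = (gpu_freq * 5 + 3) / 4;

    /* but never below the DDR-derived minimum */
    return ring_freq > min_ring_freq ? ring_freq : min_ring_freq;
}

int main(void)
{
    /* pretend DCLK reported 12 units of 133.3MHz DDR clock */
    unsigned int dclk = 12;
    /* same conversion to bandwidth units as in the hunk above */
    unsigned int min_ring_freq = (2 * 4 * dclk + 2) / 3;
    unsigned int gpu_freq;

    for (gpu_freq = 22; gpu_freq >= 7; gpu_freq--)
        printf("gpu %u -> ring %u (min %u)\n", gpu_freq,
               hsw_ring_freq(gpu_freq, min_ring_freq), min_ring_freq);
    return 0;
}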
@@ -2821,7 +2849,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
ret = intel_ring_idle(ring);
dev_priv->mm.interruptible = was_interruptible;
if (ret) {
- DRM_ERROR("failed to enable ironlake power power savings\n");
+ DRM_ERROR("failed to enable ironlake power savings\n");
ironlake_teardown_rc6(dev);
return;
}
@@ -3562,6 +3590,7 @@ static void cpt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
+ uint32_t val;
/*
* On Ibex Peak and Cougar Point, we need to disable clock
@@ -3574,8 +3603,17 @@ static void cpt_init_clock_gating(struct drm_device *dev)
/* The below fixes the weird display corruption, a few pixels shifted
* downward, on (only) LVDS of some HP laptops with IVY.
*/
- for_each_pipe(pipe)
- I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE);
+ for_each_pipe(pipe) {
+ val = I915_READ(TRANS_CHICKEN2(pipe));
+ val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
+ if (dev_priv->fdi_rx_polarity_inverted)
+ val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
+ val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
+ val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
+ val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
+ I915_WRITE(TRANS_CHICKEN2(pipe), val);
+ }
/* WADP0ClockGatingDisable */
for_each_pipe(pipe) {
I915_WRITE(TRANS_CHICKEN1(pipe),
@@ -3768,6 +3806,9 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
+ /* WaSwitchSolVfFArbitrationPriority */
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
+
/* XXX: This is a workaround for early silicon revisions and should be
* removed later.
*/
@@ -3874,7 +3915,8 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
snpcr |= GEN6_MBC_SNPCR_MED;
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
- cpt_init_clock_gating(dev);
+ if (!HAS_PCH_NOP(dev))
+ cpt_init_clock_gating(dev);
gen6_check_mch_setup(dev);
}
@@ -3899,8 +3941,10 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
+ /* WaDisablePSDDualDispatchEnable */
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+ _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
+ GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
@@ -3968,24 +4012,20 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
/*
- * On ValleyView, the GUnit needs to signal the GT
- * when flip and other events complete. So enable
- * all the GUnit->GT interrupts here
- */
- I915_WRITE(VLV_DPFLIPSTAT, PIPEB_LINE_COMPARE_INT_EN |
- PIPEB_HLINE_INT_EN | PIPEB_VBLANK_INT_EN |
- SPRITED_FLIPDONE_INT_EN | SPRITEC_FLIPDONE_INT_EN |
- PLANEB_FLIPDONE_INT_EN | PIPEA_LINE_COMPARE_INT_EN |
- PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN |
- SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN |
- PLANEA_FLIPDONE_INT_EN);
-
- /*
* WaDisableVLVClockGating_VBIIssue
	 * Disable clock gating on the GCFG unit to prevent a delay
* in the reporting of vblank events.
*/
- I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
+ I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff);
+
+ /* Conservative clock gating settings for now */
+ I915_WRITE(0x9400, 0xffffffff);
+ I915_WRITE(0x9404, 0xffffffff);
+ I915_WRITE(0x9408, 0xffffffff);
+ I915_WRITE(0x940c, 0xffffffff);
+ I915_WRITE(0x9410, 0xffffffff);
+ I915_WRITE(0x9414, 0xffffffff);
+ I915_WRITE(0x9418, 0xffffffff);
}
static void g4x_init_clock_gating(struct drm_device *dev)
@@ -4070,13 +4110,29 @@ void intel_init_clock_gating(struct drm_device *dev)
dev_priv->display.init_clock_gating(dev);
}
+/**
+ * We should only use the power well if we explicitly asked the hardware to
+ * enable it, so check both that it is actually enabled and that we have
+ * requested it to be enabled.
+ */
+bool intel_using_power_well(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_HASWELL(dev))
+ return I915_READ(HSW_PWR_WELL_DRIVER) ==
+ (HSW_PWR_WELL_ENABLE | HSW_PWR_WELL_STATE);
+ else
+ return true;
+}
+
void intel_set_power_well(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
bool is_enabled, enable_requested;
uint32_t tmp;
- if (!IS_HASWELL(dev))
+ if (!HAS_POWER_WELL(dev))
return;
if (!i915_disable_power_well && !enable)
@@ -4114,7 +4170,7 @@ void intel_init_power_well(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!IS_HASWELL(dev))
+ if (!HAS_POWER_WELL(dev))
return;
/* For now, we need the power well to be always enabled. */
@@ -4176,7 +4232,6 @@ void intel_init_pm(struct drm_device *dev)
}
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
} else if (IS_IVYBRIDGE(dev)) {
- /* FIXME: detect B0+ stepping and use auto training */
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = ivybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
@@ -4274,21 +4329,14 @@ static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
- u32 forcewake_ack;
-
- if (IS_HASWELL(dev_priv->dev))
- forcewake_ack = FORCEWAKE_ACK_HSW;
- else
- forcewake_ack = FORCEWAKE_ACK;
-
- if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
+ if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
- I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL);
+ I915_WRITE_NOTRACE(FORCEWAKE, 1);
POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
- if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
+ if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
@@ -4311,7 +4359,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
else
forcewake_ack = FORCEWAKE_MT_ACK;
- if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
+ if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
@@ -4319,7 +4367,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
/* something from same cacheline, but !FORCEWAKE_MT */
POSTING_READ(ECOBUS);
- if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
+ if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
@@ -4409,15 +4457,22 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
- if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
+ if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+ I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
- if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
+ if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
+ DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
+
+ if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) &
+ FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
__gen6_gt_wait_for_thread_c0(dev_priv);
}
@@ -4425,8 +4480,9 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
- /* something from same cacheline, but !FORCEWAKE_VLV */
- POSTING_READ(FORCEWAKE_ACK_VLV);
+ I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ /* The below doubles as a POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
@@ -4511,3 +4567,56 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
return 0;
}
+
+static int vlv_punit_rw(struct drm_i915_private *dev_priv, u8 opcode,
+ u8 addr, u32 *val)
+{
+ u32 cmd, devfn, port, be, bar;
+
+ bar = 0;
+ be = 0xf;
+ port = IOSF_PORT_PUNIT;
+ devfn = PCI_DEVFN(2, 0);
+
+ cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
+ (port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
+ (bar << IOSF_BAR_SHIFT);
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ if (I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) {
+ DRM_DEBUG_DRIVER("warning: pcode (%s) mailbox access failed\n",
+ opcode == PUNIT_OPCODE_REG_READ ?
+ "read" : "write");
+ return -EAGAIN;
+ }
+
+ I915_WRITE(VLV_IOSF_ADDR, addr);
+ if (opcode == PUNIT_OPCODE_REG_WRITE)
+ I915_WRITE(VLV_IOSF_DATA, *val);
+ I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
+
+ if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0,
+ 500)) {
+ DRM_ERROR("timeout waiting for pcode %s (%d) to finish\n",
+ opcode == PUNIT_OPCODE_REG_READ ? "read" : "write",
+ addr);
+ return -ETIMEDOUT;
+ }
+
+ if (opcode == PUNIT_OPCODE_REG_READ)
+ *val = I915_READ(VLV_IOSF_DATA);
+ I915_WRITE(VLV_IOSF_DATA, 0);
+
+ return 0;
+}
+
+int valleyview_punit_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val)
+{
+ return vlv_punit_rw(dev_priv, PUNIT_OPCODE_REG_READ, addr, val);
+}
+
+int valleyview_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
+{
+ return vlv_punit_rw(dev_priv, PUNIT_OPCODE_REG_WRITE, addr, &val);
+}
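The new vlv_punit_rw() helper above drives the IOSF sideband doorbell: it refuses to start while IOSF_SB_BUSY is set, writes the address (and data for writes), rings VLV_IOSF_DOORBELL_REQ, polls for completion, and only WARNs if rps.hw_lock is not held. A hedged usage sketch for the read wrapper; example_read_punit and reg are illustrative, not from this patch:

/* reg is a placeholder Punit register offset, not a real one */
static int example_read_punit(struct drm_i915_private *dev_priv,
                              u8 reg, u32 *out)
{
    int ret;

    mutex_lock(&dev_priv->rps.hw_lock);   /* vlv_punit_rw() asserts this lock */
    ret = valleyview_punit_read(dev_priv, reg, out);
    mutex_unlock(&dev_priv->rps.hw_lock);

    return ret;
}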
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 78413ec..d154284 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -246,11 +246,11 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
return;
}
- if (intel_sdvo->sdvo_reg == SDVOB) {
- cval = I915_READ(SDVOC);
- } else {
- bval = I915_READ(SDVOB);
- }
+ if (intel_sdvo->sdvo_reg == GEN3_SDVOB)
+ cval = I915_READ(GEN3_SDVOC);
+ else
+ bval = I915_READ(GEN3_SDVOB);
+
/*
* Write the registers twice for luck. Sometimes,
* writing them only once doesn't appear to 'stick'.
@@ -258,10 +258,10 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
*/
for (i = 0; i < 2; i++)
{
- I915_WRITE(SDVOB, bval);
- I915_READ(SDVOB);
- I915_WRITE(SDVOC, cval);
- I915_READ(SDVOC);
+ I915_WRITE(GEN3_SDVOB, bval);
+ I915_READ(GEN3_SDVOB);
+ I915_WRITE(GEN3_SDVOC, cval);
+ I915_READ(GEN3_SDVOC);
}
}
@@ -788,7 +788,6 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
v_sync_offset = mode->vsync_start - mode->vdisplay;
mode_clock = mode->clock;
- mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
mode_clock /= 10;
dtd->part1.clock = mode_clock;
@@ -957,14 +956,17 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
.len = DIP_LEN_AVI,
};
uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_sdvo->base.base.crtc);
if (intel_sdvo->rgb_quant_range_selectable) {
- if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+ if (intel_crtc->config.limited_color_range)
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
else
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
}
+ avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
+
intel_dip_infoframe_csum(&avi_if);
/* sdvo spec says that the ecc is handled by the hw, and it looks like
@@ -1039,12 +1041,18 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
return true;
}
-static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
{
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
- int multiplier;
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *mode = &pipe_config->requested_mode;
+
+ DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
+ pipe_config->pipe_bpp = 8*3;
+
+ if (HAS_PCH_SPLIT(encoder->base.dev))
+ pipe_config->has_pch_encoder = true;
/* We need to construct preferred input timings based on our
* output timings. To do that, we have to set the output
@@ -1071,37 +1079,40 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
/* Make the CRTC code factor in the SDVO pixel multiplier. The
* SDVO device will factor out the multiplier during mode_set.
*/
- multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode);
- intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
+ pipe_config->pixel_multiplier =
+ intel_sdvo_get_pixel_multiplier(adjusted_mode);
+ adjusted_mode->clock *= pipe_config->pixel_multiplier;
if (intel_sdvo->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
+ /* FIXME: This bit is only valid when using TMDS encoding and 8
+ * bit per color mode. */
if (intel_sdvo->has_hdmi_monitor &&
drm_match_cea_mode(adjusted_mode) > 1)
- intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235;
+ intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235;
else
intel_sdvo->color_range = 0;
}
if (intel_sdvo->color_range)
- adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
+ pipe_config->limited_color_range = true;
return true;
}
-static void intel_sdvo_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = encoder->crtc;
+ struct drm_crtc *crtc = intel_encoder->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+ struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config.adjusted_mode;
+ struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&intel_encoder->base);
u32 sdvox;
struct intel_sdvo_in_out_map in_out;
struct intel_sdvo_dtd input_dtd, output_dtd;
- int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
int rate;
if (!mode)
@@ -1161,7 +1172,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
DRM_INFO("Setting input timings on %s failed\n",
SDVO_NAME(intel_sdvo));
- switch (pixel_multiplier) {
+ switch (intel_crtc->config.pixel_multiplier) {
default:
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
@@ -1182,10 +1193,10 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
} else {
sdvox = I915_READ(intel_sdvo->sdvo_reg);
switch (intel_sdvo->sdvo_reg) {
- case SDVOB:
+ case GEN3_SDVOB:
sdvox &= SDVOB_PRESERVE_MASK;
break;
- case SDVOC:
+ case GEN3_SDVOC:
sdvox &= SDVOC_PRESERVE_MASK;
break;
}
@@ -1193,9 +1204,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
}
if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
- sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
+ sdvox |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
else
- sdvox |= TRANSCODER(intel_crtc->pipe);
+ sdvox |= SDVO_PIPE_SEL(intel_crtc->pipe);
if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
@@ -1205,7 +1216,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
/* done in crtc_mode_set as it lives inside the dpll register */
} else {
- sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
+ sdvox |= (intel_crtc->config.pixel_multiplier - 1)
+ << SDVO_PORT_MULTIPLY_SHIFT;
}
if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
@@ -1235,11 +1247,13 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ u16 active_outputs;
u32 tmp;
tmp = I915_READ(intel_sdvo->sdvo_reg);
+ intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
- if (!(tmp & SDVO_ENABLE))
+ if (!(tmp & SDVO_ENABLE) && (active_outputs == 0))
return false;
if (HAS_PCH_CPT(dev))
@@ -1305,15 +1319,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
temp = I915_READ(intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) == 0) {
/* HW workaround for IBX, we need to move the port
- * to transcoder A before disabling it. */
- if (HAS_PCH_IBX(dev)) {
- struct drm_crtc *crtc = encoder->base.crtc;
- int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
-
- /* Restore the transcoder select bit. */
- if (pipe == PIPE_B)
- temp |= SDVO_PIPE_B_SELECT;
- }
+ * to transcoder A before disabling it, so restore it here. */
+ if (HAS_PCH_IBX(dev))
+ temp |= SDVO_PIPE_SEL(intel_crtc->pipe);
intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
}
@@ -1922,6 +1930,9 @@ intel_sdvo_set_property(struct drm_connector *connector,
}
if (property == dev_priv->broadcast_rgb_property) {
+ bool old_auto = intel_sdvo->color_range_auto;
+ uint32_t old_range = intel_sdvo->color_range;
+
switch (val) {
case INTEL_BROADCAST_RGB_AUTO:
intel_sdvo->color_range_auto = true;
@@ -1932,11 +1943,18 @@ intel_sdvo_set_property(struct drm_connector *connector,
break;
case INTEL_BROADCAST_RGB_LIMITED:
intel_sdvo->color_range_auto = false;
- intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235;
+ /* FIXME: this bit is only valid when using TMDS
+ * encoding and 8 bit per color mode. */
+ intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235;
break;
default:
return -EINVAL;
}
+
+ if (old_auto == intel_sdvo->color_range_auto &&
+ old_range == intel_sdvo->color_range)
+ return 0;
+
goto done;
}
@@ -2040,11 +2058,6 @@ done:
#undef CHECK_PROPERTY
}
-static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
- .mode_fixup = intel_sdvo_mode_fixup,
- .mode_set = intel_sdvo_mode_set,
-};
-
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
.dpms = intel_sdvo_dpms,
.detect = intel_sdvo_detect,
@@ -2269,7 +2282,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
connector = &intel_connector->base;
if (intel_sdvo_get_hotplug_support(intel_sdvo) &
intel_sdvo_connector->output_flag) {
- connector->polled = DRM_CONNECTOR_POLL_HPD;
intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;
/* Some SDVO devices have one-shot hotplug interrupts.
* Ensure that they get re-enabled when an interrupt happens.
@@ -2277,7 +2289,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
intel_sdvo_enable_hotplug(intel_encoder);
} else {
- connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
}
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
@@ -2346,7 +2358,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
encoder->encoder_type = DRM_MODE_ENCODER_DAC;
connector->connector_type = DRM_MODE_CONNECTOR_VGA;
@@ -2739,7 +2751,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
struct intel_sdvo *intel_sdvo;
u32 hotplug_mask;
int i;
-
intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
if (!intel_sdvo)
return false;
@@ -2779,9 +2790,15 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
}
- drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
+ /* Only enable the hotplug irq if we need it, to work around noisy
+ * hotplug lines.
+ */
+ if (intel_sdvo->hotplug_active)
+ intel_encoder->hpd_pin = is_sdvob ? HPD_SDVO_B : HPD_SDVO_C;
+ intel_encoder->compute_config = intel_sdvo_compute_config;
intel_encoder->disable = intel_disable_sdvo;
+ intel_encoder->mode_set = intel_sdvo_mode_set;
intel_encoder->enable = intel_enable_sdvo;
intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
@@ -2807,12 +2824,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
*/
intel_sdvo->base.cloneable = false;
- /* Only enable the hotplug irq if we need it, to work around noisy
- * hotplug lines.
- */
- if (intel_sdvo->hotplug_active)
- dev_priv->hotplug_supported_mask |= hotplug_mask;
-
intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
/* Set the input timing to the screen. Assume always input 0. */
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 1b6eb76..c7d25c5 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -37,6 +37,174 @@
#include "i915_drv.h"
static void
+vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t x, uint32_t y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = dplane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(dplane);
+ int pipe = intel_plane->pipe;
+ int plane = intel_plane->plane;
+ u32 sprctl;
+ unsigned long sprsurf_offset, linear_offset;
+ int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+
+ sprctl = I915_READ(SPCNTR(pipe, plane));
+
+ /* Mask out pixel format bits in case we change it */
+ sprctl &= ~SP_PIXFORMAT_MASK;
+ sprctl &= ~SP_YUV_BYTE_ORDER_MASK;
+ sprctl &= ~SP_TILED;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_YUYV:
+ sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YUYV;
+ break;
+ case DRM_FORMAT_YVYU:
+ sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YVYU;
+ break;
+ case DRM_FORMAT_UYVY:
+ sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_UYVY;
+ break;
+ case DRM_FORMAT_VYUY:
+ sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_VYUY;
+ break;
+ case DRM_FORMAT_RGB565:
+ sprctl |= SP_FORMAT_BGR565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ sprctl |= SP_FORMAT_BGRX8888;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ sprctl |= SP_FORMAT_BGRA8888;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ sprctl |= SP_FORMAT_RGBX1010102;
+ break;
+ case DRM_FORMAT_ABGR2101010:
+ sprctl |= SP_FORMAT_RGBA1010102;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ sprctl |= SP_FORMAT_RGBX8888;
+ break;
+ case DRM_FORMAT_ABGR8888:
+ sprctl |= SP_FORMAT_RGBA8888;
+ break;
+ default:
+ /*
+ * If we get here one of the upper layers failed to filter
+ * out the unsupported plane formats
+ */
+ BUG();
+ break;
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ sprctl |= SP_TILED;
+
+ sprctl |= SP_ENABLE;
+
+ /* Sizes are 0 based */
+ src_w--;
+ src_h--;
+ crtc_w--;
+ crtc_h--;
+
+ intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
+
+ I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
+ I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
+
+ linear_offset = y * fb->pitches[0] + x * pixel_size;
+ sprsurf_offset = intel_gen4_compute_page_offset(&x, &y,
+ obj->tiling_mode,
+ pixel_size,
+ fb->pitches[0]);
+ linear_offset -= sprsurf_offset;
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x);
+ else
+ I915_WRITE(SPLINOFF(pipe, plane), linear_offset);
+
+ I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
+ I915_WRITE(SPCNTR(pipe, plane), sprctl);
+ I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset +
+ sprsurf_offset);
+ POSTING_READ(SPSURF(pipe, plane));
+}
+
+static void
+vlv_disable_plane(struct drm_plane *dplane)
+{
+ struct drm_device *dev = dplane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(dplane);
+ int pipe = intel_plane->pipe;
+ int plane = intel_plane->plane;
+
+ I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) &
+ ~SP_ENABLE);
+ /* Activate double buffered register update */
+ I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0);
+ POSTING_READ(SPSURF(pipe, plane));
+}
+
+static int
+vlv_update_colorkey(struct drm_plane *dplane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = dplane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(dplane);
+ int pipe = intel_plane->pipe;
+ int plane = intel_plane->plane;
+ u32 sprctl;
+
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ return -EINVAL;
+
+ I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
+ I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
+ I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask);
+
+ sprctl = I915_READ(SPCNTR(pipe, plane));
+ sprctl &= ~SP_SOURCE_KEY;
+ if (key->flags & I915_SET_COLORKEY_SOURCE)
+ sprctl |= SP_SOURCE_KEY;
+ I915_WRITE(SPCNTR(pipe, plane), sprctl);
+
+ POSTING_READ(SPKEYMSK(pipe, plane));
+
+ return 0;
+}
+
+static void
+vlv_get_colorkey(struct drm_plane *dplane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = dplane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(dplane);
+ int pipe = intel_plane->pipe;
+ int plane = intel_plane->plane;
+ u32 sprctl;
+
+ key->min_value = I915_READ(SPKEYMINVAL(pipe, plane));
+ key->max_value = I915_READ(SPKEYMAXVAL(pipe, plane));
+ key->channel_mask = I915_READ(SPKEYMSK(pipe, plane));
+
+ sprctl = I915_READ(SPCNTR(pipe, plane));
+ if (sprctl & SP_SOURCE_KEY)
+ key->flags = I915_SET_COLORKEY_SOURCE;
+ else
+ key->flags = I915_SET_COLORKEY_NONE;
+}
+
+static void
ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
@@ -441,6 +609,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
old_obj = intel_plane->obj;
+ intel_plane->crtc_x = crtc_x;
+ intel_plane->crtc_y = crtc_y;
+ intel_plane->crtc_w = crtc_w;
+ intel_plane->crtc_h = crtc_h;
+ intel_plane->src_x = src_x;
+ intel_plane->src_y = src_y;
+ intel_plane->src_w = src_w;
+ intel_plane->src_h = src_h;
+
src_w = src_w >> 16;
src_h = src_h >> 16;
@@ -513,6 +690,11 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
mutex_lock(&dev->struct_mutex);
+ /* Note that this will apply the VT-d workaround for scanouts,
+ * which is more restrictive than required for sprites. (The
+ * primary plane requires 256KiB alignment with 64 PTE padding,
+ * the sprite planes only require 128KiB alignment and 32 PTE padding.)
+ */
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
if (ret)
goto out_unlock;
@@ -568,6 +750,8 @@ intel_disable_plane(struct drm_plane *plane)
if (!intel_plane->obj)
goto out;
+ intel_wait_for_vblank(dev, intel_plane->pipe);
+
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(intel_plane->obj);
intel_plane->obj = NULL;
@@ -647,6 +831,20 @@ out_unlock:
return ret;
}
+void intel_plane_restore(struct drm_plane *plane)
+{
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+
+ if (!plane->crtc || !plane->fb)
+ return;
+
+ intel_update_plane(plane, plane->crtc, plane->fb,
+ intel_plane->crtc_x, intel_plane->crtc_y,
+ intel_plane->crtc_w, intel_plane->crtc_h,
+ intel_plane->src_x, intel_plane->src_y,
+ intel_plane->src_w, intel_plane->src_h);
+}
+
static const struct drm_plane_funcs intel_plane_funcs = {
.update_plane = intel_update_plane,
.disable_plane = intel_disable_plane,
@@ -670,8 +868,22 @@ static uint32_t snb_plane_formats[] = {
DRM_FORMAT_VYUY,
};
+static uint32_t vlv_plane_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
int
-intel_plane_init(struct drm_device *dev, enum pipe pipe)
+intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
{
struct intel_plane *intel_plane;
unsigned long possible_crtcs;
@@ -710,14 +922,26 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
intel_plane->can_scale = false;
else
intel_plane->can_scale = true;
- intel_plane->max_downscale = 2;
- intel_plane->update_plane = ivb_update_plane;
- intel_plane->disable_plane = ivb_disable_plane;
- intel_plane->update_colorkey = ivb_update_colorkey;
- intel_plane->get_colorkey = ivb_get_colorkey;
-
- plane_formats = snb_plane_formats;
- num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+
+ if (IS_VALLEYVIEW(dev)) {
+ intel_plane->max_downscale = 1;
+ intel_plane->update_plane = vlv_update_plane;
+ intel_plane->disable_plane = vlv_disable_plane;
+ intel_plane->update_colorkey = vlv_update_colorkey;
+ intel_plane->get_colorkey = vlv_get_colorkey;
+
+ plane_formats = vlv_plane_formats;
+ num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
+ } else {
+ intel_plane->max_downscale = 2;
+ intel_plane->update_plane = ivb_update_plane;
+ intel_plane->disable_plane = ivb_disable_plane;
+ intel_plane->update_colorkey = ivb_update_colorkey;
+ intel_plane->get_colorkey = ivb_get_colorkey;
+
+ plane_formats = snb_plane_formats;
+ num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ }
break;
default:
@@ -726,6 +950,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
}
intel_plane->pipe = pipe;
+ intel_plane->plane = plane;
possible_crtcs = (1 << pipe);
ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs,
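intel_update_plane() now caches crtc_x/y/w/h and src_x/y/w/h in struct intel_plane so that intel_plane_restore() can re-program a sprite from that cached state whenever it still has a crtc and fb. A hedged sketch of how a caller might reuse it after a full modeset; restore_all_planes is illustrative and assumes the usual mode_config locking is already held:

/* restore_all_planes() is a hypothetical helper, not part of this patch */
static void restore_all_planes(struct drm_device *dev)
{
    struct drm_plane *plane;

    /* assumes the caller already holds the usual mode_config locking */
    list_for_each_entry(plane, &dev->mode_config.plane_list, head)
        intel_plane_restore(plane); /* silently skips planes without crtc/fb */
}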
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d808421..b945bc5 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -905,11 +905,10 @@ intel_tv_mode_valid(struct drm_connector *connector,
static bool
-intel_tv_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+intel_tv_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
{
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ struct intel_tv *intel_tv = enc_to_intel_tv(&encoder->base);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
if (!tv_mode)
@@ -918,7 +917,10 @@ intel_tv_mode_fixup(struct drm_encoder *encoder,
if (intel_encoder_check_is_cloned(&intel_tv->base))
return false;
- adjusted_mode->clock = tv_mode->clock;
+ pipe_config->adjusted_mode.clock = tv_mode->clock;
+ DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
+ pipe_config->pipe_bpp = 8*3;
+
return true;
}
@@ -1485,7 +1487,6 @@ out:
}
static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
- .mode_fixup = intel_tv_mode_fixup,
.mode_set = intel_tv_mode_set,
};
@@ -1612,7 +1613,7 @@ intel_tv_init(struct drm_device *dev)
*
* More recent chipsets favour HDMI rather than integrated S-Video.
*/
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);
@@ -1620,6 +1621,7 @@ intel_tv_init(struct drm_device *dev)
drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
DRM_MODE_ENCODER_TVDAC);
+ intel_encoder->compute_config = intel_tv_compute_config;
intel_encoder->enable = intel_enable_tv;
intel_encoder->disable = intel_disable_tv;
intel_encoder->get_hw_state = intel_tv_get_hw_state;
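Across the HDMI, LVDS, SDVO and TV hunks the drm mode_fixup encoder helper is replaced by an intel_encoder->compute_config() callback operating on struct intel_crtc_config (requested_mode, adjusted_mode, pipe_bpp, has_pch_encoder, pixel_multiplier, ...). A minimal sketch of the shape such a callback takes; my_encoder_compute_config is illustrative, not from this series:

static bool my_encoder_compute_config(struct intel_encoder *encoder,
                                      struct intel_crtc_config *pipe_config)
{
    /* constrain pipe state here instead of flagging mode->private_flags */
    pipe_config->pipe_bpp = 8 * 3;

    if (HAS_PCH_SPLIT(encoder->base.dev))
        pipe_config->has_pch_encoder = true;

    /* encoder-specific timing fixups would edit pipe_config->adjusted_mode */
    return true;
}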
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 4d932c4..bf29b2f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -115,6 +115,8 @@ struct mga_fbdev {
void *sysram;
int size;
struct ttm_bo_kmap_obj mapping;
+ int x1, y1, x2, y2; /* dirty rect */
+ spinlock_t dirty_lock;
};
struct mga_crtc {
@@ -215,7 +217,7 @@ mgag200_bo(struct ttm_buffer_object *bo)
{
return container_of(bo, struct mgag200_bo, bo);
}
- /* mga_crtc.c */
+ /* mgag200_crtc.c */
void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
@@ -225,7 +227,7 @@ void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
int mgag200_modeset_init(struct mga_device *mdev);
void mgag200_modeset_fini(struct mga_device *mdev);
- /* mga_fbdev.c */
+ /* mgag200_fb.c */
int mgag200_fbdev_init(struct mga_device *mdev);
void mgag200_fbdev_fini(struct mga_device *mdev);
@@ -254,7 +256,7 @@ mgag200_dumb_mmap_offset(struct drm_file *file,
struct drm_device *dev,
uint32_t handle,
uint64_t *offset);
- /* mga_i2c.c */
+ /* mgag200_i2c.c */
struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev);
void mgag200_i2c_destroy(struct mga_i2c_chan *i2c);
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index d2253f6..5da824c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -29,16 +29,52 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
int ret;
bool unmap = false;
+ bool store_for_later = false;
+ int x2, y2;
+ unsigned long flags;
obj = mfbdev->mfb.obj;
bo = gem_to_mga_bo(obj);
+ /*
+ * Try to reserve the BO; if that fails with -EBUSY,
+ * the BO is being moved and we should
+ * store up the damage until later.
+ */
ret = mgag200_bo_reserve(bo, true);
if (ret) {
- DRM_ERROR("failed to reserve fb bo\n");
+ if (ret != -EBUSY)
+ return;
+
+ store_for_later = true;
+ }
+
+ x2 = x + width - 1;
+ y2 = y + height - 1;
+ spin_lock_irqsave(&mfbdev->dirty_lock, flags);
+
+ if (mfbdev->y1 < y)
+ y = mfbdev->y1;
+ if (mfbdev->y2 > y2)
+ y2 = mfbdev->y2;
+ if (mfbdev->x1 < x)
+ x = mfbdev->x1;
+ if (mfbdev->x2 > x2)
+ x2 = mfbdev->x2;
+
+ if (store_for_later) {
+ mfbdev->x1 = x;
+ mfbdev->x2 = x2;
+ mfbdev->y1 = y;
+ mfbdev->y2 = y2;
+ spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
return;
}
+ mfbdev->x1 = mfbdev->y1 = INT_MAX;
+ mfbdev->x2 = mfbdev->y2 = 0;
+ spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
+
if (!bo->kmap.virtual) {
ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
if (ret) {
@@ -48,10 +84,10 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
}
unmap = true;
}
- for (i = y; i < y + height; i++) {
+ for (i = y; i <= y2; i++) {
/* assume equal stride for now */
src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
- memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, width * bpp);
+ memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, (x2 - x + 1) * bpp);
}
if (unmap)
@@ -105,12 +141,9 @@ static int mgag200fb_create_object(struct mga_fbdev *afbdev,
struct drm_gem_object **gobj_p)
{
struct drm_device *dev = afbdev->helper.dev;
- u32 bpp, depth;
u32 size;
struct drm_gem_object *gobj;
-
int ret = 0;
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
size = mode_cmd->pitches[0] * mode_cmd->height;
ret = mgag200_gem_create(dev, size, true, &gobj);
@@ -249,19 +282,19 @@ int mgag200_fbdev_init(struct mga_device *mdev)
struct mga_fbdev *mfbdev;
int ret;
- mfbdev = kzalloc(sizeof(struct mga_fbdev), GFP_KERNEL);
+ mfbdev = devm_kzalloc(mdev->dev->dev, sizeof(struct mga_fbdev), GFP_KERNEL);
if (!mfbdev)
return -ENOMEM;
mdev->mfbdev = mfbdev;
mfbdev->helper.funcs = &mga_fb_helper_funcs;
+ spin_lock_init(&mfbdev->dirty_lock);
ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
mdev->num_crtc, MGAG200FB_CONN_LIMIT);
- if (ret) {
- kfree(mfbdev);
+ if (ret)
return ret;
- }
+
drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
/* disable all the possible outputs/crtcs before entering KMS mode */
@@ -278,6 +311,4 @@ void mgag200_fbdev_fini(struct mga_device *mdev)
return;
mga_fbdev_destroy(mdev->dev, mdev->mfbdev);
- kfree(mdev->mfbdev);
- mdev->mfbdev = NULL;
}
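mga_dirty_update() above now coalesces damage: when the BO cannot be reserved (-EBUSY) the new rectangle is merged into mfbdev->x1/y1/x2/y2 under dirty_lock and flushed on a later call. A standalone sketch of that rectangle-union bookkeeping, illustrative only and not driver code:

#include <limits.h>

struct dirty_rect {
    int x1, y1, x2, y2; /* inclusive bounds; x1/y1 = INT_MAX, x2/y2 = 0 when empty */
};

/* merge [x, x+w-1] x [y, y+h-1] into the pending rectangle */
static void dirty_merge(struct dirty_rect *d, int x, int y, int w, int h)
{
    int x2 = x + w - 1, y2 = y + h - 1;

    if (d->x1 < x) x = d->x1;
    if (d->y1 < y) y = d->y1;
    if (d->x2 > x2) x2 = d->x2;
    if (d->y2 > y2) y2 = d->y2;

    d->x1 = x; d->y1 = y; d->x2 = x2; d->y2 = y2;
}

/* reset to "empty" once the damage has been copied into the BO */
static void dirty_reset(struct dirty_rect *d)
{
    d->x1 = d->y1 = INT_MAX;
    d->x2 = d->y2 = 0;
}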
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 64297c7..9905923 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -76,15 +76,6 @@ static const struct drm_mode_config_funcs mga_mode_funcs = {
.fb_create = mgag200_user_framebuffer_create,
};
-/* Unmap the framebuffer from the core and release the memory */
-static void mga_vram_fini(struct mga_device *mdev)
-{
- pci_iounmap(mdev->dev->pdev, mdev->rmmio);
- mdev->rmmio = NULL;
- if (mdev->mc.vram_base)
- release_mem_region(mdev->mc.vram_base, mdev->mc.vram_window);
-}
-
static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
{
int offset;
@@ -140,7 +131,7 @@ static int mga_vram_init(struct mga_device *mdev)
remove_conflicting_framebuffers(aper, "mgafb", true);
kfree(aper);
- if (!request_mem_region(mdev->mc.vram_base, mdev->mc.vram_window,
+ if (!devm_request_mem_region(mdev->dev->dev, mdev->mc.vram_base, mdev->mc.vram_window,
"mgadrmfb_vram")) {
DRM_ERROR("can't reserve VRAM\n");
return -ENXIO;
@@ -173,13 +164,13 @@ static int mgag200_device_init(struct drm_device *dev,
mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1);
mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1);
- if (!request_mem_region(mdev->rmmio_base, mdev->rmmio_size,
+ if (!devm_request_mem_region(mdev->dev->dev, mdev->rmmio_base, mdev->rmmio_size,
"mgadrmfb_mmio")) {
DRM_ERROR("can't reserve mmio registers\n");
return -ENOMEM;
}
- mdev->rmmio = pci_iomap(dev->pdev, 1, 0);
+ mdev->rmmio = pcim_iomap(dev->pdev, 1, 0);
if (mdev->rmmio == NULL)
return -ENOMEM;
@@ -188,10 +179,8 @@ static int mgag200_device_init(struct drm_device *dev,
mdev->reg_1e24 = RREG32(0x1e24);
ret = mga_vram_init(mdev);
- if (ret) {
- release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
+ if (ret)
return ret;
- }
mdev->bpp_shifts[0] = 0;
mdev->bpp_shifts[1] = 1;
@@ -200,12 +189,6 @@ static int mgag200_device_init(struct drm_device *dev,
return 0;
}
-void mgag200_device_fini(struct mga_device *mdev)
-{
- release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
- mga_vram_fini(mdev);
-}
-
/*
* Functions here will be called by the core once it's bound the driver to
* a PCI device
@@ -217,7 +200,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
struct mga_device *mdev;
int r;
- mdev = kzalloc(sizeof(struct mga_device), GFP_KERNEL);
+ mdev = devm_kzalloc(dev->dev, sizeof(struct mga_device), GFP_KERNEL);
if (mdev == NULL)
return -ENOMEM;
dev->dev_private = (void *)mdev;
@@ -234,8 +217,6 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
drm_mode_config_init(dev);
dev->mode_config.funcs = (void *)&mga_mode_funcs;
- dev->mode_config.min_width = 0;
- dev->mode_config.min_height = 0;
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
@@ -258,8 +239,6 @@ int mgag200_driver_unload(struct drm_device *dev)
mgag200_fbdev_fini(mdev);
drm_mode_config_cleanup(dev);
mgag200_mm_fini(mdev);
- mgag200_device_fini(mdev);
- kfree(mdev);
dev->dev_private = NULL;
return 0;
}
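The mgag200_main.c hunks convert allocations and MMIO/VRAM reservations to managed devm_*/pcim_* variants, which is why mga_vram_fini(), mgag200_device_fini() and the kfree()/release_mem_region() unwind paths can simply be deleted: the resources are tied to the struct device lifetime. A hedged sketch of the managed-resource pattern; my_hw, my_probe and the BAR index are illustrative:

#include <linux/device.h>
#include <linux/pci.h>

struct my_hw {
    void __iomem *mmio;
};

static int my_probe(struct pci_dev *pdev)
{
    struct my_hw *hw;

    /* freed automatically when the device is unbound */
    hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
    if (!hw)
        return -ENOMEM;

    /* region and mapping are released by devres, so no unwind code */
    if (!devm_request_mem_region(&pdev->dev, pci_resource_start(pdev, 1),
                                 pci_resource_len(pdev, 1), "my_mmio"))
        return -ENOMEM;

    hw->mmio = pcim_iomap(pdev, 1, 0);
    if (!hw->mmio)
        return -ENOMEM;

    return 0;
}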
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 78d8e919..f988965 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1254,9 +1254,8 @@ static const struct drm_crtc_helper_funcs mga_helper_funcs = {
};
/* CRTC setup */
-static void mga_crtc_init(struct drm_device *dev)
+static void mga_crtc_init(struct mga_device *mdev)
{
- struct mga_device *mdev = dev->dev_private;
struct mga_crtc *mga_crtc;
int i;
@@ -1267,7 +1266,7 @@ static void mga_crtc_init(struct drm_device *dev)
if (mga_crtc == NULL)
return;
- drm_crtc_init(dev, &mga_crtc->base, &mga_crtc_funcs);
+ drm_crtc_init(mdev->dev, &mga_crtc->base, &mga_crtc_funcs);
drm_mode_crtc_set_gamma_size(&mga_crtc->base, MGAG200_LUT_SIZE);
mdev->mode_info.crtc = mga_crtc;
@@ -1522,7 +1521,7 @@ int mgag200_modeset_init(struct mga_device *mdev)
mdev->dev->mode_config.fb_base = mdev->mc.vram_base;
- mga_crtc_init(mdev->dev);
+ mga_crtc_init(mdev);
encoder = mga_encoder_init(mdev->dev);
if (!encoder) {
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 8fc9d92..401c989 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -315,8 +315,8 @@ int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
if (ret) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("reserve failed %p\n", bo);
+ if (ret != -ERESTARTSYS && ret != -EBUSY)
+ DRM_ERROR("reserve failed %p %d\n", bo, ret);
return ret;
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 90f9140..998e8b4 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -53,15 +53,6 @@ nouveau-y += core/subdev/clock/nva3.o
nouveau-y += core/subdev/clock/nvc0.o
nouveau-y += core/subdev/clock/pllnv04.o
nouveau-y += core/subdev/clock/pllnva3.o
-nouveau-y += core/subdev/device/base.o
-nouveau-y += core/subdev/device/nv04.o
-nouveau-y += core/subdev/device/nv10.o
-nouveau-y += core/subdev/device/nv20.o
-nouveau-y += core/subdev/device/nv30.o
-nouveau-y += core/subdev/device/nv40.o
-nouveau-y += core/subdev/device/nv50.o
-nouveau-y += core/subdev/device/nvc0.o
-nouveau-y += core/subdev/device/nve0.o
nouveau-y += core/subdev/devinit/base.o
nouveau-y += core/subdev/devinit/nv04.o
nouveau-y += core/subdev/devinit/nv05.o
@@ -126,6 +117,7 @@ nouveau-y += core/subdev/therm/ic.o
nouveau-y += core/subdev/therm/temp.o
nouveau-y += core/subdev/therm/nv40.o
nouveau-y += core/subdev/therm/nv50.o
+nouveau-y += core/subdev/therm/nv84.o
nouveau-y += core/subdev/therm/nva3.o
nouveau-y += core/subdev/therm/nvd0.o
nouveau-y += core/subdev/timer/base.o
@@ -150,6 +142,15 @@ nouveau-y += core/engine/copy/nvc0.o
nouveau-y += core/engine/copy/nve0.o
nouveau-y += core/engine/crypt/nv84.o
nouveau-y += core/engine/crypt/nv98.o
+nouveau-y += core/engine/device/base.o
+nouveau-y += core/engine/device/nv04.o
+nouveau-y += core/engine/device/nv10.o
+nouveau-y += core/engine/device/nv20.o
+nouveau-y += core/engine/device/nv30.o
+nouveau-y += core/engine/device/nv40.o
+nouveau-y += core/engine/device/nv50.o
+nouveau-y += core/engine/device/nvc0.o
+nouveau-y += core/engine/device/nve0.o
nouveau-y += core/engine/disp/base.o
nouveau-y += core/engine/disp/nv04.o
nouveau-y += core/engine/disp/nv50.o
@@ -159,6 +160,7 @@ nouveau-y += core/engine/disp/nva0.o
nouveau-y += core/engine/disp/nva3.o
nouveau-y += core/engine/disp/nvd0.o
nouveau-y += core/engine/disp/nve0.o
+nouveau-y += core/engine/disp/nvf0.o
nouveau-y += core/engine/disp/dacnv50.o
nouveau-y += core/engine/disp/dport.o
nouveau-y += core/engine/disp/hdanva3.o
@@ -212,7 +214,7 @@ nouveau-y += core/engine/vp/nve0.o
# drm/core
nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
-nouveau-y += nouveau_irq.o nouveau_vga.o nouveau_agp.o
+nouveau-y += nouveau_vga.o nouveau_agp.o
nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o
nouveau-y += nouveau_prime.o nouveau_abi16.o
nouveau-y += nv04_fence.o nv10_fence.o nv17_fence.o
@@ -224,9 +226,7 @@ nouveau-y += nouveau_connector.o nouveau_dp.o
nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o
# drm/kms/nv04:nv50
-nouveau-y += nouveau_hw.o nouveau_calc.o
-nouveau-y += nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o
-nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o
+include $(src)/dispnv04/Makefile
# drm/kms/nv50-
nouveau-y += nv50_display.o
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c
index 295c221..9079c0a 100644
--- a/drivers/gpu/drm/nouveau/core/core/client.c
+++ b/drivers/gpu/drm/nouveau/core/core/client.c
@@ -27,7 +27,7 @@
#include <core/handle.h>
#include <core/option.h>
-#include <subdev/device.h>
+#include <engine/device.h>
static void
nouveau_client_dtor(struct nouveau_object *object)
@@ -58,8 +58,9 @@ nouveau_client_create_(const char *name, u64 devname, const char *cfg,
return -ENODEV;
ret = nouveau_namedb_create_(NULL, NULL, &nouveau_client_oclass,
- NV_CLIENT_CLASS, nouveau_device_sclass,
- 0, length, pobject);
+ NV_CLIENT_CLASS, NULL,
+ (1ULL << NVDEV_ENGINE_DEVICE),
+ length, pobject);
client = *pobject;
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/core/engine.c b/drivers/gpu/drm/nouveau/core/core/engine.c
index 09b3bd5..c8bed4a 100644
--- a/drivers/gpu/drm/nouveau/core/core/engine.c
+++ b/drivers/gpu/drm/nouveau/core/core/engine.c
@@ -33,7 +33,6 @@ nouveau_engine_create_(struct nouveau_object *parent,
const char *iname, const char *fname,
int length, void **pobject)
{
- struct nouveau_device *device = nv_device(parent);
struct nouveau_engine *engine;
int ret;
@@ -43,7 +42,8 @@ nouveau_engine_create_(struct nouveau_object *parent,
if (ret)
return ret;
- if (!nouveau_boolopt(device->cfgopt, iname, enable)) {
+ if ( parent &&
+ !nouveau_boolopt(nv_device(parent)->cfgopt, iname, enable)) {
if (!enable)
nv_warn(engine, "disabled, %s=1 to enable\n", iname);
return -ENODEV;
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c
index 6d01e0f..7eb81c1 100644
--- a/drivers/gpu/drm/nouveau/core/core/event.c
+++ b/drivers/gpu/drm/nouveau/core/core/event.c
@@ -27,8 +27,10 @@ static void
nouveau_event_put_locked(struct nouveau_event *event, int index,
struct nouveau_eventh *handler)
{
- if (!--event->index[index].refs)
- event->disable(event, index);
+ if (!--event->index[index].refs) {
+ if (event->disable)
+ event->disable(event, index);
+ }
list_del(&handler->head);
}
@@ -53,8 +55,10 @@ nouveau_event_get(struct nouveau_event *event, int index,
spin_lock_irqsave(&event->lock, flags);
if (index < event->index_nr) {
list_add(&handler->head, &event->index[index].list);
- if (!event->index[index].refs++)
- event->enable(event, index);
+ if (!event->index[index].refs++) {
+ if (event->enable)
+ event->enable(event, index);
+ }
}
spin_unlock_irqrestore(&event->lock, flags);
}
diff --git a/drivers/gpu/drm/nouveau/core/core/object.c b/drivers/gpu/drm/nouveau/core/core/object.c
index 3b2e7b63..7f48e28 100644
--- a/drivers/gpu/drm/nouveau/core/core/object.c
+++ b/drivers/gpu/drm/nouveau/core/core/object.c
@@ -136,26 +136,30 @@ nouveau_object_ctor(struct nouveau_object *parent,
struct nouveau_object **pobject)
{
struct nouveau_ofuncs *ofuncs = oclass->ofuncs;
+ struct nouveau_object *object = NULL;
int ret;
- *pobject = NULL;
-
- ret = ofuncs->ctor(parent, engine, oclass, data, size, pobject);
+ ret = ofuncs->ctor(parent, engine, oclass, data, size, &object);
+ *pobject = object;
if (ret < 0) {
if (ret != -ENODEV) {
nv_error(parent, "failed to create 0x%08x, %d\n",
oclass->handle, ret);
}
- if (*pobject) {
- ofuncs->dtor(*pobject);
+ if (object) {
+ ofuncs->dtor(object);
*pobject = NULL;
}
return ret;
}
- nv_debug(*pobject, "created\n");
+ if (ret == 0) {
+ nv_debug(object, "created\n");
+ atomic_set(&object->refcount, 1);
+ }
+
return 0;
}
@@ -327,6 +331,7 @@ nouveau_object_inc(struct nouveau_object *object)
}
ret = nv_ofuncs(object)->init(object);
+ atomic_set(&object->usecount, 1);
if (ret) {
nv_error(object, "init failed, %d\n", ret);
goto fail_self;
@@ -357,6 +362,7 @@ nouveau_object_decf(struct nouveau_object *object)
nv_trace(object, "stopping...\n");
ret = nv_ofuncs(object)->fini(object, false);
+ atomic_set(&object->usecount, 0);
if (ret)
nv_warn(object, "failed fini, %d\n", ret);
@@ -381,6 +387,7 @@ nouveau_object_decs(struct nouveau_object *object)
nv_trace(object, "suspending...\n");
ret = nv_ofuncs(object)->fini(object, true);
+ atomic_set(&object->usecount, 0);
if (ret) {
nv_error(object, "failed suspend, %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/core/parent.c b/drivers/gpu/drm/nouveau/core/core/parent.c
index db7c549..313380c 100644
--- a/drivers/gpu/drm/nouveau/core/core/parent.c
+++ b/drivers/gpu/drm/nouveau/core/core/parent.c
@@ -24,6 +24,7 @@
#include <core/object.h>
#include <core/parent.h>
+#include <core/client.h>
int
nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
@@ -50,7 +51,12 @@ nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
while (mask) {
int i = ffsll(mask) - 1;
- if ((engine = nouveau_engine(parent, i))) {
+ if (nv_iclass(parent, NV_CLIENT_CLASS))
+ engine = nv_engine(nv_client(parent)->device);
+ else
+ engine = nouveau_engine(parent, i);
+
+ if (engine) {
oclass = engine->sclass;
while (oclass->ofuncs) {
if ((oclass->handle & 0xffff) == handle) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 3937ced..4c72571 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -29,7 +29,7 @@
#include <core/class.h>
-#include <subdev/device.h>
+#include <engine/device.h>
static DEFINE_MUTEX(nv_devices_mutex);
static LIST_HEAD(nv_devices);
@@ -55,7 +55,6 @@ nouveau_device_find(u64 name)
struct nouveau_devobj {
struct nouveau_parent base;
struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
- bool created;
};
static const u64 disable_map[] = {
@@ -173,7 +172,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
case 0xa0: device->card_type = NV_50; break;
case 0xc0: device->card_type = NV_C0; break;
case 0xd0: device->card_type = NV_D0; break;
- case 0xe0: device->card_type = NV_E0; break;
+ case 0xe0:
+ case 0xf0: device->card_type = NV_E0; break;
default:
break;
}
@@ -238,26 +238,24 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
}
/* ensure requested subsystems are available for use */
- for (i = 0, c = 0; i < NVDEV_SUBDEV_NR; i++) {
+ for (i = 1, c = 1; i < NVDEV_SUBDEV_NR; i++) {
if (!(oclass = device->oclass[i]) || (disable & (1ULL << i)))
continue;
- if (!device->subdev[i]) {
- ret = nouveau_object_ctor(nv_object(device), NULL,
- oclass, NULL, i,
- &devobj->subdev[i]);
- if (ret == -ENODEV)
- continue;
- if (ret)
- return ret;
-
- if (nv_iclass(devobj->subdev[i], NV_ENGINE_CLASS))
- nouveau_subdev_reset(devobj->subdev[i]);
- } else {
+ if (device->subdev[i]) {
nouveau_object_ref(device->subdev[i],
&devobj->subdev[i]);
+ continue;
}
+ ret = nouveau_object_ctor(nv_object(device), NULL,
+ oclass, NULL, i,
+ &devobj->subdev[i]);
+ if (ret == -ENODEV)
+ continue;
+ if (ret)
+ return ret;
+
/* note: can't init *any* subdevs until devinit has been run
* due to not knowing exactly what the vbios init tables will
* mess with. devinit also can't be run until all of its
@@ -273,6 +271,10 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
ret = nouveau_object_inc(subdev);
if (ret)
return ret;
+ atomic_dec(&nv_object(device)->usecount);
+ } else
+ if (subdev) {
+ nouveau_subdev_reset(subdev);
}
}
}
@@ -292,74 +294,6 @@ nouveau_devobj_dtor(struct nouveau_object *object)
nouveau_parent_destroy(&devobj->base);
}
-static int
-nouveau_devobj_init(struct nouveau_object *object)
-{
- struct nouveau_devobj *devobj = (void *)object;
- struct nouveau_object *subdev;
- int ret, i;
-
- ret = nouveau_parent_init(&devobj->base);
- if (ret)
- return ret;
-
- for (i = 0; devobj->created && i < NVDEV_SUBDEV_NR; i++) {
- if ((subdev = devobj->subdev[i])) {
- if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
- ret = nouveau_object_inc(subdev);
- if (ret)
- goto fail;
- }
- }
- }
-
- devobj->created = true;
- return 0;
-
-fail:
- for (--i; i >= 0; i--) {
- if ((subdev = devobj->subdev[i])) {
- if (!nv_iclass(subdev, NV_ENGINE_CLASS))
- nouveau_object_dec(subdev, false);
- }
- }
-
- return ret;
-}
-
-static int
-nouveau_devobj_fini(struct nouveau_object *object, bool suspend)
-{
- struct nouveau_devobj *devobj = (void *)object;
- struct nouveau_object *subdev;
- int ret, i;
-
- for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
- if ((subdev = devobj->subdev[i])) {
- if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
- ret = nouveau_object_dec(subdev, suspend);
- if (ret && suspend)
- goto fail;
- }
- }
- }
-
- ret = nouveau_parent_fini(&devobj->base, suspend);
-fail:
- for (; ret && suspend && i < NVDEV_SUBDEV_NR; i++) {
- if ((subdev = devobj->subdev[i])) {
- if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
- ret = nouveau_object_inc(subdev);
- if (ret) {
- /* XXX */
- }
- }
- }
- }
-
- return ret;
-}
-
static u8
nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
{
@@ -400,8 +334,8 @@ static struct nouveau_ofuncs
nouveau_devobj_ofuncs = {
.ctor = nouveau_devobj_ctor,
.dtor = nouveau_devobj_dtor,
- .init = nouveau_devobj_init,
- .fini = nouveau_devobj_fini,
+ .init = _nouveau_parent_init,
+ .fini = _nouveau_parent_fini,
.rd08 = nouveau_devobj_rd08,
.rd16 = nouveau_devobj_rd16,
.rd32 = nouveau_devobj_rd32,
@@ -413,12 +347,76 @@ nouveau_devobj_ofuncs = {
/******************************************************************************
* nouveau_device: engine functions
*****************************************************************************/
-struct nouveau_oclass
+static struct nouveau_oclass
nouveau_device_sclass[] = {
{ 0x0080, &nouveau_devobj_ofuncs },
{}
};
+static int
+nouveau_device_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nouveau_device *device = (void *)object;
+ struct nouveau_object *subdev;
+ int ret, i;
+
+ for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
+ if ((subdev = device->subdev[i])) {
+ if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
+ ret = nouveau_object_dec(subdev, suspend);
+ if (ret && suspend)
+ goto fail;
+ }
+ }
+ }
+
+ ret = 0;
+fail:
+ for (; ret && i < NVDEV_SUBDEV_NR; i++) {
+ if ((subdev = device->subdev[i])) {
+ if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
+ ret = nouveau_object_inc(subdev);
+ if (ret) {
+ /* XXX */
+ }
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int
+nouveau_device_init(struct nouveau_object *object)
+{
+ struct nouveau_device *device = (void *)object;
+ struct nouveau_object *subdev;
+ int ret, i;
+
+ for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
+ if ((subdev = device->subdev[i])) {
+ if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
+ ret = nouveau_object_inc(subdev);
+ if (ret)
+ goto fail;
+ } else {
+ nouveau_subdev_reset(subdev);
+ }
+ }
+ }
+
+ ret = 0;
+fail:
+ for (--i; ret && i >= 0; i--) {
+ if ((subdev = device->subdev[i])) {
+ if (!nv_iclass(subdev, NV_ENGINE_CLASS))
+ nouveau_object_dec(subdev, false);
+ }
+ }
+
+ return ret;
+}
+
static void
nouveau_device_dtor(struct nouveau_object *object)
{
@@ -428,17 +426,19 @@ nouveau_device_dtor(struct nouveau_object *object)
list_del(&device->head);
mutex_unlock(&nv_devices_mutex);
- if (device->base.mmio)
- iounmap(device->base.mmio);
+ if (nv_subdev(device)->mmio)
+ iounmap(nv_subdev(device)->mmio);
- nouveau_subdev_destroy(&device->base);
+ nouveau_engine_destroy(&device->base);
}
static struct nouveau_oclass
nouveau_device_oclass = {
- .handle = NV_SUBDEV(DEVICE, 0x00),
+ .handle = NV_ENGINE(DEVICE, 0x00),
.ofuncs = &(struct nouveau_ofuncs) {
.dtor = nouveau_device_dtor,
+ .init = nouveau_device_init,
+ .fini = nouveau_device_fini,
},
};
@@ -456,13 +456,12 @@ nouveau_device_create_(struct pci_dev *pdev, u64 name, const char *sname,
goto done;
}
- ret = nouveau_subdev_create_(NULL, NULL, &nouveau_device_oclass, 0,
+ ret = nouveau_engine_create_(NULL, NULL, &nouveau_device_oclass, true,
"DEVICE", "device", length, pobject);
device = *pobject;
if (ret)
goto done;
- atomic_set(&nv_object(device)->usecount, 2);
device->pdev = pdev;
device->handle = name;
device->cfgopt = cfg;
@@ -470,6 +469,7 @@ nouveau_device_create_(struct pci_dev *pdev, u64 name, const char *sname,
device->name = sname;
nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE");
+ nv_engine(device)->sclass = nouveau_device_sclass;
list_add(&device->head, &nv_devices);
done:
mutex_unlock(&nv_devices_mutex);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
index 473c5c0..a0284cf 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
@@ -22,7 +22,6 @@
* Authors: Ben Skeggs
*/
-#include <subdev/device.h>
#include <subdev/bios.h>
#include <subdev/bus.h>
#include <subdev/i2c.h>
@@ -34,6 +33,7 @@
#include <subdev/instmem.h>
#include <subdev/vm.h>
+#include <engine/device.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
#include <engine/software.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
index d0774f5..1b7809a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
@@ -22,7 +22,6 @@
* Authors: Ben Skeggs
*/
-#include <subdev/device.h>
#include <subdev/bios.h>
#include <subdev/bus.h>
#include <subdev/gpio.h>
@@ -35,6 +34,7 @@
#include <subdev/instmem.h>
#include <subdev/vm.h>
+#include <engine/device.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
#include <engine/software.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
index ab920e0..12a4005 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
@@ -22,7 +22,6 @@
* Authors: Ben Skeggs
*/
-#include <subdev/device.h>
#include <subdev/bios.h>
#include <subdev/bus.h>
#include <subdev/gpio.h>
@@ -36,6 +35,7 @@
#include <subdev/instmem.h>
#include <subdev/vm.h>
+#include <engine/device.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
#include <engine/software.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
index 5f21102..cef0f1e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
@@ -22,7 +22,6 @@
* Authors: Ben Skeggs
*/
-#include <subdev/device.h>
#include <subdev/bios.h>
#include <subdev/bus.h>
#include <subdev/gpio.h>
@@ -35,6 +34,7 @@
#include <subdev/instmem.h>
#include <subdev/vm.h>
+#include <engine/device.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
#include <engine/software.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
index f3d55ef..1719cb0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
@@ -22,7 +22,6 @@
* Authors: Ben Skeggs
*/
-#include <subdev/device.h>
#include <subdev/bios.h>
#include <subdev/bus.h>
#include <subdev/vm.h>
@@ -37,6 +36,7 @@
#include <subdev/instmem.h>
#include <subdev/vm.h>
+#include <engine/device.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
#include <engine/software.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index 5ed2fa5..5e8c3de 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -22,7 +22,6 @@
* Authors: Ben Skeggs
*/
-#include <subdev/device.h>
#include <subdev/bios.h>
#include <subdev/bus.h>
#include <subdev/gpio.h>
@@ -38,6 +37,7 @@
#include <subdev/vm.h>
#include <subdev/bar.h>
+#include <engine/device.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
#include <engine/software.h>
@@ -83,7 +83,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
@@ -109,7 +109,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
@@ -135,7 +135,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
@@ -161,7 +161,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
@@ -187,7 +187,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
@@ -213,7 +213,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
@@ -239,7 +239,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
@@ -265,7 +265,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
@@ -291,7 +291,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 4393eb4..955af12 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -22,7 +22,6 @@
* Authors: Ben Skeggs
*/
-#include <subdev/device.h>
#include <subdev/bios.h>
#include <subdev/bus.h>
#include <subdev/gpio.h>
@@ -40,6 +39,7 @@
#include <subdev/vm.h>
#include <subdev/bar.h>
+#include <engine/device.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
#include <engine/software.h>
@@ -285,6 +285,34 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
break;
+ case 0xd7:
+ device->cname = "GF117";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
+ break;
default:
nv_fatal(device, "unknown Fermi chipset\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 5c12391..a354e40 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -22,7 +22,6 @@
* Authors: Ben Skeggs
*/
-#include <subdev/device.h>
#include <subdev/bios.h>
#include <subdev/bus.h>
#include <subdev/gpio.h>
@@ -40,6 +39,7 @@
#include <subdev/vm.h>
#include <subdev/bar.h>
+#include <engine/device.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
#include <engine/software.h>
@@ -141,6 +141,40 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
break;
+ case 0xf0:
+ device->cname = "GK110";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+#if 0
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
+#endif
+ device->oclass[NVDEV_ENGINE_DISP ] = &nvf0_disp_oclass;
+#if 0
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+#endif
+ break;
default:
nv_fatal(device, "unknown Kepler chipset\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index fa27b02..31cc8fe 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -191,7 +191,7 @@ dp_link_train_cr(struct dp_state *dp)
static int
dp_link_train_eq(struct dp_state *dp)
{
- bool eq_done, cr_done = true;
+ bool eq_done = false, cr_done = true;
int tries = 0, i;
dp_set_training_pattern(dp, 2);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 02e369f..6a38402 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -572,7 +572,8 @@ nv50_disp_base_ctor(struct nouveau_object *parent,
priv->base.vblank->priv = priv;
priv->base.vblank->enable = nv50_disp_base_vblank_enable;
priv->base.vblank->disable = nv50_disp_base_vblank_disable;
- return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
+ return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
+ &base->ramht);
}
static void
@@ -719,7 +720,7 @@ nv50_disp_data_ctor(struct nouveau_object *parent,
if (nv_mclass(parent) != NV_DEVICE_CLASS) {
atomic_inc(&parent->refcount);
*pobject = parent;
- return 0;
+ return 1;
}
/* allocate display hardware to client */
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 788dd34..019eacd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -473,7 +473,8 @@ nvd0_disp_base_ctor(struct nouveau_object *parent,
priv->base.vblank->enable = nvd0_disp_base_vblank_enable;
priv->base.vblank->disable = nvd0_disp_base_vblank_disable;
- return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
+ return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
+ &base->ramht);
}
static void
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
new file mode 100644
index 0000000..a488c36
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nvf0_disp_sclass[] = {
+ { NVF0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+ { NVF0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+ { NVF0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+ { NVF0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+ { NVF0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+ {}
+};
+
+static struct nouveau_oclass
+nvf0_disp_base_oclass[] = {
+ { NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+ {}
+};
+
+static int
+nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int heads = nv_rd32(parent, 0x022448);
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, heads,
+ "PDISP", "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nvf0_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nvd0_disp_intr;
+ INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
+ priv->sclass = nvf0_disp_sclass;
+ priv->head.nr = heads;
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hda_eld = nvd0_hda_eld;
+ priv->sor.hdmi = nvd0_hdmi_ctrl;
+ priv->sor.dp = &nvd0_sor_dp_func;
+ return 0;
+}
+
+struct nouveau_oclass
+nvf0_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x92),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvf0_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
index d152875..944e73a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
@@ -50,6 +50,9 @@ nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
case NVE0_DISP_MAST_CLASS:
case NVE0_DISP_SYNC_CLASS:
case NVE0_DISP_OVLY_CLASS:
+ case NVF0_DISP_MAST_CLASS:
+ case NVF0_DISP_SYNC_CLASS:
+ case NVF0_DISP_OVLY_CLASS:
break;
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
index 7341ebe..d3ec436d9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -91,6 +91,8 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
if (!chan->user)
return -EFAULT;
+ nouveau_event_trigger(priv->cevent, 0);
+
chan->size = size;
return 0;
}
@@ -167,6 +169,7 @@ nouveau_fifo_destroy(struct nouveau_fifo *priv)
{
kfree(priv->channel);
nouveau_event_destroy(&priv->uevent);
+ nouveau_event_destroy(&priv->cevent);
nouveau_engine_destroy(&priv->base);
}
@@ -191,6 +194,10 @@ nouveau_fifo_create_(struct nouveau_object *parent,
if (!priv->channel)
return -ENOMEM;
+ ret = nouveau_event_create(1, &priv->cevent);
+ if (ret)
+ return ret;
+
ret = nouveau_event_create(1, &priv->uevent);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 840af61..ddaeb55 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -210,7 +210,8 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
nv_parent(chan)->object_attach = nv50_fifo_object_attach;
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
- ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
+ ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
+ &chan->ramht);
if (ret)
return ret;
@@ -263,7 +264,8 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
nv_parent(chan)->object_attach = nv50_fifo_object_attach;
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
- ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
+ ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
+ &chan->ramht);
if (ret)
return ret;
@@ -373,17 +375,17 @@ nv50_fifo_context_ctor(struct nouveau_object *parent,
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0200, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
+ ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x0200,
+ 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, nv_object(base), 0x1200, 0,
+ ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x1200, 0,
NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, nv_object(base), 0x4000, 0, 0,
+ ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0, 0,
&base->pgd);
if (ret)
return ret;
@@ -437,12 +439,12 @@ nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
&priv->playlist[0]);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
&priv->playlist[1]);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 094000e..35b94bd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -180,7 +180,8 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
if (ret)
return ret;
- ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
+ ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
+ &chan->ramht);
if (ret)
return ret;
@@ -242,7 +243,8 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
if (ret)
return ret;
- ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
+ ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
+ &chan->ramht);
if (ret)
return ret;
@@ -336,12 +338,12 @@ nv84_fifo_context_ctor(struct nouveau_object *parent,
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0200, 0,
+ ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x0200, 0,
NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, nv_object(base), 0x4000, 0,
+ ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0,
0, &base->pgd);
if (ret)
return ret;
@@ -350,13 +352,13 @@ nv84_fifo_context_ctor(struct nouveau_object *parent,
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, nv_object(base), 0x1000, 0x400,
- NVOBJ_FLAG_ZERO_ALLOC, &base->cache);
+ ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x1000,
+ 0x400, NVOBJ_FLAG_ZERO_ALLOC, &base->cache);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0100, 0x100,
- NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
+ ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x0100,
+ 0x100, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
if (ret)
return ret;
@@ -407,12 +409,12 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
&priv->playlist[0]);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
&priv->playlist[1]);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index 4f226af..4d4a6b9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -292,7 +292,8 @@ nvc0_fifo_context_ctor(struct nouveau_object *parent,
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0x1000, 0, &base->pgd);
+ ret = nouveau_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
+ &base->pgd);
if (ret)
return ret;
@@ -623,17 +624,17 @@ nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x1000, 0,
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
&priv->playlist[0]);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x1000, 0,
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
&priv->playlist[1]);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL, 128 * 0x1000, 0x1000, 0,
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 0x1000, 0x1000, 0,
&priv->user.mem);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 4419e40..9151919 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -96,7 +96,7 @@ nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
cur = engn->playlist[engn->cur_playlist];
if (unlikely(cur == NULL)) {
- int ret = nouveau_gpuobj_new(nv_object(priv)->parent, NULL,
+ int ret = nouveau_gpuobj_new(nv_object(priv), NULL,
0x8000, 0x1000, 0, &cur);
if (ret) {
nv_error(priv, "playlist alloc failed\n");
@@ -333,7 +333,8 @@ nve0_fifo_context_ctor(struct nouveau_object *parent,
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0x1000, 0, &base->pgd);
+ ret = nouveau_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
+ &base->pgd);
if (ret)
return ret;
@@ -595,7 +596,7 @@ nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL, 4096 * 0x200, 0x1000,
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 4096 * 0x200, 0x1000,
NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
index 0b7951a..4cc6269 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
@@ -36,7 +36,6 @@ int
nvc0_grctx_init(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
{
struct nouveau_bar *bar = nouveau_bar(priv);
- struct nouveau_object *parent = nv_object(priv);
struct nouveau_gpuobj *chan;
u32 size = (0x80000 + priv->size + 4095) & ~4095;
int ret, i;
@@ -44,7 +43,7 @@ nvc0_grctx_init(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
	/* allocate memory for a "channel", which we'll use to generate
* the default context values
*/
- ret = nouveau_gpuobj_new(parent, NULL, size, 0x1000,
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, size, 0x1000,
NVOBJ_FLAG_ZERO_ALLOC, &info->chan);
chan = info->chan;
if (ret) {
@@ -1399,7 +1398,7 @@ nvc0_grctx_generate_90c0(struct nvc0_graph_priv *priv)
{
int i;
- for (i = 0; nv_device(priv)->chipset == 0xd9 && i < 4; i++) {
+ for (i = 0; nv_device(priv)->chipset >= 0xd0 && i < 4; i++) {
nv_mthd(priv, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
nv_mthd(priv, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
nv_mthd(priv, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
@@ -1415,7 +1414,7 @@ nvc0_grctx_generate_90c0(struct nvc0_graph_priv *priv)
nv_mthd(priv, 0x90c0, 0x27ac, 0x00000000);
nv_mthd(priv, 0x90c0, 0x27cc, 0x00000000);
nv_mthd(priv, 0x90c0, 0x27ec, 0x00000000);
- for (i = 0; nv_device(priv)->chipset == 0xd9 && i < 4; i++) {
+ for (i = 0; nv_device(priv)->chipset >= 0xd0 && i < 4; i++) {
nv_mthd(priv, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
nv_mthd(priv, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
nv_mthd(priv, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
@@ -1615,7 +1614,7 @@ static void
nvc0_grctx_generate_shaders(struct nvc0_graph_priv *priv)
{
- if (nv_device(priv)->chipset == 0xd9) {
+ if (nv_device(priv)->chipset >= 0xd0) {
nv_wr32(priv, 0x405800, 0x0f8000bf);
nv_wr32(priv, 0x405830, 0x02180218);
nv_wr32(priv, 0x405834, 0x08000000);
@@ -1658,10 +1657,10 @@ nvc0_grctx_generate_unk64xx(struct nvc0_graph_priv *priv)
nv_wr32(priv, 0x4064ac, 0x00003fff);
nv_wr32(priv, 0x4064b4, 0x00000000);
nv_wr32(priv, 0x4064b8, 0x00000000);
- if (nv_device(priv)->chipset == 0xd9)
+ if (nv_device(priv)->chipset >= 0xd0)
nv_wr32(priv, 0x4064bc, 0x00000000);
if (nv_device(priv)->chipset == 0xc1 ||
- nv_device(priv)->chipset == 0xd9) {
+ nv_device(priv)->chipset >= 0xd0) {
nv_wr32(priv, 0x4064c0, 0x80140078);
nv_wr32(priv, 0x4064c4, 0x0086ffff);
}
@@ -1701,7 +1700,7 @@ nvc0_grctx_generate_rop(struct nvc0_graph_priv *priv)
/* ROPC_BROADCAST */
nv_wr32(priv, 0x408800, 0x02802a3c);
nv_wr32(priv, 0x408804, 0x00000040);
- if (chipset == 0xd9) {
+ if (chipset >= 0xd0) {
nv_wr32(priv, 0x408808, 0x1043e005);
nv_wr32(priv, 0x408900, 0x3080b801);
nv_wr32(priv, 0x408904, 0x1043e005);
@@ -1735,7 +1734,7 @@ nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
nv_wr32(priv, 0x418408, 0x00000000);
nv_wr32(priv, 0x41840c, 0x00001008);
nv_wr32(priv, 0x418410, 0x0fff0fff);
- nv_wr32(priv, 0x418414, chipset != 0xd9 ? 0x00200fff : 0x02200fff);
+ nv_wr32(priv, 0x418414, chipset < 0xd0 ? 0x00200fff : 0x02200fff);
nv_wr32(priv, 0x418450, 0x00000000);
nv_wr32(priv, 0x418454, 0x00000000);
nv_wr32(priv, 0x418458, 0x00000000);
@@ -1750,14 +1749,14 @@ nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
nv_wr32(priv, 0x418700, 0x00000002);
nv_wr32(priv, 0x418704, 0x00000080);
nv_wr32(priv, 0x418708, 0x00000000);
- nv_wr32(priv, 0x41870c, chipset != 0xd9 ? 0x07c80000 : 0x00000000);
+ nv_wr32(priv, 0x41870c, chipset < 0xd0 ? 0x07c80000 : 0x00000000);
nv_wr32(priv, 0x418710, 0x00000000);
- nv_wr32(priv, 0x418800, chipset != 0xd9 ? 0x0006860a : 0x7006860a);
+ nv_wr32(priv, 0x418800, chipset < 0xd0 ? 0x0006860a : 0x7006860a);
nv_wr32(priv, 0x418808, 0x00000000);
nv_wr32(priv, 0x41880c, 0x00000000);
nv_wr32(priv, 0x418810, 0x00000000);
nv_wr32(priv, 0x418828, 0x00008442);
- if (chipset == 0xc1 || chipset == 0xd9)
+ if (chipset == 0xc1 || chipset >= 0xd0)
nv_wr32(priv, 0x418830, 0x10000001);
else
nv_wr32(priv, 0x418830, 0x00000001);
@@ -1768,7 +1767,7 @@ nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
nv_wr32(priv, 0x4188f0, 0x00000000);
nv_wr32(priv, 0x4188f4, 0x00000000);
nv_wr32(priv, 0x4188f8, 0x00000000);
- if (chipset == 0xd9)
+ if (chipset >= 0xd0)
nv_wr32(priv, 0x4188fc, 0x20100008);
else if (chipset == 0xc1)
nv_wr32(priv, 0x4188fc, 0x00100018);
@@ -1787,7 +1786,7 @@ nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
nv_wr32(priv, 0x418a14 + (i * 0x20), 0x00000000);
nv_wr32(priv, 0x418a18 + (i * 0x20), 0x00000000);
}
- nv_wr32(priv, 0x418b00, chipset != 0xd9 ? 0x00000000 : 0x00000006);
+ nv_wr32(priv, 0x418b00, chipset < 0xd0 ? 0x00000000 : 0x00000006);
nv_wr32(priv, 0x418b08, 0x0a418820);
nv_wr32(priv, 0x418b0c, 0x062080e6);
nv_wr32(priv, 0x418b10, 0x020398a4);
@@ -1804,7 +1803,7 @@ nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
nv_wr32(priv, 0x418c24, 0x00000000);
nv_wr32(priv, 0x418c28, 0x00000000);
nv_wr32(priv, 0x418c2c, 0x00000000);
- if (chipset == 0xc1 || chipset == 0xd9)
+ if (chipset == 0xc1 || chipset >= 0xd0)
nv_wr32(priv, 0x418c6c, 0x00000001);
nv_wr32(priv, 0x418c80, 0x20200004);
nv_wr32(priv, 0x418c8c, 0x00000001);
@@ -1823,7 +1822,7 @@ nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv)
nv_wr32(priv, 0x419818, 0x00000000);
nv_wr32(priv, 0x41983c, 0x00038bc7);
nv_wr32(priv, 0x419848, 0x00000000);
- if (chipset == 0xc1 || chipset == 0xd9)
+ if (chipset == 0xc1 || chipset >= 0xd0)
nv_wr32(priv, 0x419864, 0x00000129);
else
nv_wr32(priv, 0x419864, 0x0000012a);
@@ -1836,7 +1835,7 @@ nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv)
nv_wr32(priv, 0x419a14, 0x00000200);
nv_wr32(priv, 0x419a1c, 0x00000000);
nv_wr32(priv, 0x419a20, 0x00000800);
- if (chipset == 0xd9)
+ if (chipset >= 0xd0)
nv_wr32(priv, 0x00419ac4, 0x0017f440);
else if (chipset != 0xc0 && chipset != 0xc8)
nv_wr32(priv, 0x00419ac4, 0x0007f440);
@@ -1847,16 +1846,16 @@ nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv)
nv_wr32(priv, 0x419b10, 0x0a418820);
nv_wr32(priv, 0x419b14, 0x000000e6);
nv_wr32(priv, 0x419bd0, 0x00900103);
- if (chipset == 0xc1 || chipset == 0xd9)
+ if (chipset == 0xc1 || chipset >= 0xd0)
nv_wr32(priv, 0x419be0, 0x00400001);
else
nv_wr32(priv, 0x419be0, 0x00000001);
nv_wr32(priv, 0x419be4, 0x00000000);
- nv_wr32(priv, 0x419c00, chipset != 0xd9 ? 0x00000002 : 0x0000000a);
+ nv_wr32(priv, 0x419c00, chipset < 0xd0 ? 0x00000002 : 0x0000000a);
nv_wr32(priv, 0x419c04, 0x00000006);
nv_wr32(priv, 0x419c08, 0x00000002);
nv_wr32(priv, 0x419c20, 0x00000000);
- if (nv_device(priv)->chipset == 0xd9) {
+ if (nv_device(priv)->chipset >= 0xd0) {
nv_wr32(priv, 0x419c24, 0x00084210);
nv_wr32(priv, 0x419c28, 0x3cf3cf3c);
nv_wr32(priv, 0x419cb0, 0x00020048);
@@ -1868,12 +1867,12 @@ nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv)
}
nv_wr32(priv, 0x419ce8, 0x00000000);
nv_wr32(priv, 0x419cf4, 0x00000183);
- if (chipset == 0xc1 || chipset == 0xd9)
+ if (chipset == 0xc1 || chipset >= 0xd0)
nv_wr32(priv, 0x419d20, 0x12180000);
else
nv_wr32(priv, 0x419d20, 0x02180000);
nv_wr32(priv, 0x419d24, 0x00001fff);
- if (chipset == 0xc1 || chipset == 0xd9)
+ if (chipset == 0xc1 || chipset >= 0xd0)
nv_wr32(priv, 0x419d44, 0x02180218);
nv_wr32(priv, 0x419e04, 0x00000000);
nv_wr32(priv, 0x419e08, 0x00000000);
@@ -2210,7 +2209,7 @@ nvc0_grctx_generate(struct nvc0_graph_priv *priv)
nv_icmd(priv, 0x00000215, 0x00000040);
nv_icmd(priv, 0x00000216, 0x00000040);
nv_icmd(priv, 0x00000217, 0x00000040);
- if (nv_device(priv)->chipset == 0xd9) {
+ if (nv_device(priv)->chipset >= 0xd0) {
for (i = 0x0400; i <= 0x0417; i++)
nv_icmd(priv, i, 0x00000040);
}
@@ -2222,7 +2221,7 @@ nvc0_grctx_generate(struct nvc0_graph_priv *priv)
nv_icmd(priv, 0x0000021d, 0x0000c080);
nv_icmd(priv, 0x0000021e, 0x0000c080);
nv_icmd(priv, 0x0000021f, 0x0000c080);
- if (nv_device(priv)->chipset == 0xd9) {
+ if (nv_device(priv)->chipset >= 0xd0) {
for (i = 0x0440; i <= 0x0457; i++)
nv_icmd(priv, i, 0x0000c080);
}
@@ -2789,7 +2788,7 @@ nvc0_grctx_generate(struct nvc0_graph_priv *priv)
nv_icmd(priv, 0x00000585, 0x0000003f);
nv_icmd(priv, 0x00000576, 0x00000003);
if (nv_device(priv)->chipset == 0xc1 ||
- nv_device(priv)->chipset == 0xd9)
+ nv_device(priv)->chipset >= 0xd0)
nv_icmd(priv, 0x0000057b, 0x00000059);
nv_icmd(priv, 0x00000586, 0x00000040);
nv_icmd(priv, 0x00000582, 0x00000080);
@@ -2891,7 +2890,7 @@ nvc0_grctx_generate(struct nvc0_graph_priv *priv)
nv_icmd(priv, 0x00000957, 0x00000003);
nv_icmd(priv, 0x0000095e, 0x20164010);
nv_icmd(priv, 0x0000095f, 0x00000020);
- if (nv_device(priv)->chipset == 0xd9)
+ if (nv_device(priv)->chipset >= 0xd0)
nv_icmd(priv, 0x0000097d, 0x00000020);
nv_icmd(priv, 0x00000683, 0x00000006);
nv_icmd(priv, 0x00000685, 0x003fffff);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
index 6d8c639..ae27dae 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
@@ -2772,10 +2772,15 @@ nve0_grctx_generate(struct nvc0_graph_priv *priv)
for (i = 0; i < 8; i++)
nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
- nv_wr32(priv, 0x405b00, 0x201);
- nv_wr32(priv, 0x408850, 0x2);
- nv_wr32(priv, 0x408958, 0x2);
- nv_wr32(priv, 0x419f78, 0xa);
+ nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
+ if (priv->gpc_nr == 1) {
+ nv_mask(priv, 0x408850, 0x0000000f, priv->tpc_nr[0]);
+ nv_mask(priv, 0x408958, 0x0000000f, priv->tpc_nr[0]);
+ } else {
+ nv_mask(priv, 0x408850, 0x0000000f, priv->gpc_nr);
+ nv_mask(priv, 0x408958, 0x0000000f, priv->gpc_nr);
+ }
+ nv_mask(priv, 0x419f78, 0x00000001, 0x00000000);
nve0_grctx_generate_icmd(priv);
nve0_grctx_generate_a097(priv);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc
index b86cc60..f7055af 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc
@@ -87,6 +87,11 @@ chipsets:
.b16 #nvd9_gpc_mmio_tail
.b16 #nvd9_tpc_mmio_head
.b16 #nvd9_tpc_mmio_tail
+.b8 0xd7 0 0 0
+.b16 #nvd9_gpc_mmio_head
+.b16 #nvd9_gpc_mmio_tail
+.b16 #nvd9_tpc_mmio_head
+.b16 #nvd9_tpc_mmio_tail
.b8 0 0 0 0
// GPC mmio lists
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
index 0bcfa4d..7fbdebb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
@@ -62,6 +62,9 @@ chipsets:
.b8 0xd9 0 0 0
.b16 #nvd9_hub_mmio_head
.b16 #nvd9_hub_mmio_tail
+.b8 0xd7 0 0 0
+.b16 #nvd9_hub_mmio_head
+.b16 #nvd9_hub_mmio_tail
.b8 0 0 0 0
nvc0_hub_mmio_head:
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
index 0607b98..b245593 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -254,7 +254,7 @@ nv20_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
index b2b650d..7a80d00 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
@@ -142,7 +142,7 @@ nv25_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
index 700462f..3e1f32e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
@@ -109,7 +109,7 @@ nv2a_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
index cedadaa..e451db3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
@@ -143,7 +143,7 @@ nv30_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
index 273f632..9385ac7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
@@ -143,7 +143,7 @@ nv34_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
index f40ee21..9ce84b7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
@@ -141,7 +141,7 @@ nv35_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
index 17049d5..193a5de 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -46,6 +46,14 @@ struct nv40_graph_chan {
struct nouveau_graph_chan base;
};
+static u64
+nv40_graph_units(struct nouveau_graph *graph)
+{
+ struct nv40_graph_priv *priv = (void *)graph;
+
+ return nv_rd32(priv, 0x1540);
+}
+
/*******************************************************************************
* Graphics object classes
******************************************************************************/
@@ -359,6 +367,8 @@ nv40_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
else
nv_engine(priv)->sclass = nv40_graph_sclass;
nv_engine(priv)->tile_prog = nv40_graph_tile_prog;
+
+ priv->base.units = nv40_graph_units;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index f2b1a7a..1ac3611 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -48,6 +48,14 @@ struct nv50_graph_chan {
struct nouveau_graph_chan base;
};
+static u64
+nv50_graph_units(struct nouveau_graph *graph)
+{
+ struct nv50_graph_priv *priv = (void *)graph;
+
+ return nv_rd32(priv, 0x1540);
+}
+
/*******************************************************************************
* Graphics object classes
******************************************************************************/
@@ -819,6 +827,8 @@ nv50_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_subdev(priv)->intr = nv50_graph_intr;
nv_engine(priv)->cclass = &nv50_graph_cclass;
+ priv->base.units = nv50_graph_units;
+
switch (nv_device(priv)->chipset) {
case 0x50:
nv_engine(priv)->sclass = nv50_graph_sclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 0de0dd7..f9b9d82 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -60,6 +60,19 @@ nvc8_graph_sclass[] = {
{}
};
+u64
+nvc0_graph_units(struct nouveau_graph *graph)
+{
+ struct nvc0_graph_priv *priv = (void *)graph;
+ u64 cfg;
+
+ cfg = (u32)priv->gpc_nr;
+ cfg |= (u32)priv->tpc_total << 8;
+ cfg |= (u64)priv->rop_nr << 32;
+
+ return cfg;
+}
+
/*******************************************************************************
* PGRAPH context
******************************************************************************/
@@ -89,7 +102,8 @@ nvc0_graph_context_ctor(struct nouveau_object *parent,
* fuc to modify some per-context register settings on first load
* of the context.
*/
- ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x100, 0, &chan->mmio);
+ ret = nouveau_gpuobj_new(nv_object(chan), NULL, 0x1000, 0x100, 0,
+ &chan->mmio);
if (ret)
return ret;
@@ -101,8 +115,8 @@ nvc0_graph_context_ctor(struct nouveau_object *parent,
/* allocate buffers referenced by mmio list */
for (i = 0; data->size && i < ARRAY_SIZE(priv->mmio_data); i++) {
- ret = nouveau_gpuobj_new(parent, NULL, data->size, data->align,
- 0, &chan->data[i].mem);
+ ret = nouveau_gpuobj_new(nv_object(chan), NULL, data->size,
+ data->align, 0, &chan->data[i].mem);
if (ret)
return ret;
@@ -518,9 +532,10 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
{
struct nouveau_device *device = nv_device(parent);
struct nvc0_graph_priv *priv;
+ bool enable = device->chipset != 0xd7;
int ret, i;
- ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+ ret = nouveau_graph_create(parent, engine, oclass, enable, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -529,6 +544,8 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_subdev(priv)->intr = nvc0_graph_intr;
nv_engine(priv)->cclass = &nvc0_graph_cclass;
+ priv->base.units = nvc0_graph_units;
+
if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) {
nv_info(priv, "using external firmware\n");
if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
@@ -551,11 +568,13 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
break;
}
- ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b4);
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
+ &priv->unk4188b4);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b8);
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
+ &priv->unk4188b8);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
index a1e78de..c870dad 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -118,6 +118,7 @@ nvc0_graph_class(void *obj)
return 0x9197;
case 0xc8:
case 0xd9:
+ case 0xd7:
return 0x9297;
case 0xe4:
case 0xe7:
@@ -169,4 +170,6 @@ int nvc0_graph_context_ctor(struct nouveau_object *, struct nouveau_object *,
struct nouveau_object **);
void nvc0_graph_context_dtor(struct nouveau_object *);
+u64 nvc0_graph_units(struct nouveau_graph *);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
index 4857f91..678c16f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -77,11 +77,207 @@ nve0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
nv_wr32(priv, 0x409c20, ustat);
}
+static const struct nouveau_enum nve0_mp_warp_error[] = {
+ { 0x00, "NO_ERROR" },
+ { 0x01, "STACK_MISMATCH" },
+ { 0x05, "MISALIGNED_PC" },
+ { 0x08, "MISALIGNED_GPR" },
+ { 0x09, "INVALID_OPCODE" },
+ { 0x0d, "GPR_OUT_OF_BOUNDS" },
+ { 0x0e, "MEM_OUT_OF_BOUNDS" },
+ { 0x0f, "UNALIGNED_MEM_ACCESS" },
+ { 0x11, "INVALID_PARAM" },
+ {}
+};
+
+static const struct nouveau_enum nve0_mp_global_error[] = {
+ { 2, "MULTIPLE_WARP_ERRORS" },
+ { 3, "OUT_OF_STACK_SPACE" },
+ {}
+};
+
+static const struct nouveau_enum nve0_gpc_rop_error[] = {
+ { 1, "RT_PITCH_OVERRUN" },
+ { 4, "RT_WIDTH_OVERRUN" },
+ { 5, "RT_HEIGHT_OVERRUN" },
+ { 7, "ZETA_STORAGE_TYPE_MISMATCH" },
+ { 8, "RT_STORAGE_TYPE_MISMATCH" },
+ { 10, "RT_LINEAR_MISMATCH" },
+ {}
+};
+
+static const struct nouveau_enum nve0_sked_error[] = {
+ { 7, "CONSTANT_BUFFER_SIZE" },
+ { 9, "LOCAL_MEMORY_SIZE_POS" },
+ { 10, "LOCAL_MEMORY_SIZE_NEG" },
+ { 11, "WARP_CSTACK_SIZE" },
+ { 12, "TOTAL_TEMP_SIZE" },
+ { 13, "REGISTER_COUNT" },
+ { 18, "TOTAL_THREADS" },
+ { 20, "PROGRAM_OFFSET" },
+ { 21, "SHARED_MEMORY_SIZE" },
+ { 25, "SHARED_CONFIG_TOO_SMALL" },
+ { 26, "TOTAL_REGISTER_COUNT" },
+ {}
+};
+
+static void
+nve0_graph_mp_trap(struct nvc0_graph_priv *priv, int gpc, int tp)
+{
+ int i;
+ u32 werr = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x648));
+ u32 gerr = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x650));
+
+ nv_error(priv, "GPC%i/TP%i/MP trap:", gpc, tp);
+
+ for (i = 0; i <= 31; ++i) {
+ if (!(gerr & (1 << i)))
+ continue;
+ pr_cont(" ");
+ nouveau_enum_print(nve0_mp_global_error, i);
+ }
+ if (werr) {
+ pr_cont(" ");
+ nouveau_enum_print(nve0_mp_warp_error, werr & 0xffff);
+ }
+ pr_cont("\n");
+
+ /* disable MP trap to avoid spam */
+ nv_mask(priv, TPC_UNIT(gpc, tp, 0x50c), 0x2, 0x0);
+
+ /* TODO: figure out how to resume after an MP trap */
+}
+
+static void
+nve0_graph_tp_trap(struct nvc0_graph_priv *priv, int gpc, int tp)
+{
+ u32 stat = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x508));
+
+ if (stat & 0x1) {
+ u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x224));
+ nv_error(priv, "GPC%i/TP%i/TEX trap: %08x\n",
+ gpc, tp, trap);
+
+ nv_wr32(priv, TPC_UNIT(gpc, tp, 0x224), 0xc0000000);
+ stat &= ~0x1;
+ }
+
+ if (stat & 0x2) {
+ nve0_graph_mp_trap(priv, gpc, tp);
+ stat &= ~0x2;
+ }
+
+ if (stat & 0x4) {
+ u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x084));
+ nv_error(priv, "GPC%i/TP%i/POLY trap: %08x\n",
+ gpc, tp, trap);
+
+ nv_wr32(priv, TPC_UNIT(gpc, tp, 0x084), 0xc0000000);
+ stat &= ~0x4;
+ }
+
+ if (stat & 0x8) {
+ u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x48c));
+ nv_error(priv, "GPC%i/TP%i/L1C trap: %08x\n",
+ gpc, tp, trap);
+
+ nv_wr32(priv, TPC_UNIT(gpc, tp, 0x48c), 0xc0000000);
+ stat &= ~0x8;
+ }
+
+ if (stat) {
+ nv_error(priv, "GPC%i/TP%i: unknown stat %08x\n",
+ gpc, tp, stat);
+ }
+}
+
+static void
+nve0_graph_gpc_trap(struct nvc0_graph_priv *priv)
+{
+ const u32 mask = nv_rd32(priv, 0x400118);
+ int gpc;
+
+ for (gpc = 0; gpc < 4; ++gpc) {
+ u32 stat;
+ int tp;
+
+ if (!(mask & (1 << gpc)))
+ continue;
+ stat = nv_rd32(priv, GPC_UNIT(gpc, 0x2c90));
+
+ if (stat & 0x0001) {
+ u32 trap[4];
+ int i;
+
+ trap[0] = nv_rd32(priv, GPC_UNIT(gpc, 0x0420));
+ trap[1] = nv_rd32(priv, GPC_UNIT(gpc, 0x0434));
+ trap[2] = nv_rd32(priv, GPC_UNIT(gpc, 0x0438));
+ trap[3] = nv_rd32(priv, GPC_UNIT(gpc, 0x043c));
+
+ nv_error(priv, "GPC%i/PROP trap:", gpc);
+ for (i = 0; i <= 29; ++i) {
+ if (!(trap[0] & (1 << i)))
+ continue;
+ pr_cont(" ");
+ nouveau_enum_print(nve0_gpc_rop_error, i);
+ }
+ pr_cont("\n");
+
+ nv_error(priv, "x = %u, y = %u, "
+ "format = %x, storage type = %x\n",
+ trap[1] & 0xffff,
+ trap[1] >> 16,
+ (trap[2] >> 8) & 0x3f,
+ trap[3] & 0xff);
+
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+ stat &= ~0x0001;
+ }
+
+ if (stat & 0x0002) {
+ u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0900));
+ nv_error(priv, "GPC%i/ZCULL trap: %08x\n", gpc,
+ trap);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+ stat &= ~0x0002;
+ }
+
+ if (stat & 0x0004) {
+ u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x1028));
+ nv_error(priv, "GPC%i/CCACHE trap: %08x\n", gpc,
+ trap);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+ stat &= ~0x0004;
+ }
+
+ if (stat & 0x0008) {
+ u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0824));
+ nv_error(priv, "GPC%i/ESETUP trap %08x\n", gpc,
+ trap);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+ stat &= ~0x0008;
+ }
+
+ for (tp = 0; tp < 8; ++tp) {
+ if (stat & (1 << (16 + tp)))
+ nve0_graph_tp_trap(priv, gpc, tp);
+ }
+ stat &= ~0xff0000;
+
+ if (stat) {
+ nv_error(priv, "GPC%i: unknown stat %08x\n",
+ gpc, stat);
+ }
+ }
+}
+
+
static void
nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst,
struct nouveau_object *engctx)
{
u32 trap = nv_rd32(priv, 0x400108);
+ int i;
int rop;
if (trap & 0x00000001) {
@@ -102,6 +298,32 @@ nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst,
trap &= ~0x00000010;
}
+ if (trap & 0x00000100) {
+ u32 stat = nv_rd32(priv, 0x407020);
+ nv_error(priv, "SKED ch %d [0x%010llx %s]:",
+ chid, inst, nouveau_client_name(engctx));
+
+ for (i = 0; i <= 29; ++i) {
+ if (!(stat & (1 << i)))
+ continue;
+ pr_cont(" ");
+ nouveau_enum_print(nve0_sked_error, i);
+ }
+ pr_cont("\n");
+
+ if (stat & 0x3fffffff)
+ nv_wr32(priv, 0x407020, 0x40000000);
+ nv_wr32(priv, 0x400108, 0x00000100);
+ trap &= ~0x00000100;
+ }
+
+ if (trap & 0x01000000) {
+ nv_error(priv, "GPC ch %d [0x%010llx %s]:\n",
+ chid, inst, nouveau_client_name(engctx));
+ nve0_graph_gpc_trap(priv);
+ trap &= ~0x01000000;
+ }
+
if (trap & 0x02000000) {
for (rop = 0; rop < priv->rop_nr; rop++) {
u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070));
@@ -217,6 +439,8 @@ nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->cclass = &nve0_graph_cclass;
nv_engine(priv)->sclass = nve0_graph_sclass;
+ priv->base.units = nvc0_graph_units;
+
if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) {
nv_info(priv, "using external firmware\n");
if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
@@ -227,11 +451,13 @@ nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->firmware = true;
}
- ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b4);
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
+ &priv->unk4188b4);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b8);
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
+ &priv->unk4188b8);
if (ret)
return ret;
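
The SKED, GPC and MP trap handlers added above all follow the same decode pattern: read a status word, walk a table of (bit, name) pairs, and print the name of every set bit via nouveau_enum_print(). A self-contained sketch of that pattern follows; the struct and function names are made up, and the table is a trimmed copy of nve0_sked_error above, so this does not depend on the nouveau_enum internals.

/* Standalone illustration of the bit-to-name decoding used by the new
 * nve0 trap handlers; names here are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

struct bit_name { int bit; const char *name; };

static const struct bit_name sked_errors[] = {
	{  7, "CONSTANT_BUFFER_SIZE" },
	{ 18, "TOTAL_THREADS" },
	{ 21, "SHARED_MEMORY_SIZE" },
	{ -1, NULL },
};

static void decode_stat(uint32_t stat, const struct bit_name *tbl)
{
	for (; tbl->name; tbl++) {
		if (stat & (1u << tbl->bit))
			printf(" %s", tbl->name);
	}
	printf("\n");
}
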
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
index a523eaa..d698e71 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -94,6 +94,32 @@ nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
return -EINVAL;
}
+static int
+nvc0_software_mthd_mp_control(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
+{
+ struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
+ struct nvc0_software_priv *priv = (void *)nv_object(chan)->engine;
+ u32 data = *(u32 *)args;
+
+ switch (mthd) {
+ case 0x600:
+ nv_wr32(priv, 0x419e00, data); /* MP.PM_UNK000 */
+ break;
+ case 0x644:
+ if (data & ~0x1ffffe)
+ return -EINVAL;
+ nv_wr32(priv, 0x419e44, data); /* MP.TRAP_WARP_ERROR_EN */
+ break;
+ case 0x6ac:
+ nv_wr32(priv, 0x419eac, data); /* MP.PM_UNK0AC */
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
static struct nouveau_omthds
nvc0_software_omthds[] = {
{ 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset },
@@ -101,6 +127,9 @@ nvc0_software_omthds[] = {
{ 0x0408, 0x0408, nvc0_software_mthd_vblsem_value },
{ 0x040c, 0x040c, nvc0_software_mthd_vblsem_release },
{ 0x0500, 0x0500, nvc0_software_mthd_flip },
+ { 0x0600, 0x0600, nvc0_software_mthd_mp_control },
+ { 0x0644, 0x0644, nvc0_software_mthd_mp_control },
+ { 0x06ac, 0x06ac, nvc0_software_mthd_mp_control },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 92d3ab1..0a393f7 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -169,6 +169,7 @@ struct nv04_display_class {
* 8570: NVA3_DISP
* 9070: NVD0_DISP
* 9170: NVE0_DISP
+ * 9270: NVF0_DISP
*/
#define NV50_DISP_CLASS 0x00005070
@@ -178,6 +179,7 @@ struct nv04_display_class {
#define NVA3_DISP_CLASS 0x00008570
#define NVD0_DISP_CLASS 0x00009070
#define NVE0_DISP_CLASS 0x00009170
+#define NVF0_DISP_CLASS 0x00009270
#define NV50_DISP_SOR_MTHD 0x00010000
#define NV50_DISP_SOR_MTHD_TYPE 0x0000f000
@@ -246,6 +248,7 @@ struct nv50_display_class {
* 857a: NVA3_DISP_CURS
* 907a: NVD0_DISP_CURS
* 917a: NVE0_DISP_CURS
+ * 927a: NVF0_DISP_CURS
*/
#define NV50_DISP_CURS_CLASS 0x0000507a
@@ -255,6 +258,7 @@ struct nv50_display_class {
#define NVA3_DISP_CURS_CLASS 0x0000857a
#define NVD0_DISP_CURS_CLASS 0x0000907a
#define NVE0_DISP_CURS_CLASS 0x0000917a
+#define NVF0_DISP_CURS_CLASS 0x0000927a
struct nv50_display_curs_class {
u32 head;
@@ -267,6 +271,7 @@ struct nv50_display_curs_class {
* 857b: NVA3_DISP_OIMM
* 907b: NVD0_DISP_OIMM
* 917b: NVE0_DISP_OIMM
+ * 927b: NVF0_DISP_OIMM
*/
#define NV50_DISP_OIMM_CLASS 0x0000507b
@@ -276,6 +281,7 @@ struct nv50_display_curs_class {
#define NVA3_DISP_OIMM_CLASS 0x0000857b
#define NVD0_DISP_OIMM_CLASS 0x0000907b
#define NVE0_DISP_OIMM_CLASS 0x0000917b
+#define NVF0_DISP_OIMM_CLASS 0x0000927b
struct nv50_display_oimm_class {
u32 head;
@@ -288,6 +294,7 @@ struct nv50_display_oimm_class {
* 857c: NVA3_DISP_SYNC
* 907c: NVD0_DISP_SYNC
* 917c: NVE0_DISP_SYNC
+ * 927c: NVF0_DISP_SYNC
*/
#define NV50_DISP_SYNC_CLASS 0x0000507c
@@ -297,6 +304,7 @@ struct nv50_display_oimm_class {
#define NVA3_DISP_SYNC_CLASS 0x0000857c
#define NVD0_DISP_SYNC_CLASS 0x0000907c
#define NVE0_DISP_SYNC_CLASS 0x0000917c
+#define NVF0_DISP_SYNC_CLASS 0x0000927c
struct nv50_display_sync_class {
u32 pushbuf;
@@ -310,6 +318,7 @@ struct nv50_display_sync_class {
* 857d: NVA3_DISP_MAST
* 907d: NVD0_DISP_MAST
* 917d: NVE0_DISP_MAST
+ * 927d: NVF0_DISP_MAST
*/
#define NV50_DISP_MAST_CLASS 0x0000507d
@@ -319,6 +328,7 @@ struct nv50_display_sync_class {
#define NVA3_DISP_MAST_CLASS 0x0000857d
#define NVD0_DISP_MAST_CLASS 0x0000907d
#define NVE0_DISP_MAST_CLASS 0x0000917d
+#define NVF0_DISP_MAST_CLASS 0x0000927d
struct nv50_display_mast_class {
u32 pushbuf;
@@ -331,6 +341,7 @@ struct nv50_display_mast_class {
* 857e: NVA3_DISP_OVLY
* 907e: NVD0_DISP_OVLY
* 917e: NVE0_DISP_OVLY
+ * 927e: NVF0_DISP_OVLY
*/
#define NV50_DISP_OVLY_CLASS 0x0000507e
@@ -340,6 +351,7 @@ struct nv50_display_mast_class {
#define NVA3_DISP_OVLY_CLASS 0x0000857e
#define NVD0_DISP_OVLY_CLASS 0x0000907e
#define NVE0_DISP_OVLY_CLASS 0x0000917e
+#define NVF0_DISP_OVLY_CLASS 0x0000927e
struct nv50_display_ovly_class {
u32 pushbuf;
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index d351a4e..05840f3 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -6,7 +6,7 @@
#include <core/engine.h>
enum nv_subdev_type {
- NVDEV_SUBDEV_DEVICE,
+ NVDEV_ENGINE_DEVICE,
NVDEV_SUBDEV_VBIOS,
/* All subdevs from DEVINIT to DEVINIT_LAST will be created before
@@ -57,7 +57,7 @@ enum nv_subdev_type {
};
struct nouveau_device {
- struct nouveau_subdev base;
+ struct nouveau_engine base;
struct list_head head;
struct pci_dev *pdev;
@@ -99,7 +99,7 @@ nv_device(void *obj)
#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
if (unlikely(!nv_iclass(device, NV_SUBDEV_CLASS) ||
- (nv_hclass(device) & 0xff) != NVDEV_SUBDEV_DEVICE)) {
+ (nv_hclass(device) & 0xff) != NVDEV_ENGINE_DEVICE)) {
nv_assert("BAD CAST -> NvDevice, 0x%08x 0x%08x",
nv_hclass(object), nv_hclass(device));
}
diff --git a/drivers/gpu/drm/nouveau/core/include/core/parent.h b/drivers/gpu/drm/nouveau/core/include/core/parent.h
index 31cd852..9f5ea90 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/parent.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/parent.h
@@ -51,8 +51,8 @@ int nouveau_parent_create_(struct nouveau_object *, struct nouveau_object *,
void nouveau_parent_destroy(struct nouveau_parent *);
void _nouveau_parent_dtor(struct nouveau_object *);
-#define _nouveau_parent_init _nouveau_object_init
-#define _nouveau_parent_fini _nouveau_object_fini
+#define _nouveau_parent_init nouveau_object_init
+#define _nouveau_parent_fini nouveau_object_fini
int nouveau_parent_sclass(struct nouveau_object *, u16 handle,
struct nouveau_object **pengine,
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/device.h b/drivers/gpu/drm/nouveau/core/include/engine/device.h
index c9e4c4a..b3dd2c4 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/device.h
@@ -18,7 +18,6 @@ int nv50_identify(struct nouveau_device *);
int nvc0_identify(struct nouveau_device *);
int nve0_identify(struct nouveau_device *);
-extern struct nouveau_oclass nouveau_device_sclass[];
struct nouveau_device *nouveau_device_find(u64 name);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
index 28da677..4b21fab 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -44,5 +44,6 @@ extern struct nouveau_oclass nv94_disp_oclass;
extern struct nouveau_oclass nva3_disp_oclass;
extern struct nouveau_oclass nvd0_disp_oclass;
extern struct nouveau_oclass nve0_disp_oclass;
+extern struct nouveau_oclass nvf0_disp_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index b46c197..633c2f8 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -65,7 +65,8 @@ struct nouveau_fifo_base {
struct nouveau_fifo {
struct nouveau_engine base;
- struct nouveau_event *uevent;
+ struct nouveau_event *cevent; /* channel creation event */
+ struct nouveau_event *uevent; /* async user trigger */
struct nouveau_object **channel;
spinlock_t lock;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/graph.h b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
index 6943b40..5d39243 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/graph.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
@@ -26,6 +26,10 @@ struct nouveau_graph_chan {
struct nouveau_graph {
struct nouveau_engine base;
+
+ /* Returns chipset-specific counts of units packed into a u64.
+ */
+ u64 (*units)(struct nouveau_graph *);
};
static inline struct nouveau_graph *
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h b/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
index f351f63..a1985ed 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
@@ -4,8 +4,15 @@
#include <core/subdev.h>
#include <core/device.h>
+struct nouveau_mm_node;
+
struct nouveau_ltcg {
struct nouveau_subdev base;
+
+ int (*tags_alloc)(struct nouveau_ltcg *, u32 count,
+ struct nouveau_mm_node **);
+ void (*tags_free)(struct nouveau_ltcg *, struct nouveau_mm_node **);
+ void (*tags_clear)(struct nouveau_ltcg *, u32 first, u32 count);
};
static inline struct nouveau_ltcg *
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index fded97c..d550226 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -21,18 +21,22 @@ nouveau_mc(void *obj)
}
#define nouveau_mc_create(p,e,o,d) \
- nouveau_subdev_create_((p), (e), (o), 0, "PMC", "master", \
- sizeof(**d), (void **)d)
-#define nouveau_mc_destroy(p) \
- nouveau_subdev_destroy(&(p)->base)
-#define nouveau_mc_init(p) \
- nouveau_subdev_init(&(p)->base)
-#define nouveau_mc_fini(p,s) \
- nouveau_subdev_fini(&(p)->base, (s))
-
-#define _nouveau_mc_dtor _nouveau_subdev_dtor
-#define _nouveau_mc_init _nouveau_subdev_init
-#define _nouveau_mc_fini _nouveau_subdev_fini
+ nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_mc_destroy(p) ({ \
+ struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
+})
+#define nouveau_mc_init(p) ({ \
+ struct nouveau_mc *pmc = (p); _nouveau_mc_init(nv_object(pmc)); \
+})
+#define nouveau_mc_fini(p,s) ({ \
+ struct nouveau_mc *pmc = (p); _nouveau_mc_fini(nv_object(pmc), (s)); \
+})
+
+int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+void _nouveau_mc_dtor(struct nouveau_object *);
+int _nouveau_mc_init(struct nouveau_object *);
+int _nouveau_mc_fini(struct nouveau_object *, bool);
extern struct nouveau_oclass nv04_mc_oclass;
extern struct nouveau_oclass nv44_mc_oclass;
@@ -40,8 +44,6 @@ extern struct nouveau_oclass nv50_mc_oclass;
extern struct nouveau_oclass nv98_mc_oclass;
extern struct nouveau_oclass nvc0_mc_oclass;
-void nouveau_mc_intr(struct nouveau_subdev *);
-
extern const struct nouveau_mc_intr nv04_mc_intr[];
int nv04_mc_init(struct nouveau_object *);
int nv50_mc_init(struct nouveau_object *);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
index 0b20fc0..c075998 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
@@ -73,6 +73,7 @@ int _nouveau_therm_fini(struct nouveau_object *, bool);
extern struct nouveau_oclass nv40_therm_oclass;
extern struct nouveau_oclass nv50_therm_oclass;
+extern struct nouveau_oclass nv84_therm_oclass;
extern struct nouveau_oclass nva3_therm_oclass;
extern struct nouveau_oclass nvd0_therm_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h
index eb49603..3bd9be2 100644
--- a/drivers/gpu/drm/nouveau/core/os.h
+++ b/drivers/gpu/drm/nouveau/core/os.h
@@ -17,6 +17,7 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/reboot.h>
+#include <linux/interrupt.h>
#include <asm/unaligned.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
index c3acf5b..649f1ce 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
@@ -122,18 +122,20 @@ nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL, 0x20000, 0, NVOBJ_FLAG_HEAP,
- &priv->mem);
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
+ NVOBJ_FLAG_HEAP, &priv->mem);
heap = nv_object(priv->mem);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, heap, (device->chipset == 0x50) ?
- 0x1400 : 0x0200, 0, 0, &priv->pad);
+ ret = nouveau_gpuobj_new(nv_object(priv), heap,
+ (device->chipset == 0x50) ? 0x1400 : 0x0200,
+ 0, 0, &priv->pad);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, heap, 0x4000, 0, 0, &priv->pgd);
+ ret = nouveau_gpuobj_new(nv_object(priv), heap, 0x4000, 0,
+ 0, &priv->pgd);
if (ret)
return ret;
@@ -145,9 +147,9 @@ nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, heap, ((limit-- - start) >> 12) * 8,
- 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
- &vm->pgt[0].obj[0]);
+ ret = nouveau_gpuobj_new(nv_object(priv), heap,
+ ((limit-- - start) >> 12) * 8, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]);
vm->pgt[0].refcount[0] = 1;
if (ret)
return ret;
@@ -157,7 +159,7 @@ nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, heap, 24, 16, 0, &priv->bar3);
+ ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar3);
if (ret)
return ret;
@@ -182,7 +184,7 @@ nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, heap, 24, 16, 0, &priv->bar1);
+ ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar1);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
index 77a6fb7..f8a44956 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
@@ -101,12 +101,14 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
/* BAR3 */
- ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0, 0, &priv->bar[0].mem);
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
+ &priv->bar[0].mem);
mem = priv->bar[0].mem;
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL, 0x8000, 0, 0, &priv->bar[0].pgd);
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0,
+ &priv->bar[0].pgd);
if (ret)
return ret;
@@ -114,7 +116,7 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL,
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL,
(pci_resource_len(pdev, 3) >> 12) * 8,
0x1000, NVOBJ_FLAG_ZERO_ALLOC,
&vm->pgt[0].obj[0]);
@@ -133,12 +135,14 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 3) - 1));
/* BAR1 */
- ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0, 0, &priv->bar[1].mem);
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
+ &priv->bar[1].mem);
mem = priv->bar[1].mem;
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL, 0x8000, 0, 0, &priv->bar[1].pgd);
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0,
+ &priv->bar[1].pgd);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 9c41b58..c300b5e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -64,27 +64,33 @@ init_exec_force(struct nvbios_init *init, bool exec)
static inline int
init_or(struct nvbios_init *init)
{
- if (init->outp)
- return ffs(init->outp->or) - 1;
- error("script needs OR!!\n");
+ if (init_exec(init)) {
+ if (init->outp)
+ return ffs(init->outp->or) - 1;
+ error("script needs OR!!\n");
+ }
return 0;
}
static inline int
init_link(struct nvbios_init *init)
{
- if (init->outp)
- return !(init->outp->sorconf.link & 1);
- error("script needs OR link\n");
+ if (init_exec(init)) {
+ if (init->outp)
+ return !(init->outp->sorconf.link & 1);
+ error("script needs OR link\n");
+ }
return 0;
}
static inline int
init_crtc(struct nvbios_init *init)
{
- if (init->crtc >= 0)
- return init->crtc;
- error("script needs crtc\n");
+ if (init_exec(init)) {
+ if (init->crtc >= 0)
+ return init->crtc;
+ error("script needs crtc\n");
+ }
return 0;
}
@@ -92,16 +98,21 @@ static u8
init_conn(struct nvbios_init *init)
{
struct nouveau_bios *bios = init->bios;
+ u8 ver, len;
+ u16 conn;
- if (init->outp) {
- u8 ver, len;
- u16 conn = dcb_conn(bios, init->outp->connector, &ver, &len);
- if (conn)
- return nv_ro08(bios, conn);
+ if (init_exec(init)) {
+ if (init->outp) {
+ conn = init->outp->connector;
+ conn = dcb_conn(bios, conn, &ver, &len);
+ if (conn)
+ return nv_ro08(bios, conn);
+ }
+
+ error("script needs connector type\n");
}
- error("script needs connector type\n");
- return 0x00;
+ return 0xff;
}
static inline u32
@@ -227,7 +238,8 @@ init_i2c(struct nvbios_init *init, int index)
} else
if (index < 0) {
if (!init->outp) {
- error("script needs output for i2c\n");
+ if (init_exec(init))
+ error("script needs output for i2c\n");
return NULL;
}
@@ -544,7 +556,8 @@ init_tmds_reg(struct nvbios_init *init, u8 tmds)
return 0x6808b0 + dacoffset;
}
- error("tmds opcodes need dcb\n");
+ if (init_exec(init))
+ error("tmds opcodes need dcb\n");
} else {
if (tmds < ARRAY_SIZE(pramdac_table))
return pramdac_table[tmds];
@@ -792,7 +805,8 @@ init_dp_condition(struct nvbios_init *init)
break;
}
- warn("script needs dp output table data\n");
+ if (init_exec(init))
+ warn("script needs dp output table data\n");
break;
case 5:
if (!(init_rdauxr(init, 0x0d) & 1))
@@ -816,7 +830,7 @@ init_io_mask_or(struct nvbios_init *init)
u8 or = init_or(init);
u8 data;
- trace("IO_MASK_OR\t0x03d4[0x%02x] &= ~(1 << 0x%02x)", index, or);
+ trace("IO_MASK_OR\t0x03d4[0x%02x] &= ~(1 << 0x%02x)\n", index, or);
init->offset += 2;
data = init_rdvgai(init, 0x03d4, index);
@@ -835,7 +849,7 @@ init_io_or(struct nvbios_init *init)
u8 or = init_or(init);
u8 data;
- trace("IO_OR\t0x03d4[0x%02x] |= (1 << 0x%02x)", index, or);
+ trace("IO_OR\t0x03d4[0x%02x] |= (1 << 0x%02x)\n", index, or);
init->offset += 2;
data = init_rdvgai(init, 0x03d4, index);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index 7606ed1..86ad592 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -23,6 +23,7 @@
*/
#include <subdev/fb.h>
+#include <subdev/ltcg.h>
#include <subdev/bios.h>
struct nvc0_fb_priv {
@@ -31,34 +32,14 @@ struct nvc0_fb_priv {
dma_addr_t r100c10;
};
-/* 0 = unsupported
- * 1 = non-compressed
- * 3 = compressed
- */
-static const u8 types[256] = {
- 1, 1, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
- 0, 1, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3,
- 3, 3, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 1, 1, 1, 1, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
- 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
- 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
- 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
-};
+extern const u8 nvc0_pte_storage_type_map[256];
+
static bool
nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
{
u8 memtype = (tile_flags & 0x0000ff00) >> 8;
- return likely((types[memtype] == 1));
+ return likely((nvc0_pte_storage_type_map[memtype] != 0xff));
}
static int
@@ -130,6 +111,7 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
int type = (memtype & 0x0ff);
int back = (memtype & 0x800);
int ret;
+ const bool comp = nvc0_pte_storage_type_map[type] != type;
size >>= 12;
align >>= 12;
@@ -142,10 +124,22 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
return -ENOMEM;
INIT_LIST_HEAD(&mem->regions);
- mem->memtype = type;
mem->size = size;
mutex_lock(&pfb->base.mutex);
+ if (comp) {
+ struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb->base.base.parent);
+
+ /* compression only works with lpages */
+ if (align == (1 << (17 - 12))) {
+ int n = size >> 5;
+ ltcg->tags_alloc(ltcg, n, &mem->tag);
+ }
+ if (unlikely(!mem->tag))
+ type = nvc0_pte_storage_type_map[type];
+ }
+ mem->memtype = type;
+
do {
if (back)
ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r);
@@ -168,6 +162,17 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
return 0;
}
+static void
+nvc0_fb_vram_del(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
+{
+ struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb->base.base.parent);
+
+ if ((*pmem)->tag)
+ ltcg->tags_free(ltcg, &(*pmem)->tag);
+
+ nv50_fb_vram_del(pfb, pmem);
+}
+
static int
nvc0_fb_init(struct nouveau_object *object)
{
@@ -178,7 +183,8 @@ nvc0_fb_init(struct nouveau_object *object)
if (ret)
return ret;
- nv_wr32(priv, 0x100c10, priv->r100c10 >> 8);
+ if (priv->r100c10_page)
+ nv_wr32(priv, 0x100c10, priv->r100c10 >> 8);
return 0;
}
@@ -214,16 +220,16 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->base.memtype_valid = nvc0_fb_memtype_valid;
priv->base.ram.init = nvc0_fb_vram_init;
priv->base.ram.get = nvc0_fb_vram_new;
- priv->base.ram.put = nv50_fb_vram_del;
+ priv->base.ram.put = nvc0_fb_vram_del;
priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!priv->r100c10_page)
- return -ENOMEM;
-
- priv->r100c10 = pci_map_page(device->pdev, priv->r100c10_page, 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(device->pdev, priv->r100c10))
- return -EFAULT;
+ if (priv->r100c10_page) {
+ priv->r100c10 = pci_map_page(device->pdev, priv->r100c10_page,
+ 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(device->pdev, priv->r100c10))
+ return -EFAULT;
+ }
return nouveau_fb_preinit(&priv->base);
}
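
In nvc0_fb_vram_new() above, sizes and alignments are shifted into 4 KiB units, compression tags are requested only for 128 KiB large-page allocations (align == 1 << (17 - 12)), and one tag covers 32 small pages, hence size >> 5; if the tag allocation fails, the type silently falls back to the uncompressed entry of nvc0_pte_storage_type_map. A small standalone sketch of the tag-count arithmetic, using an arbitrary 16 MiB allocation as the example:

/* Worked example of the size >> 5 tag count above; the 16 MiB size
 * is chosen purely for illustration.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bytes = 16ull << 20;  /* 16 MiB allocation          */
	uint64_t pages = bytes >> 12;  /* 4096 pages of 4 KiB        */
	uint64_t tags  = pages >> 5;   /* 128 tags, one per 128 KiB  */

	printf("%llu bytes -> %llu compression tags\n",
	       (unsigned long long)bytes, (unsigned long long)tags);
	return 0;
}
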
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 2e98e8a..8ae2625 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -140,12 +140,8 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
}
/* drop port's i2c subdev refcount, i2c handles this itself */
- if (ret == 0) {
+ if (ret == 0)
list_add_tail(&port->head, &i2c->ports);
- atomic_dec(&parent->refcount);
- atomic_dec(&engine->refcount);
- }
-
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
index f5bbd38..795393d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
@@ -93,7 +93,6 @@ nv04_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
u32 size, u32 align, struct nouveau_object **pobject)
{
struct nouveau_object *engine = nv_object(imem);
- struct nv04_instmem_priv *priv = (void *)(imem);
int ret;
ret = nouveau_object_ctor(parent, engine, &nv04_instobj_oclass,
@@ -101,14 +100,6 @@ nv04_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
if (ret)
return ret;
- /* INSTMEM itself creates objects to reserve (and preserve across
- * suspend/resume) various fixed data locations, each one of these
- * takes a reference on INSTMEM itself, causing it to never be
- * freed. We drop all the self-references here to avoid this.
- */
- if (unlikely(!priv->created))
- atomic_dec(&engine->refcount);
-
return 0;
}
@@ -134,27 +125,28 @@ nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
/* 0x00000-0x10000: reserve for probable vbios image */
- ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0, 0, &priv->vbios);
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
+ &priv->vbios);
if (ret)
return ret;
/* 0x10000-0x18000: reserve for RAMHT */
- ret = nouveau_ramht_new(parent, NULL, 0x08000, 0, &priv->ramht);
+ ret = nouveau_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
if (ret)
return ret;
/* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */
- ret = nouveau_gpuobj_new(parent, NULL, 0x00800, 0,
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x00800, 0,
NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
if (ret)
return ret;
/* 0x18800-0x18a00: reserve for RAMRO */
- ret = nouveau_gpuobj_new(parent, NULL, 0x00200, 0, 0, &priv->ramro);
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x00200, 0, 0,
+ &priv->ramro);
if (ret)
return ret;
- priv->created = true;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
index 7983d8d..b15b613 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
@@ -9,7 +9,6 @@
struct nv04_instmem_priv {
struct nouveau_instmem base;
- bool created;
void __iomem *iomem;
struct nouveau_mm heap;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
index da64253..716bf41 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -82,31 +82,33 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
/* 0x00000-0x10000: reserve for probable vbios image */
- ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0, 0, &priv->vbios);
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
+ &priv->vbios);
if (ret)
return ret;
/* 0x10000-0x18000: reserve for RAMHT */
- ret = nouveau_ramht_new(parent, NULL, 0x08000, 0, &priv->ramht);
+ ret = nouveau_ramht_new(nv_object(priv), NULL, 0x08000, 0,
+ &priv->ramht);
if (ret)
return ret;
/* 0x18000-0x18200: reserve for RAMRO
* 0x18200-0x20000: padding
*/
- ret = nouveau_gpuobj_new(parent, NULL, 0x08000, 0, 0, &priv->ramro);
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0,
+ &priv->ramro);
if (ret)
return ret;
/* 0x20000-0x21000: reserve for RAMFC
* 0x21000-0x40000: padding and some unknown crap
*/
- ret = nouveau_gpuobj_new(parent, NULL, 0x20000, 0,
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
if (ret)
return ret;
- priv->created = true;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
index 078a2b9..e4940fb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
@@ -23,10 +23,17 @@
*/
#include <subdev/ltcg.h>
+#include <subdev/fb.h>
+#include <subdev/timer.h>
struct nvc0_ltcg_priv {
struct nouveau_ltcg base;
+ u32 part_nr;
+ u32 part_mask;
u32 subp_nr;
+ struct nouveau_mm tags;
+ u32 num_tags;
+ struct nouveau_mm_node *tag_ram;
};
static void
@@ -62,11 +69,104 @@ nvc0_ltcg_intr(struct nouveau_subdev *subdev)
}
static int
+nvc0_ltcg_tags_alloc(struct nouveau_ltcg *ltcg, u32 n,
+ struct nouveau_mm_node **pnode)
+{
+ struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+ int ret;
+
+ ret = nouveau_mm_head(&priv->tags, 1, n, n, 1, pnode);
+ if (ret)
+ *pnode = NULL;
+
+ return ret;
+}
+
+static void
+nvc0_ltcg_tags_free(struct nouveau_ltcg *ltcg, struct nouveau_mm_node **pnode)
+{
+ struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+
+ nouveau_mm_free(&priv->tags, pnode);
+}
+
+static void
+nvc0_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count)
+{
+ struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+ u32 last = first + count - 1;
+ int p, i;
+
+ BUG_ON((first > last) || (last >= priv->num_tags));
+
+ nv_wr32(priv, 0x17e8cc, first);
+ nv_wr32(priv, 0x17e8d0, last);
+ nv_wr32(priv, 0x17e8c8, 0x4); /* trigger clear */
+
+ /* wait until it's finished with clearing */
+ for (p = 0; p < priv->part_nr; ++p) {
+ if (!(priv->part_mask & (1 << p)))
+ continue;
+ for (i = 0; i < priv->subp_nr; ++i)
+ nv_wait(priv, 0x1410c8 + p * 0x2000 + i * 0x400, ~0, 0);
+ }
+}
+
+/* TODO: Figure out tag memory details and drop the over-cautious allocation.
+ */
+static int
+nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
+{
+ u32 tag_size, tag_margin, tag_align;
+ int ret;
+
+ nv_wr32(priv, 0x17e8d8, priv->part_nr);
+
+ /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
+ priv->num_tags = (pfb->ram.size >> 17) / 4;
+ if (priv->num_tags > (1 << 17))
+ priv->num_tags = 1 << 17; /* we have 17 bits in PTE */
+ priv->num_tags = (priv->num_tags + 63) & ~63; /* round up to 64 */
+
+ tag_align = priv->part_nr * 0x800;
+ tag_margin = (tag_align < 0x6000) ? 0x6000 : tag_align;
+
+ /* 4 part 4 sub: 0x2000 bytes for 56 tags */
+ /* 3 part 4 sub: 0x6000 bytes for 168 tags */
+ /*
+ * About 147 bytes per tag. Let's be safe and allocate x2, which makes
+ * 0x4980 bytes for 64 tags, and round up to 0x6000 bytes for 64 tags.
+ *
+ * For 4 GiB of memory we'll have 8192 tags which makes 3 MiB, < 0.1 %.
+ */
+ tag_size = (priv->num_tags / 64) * 0x6000 + tag_margin;
+ tag_size += tag_align;
+ tag_size = (tag_size + 0xfff) >> 12; /* round up */
+
+ ret = nouveau_mm_tail(&pfb->vram, 0, tag_size, tag_size, 1,
+ &priv->tag_ram);
+ if (ret) {
+ priv->num_tags = 0;
+ } else {
+ u64 tag_base = (priv->tag_ram->offset << 12) + tag_margin;
+
+ tag_base += tag_align - 1;
+ ret = do_div(tag_base, tag_align);
+
+ nv_wr32(priv, 0x17e8d4, tag_base);
+ }
+ ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1);
+
+ return ret;
+}
+
+static int
nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nvc0_ltcg_priv *priv;
+ struct nouveau_fb *pfb = nouveau_fb(parent);
int ret;
ret = nouveau_ltcg_create(parent, engine, oclass, &priv);
@@ -74,19 +174,44 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 24;
+ priv->part_nr = nv_rd32(priv, 0x022438);
+ priv->part_mask = nv_rd32(priv, 0x022554);
+
+ priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28;
+
nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
+ ret = nvc0_ltcg_init_tag_ram(pfb, priv);
+ if (ret)
+ return ret;
+
+ priv->base.tags_alloc = nvc0_ltcg_tags_alloc;
+ priv->base.tags_free = nvc0_ltcg_tags_free;
+ priv->base.tags_clear = nvc0_ltcg_tags_clear;
+
nv_subdev(priv)->intr = nvc0_ltcg_intr;
return 0;
}
+static void
+nvc0_ltcg_dtor(struct nouveau_object *object)
+{
+ struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
+ struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+ struct nouveau_fb *pfb = nouveau_fb(ltcg->base.base.parent);
+
+ nouveau_mm_fini(&priv->tags);
+ nouveau_mm_free(&pfb->vram, &priv->tag_ram);
+
+ nouveau_ltcg_destroy(ltcg);
+}
+
struct nouveau_oclass
nvc0_ltcg_oclass = {
.handle = NV_SUBDEV(LTCG, 0xc0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_ltcg_ctor,
- .dtor = _nouveau_ltcg_dtor,
+ .dtor = nvc0_ltcg_dtor,
.init = _nouveau_ltcg_init,
.fini = _nouveau_ltcg_fini,
},
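
The tag-RAM sizing in nvc0_ltcg_init_tag_ram() above budgets tags for a quarter of VRAM, caps the count at 1 << 17 (the PTE field width), rounds it up to a multiple of 64, and reserves 0x6000 bytes per group of 64 tags plus margin and alignment, carving the result out of the tail of VRAM. Below is a standalone rerun of that arithmetic for the 4 GiB case mentioned in the in-code comment; the part_nr value of 4 is an assumption made for this example.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vram     = 4ull << 30;               /* 4 GiB             */
	uint32_t num_tags = (vram >> 17) / 4;         /* 8192 tags         */
	uint32_t part_nr  = 4;                        /* assumed           */
	uint32_t align    = part_nr * 0x800;
	uint32_t margin   = align < 0x6000 ? 0x6000 : align;
	uint32_t bytes, pages;

	num_tags = (num_tags + 63) & ~63u;            /* round up to 64    */
	bytes = (num_tags / 64) * 0x6000 + margin + align;
	pages = (bytes + 0xfff) >> 12;                /* 4 KiB units       */

	printf("%u tags, %u pages (~%u KiB) of tag RAM\n",
	       num_tags, pages, pages * 4);           /* ~3 MiB, < 0.1 %   */
	return 0;
}
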
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index 8379aaf..1c0330b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -24,10 +24,10 @@
#include <subdev/mc.h>
-void
-nouveau_mc_intr(struct nouveau_subdev *subdev)
+static irqreturn_t
+nouveau_mc_intr(int irq, void *arg)
{
- struct nouveau_mc *pmc = nouveau_mc(subdev);
+ struct nouveau_mc *pmc = arg;
const struct nouveau_mc_intr *map = pmc->intr_map;
struct nouveau_subdev *unit;
u32 stat, intr;
@@ -35,7 +35,7 @@ nouveau_mc_intr(struct nouveau_subdev *subdev)
intr = stat = nv_rd32(pmc, 0x000100);
while (stat && map->stat) {
if (stat & map->stat) {
- unit = nouveau_subdev(subdev, map->unit);
+ unit = nouveau_subdev(pmc, map->unit);
if (unit && unit->intr)
unit->intr(unit);
intr &= ~map->stat;
@@ -46,4 +46,56 @@ nouveau_mc_intr(struct nouveau_subdev *subdev)
if (intr) {
nv_error(pmc, "unknown intr 0x%08x\n", stat);
}
+
+ return stat ? IRQ_HANDLED : IRQ_NONE;
+}
+
+int
+_nouveau_mc_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nouveau_mc *pmc = (void *)object;
+ nv_wr32(pmc, 0x000140, 0x00000000);
+ return nouveau_subdev_fini(&pmc->base, suspend);
+}
+
+int
+_nouveau_mc_init(struct nouveau_object *object)
+{
+ struct nouveau_mc *pmc = (void *)object;
+ int ret = nouveau_subdev_init(&pmc->base);
+ if (ret)
+ return ret;
+ nv_wr32(pmc, 0x000140, 0x00000001);
+ return 0;
+}
+
+void
+_nouveau_mc_dtor(struct nouveau_object *object)
+{
+ struct nouveau_device *device = nv_device(object);
+ struct nouveau_mc *pmc = (void *)object;
+ free_irq(device->pdev->irq, pmc);
+ nouveau_subdev_destroy(&pmc->base);
+}
+
+int
+nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int length, void **pobject)
+{
+ struct nouveau_device *device = nv_device(parent);
+ struct nouveau_mc *pmc;
+ int ret;
+
+ ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PMC",
+ "master", length, pobject);
+ pmc = *pobject;
+ if (ret)
+ return ret;
+
+ ret = request_irq(device->pdev->irq, nouveau_mc_intr,
+ IRQF_SHARED, "nouveau", pmc);
+ if (ret < 0)
+ return ret;
+
+ return 0;
}
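
With the rework above, PMC interrupt dispatch moves from the subdev ->intr hook to a shared IRQ handler registered with request_irq(); the handler reports IRQ_HANDLED only when 0x000100 shows pending bits, so other devices on a shared line are not starved, and _nouveau_mc_init()/_nouveau_mc_fini() write 1/0 to 0x000140 around resume/suspend. The generic Linux pattern is sketched below with a placeholder device; "mydev", its register offsets and helpers are not nouveau API.

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/types.h>

struct mydev { void __iomem *mmio; int irq; };   /* placeholder device */

static irqreturn_t mydev_intr(int irq, void *arg)
{
	struct mydev *dev = arg;
	u32 stat = readl(dev->mmio + 0x100);     /* pending status */

	if (!stat)
		return IRQ_NONE;                 /* not ours       */
	writel(stat, dev->mmio + 0x100);         /* ack            */
	return IRQ_HANDLED;
}

static int mydev_setup_irq(struct mydev *dev)
{
	return request_irq(dev->irq, mydev_intr, IRQF_SHARED,
			   "mydev", dev);
}
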
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
index 89da8fa..8c76971 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -55,7 +55,6 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_subdev(priv)->intr = nouveau_mc_intr;
priv->base.intr_map = nv04_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
index 397d868..5191937 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -41,7 +41,6 @@ nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_subdev(priv)->intr = nouveau_mc_intr;
priv->base.intr_map = nv04_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index 5965add..d796924 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -57,7 +57,6 @@ nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_subdev(priv)->intr = nouveau_mc_intr;
priv->base.intr_map = nv50_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index 3a80b29..e82fd21 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -59,7 +59,6 @@ nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_subdev(priv)->intr = nouveau_mc_intr;
priv->base.intr_map = nv98_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index 42bbf72..737bd4b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -61,7 +61,6 @@ nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_subdev(priv)->intr = nouveau_mc_intr;
priv->base.intr_map = nvc0_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
index a70d1b7..002e51b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
@@ -165,7 +165,7 @@ nv40_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
return 0;
}
-static void
+void
nv40_therm_intr(struct nouveau_subdev *subdev)
{
struct nouveau_therm *therm = nouveau_therm(subdev);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
index 86632cb..8cf7597 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
@@ -118,145 +118,36 @@ nv50_fan_pwm_clock(struct nouveau_therm *therm)
return pwm_clock;
}
-int
-nv50_temp_get(struct nouveau_therm *therm)
-{
- return nv_rd32(therm, 0x20400);
-}
-
-static void
-nv50_therm_program_alarms(struct nouveau_therm *therm)
-{
- struct nouveau_therm_priv *priv = (void *)therm;
- struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
-
- /* enable RISING and FALLING IRQs for shutdown, THRS 0, 1, 2 and 4 */
- nv_wr32(therm, 0x20000, 0x000003ff);
-
- /* shutdown: The computer should be shutdown when reached */
- nv_wr32(therm, 0x20484, sensor->thrs_shutdown.hysteresis);
- nv_wr32(therm, 0x20480, sensor->thrs_shutdown.temp);
-
- /* THRS_1 : fan boost*/
- nv_wr32(therm, 0x204c4, sensor->thrs_fan_boost.temp);
-
- /* THRS_2 : critical */
- nv_wr32(therm, 0x204c0, sensor->thrs_critical.temp);
-
- /* THRS_4 : down clock */
- nv_wr32(therm, 0x20414, sensor->thrs_down_clock.temp);
- spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
-
- nv_info(therm,
- "Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
- sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
- sensor->thrs_down_clock.temp,
- sensor->thrs_down_clock.hysteresis,
- sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
- sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
-
-}
-
-/* must be called with alarm_program_lock taken ! */
static void
-nv50_therm_threshold_hyst_emulation(struct nouveau_therm *therm,
- uint32_t thrs_reg, u8 status_bit,
- const struct nvbios_therm_threshold *thrs,
- enum nouveau_therm_thrs thrs_name)
+nv50_sensor_setup(struct nouveau_therm *therm)
{
- enum nouveau_therm_thrs_direction direction;
- enum nouveau_therm_thrs_state prev_state, new_state;
- int temp, cur;
-
- prev_state = nouveau_therm_sensor_get_threshold_state(therm, thrs_name);
- temp = nv_rd32(therm, thrs_reg);
-
- /* program the next threshold */
- if (temp == thrs->temp) {
- nv_wr32(therm, thrs_reg, thrs->temp - thrs->hysteresis);
- new_state = NOUVEAU_THERM_THRS_HIGHER;
- } else {
- nv_wr32(therm, thrs_reg, thrs->temp);
- new_state = NOUVEAU_THERM_THRS_LOWER;
- }
-
- /* fix the state (in case someone reprogrammed the alarms) */
- cur = therm->temp_get(therm);
- if (new_state == NOUVEAU_THERM_THRS_LOWER && cur > thrs->temp)
- new_state = NOUVEAU_THERM_THRS_HIGHER;
- else if (new_state == NOUVEAU_THERM_THRS_HIGHER &&
- cur < thrs->temp - thrs->hysteresis)
- new_state = NOUVEAU_THERM_THRS_LOWER;
- nouveau_therm_sensor_set_threshold_state(therm, thrs_name, new_state);
-
- /* find the direction */
- if (prev_state < new_state)
- direction = NOUVEAU_THERM_THRS_RISING;
- else if (prev_state > new_state)
- direction = NOUVEAU_THERM_THRS_FALLING;
- else
- return;
-
- /* advertise a change in direction */
- nouveau_therm_sensor_event(therm, thrs_name, direction);
+ nv_mask(therm, 0x20010, 0x40000000, 0x0);
+ mdelay(20); /* wait for the temperature to stabilize */
}
-static void
-nv50_therm_intr(struct nouveau_subdev *subdev)
+static int
+nv50_temp_get(struct nouveau_therm *therm)
{
- struct nouveau_therm *therm = nouveau_therm(subdev);
struct nouveau_therm_priv *priv = (void *)therm;
struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
- unsigned long flags;
- uint32_t intr;
-
- spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
-
- intr = nv_rd32(therm, 0x20100);
-
- /* THRS_4: downclock */
- if (intr & 0x002) {
- nv50_therm_threshold_hyst_emulation(therm, 0x20414, 24,
- &sensor->thrs_down_clock,
- NOUVEAU_THERM_THRS_DOWNCLOCK);
- intr &= ~0x002;
- }
+ int core_temp;
- /* shutdown */
- if (intr & 0x004) {
- nv50_therm_threshold_hyst_emulation(therm, 0x20480, 20,
- &sensor->thrs_shutdown,
- NOUVEAU_THERM_THRS_SHUTDOWN);
- intr &= ~0x004;
- }
-
- /* THRS_1 : fan boost */
- if (intr & 0x008) {
- nv50_therm_threshold_hyst_emulation(therm, 0x204c4, 21,
- &sensor->thrs_fan_boost,
- NOUVEAU_THERM_THRS_FANBOOST);
- intr &= ~0x008;
- }
+ core_temp = nv_rd32(therm, 0x20014) & 0x3fff;
- /* THRS_2 : critical */
- if (intr & 0x010) {
- nv50_therm_threshold_hyst_emulation(therm, 0x204c0, 22,
- &sensor->thrs_critical,
- NOUVEAU_THERM_THRS_CRITICAL);
- intr &= ~0x010;
- }
+ /* if the slope or the offset is unset, do not use the sensor */
+ if (!sensor->slope_div || !sensor->slope_mult ||
+ !sensor->offset_num || !sensor->offset_den)
+ return -ENODEV;
- if (intr)
- nv_error(therm, "unhandled intr 0x%08x\n", intr);
+ core_temp = core_temp * sensor->slope_mult / sensor->slope_div;
+ core_temp = core_temp + sensor->offset_num / sensor->offset_den;
+ core_temp = core_temp + sensor->offset_constant - 8;
- /* ACK everything */
- nv_wr32(therm, 0x20100, 0xffffffff);
- nv_wr32(therm, 0x1100, 0x10000); /* PBUS */
+ /* reserve negative temperatures for errors */
+ if (core_temp < 0)
+ core_temp = 0;
- spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+ return core_temp;
}
static int
@@ -278,33 +169,29 @@ nv50_therm_ctor(struct nouveau_object *parent,
priv->base.base.pwm_set = nv50_fan_pwm_set;
priv->base.base.pwm_clock = nv50_fan_pwm_clock;
priv->base.base.temp_get = nv50_temp_get;
- priv->base.sensor.program_alarms = nv50_therm_program_alarms;
- nv_subdev(priv)->intr = nv50_therm_intr;
-
- /* init the thresholds */
- nouveau_therm_sensor_set_threshold_state(&priv->base.base,
- NOUVEAU_THERM_THRS_SHUTDOWN,
- NOUVEAU_THERM_THRS_LOWER);
- nouveau_therm_sensor_set_threshold_state(&priv->base.base,
- NOUVEAU_THERM_THRS_FANBOOST,
- NOUVEAU_THERM_THRS_LOWER);
- nouveau_therm_sensor_set_threshold_state(&priv->base.base,
- NOUVEAU_THERM_THRS_CRITICAL,
- NOUVEAU_THERM_THRS_LOWER);
- nouveau_therm_sensor_set_threshold_state(&priv->base.base,
- NOUVEAU_THERM_THRS_DOWNCLOCK,
- NOUVEAU_THERM_THRS_LOWER);
+ priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
+ nv_subdev(priv)->intr = nv40_therm_intr;
return nouveau_therm_preinit(&priv->base.base);
}
+static int
+nv50_therm_init(struct nouveau_object *object)
+{
+ struct nouveau_therm *therm = (void *)object;
+
+ nv50_sensor_setup(therm);
+
+ return _nouveau_therm_init(object);
+}
+
struct nouveau_oclass
nv50_therm_oclass = {
.handle = NV_SUBDEV(THERM, 0x50),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_therm_ctor,
.dtor = _nouveau_therm_dtor,
- .init = _nouveau_therm_init,
+ .init = nv50_therm_init,
.fini = _nouveau_therm_fini,
},
};
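
The nv50 sensor path is reworked above: the raw reading now comes from 0x20014 and is turned into degrees with the slope and offset coefficients from the VBIOS sensor table, bailing out with -ENODEV when the calibration data is missing and clamping negative results to zero so negative values stay reserved for errors. A standalone sketch of that calibration follows; the coefficient values are made up for illustration, real ones come from the VBIOS.

#include <stdio.h>

static int calibrate(int raw, int slope_mult, int slope_div,
		     int off_num, int off_den, int off_const)
{
	int t = raw * slope_mult / slope_div;

	t += off_num / off_den;
	t += off_const - 8;
	return t < 0 ? 0 : t;   /* negative values reserved for errors */
}

int main(void)
{
	/* hypothetical calibration: raw 0x1f00 -> 62 degrees C */
	printf("%d C\n", calibrate(0x1f00, 1, 128, 1, 1, 7));
	return 0;
}
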
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
new file mode 100644
index 0000000..42ba633
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ * Martin Peres
+ */
+
+#include "priv.h"
+
+struct nv84_therm_priv {
+ struct nouveau_therm_priv base;
+};
+
+int
+nv84_temp_get(struct nouveau_therm *therm)
+{
+ return nv_rd32(therm, 0x20400);
+}
+
+static void
+nv84_therm_program_alarms(struct nouveau_therm *therm)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+
+ /* enable RISING and FALLING IRQs for shutdown, THRS 0, 1, 2 and 4 */
+ nv_wr32(therm, 0x20000, 0x000003ff);
+
+ /* shutdown: The computer should be shutdown when reached */
+ nv_wr32(therm, 0x20484, sensor->thrs_shutdown.hysteresis);
+ nv_wr32(therm, 0x20480, sensor->thrs_shutdown.temp);
+
+ /* THRS_1 : fan boost */
+ nv_wr32(therm, 0x204c4, sensor->thrs_fan_boost.temp);
+
+ /* THRS_2 : critical */
+ nv_wr32(therm, 0x204c0, sensor->thrs_critical.temp);
+
+ /* THRS_4 : down clock */
+ nv_wr32(therm, 0x20414, sensor->thrs_down_clock.temp);
+ spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+
+ nv_debug(therm,
+ "Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
+ sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
+ sensor->thrs_down_clock.temp,
+ sensor->thrs_down_clock.hysteresis,
+ sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
+ sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
+
+}
+
+/* must be called with alarm_program_lock taken ! */
+static void
+nv84_therm_threshold_hyst_emulation(struct nouveau_therm *therm,
+ uint32_t thrs_reg, u8 status_bit,
+ const struct nvbios_therm_threshold *thrs,
+ enum nouveau_therm_thrs thrs_name)
+{
+ enum nouveau_therm_thrs_direction direction;
+ enum nouveau_therm_thrs_state prev_state, new_state;
+ int temp, cur;
+
+ prev_state = nouveau_therm_sensor_get_threshold_state(therm, thrs_name);
+ temp = nv_rd32(therm, thrs_reg);
+
+ /* program the next threshold */
+ if (temp == thrs->temp) {
+ nv_wr32(therm, thrs_reg, thrs->temp - thrs->hysteresis);
+ new_state = NOUVEAU_THERM_THRS_HIGHER;
+ } else {
+ nv_wr32(therm, thrs_reg, thrs->temp);
+ new_state = NOUVEAU_THERM_THRS_LOWER;
+ }
+
+ /* fix the state (in case someone reprogrammed the alarms) */
+ cur = therm->temp_get(therm);
+ if (new_state == NOUVEAU_THERM_THRS_LOWER && cur > thrs->temp)
+ new_state = NOUVEAU_THERM_THRS_HIGHER;
+ else if (new_state == NOUVEAU_THERM_THRS_HIGHER &&
+ cur < thrs->temp - thrs->hysteresis)
+ new_state = NOUVEAU_THERM_THRS_LOWER;
+ nouveau_therm_sensor_set_threshold_state(therm, thrs_name, new_state);
+
+ /* find the direction */
+ if (prev_state < new_state)
+ direction = NOUVEAU_THERM_THRS_RISING;
+ else if (prev_state > new_state)
+ direction = NOUVEAU_THERM_THRS_FALLING;
+ else
+ return;
+
+ /* advertise a change in direction */
+ nouveau_therm_sensor_event(therm, thrs_name, direction);
+}
+
+static void
+nv84_therm_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_therm *therm = nouveau_therm(subdev);
+ struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+ unsigned long flags;
+ uint32_t intr;
+
+ spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+
+ intr = nv_rd32(therm, 0x20100);
+
+ /* THRS_4: downclock */
+ if (intr & 0x002) {
+ nv84_therm_threshold_hyst_emulation(therm, 0x20414, 24,
+ &sensor->thrs_down_clock,
+ NOUVEAU_THERM_THRS_DOWNCLOCK);
+ intr &= ~0x002;
+ }
+
+ /* shutdown */
+ if (intr & 0x004) {
+ nv84_therm_threshold_hyst_emulation(therm, 0x20480, 20,
+ &sensor->thrs_shutdown,
+ NOUVEAU_THERM_THRS_SHUTDOWN);
+ intr &= ~0x004;
+ }
+
+ /* THRS_1 : fan boost */
+ if (intr & 0x008) {
+ nv84_therm_threshold_hyst_emulation(therm, 0x204c4, 21,
+ &sensor->thrs_fan_boost,
+ NOUVEAU_THERM_THRS_FANBOOST);
+ intr &= ~0x008;
+ }
+
+ /* THRS_2 : critical */
+ if (intr & 0x010) {
+ nv84_therm_threshold_hyst_emulation(therm, 0x204c0, 22,
+ &sensor->thrs_critical,
+ NOUVEAU_THERM_THRS_CRITICAL);
+ intr &= ~0x010;
+ }
+
+ if (intr)
+ nv_error(therm, "unhandled intr 0x%08x\n", intr);
+
+ /* ACK everything */
+ nv_wr32(therm, 0x20100, 0xffffffff);
+ nv_wr32(therm, 0x1100, 0x10000); /* PBUS */
+
+ spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+}
+
+static int
+nv84_therm_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv84_therm_priv *priv;
+ int ret;
+
+ ret = nouveau_therm_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
+ priv->base.base.pwm_get = nv50_fan_pwm_get;
+ priv->base.base.pwm_set = nv50_fan_pwm_set;
+ priv->base.base.pwm_clock = nv50_fan_pwm_clock;
+ priv->base.base.temp_get = nv84_temp_get;
+ priv->base.sensor.program_alarms = nv84_therm_program_alarms;
+ nv_subdev(priv)->intr = nv84_therm_intr;
+
+ /* init the thresholds */
+ nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+ NOUVEAU_THERM_THRS_SHUTDOWN,
+ NOUVEAU_THERM_THRS_LOWER);
+ nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+ NOUVEAU_THERM_THRS_FANBOOST,
+ NOUVEAU_THERM_THRS_LOWER);
+ nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+ NOUVEAU_THERM_THRS_CRITICAL,
+ NOUVEAU_THERM_THRS_LOWER);
+ nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+ NOUVEAU_THERM_THRS_DOWNCLOCK,
+ NOUVEAU_THERM_THRS_LOWER);
+
+ return nouveau_therm_preinit(&priv->base.base);
+}
+
+struct nouveau_oclass
+nv84_therm_oclass = {
+ .handle = NV_SUBDEV(THERM, 0x84),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv84_therm_ctor,
+ .dtor = _nouveau_therm_dtor,
+ .init = _nouveau_therm_init,
+ .fini = _nouveau_therm_fini,
+ },
+};
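The hysteresis emulation above boils down to a two-state machine: after each interrupt the alarm register is reprogrammed to the opposite edge (the threshold when below it, threshold minus hysteresis when above), and the state is then re-derived from the current temperature in case something else reprogrammed the alarms; a direction event is only raised when the state actually changes. Below is a minimal, standalone sketch of that state machine. It is plain C with illustrative names and made-up sample values, not the nouveau subdev API.

/* Illustrative sketch of the hysteresis state machine used above;
 * standalone C, not the nouveau therm code. */
#include <stdio.h>

enum thrs_state { THRS_LOWER, THRS_HIGHER };

struct threshold { int temp, hyst; };

/* Return the new state for the current temperature: rise when we pass
 * temp, fall only once we drop below temp - hyst. */
static enum thrs_state update_state(const struct threshold *t,
                                    enum thrs_state state, int cur)
{
    if (state == THRS_LOWER && cur > t->temp)
        return THRS_HIGHER;
    if (state == THRS_HIGHER && cur < t->temp - t->hyst)
        return THRS_LOWER;
    return state;
}

int main(void)
{
    struct threshold t = { .temp = 90, .hyst = 5 };
    enum thrs_state s = THRS_LOWER;
    int samples[] = { 80, 91, 88, 86, 84, 95 };

    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        enum thrs_state prev = s;
        s = update_state(&t, s, samples[i]);
        if (s != prev)   /* only a state change produces an event */
            printf("%d C: %s\n", samples[i],
                   s == THRS_HIGHER ? "rising past threshold"
                                    : "fallen below hysteresis");
    }
    return 0;
}

With these sample values the sketch reports one rising event at 91 C and one falling event at 84 C; 88 and 86 produce nothing, matching the temp / temp - hysteresis comparison performed above.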
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
index 2dcc543..d11a7c4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
@@ -81,7 +81,7 @@ nva3_therm_ctor(struct nouveau_object *parent,
priv->base.base.pwm_get = nv50_fan_pwm_get;
priv->base.base.pwm_set = nv50_fan_pwm_set;
priv->base.base.pwm_clock = nv50_fan_pwm_clock;
- priv->base.base.temp_get = nv50_temp_get;
+ priv->base.base.temp_get = nv84_temp_get;
priv->base.base.fan_sense = nva3_therm_fan_sense;
priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
return nouveau_therm_preinit(&priv->base.base);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
index d7d30ee..54c28bd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
@@ -135,7 +135,7 @@ nvd0_therm_ctor(struct nouveau_object *parent,
priv->base.base.pwm_get = nvd0_fan_pwm_get;
priv->base.base.pwm_set = nvd0_fan_pwm_set;
priv->base.base.pwm_clock = nvd0_fan_pwm_clock;
- priv->base.base.temp_get = nv50_temp_get;
+ priv->base.base.temp_get = nv84_temp_get;
priv->base.base.fan_sense = nva3_therm_fan_sense;
priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
return nouveau_therm_preinit(&priv->base.base);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
index 438d982..15ca64e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
@@ -134,11 +134,12 @@ void nouveau_therm_sensor_event(struct nouveau_therm *therm,
enum nouveau_therm_thrs_direction dir);
void nouveau_therm_program_alarms_polling(struct nouveau_therm *therm);
+void nv40_therm_intr(struct nouveau_subdev *);
int nv50_fan_pwm_ctrl(struct nouveau_therm *, int, bool);
int nv50_fan_pwm_get(struct nouveau_therm *, int, u32 *, u32 *);
int nv50_fan_pwm_set(struct nouveau_therm *, int, u32, u32);
int nv50_fan_pwm_clock(struct nouveau_therm *);
-int nv50_temp_get(struct nouveau_therm *therm);
+int nv84_temp_get(struct nouveau_therm *therm);
int nva3_therm_fan_sense(struct nouveau_therm *);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index 470f6a4..dde746c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -205,13 +205,13 @@ nouveau_therm_program_alarms_polling(struct nouveau_therm *therm)
struct nouveau_therm_priv *priv = (void *)therm;
struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
- nv_info(therm,
- "programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
- sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
- sensor->thrs_down_clock.temp,
- sensor->thrs_down_clock.hysteresis,
- sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
- sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
+ nv_debug(therm,
+ "programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
+ sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
+ sensor->thrs_down_clock.temp,
+ sensor->thrs_down_clock.hysteresis,
+ sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
+ sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
alarm_timer_callback(&priv->sensor.therm_poll_alarm);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
index 8e1bae4..9469b82 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
@@ -96,11 +96,16 @@ nv04_timer_alarm(struct nouveau_timer *ptimer, u64 time,
/* append new alarm to list, in soonest-alarm-first order */
spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry(list, &priv->alarms, head) {
- if (list->timestamp > alarm->timestamp)
- break;
+ if (!time) {
+ if (!list_empty(&alarm->head))
+ list_del(&alarm->head);
+ } else {
+ list_for_each_entry(list, &priv->alarms, head) {
+ if (list->timestamp > alarm->timestamp)
+ break;
+ }
+ list_add_tail(&alarm->head, &list->head);
}
- list_add_tail(&alarm->head, &list->head);
spin_unlock_irqrestore(&priv->lock, flags);
/* process pending alarms */
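The nv04 timer change above gives the alarm call a cancel semantic: a zero expiry unlinks a pending alarm instead of queueing it, while non-zero expiries are still inserted in soonest-first order so the interrupt handler only has to inspect the list head. A standalone sketch of that list discipline follows; it uses a hand-rolled singly linked list in plain C, not the kernel's list_head API.

/* Illustrative sketch of soonest-first alarm insertion with time == 0
 * acting as a cancel; standalone C, not the nouveau timer code. */
#include <stdio.h>
#include <stdint.h>

struct alarm {
    uint64_t timestamp;
    struct alarm *next;
};

/* Insert in ascending-timestamp order when time != 0, otherwise unlink. */
static void alarm_set(struct alarm **list, struct alarm *a, uint64_t time)
{
    struct alarm **p;

    /* always unlink first so re-arming an alarm never double-inserts */
    for (p = list; *p; p = &(*p)->next) {
        if (*p == a) {
            *p = a->next;
            break;
        }
    }
    if (!time)
        return;                       /* time == 0 means "cancel" */

    a->timestamp = time;
    for (p = list; *p && (*p)->timestamp <= time; p = &(*p)->next)
        ;
    a->next = *p;
    *p = a;
}

int main(void)
{
    struct alarm a = {0}, b = {0}, c = {0};
    struct alarm *list = NULL;

    alarm_set(&list, &a, 300);
    alarm_set(&list, &b, 100);
    alarm_set(&list, &c, 200);
    alarm_set(&list, &b, 0);          /* cancel b before it fires */

    for (struct alarm *it = list; it; it = it->next)
        printf("pending alarm at %llu\n", (unsigned long long)it->timestamp);
    return 0;
}

The cancelled alarm disappears from the pending list, leaving 200 and 300 in order, which is exactly what the reworked nv04_timer_alarm() relies on.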
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
index 6adbbc9..ed45437 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
@@ -110,7 +110,7 @@ nv04_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL,
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL,
(NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 +
8, 16, NVOBJ_FLAG_ZERO_ALLOC,
&priv->vm->pgt[0].obj[0]);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
index 9474cfc..064c762 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
@@ -119,7 +119,7 @@ nv41_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL,
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL,
(NV41_GART_SIZE / NV41_GART_PAGE) * 4,
16, NVOBJ_FLAG_ZERO_ALLOC,
&priv->vm->pgt[0].obj[0]);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
index aa81314..fae1f67 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
@@ -196,7 +196,7 @@ nv44_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- ret = nouveau_gpuobj_new(parent, NULL,
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL,
(NV44_GART_SIZE / NV44_GART_PAGE) * 4,
512 * 1024, NVOBJ_FLAG_ZERO_ALLOC,
&priv->vm->pgt[0].obj[0]);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
index 30c61e6..4c3b0a2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
@@ -28,12 +28,54 @@
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/vm.h>
+#include <subdev/ltcg.h>
struct nvc0_vmmgr_priv {
struct nouveau_vmmgr base;
spinlock_t lock;
};
+
+/* Map from compressed to corresponding uncompressed storage type.
+ * The value 0xff represents an invalid storage type.
+ */
+const u8 nvc0_pte_storage_type_map[256] =
+{
+ 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
+ 0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
+ 0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
+ 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
+ 0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
+ 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
+ 0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
+ 0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
+ 0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
+ 0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
+ 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
+ 0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
+ 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
+};
+
+
static void
nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
struct nouveau_gpuobj *pgt[2])
@@ -68,10 +110,20 @@ static void
nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
- u32 next = 1 << (vma->node->type - 8);
+ u64 next = 1 << (vma->node->type - 8);
phys = nvc0_vm_addr(vma, phys, mem->memtype, 0);
pte <<= 3;
+
+ if (mem->tag) {
+ struct nouveau_ltcg *ltcg =
+ nouveau_ltcg(vma->vm->vmm->base.base.parent);
+ u32 tag = mem->tag->offset + (delta >> 17);
+ phys |= (u64)tag << (32 + 12);
+ next |= (u64)1 << (32 + 12);
+ ltcg->tags_clear(ltcg, tag, cnt);
+ }
+
while (cnt--) {
nv_wo32(pgt, pte + 0, lower_32_bits(phys));
nv_wo32(pgt, pte + 4, upper_32_bits(phys));
@@ -85,10 +137,12 @@ nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
+ /* compressed storage types are invalid for system memory */
+ u32 memtype = nvc0_pte_storage_type_map[mem->memtype & 0xff];
pte <<= 3;
while (cnt--) {
- u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, target);
+ u64 phys = nvc0_vm_addr(vma, *list++, memtype, target);
nv_wo32(pgt, pte + 0, lower_32_bits(phys));
nv_wo32(pgt, pte + 4, upper_32_bits(phys));
pte += 8;
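Two ideas in the nvc0 VM hunks above are worth spelling out: compression tags only exist for VRAM, so system-memory mappings must first fold a compressed storage type back to its uncompressed equivalent via nvc0_pte_storage_type_map (0xff marking invalid types), and for VRAM the tag offset is packed into the high PTE bits, with the per-PTE address increment widened to 64 bits so the tag field can advance along with it. The following standalone sketch shows only the remap-before-building-a-PTE step; the table contents and bit layout are placeholders, and only the lookup idea mirrors the code above.

/* Illustrative sketch of remapping a (possibly compressed) storage type
 * before building a system-memory PTE; standalone C, not the nouveau API.
 * Table values and the bit layout are placeholders. */
#include <stdio.h>
#include <stdint.h>

/* tiny stand-in for nvc0_pte_storage_type_map: identity for a couple of
 * kinds, with two hypothetical compressed kinds falling back to their
 * uncompressed equivalent (a full table would mark invalid kinds 0xff) */
static const uint8_t kind_map[0x30] = {
    [0x00] = 0x00, [0x01] = 0x01,
    [0x11] = 0x01,              /* compressed -> uncompressed */
    [0x26] = 0x01,              /* compressed -> uncompressed */
};

static uint64_t build_sysmem_pte(uint64_t phys, uint8_t memtype)
{
    uint8_t kind = kind_map[memtype & 0x2f];

    /* placeholder layout: address in the low bits, kind in bits 56..63 */
    return (phys & 0x00ffffffffffffffULL) | ((uint64_t)kind << 56);
}

int main(void)
{
    uint64_t pte = build_sysmem_pte(0x12345000, 0x11);

    printf("pte = 0x%016llx (kind 0x%02x)\n",
           (unsigned long long)pte, (unsigned)(pte >> 56));
    return 0;
}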
diff --git a/drivers/gpu/drm/nouveau/dispnv04/Makefile b/drivers/gpu/drm/nouveau/dispnv04/Makefile
new file mode 100644
index 0000000..ea3f5b8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv04/Makefile
@@ -0,0 +1,10 @@
+nouveau-y += dispnv04/arb.o
+nouveau-y += dispnv04/crtc.o
+nouveau-y += dispnv04/cursor.o
+nouveau-y += dispnv04/dac.o
+nouveau-y += dispnv04/dfp.o
+nouveau-y += dispnv04/disp.o
+nouveau-y += dispnv04/hw.o
+nouveau-y += dispnv04/tvmodesnv17.o
+nouveau-y += dispnv04/tvnv04.o
+nouveau-y += dispnv04/tvnv17.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index 6da5764..2e70462 100644
--- a/drivers/gpu/drm/nouveau/nouveau_calc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -25,7 +25,7 @@
#include "nouveau_drm.h"
#include "nouveau_reg.h"
-#include "nouveau_hw.h"
+#include "hw.h"
/****************************************************************************\
* *
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 6578cd2..0782bd2 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -33,10 +33,10 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_crtc.h"
-#include "nouveau_hw.h"
+#include "hw.h"
#include "nvreg.h"
#include "nouveau_fbcon.h"
-#include "nv04_display.h"
+#include "disp.h"
#include <subdev/bios/pll.h>
#include <subdev/clock.h>
@@ -1070,4 +1070,3 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
return 0;
}
-
diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
index fe86f0d..a810303 100644
--- a/drivers/gpu/drm/nouveau/nv04_cursor.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
@@ -3,7 +3,7 @@
#include "nouveau_drm.h"
#include "nouveau_reg.h"
#include "nouveau_crtc.h"
-#include "nouveau_hw.h"
+#include "hw.h"
static void
nv04_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
@@ -68,4 +68,3 @@ nv04_cursor_init(struct nouveau_crtc *crtc)
crtc->cursor.show = nv04_cursor_show;
return 0;
}
-
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index 64f7020..434b920 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -31,7 +31,7 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_crtc.h"
-#include "nouveau_hw.h"
+#include "hw.h"
#include "nvreg.h"
#include <subdev/bios/gpio.h>
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 7e24cdf..93dd23ff 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -32,7 +32,7 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_crtc.h"
-#include "nouveau_hw.h"
+#include "hw.h"
#include "nvreg.h"
#include <drm/i2c/sil164.h>
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index ad48444..4908d3f 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -30,7 +30,7 @@
#include "nouveau_drm.h"
#include "nouveau_reg.h"
-#include "nouveau_hw.h"
+#include "hw.h"
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
diff --git a/drivers/gpu/drm/nouveau/nv04_display.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index a0a031d..a0a031d 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 617a06f..973056b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -24,7 +24,7 @@
#include <drm/drmP.h>
#include "nouveau_drm.h"
-#include "nouveau_hw.h"
+#include "hw.h"
#include <subdev/bios/pll.h>
#include <subdev/clock.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.h b/drivers/gpu/drm/nouveau/dispnv04/hw.h
index 7dff102..eeb70d9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.h
@@ -24,7 +24,8 @@
#define __NOUVEAU_HW_H__
#include <drm/drmP.h>
-#include "nv04_display.h"
+#include "disp.h"
+#include "nvreg.h"
#include <subdev/bios/pll.h>
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/dispnv04/nvreg.h
index bbfb1a6..bbfb1a6 100644
--- a/drivers/gpu/drm/nouveau/nvreg.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/nvreg.h
diff --git a/drivers/gpu/drm/nouveau/nv17_tv_modes.c b/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
index 1cdfe2a..08c6f5e 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv_modes.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
@@ -29,8 +29,8 @@
#include "nouveau_drm.h"
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
-#include "nouveau_hw.h"
-#include "nv17_tv.h"
+#include "hw.h"
+#include "tvnv17.h"
char *nv17_tv_norm_names[NUM_TV_NORMS] = {
[TV_NORM_PAL] = "PAL",
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 4a69ccd..bf13db4 100644
--- a/drivers/gpu/drm/nouveau/nv04_tv.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -30,7 +30,7 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_crtc.h"
-#include "nouveau_hw.h"
+#include "hw.h"
#include <drm/drm_crtc_helper.h>
#include <drm/i2c/ch7006.h>
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 977e42b..acef48f 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -31,8 +31,8 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_crtc.h"
-#include "nouveau_hw.h"
-#include "nv17_tv.h"
+#include "hw.h"
+#include "tvnv17.h"
#include <core/device.h>
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.h b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
index 7b33154..7b33154 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 5eb3e0d..1c4c6c9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -30,6 +30,7 @@
#include <subdev/fb.h>
#include <subdev/timer.h>
#include <subdev/instmem.h>
+#include <engine/graph.h>
#include "nouveau_drm.h"
#include "nouveau_dma.h"
@@ -168,6 +169,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_device *device = nv_device(drm->device);
struct nouveau_timer *ptimer = nouveau_timer(device);
+ struct nouveau_graph *graph = (void *)nouveau_engine(device, NVDEV_ENGINE_GR);
struct drm_nouveau_getparam *getparam = data;
switch (getparam->param) {
@@ -208,14 +210,8 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
getparam->value = 1;
break;
case NOUVEAU_GETPARAM_GRAPH_UNITS:
- /* NV40 and NV50 versions are quite different, but register
- * address is the same. User is supposed to know the card
- * family anyway... */
- if (device->chipset >= 0x40) {
- getparam->value = nv_rd32(device, 0x001540);
- break;
- }
- /* FALLTHRU */
+ getparam->value = graph->units ? graph->units(graph) : 0;
+ break;
default:
nv_debug(device, "unknown parameter %lld\n", getparam->param);
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 5d94030..2ffad21 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -239,6 +239,9 @@ nouveau_backlight_init(struct drm_device *dev)
case NV_40:
return nv40_backlight_init(connector);
case NV_50:
+ case NV_C0:
+ case NV_D0:
+ case NV_E0:
return nv50_backlight_init(connector);
default:
break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 50a6dd0..6aa2137 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -28,7 +28,7 @@
#include "nouveau_drm.h"
#include "nouveau_reg.h"
-#include "nouveau_hw.h"
+#include "dispnv04/hw.h"
#include "nouveau_encoder.h"
#include <linux/io-mapping.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 7ccd28f..0067586 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -24,8 +24,6 @@
#ifndef __NOUVEAU_DISPBIOS_H__
#define __NOUVEAU_DISPBIOS_H__
-#include "nvreg.h"
-
#define DCB_MAX_NUM_ENTRIES 16
#define DCB_MAX_NUM_I2C_ENTRIES 16
#define DCB_MAX_NUM_GPIO_ENTRIES 32
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 4dd7ae2..4da776f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -32,7 +32,7 @@
#include "nouveau_reg.h"
#include "nouveau_drm.h"
-#include "nouveau_hw.h"
+#include "dispnv04/hw.h"
#include "nouveau_acpi.h"
#include "nouveau_display.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 4610c3a..7bf22d4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -28,7 +28,7 @@
#include <drm/drm_crtc_helper.h>
#include "nouveau_fbcon.h"
-#include "nouveau_hw.h"
+#include "dispnv04/hw.h"
#include "nouveau_crtc.h"
#include "nouveau_dma.h"
#include "nouveau_gem.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index c95decf..46c152f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -31,13 +31,13 @@
#include <core/gpuobj.h>
#include <core/class.h>
-#include <subdev/device.h>
-#include <subdev/vm.h>
-
+#include <engine/device.h>
#include <engine/disp.h>
+#include <engine/fifo.h>
+
+#include <subdev/vm.h>
#include "nouveau_drm.h"
-#include "nouveau_irq.h"
#include "nouveau_dma.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
@@ -165,7 +165,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
u32 arg0, arg1;
int ret;
- if (nouveau_noaccel)
+ if (nouveau_noaccel || !nouveau_fifo(device) /*XXX*/)
return;
/* initialise synchronisation routines */
@@ -365,10 +365,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto fail_bios;
- ret = nouveau_irq_init(dev);
- if (ret)
- goto fail_irq;
-
ret = nouveau_display_create(dev);
if (ret)
goto fail_dispctor;
@@ -388,8 +384,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
fail_dispinit:
nouveau_display_destroy(dev);
fail_dispctor:
- nouveau_irq_fini(dev);
-fail_irq:
nouveau_bios_takedown(dev);
fail_bios:
nouveau_ttm_fini(drm);
@@ -415,7 +409,6 @@ nouveau_drm_unload(struct drm_device *dev)
nouveau_display_fini(dev);
nouveau_display_destroy(dev);
- nouveau_irq_fini(dev);
nouveau_bios_takedown(dev);
nouveau_ttm_fini(drm);
@@ -533,7 +526,6 @@ nouveau_do_resume(struct drm_device *dev)
nouveau_fence(drm)->resume(drm);
nouveau_run_vbios_init(dev);
- nouveau_irq_postinstall(dev);
nouveau_pm_resume(dev);
if (dev->mode_config.num_crtc) {
@@ -669,8 +661,7 @@ static struct drm_driver
driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
- DRIVER_MODESET | DRIVER_PRIME,
+ DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
.load = nouveau_drm_load,
.unload = nouveau_drm_unload,
@@ -684,11 +675,6 @@ driver = {
.debugfs_cleanup = nouveau_debugfs_takedown,
#endif
- .irq_preinstall = nouveau_irq_preinstall,
- .irq_postinstall = nouveau_irq_postinstall,
- .irq_uninstall = nouveau_irq_uninstall,
- .irq_handler = nouveau_irq_handler,
-
.get_vblank_counter = drm_vblank_count,
.enable_vblank = nouveau_drm_vblank_enable,
.disable_vblank = nouveau_drm_vblank_disable,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 9c39baf..f2b30f8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -10,7 +10,18 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 1
-#define DRIVER_PATCHLEVEL 0
+#define DRIVER_PATCHLEVEL 1
+
+/*
+ * 1.1.1:
+ * - added support for tiled system memory buffer objects
+ * - added support for NOUVEAU_GETPARAM_GRAPH_UNITS on [nvc0,nve0].
+ * - added support for compressed memory storage types on [nvc0,nve0].
+ * - added support for software methods 0x600,0x644,0x6ac on nvc0
+ * to control registers on the MPs to enable performance counters,
+ * and to control the warp error enable mask (OpenGL requires out of
+ * bounds access to local memory to be silently ignored / return 0).
+ */
#include <core/client.h>
#include <core/event.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index e243412..24660c0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -30,7 +30,7 @@
#include <subdev/bios/dcb.h>
#include <drm/drm_encoder_slave.h>
-#include "nv04_display.h"
+#include "dispnv04/disp.h"
#define NV_DPMS_CLEARED 0x80
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
deleted file mode 100644
index 1303680..0000000
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <subdev/mc.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_irq.h"
-#include "nv50_display.h"
-
-void
-nouveau_irq_preinstall(struct drm_device *dev)
-{
- nv_wr32(nouveau_dev(dev), 0x000140, 0x00000000);
-}
-
-int
-nouveau_irq_postinstall(struct drm_device *dev)
-{
- nv_wr32(nouveau_dev(dev), 0x000140, 0x00000001);
- return 0;
-}
-
-void
-nouveau_irq_uninstall(struct drm_device *dev)
-{
- nv_wr32(nouveau_dev(dev), 0x000140, 0x00000000);
-}
-
-irqreturn_t
-nouveau_irq_handler(DRM_IRQ_ARGS)
-{
- struct drm_device *dev = arg;
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_mc *pmc = nouveau_mc(device);
- u32 stat;
-
- stat = nv_rd32(device, 0x000100);
- if (stat == 0 || stat == ~0)
- return IRQ_NONE;
-
- nv_subdev(pmc)->intr(nv_subdev(pmc));
- return IRQ_HANDLED;
-}
-
-int
-nouveau_irq_init(struct drm_device *dev)
-{
- return drm_irq_install(dev);
-}
-
-void
-nouveau_irq_fini(struct drm_device *dev)
-{
- drm_irq_uninstall(dev);
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.h b/drivers/gpu/drm/nouveau/nouveau_irq.h
deleted file mode 100644
index 06714ad..0000000
--- a/drivers/gpu/drm/nouveau/nouveau_irq.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __NOUVEAU_IRQ_H__
-#define __NOUVEAU_IRQ_H__
-
-extern int nouveau_irq_init(struct drm_device *);
-extern void nouveau_irq_fini(struct drm_device *);
-extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
-extern void nouveau_irq_preinstall(struct drm_device *);
-extern int nouveau_irq_postinstall(struct drm_device *);
-extern void nouveau_irq_uninstall(struct drm_device *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 9be9cb5..f19a15a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -35,14 +35,16 @@
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
- /* nothing to do */
+ struct nouveau_drm *drm = nouveau_bdev(man->bdev);
+ struct nouveau_fb *pfb = nouveau_fb(drm->device);
+ man->priv = pfb;
return 0;
}
static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
- /* nothing to do */
+ man->priv = NULL;
return 0;
}
@@ -104,7 +106,8 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
static void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
- struct nouveau_mm *mm = man->priv;
+ struct nouveau_fb *pfb = man->priv;
+ struct nouveau_mm *mm = &pfb->vram;
struct nouveau_mm_node *r;
u32 total = 0, free = 0;
@@ -161,6 +164,8 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_mem *node;
if (unlikely((mem->num_pages << PAGE_SHIFT) >= 512 * 1024 * 1024))
@@ -171,6 +176,20 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
return -ENOMEM;
node->page_shift = 12;
+ switch (nv_device(drm->device)->card_type) {
+ case NV_50:
+ if (nv_device(drm->device)->chipset != 0x50)
+ node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
+ break;
+ case NV_C0:
+ case NV_D0:
+ case NV_E0:
+ node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
+ break;
+ default:
+ break;
+ }
+
mem->mm_node = node;
mem->start = 0;
return 0;
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
index 2a0cc9d..27afc0e 100644
--- a/drivers/gpu/drm/nouveau/nv04_pm.c
+++ b/drivers/gpu/drm/nouveau/nv04_pm.c
@@ -25,7 +25,7 @@
#include <drm/drmP.h>
#include "nouveau_drm.h"
#include "nouveau_reg.h"
-#include "nouveau_hw.h"
+#include "dispnv04/hw.h"
#include "nouveau_pm.h"
#include <subdev/bios/pll.h>
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index 3382064..3af5bcd 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -26,7 +26,7 @@
#include "nouveau_drm.h"
#include "nouveau_bios.h"
#include "nouveau_pm.h"
-#include "nouveau_hw.h"
+#include "dispnv04/hw.h"
#include <subdev/bios/pll.h>
#include <subdev/clock.h>
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 1ddc03e..ebf0a68 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -2174,6 +2174,7 @@ int
nv50_display_create(struct drm_device *dev)
{
static const u16 oclass[] = {
+ NVF0_DISP_CLASS,
NVE0_DISP_CLASS,
NVD0_DISP_CLASS,
NVA3_DISP_CLASS,
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index 8bd5d27..69620e3 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -25,7 +25,7 @@
#include <drm/drmP.h>
#include "nouveau_drm.h"
#include "nouveau_bios.h"
-#include "nouveau_hw.h"
+#include "dispnv04/hw.h"
#include "nouveau_pm.h"
#include "nouveau_hwsq.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index c451c41..912759d 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -110,6 +110,11 @@ static enum drm_connector_status omap_connector_detect(
ret = connector_status_connected;
else
ret = connector_status_disconnected;
+ } else if (dssdev->type == OMAP_DISPLAY_TYPE_DPI ||
+ dssdev->type == OMAP_DISPLAY_TYPE_DBI ||
+ dssdev->type == OMAP_DISPLAY_TYPE_SDI ||
+ dssdev->type == OMAP_DISPLAY_TYPE_DSI) {
+ ret = connector_status_connected;
} else {
ret = connector_status_unknown;
}
@@ -189,12 +194,30 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
struct omap_video_timings timings = {0};
struct drm_device *dev = connector->dev;
struct drm_display_mode *new_mode;
- int ret = MODE_BAD;
+ int r, ret = MODE_BAD;
copy_timings_drm_to_omap(&timings, mode);
mode->vrefresh = drm_mode_vrefresh(mode);
- if (!dssdrv->check_timings(dssdev, &timings)) {
+ /*
+ * if the panel driver doesn't have a check_timings, it's most likely
+ * a fixed resolution panel; check if the timings match the
+ * panel's timings
+ */
+ if (dssdrv->check_timings) {
+ r = dssdrv->check_timings(dssdev, &timings);
+ } else {
+ struct omap_video_timings t = {0};
+
+ dssdrv->get_timings(dssdev, &t);
+
+ if (memcmp(&timings, &t, sizeof(struct omap_video_timings)))
+ r = -EINVAL;
+ else
+ r = 0;
+ }
+
+ if (!r) {
/* check if vrefresh is still valid */
new_mode = drm_mode_duplicate(dev, mode);
new_mode->clock = timings.pixel_clock;
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index bec66a4..79b200a 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -74,6 +74,13 @@ struct omap_crtc {
struct work_struct page_flip_work;
};
+uint32_t pipe2vbl(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+ return dispc_mgr_get_vsync_irq(omap_crtc->channel);
+}
+
/*
* Manager-ops, callbacks from output when they need to configure
* the upstream part of the video pipe.
@@ -613,7 +620,13 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
omap_crtc->apply.pre_apply = omap_crtc_pre_apply;
omap_crtc->apply.post_apply = omap_crtc_post_apply;
- omap_crtc->apply_irq.irqmask = pipe2vbl(id);
+ omap_crtc->channel = channel;
+ omap_crtc->plane = plane;
+ omap_crtc->plane->crtc = crtc;
+ omap_crtc->name = channel_names[channel];
+ omap_crtc->pipe = id;
+
+ omap_crtc->apply_irq.irqmask = pipe2vbl(crtc);
omap_crtc->apply_irq.irq = omap_crtc_apply_irq;
omap_crtc->error_irq.irqmask =
@@ -621,12 +634,6 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
omap_crtc->error_irq.irq = omap_crtc_error_irq;
omap_irq_register(dev, &omap_crtc->error_irq);
- omap_crtc->channel = channel;
- omap_crtc->plane = plane;
- omap_crtc->plane->crtc = crtc;
- omap_crtc->name = channel_names[channel];
- omap_crtc->pipe = id;
-
/* temporary: */
omap_crtc->mgr.id = channel;
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 079c54c..9c53c25 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -74,54 +74,53 @@ static int get_connector_type(struct omap_dss_device *dssdev)
}
}
+static bool channel_used(struct drm_device *dev, enum omap_channel channel)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < priv->num_crtcs; i++) {
+ struct drm_crtc *crtc = priv->crtcs[i];
+
+ if (omap_crtc_channel(crtc) == channel)
+ return true;
+ }
+
+ return false;
+}
+
static int omap_modeset_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_dss_device *dssdev = NULL;
int num_ovls = dss_feat_get_num_ovls();
- int id;
+ int num_mgrs = dss_feat_get_num_mgrs();
+ int num_crtcs;
+ int i, id = 0;
drm_mode_config_init(dev);
omap_drm_irq_install(dev);
/*
- * Create private planes and CRTCs for the last NUM_CRTCs overlay
- * plus manager:
+ * We usually don't want to create a CRTC for each manager, at least
+ * not until we have a way to expose private planes to userspace.
+ * Otherwise there would not be enough video pipes left for drm planes.
+ * We use the num_crtc argument to limit the number of crtcs we create.
*/
- for (id = 0; id < min(num_crtc, num_ovls); id++) {
- struct drm_plane *plane;
- struct drm_crtc *crtc;
-
- plane = omap_plane_init(dev, id, true);
- crtc = omap_crtc_init(dev, plane, pipe2chan(id), id);
+ num_crtcs = min3(num_crtc, num_mgrs, num_ovls);
- BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
- priv->crtcs[id] = crtc;
- priv->num_crtcs++;
-
- priv->planes[id] = plane;
- priv->num_planes++;
- }
-
- /*
- * Create normal planes for the remaining overlays:
- */
- for (; id < num_ovls; id++) {
- struct drm_plane *plane = omap_plane_init(dev, id, false);
-
- BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
- priv->planes[priv->num_planes++] = plane;
- }
+ dssdev = NULL;
for_each_dss_dev(dssdev) {
struct drm_connector *connector;
struct drm_encoder *encoder;
+ enum omap_channel channel;
if (!dssdev->driver) {
dev_warn(dev->dev, "%s has no driver.. skipping it\n",
dssdev->name);
- return 0;
+ continue;
}
if (!(dssdev->driver->get_timings ||
@@ -129,7 +128,7 @@ static int omap_modeset_init(struct drm_device *dev)
dev_warn(dev->dev, "%s driver does not support "
"get_timings or read_edid.. skipping it!\n",
dssdev->name);
- return 0;
+ continue;
}
encoder = omap_encoder_init(dev, dssdev);
@@ -157,16 +156,118 @@ static int omap_modeset_init(struct drm_device *dev)
drm_mode_connector_attach_encoder(connector, encoder);
+ /*
+ * if we have reached the limit of the crtcs we are allowed to
+ * create, let's not try to look for a crtc for this
+ * panel/encoder and onwards, we will, of course, populate the
+ * possible_crtcs field for all the encoders with the final

+ * set of crtcs we create
+ */
+ if (id == num_crtcs)
+ continue;
+
+ /*
+ * get the recommended DISPC channel for this encoder. For now,
+ * we only try to create a crtc out of the recommended channel, the
+ * other possible channels to which the encoder can connect are
+ * not considered.
+ */
+ channel = dssdev->output->dispc_channel;
+
+ /*
+ * if this channel hasn't already been taken by a previously
+ * allocated crtc, we create a new crtc for it
+ */
+ if (!channel_used(dev, channel)) {
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+
+ plane = omap_plane_init(dev, id, true);
+ crtc = omap_crtc_init(dev, plane, channel, id);
+
+ BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
+ priv->crtcs[id] = crtc;
+ priv->num_crtcs++;
+
+ priv->planes[id] = plane;
+ priv->num_planes++;
+
+ id++;
+ }
+ }
+
+ /*
+ * we have allocated crtcs according to the needs of the panels/encoders;
+ * add more crtcs here if needed
+ */
+ for (; id < num_crtcs; id++) {
+
+ /* find a free manager for this crtc */
+ for (i = 0; i < num_mgrs; i++) {
+ if (!channel_used(dev, i)) {
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+
+ plane = omap_plane_init(dev, id, true);
+ crtc = omap_crtc_init(dev, plane, i, id);
+
+ BUG_ON(priv->num_crtcs >=
+ ARRAY_SIZE(priv->crtcs));
+
+ priv->crtcs[id] = crtc;
+ priv->num_crtcs++;
+
+ priv->planes[id] = plane;
+ priv->num_planes++;
+
+ break;
+ } else {
+ continue;
+ }
+ }
+
+ if (i == num_mgrs) {
+ /* this shouldn't really happen */
+ dev_err(dev->dev, "no managers left for crtc\n");
+ return -ENOMEM;
+ }
+ }
+
+ /*
+ * Create normal planes for the remaining overlays:
+ */
+ for (; id < num_ovls; id++) {
+ struct drm_plane *plane = omap_plane_init(dev, id, false);
+
+ BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
+ priv->planes[priv->num_planes++] = plane;
+ }
+
+ for (i = 0; i < priv->num_encoders; i++) {
+ struct drm_encoder *encoder = priv->encoders[i];
+ struct omap_dss_device *dssdev =
+ omap_encoder_get_dssdev(encoder);
+
/* figure out which crtc's we can connect the encoder to: */
encoder->possible_crtcs = 0;
for (id = 0; id < priv->num_crtcs; id++) {
- enum omap_dss_output_id supported_outputs =
- dss_feat_get_supported_outputs(pipe2chan(id));
+ struct drm_crtc *crtc = priv->crtcs[id];
+ enum omap_channel crtc_channel;
+ enum omap_dss_output_id supported_outputs;
+
+ crtc_channel = omap_crtc_channel(crtc);
+ supported_outputs =
+ dss_feat_get_supported_outputs(crtc_channel);
+
if (supported_outputs & dssdev->output->id)
encoder->possible_crtcs |= (1 << id);
}
}
+ DBG("registered %d planes, %d crtcs, %d encoders and %d connectors\n",
+ priv->num_planes, priv->num_crtcs, priv->num_encoders,
+ priv->num_connectors);
+
dev->mode_config.min_width = 32;
dev->mode_config.min_height = 32;
@@ -303,7 +404,7 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
return ret;
}
-struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
+static struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
@@ -567,7 +668,7 @@ static const struct dev_pm_ops omapdrm_pm_ops = {
};
#endif
-struct platform_driver pdev = {
+static struct platform_driver pdev = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
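The omap_modeset_init() rework above follows a simple allocation policy: cap the number of CRTCs at min3(num_crtc, managers, overlays), let each encoder claim its preferred DISPC channel first, back any remaining CRTCs with whatever managers are left, and turn the leftover overlays into plain planes. A standalone sketch of that policy (illustrative values and names, not the omapdrm API):

/* Illustrative sketch of the crtc/channel allocation policy above;
 * standalone C, not the omapdrm code. */
#include <stdio.h>
#include <stdbool.h>

#define MIN3(a, b, c) ((a) < (b) ? ((a) < (c) ? (a) : (c)) \
                                 : ((b) < (c) ? (b) : (c)))

int main(void)
{
    int num_crtc_arg = 2;            /* stand-in for the num_crtc parameter */
    int num_mgrs = 3, num_ovls = 4;  /* hypothetical hardware capabilities */
    int preferred[] = { 1, 1 };      /* each encoder's preferred channel */
    int num_encoders = 2;

    int num_crtcs = MIN3(num_crtc_arg, num_mgrs, num_ovls);
    bool used[8] = { false };
    int id = 0;

    /* pass 1: give each encoder its preferred channel if still free */
    for (int e = 0; e < num_encoders && id < num_crtcs; e++) {
        int ch = preferred[e];
        if (!used[ch]) {
            used[ch] = true;
            printf("crtc %d -> channel %d (preferred by encoder %d)\n",
                   id++, ch, e);
        }
    }

    /* pass 2: back the remaining crtcs with any unused manager */
    for (; id < num_crtcs; id++) {
        for (int ch = 0; ch < num_mgrs; ch++) {
            if (!used[ch]) {
                used[ch] = true;
                printf("crtc %d -> channel %d (fallback)\n", id, ch);
                break;
            }
        }
    }

    /* the remaining overlays become regular drm planes */
    for (int p = num_crtcs; p < num_ovls; p++)
        printf("overlay %d -> drm plane\n", p);
    return 0;
}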
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index d4f997b..215a20d 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -139,8 +139,8 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m);
int omap_gem_resume(struct device *dev);
#endif
-int omap_irq_enable_vblank(struct drm_device *dev, int crtc);
-void omap_irq_disable_vblank(struct drm_device *dev, int crtc);
+int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id);
+void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id);
irqreturn_t omap_irq_handler(DRM_IRQ_ARGS);
void omap_irq_preinstall(struct drm_device *dev);
int omap_irq_postinstall(struct drm_device *dev);
@@ -271,39 +271,9 @@ static inline int align_pitch(int pitch, int width, int bpp)
return ALIGN(pitch, 8 * bytespp);
}
-static inline enum omap_channel pipe2chan(int pipe)
-{
- int num_mgrs = dss_feat_get_num_mgrs();
-
- /*
- * We usually don't want to create a CRTC for each manager,
- * at least not until we have a way to expose private planes
- * to userspace. Otherwise there would not be enough video
- * pipes left for drm planes. The higher #'d managers tend
- * to have more features so start in reverse order.
- */
- return num_mgrs - pipe - 1;
-}
-
/* map crtc to vblank mask */
-static inline uint32_t pipe2vbl(int crtc)
-{
- enum omap_channel channel = pipe2chan(crtc);
- return dispc_mgr_get_vsync_irq(channel);
-}
-
-static inline int crtc2pipe(struct drm_device *dev, struct drm_crtc *crtc)
-{
- struct omap_drm_private *priv = dev->dev_private;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(priv->crtcs); i++)
- if (priv->crtcs[i] == crtc)
- return i;
-
- BUG(); /* bogus CRTC ptr */
- return -1;
-}
+uint32_t pipe2vbl(struct drm_crtc *crtc);
+struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder);
/* should these be made into common util helpers?
*/
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 21d126d..c29451b 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -41,6 +41,13 @@ struct omap_encoder {
struct omap_dss_device *dssdev;
};
+struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+
+ return omap_encoder->dssdev;
+}
+
static void omap_encoder_destroy(struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
@@ -128,13 +135,26 @@ int omap_encoder_update(struct drm_encoder *encoder,
dssdev->output->manager = mgr;
- ret = dssdrv->check_timings(dssdev, timings);
+ if (dssdrv->check_timings) {
+ ret = dssdrv->check_timings(dssdev, timings);
+ } else {
+ struct omap_video_timings t = {0};
+
+ dssdrv->get_timings(dssdev, &t);
+
+ if (memcmp(timings, &t, sizeof(struct omap_video_timings)))
+ ret = -EINVAL;
+ else
+ ret = 0;
+ }
+
if (ret) {
dev_err(dev->dev, "could not set timings: %d\n", ret);
return ret;
}
- dssdrv->set_timings(dssdev, timings);
+ if (dssdrv->set_timings)
+ dssdrv->set_timings(dssdev, timings);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index ac74d1b..be7cd97 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -178,7 +178,7 @@ out_unlock:
return omap_gem_mmap_obj(obj, vma);
}
-struct dma_buf_ops omap_dmabuf_ops = {
+static struct dma_buf_ops omap_dmabuf_ops = {
.map_dma_buf = omap_gem_map_dma_buf,
.unmap_dma_buf = omap_gem_unmap_dma_buf,
.release = omap_gem_dmabuf_release,
@@ -212,7 +212,6 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
* refcount on gem itself instead of f_count of dmabuf.
*/
drm_gem_object_reference(obj);
- dma_buf_put(buffer);
return obj;
}
}
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index e01303e..9263db1 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -130,12 +130,13 @@ int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
* Zero on success, appropriate errno if the given @crtc's vblank
* interrupt cannot be enabled.
*/
-int omap_irq_enable_vblank(struct drm_device *dev, int crtc)
+int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id)
{
struct omap_drm_private *priv = dev->dev_private;
+ struct drm_crtc *crtc = priv->crtcs[crtc_id];
unsigned long flags;
- DBG("dev=%p, crtc=%d", dev, crtc);
+ DBG("dev=%p, crtc=%d", dev, crtc_id);
dispc_runtime_get();
spin_lock_irqsave(&list_lock, flags);
@@ -156,12 +157,13 @@ int omap_irq_enable_vblank(struct drm_device *dev, int crtc)
* a hardware vblank counter, this routine should be a no-op, since
* interrupts will have to stay on to keep the count accurate.
*/
-void omap_irq_disable_vblank(struct drm_device *dev, int crtc)
+void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id)
{
struct omap_drm_private *priv = dev->dev_private;
+ struct drm_crtc *crtc = priv->crtcs[crtc_id];
unsigned long flags;
- DBG("dev=%p, crtc=%d", dev, crtc);
+ DBG("dev=%p, crtc=%d", dev, crtc_id);
dispc_runtime_get();
spin_lock_irqsave(&list_lock, flags);
@@ -186,9 +188,12 @@ irqreturn_t omap_irq_handler(DRM_IRQ_ARGS)
VERB("irqs: %08x", irqstatus);
- for (id = 0; id < priv->num_crtcs; id++)
- if (irqstatus & pipe2vbl(id))
+ for (id = 0; id < priv->num_crtcs; id++) {
+ struct drm_crtc *crtc = priv->crtcs[id];
+
+ if (irqstatus & pipe2vbl(crtc))
drm_handle_vblank(dev, id);
+ }
spin_lock_irqsave(&list_lock, flags);
list_for_each_entry_safe(handler, n, &priv->irq_list, node) {
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 2882cda..8d225d7 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -247,6 +247,12 @@ static int omap_plane_update(struct drm_plane *plane,
{
struct omap_plane *omap_plane = to_omap_plane(plane);
omap_plane->enabled = true;
+
+ if (plane->fb)
+ drm_framebuffer_unreference(plane->fb);
+
+ drm_framebuffer_reference(fb);
+
return omap_plane_mode_set(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h,
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
new file mode 100644
index 0000000..2f1a57e
--- /dev/null
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -0,0 +1,10 @@
+config DRM_QXL
+ tristate "QXL virtual GPU"
+ depends on DRM && PCI
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select DRM_KMS_HELPER
+ select DRM_TTM
+ help
+ QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting.
diff --git a/drivers/gpu/drm/qxl/Makefile b/drivers/gpu/drm/qxl/Makefile
new file mode 100644
index 0000000..ea046ba
--- /dev/null
+++ b/drivers/gpu/drm/qxl/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+
+qxl-y := qxl_drv.o qxl_kms.o qxl_display.o qxl_ttm.o qxl_fb.o qxl_object.o qxl_gem.o qxl_cmd.o qxl_image.o qxl_draw.o qxl_debugfs.o qxl_irq.o qxl_dumb.o qxl_ioctl.o qxl_fence.o qxl_release.o
+
+obj-$(CONFIG_DRM_QXL)+= qxl.o
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
new file mode 100644
index 0000000..08b0823
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -0,0 +1,685 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alon Levy
+ */
+
+/* QXL cmd/ring handling */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
+
+struct ring {
+ struct qxl_ring_header header;
+ uint8_t elements[0];
+};
+
+struct qxl_ring {
+ struct ring *ring;
+ int element_size;
+ int n_elements;
+ int prod_notify;
+ wait_queue_head_t *push_event;
+ spinlock_t lock;
+};
+
+void qxl_ring_free(struct qxl_ring *ring)
+{
+ kfree(ring);
+}
+
+struct qxl_ring *
+qxl_ring_create(struct qxl_ring_header *header,
+ int element_size,
+ int n_elements,
+ int prod_notify,
+ bool set_prod_notify,
+ wait_queue_head_t *push_event)
+{
+ struct qxl_ring *ring;
+
+ ring = kmalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ return NULL;
+
+ ring->ring = (struct ring *)header;
+ ring->element_size = element_size;
+ ring->n_elements = n_elements;
+ ring->prod_notify = prod_notify;
+ ring->push_event = push_event;
+ if (set_prod_notify)
+ header->notify_on_prod = ring->n_elements;
+ spin_lock_init(&ring->lock);
+ return ring;
+}
+
+static int qxl_check_header(struct qxl_ring *ring)
+{
+ int ret;
+ struct qxl_ring_header *header = &(ring->ring->header);
+ unsigned long flags;
+ spin_lock_irqsave(&ring->lock, flags);
+ ret = header->prod - header->cons < header->num_items;
+ if (ret == 0)
+ header->notify_on_cons = header->cons + 1;
+ spin_unlock_irqrestore(&ring->lock, flags);
+ return ret;
+}
+
+static int qxl_check_idle(struct qxl_ring *ring)
+{
+ int ret;
+ struct qxl_ring_header *header = &(ring->ring->header);
+ unsigned long flags;
+ spin_lock_irqsave(&ring->lock, flags);
+ ret = header->prod == header->cons;
+ spin_unlock_irqrestore(&ring->lock, flags);
+ return ret;
+}
+
+int qxl_ring_push(struct qxl_ring *ring,
+ const void *new_elt, bool interruptible)
+{
+ struct qxl_ring_header *header = &(ring->ring->header);
+ uint8_t *elt;
+ int idx, ret;
+ unsigned long flags;
+ spin_lock_irqsave(&ring->lock, flags);
+ if (header->prod - header->cons == header->num_items) {
+ header->notify_on_cons = header->cons + 1;
+ mb();
+ spin_unlock_irqrestore(&ring->lock, flags);
+ if (!drm_can_sleep()) {
+ while (!qxl_check_header(ring))
+ udelay(1);
+ } else {
+ if (interruptible) {
+ ret = wait_event_interruptible(*ring->push_event,
+ qxl_check_header(ring));
+ if (ret)
+ return ret;
+ } else {
+ wait_event(*ring->push_event,
+ qxl_check_header(ring));
+ }
+
+ }
+ spin_lock_irqsave(&ring->lock, flags);
+ }
+
+ idx = header->prod & (ring->n_elements - 1);
+ elt = ring->ring->elements + idx * ring->element_size;
+
+ memcpy((void *)elt, new_elt, ring->element_size);
+
+ header->prod++;
+
+ mb();
+
+ if (header->prod == header->notify_on_prod)
+ outb(0, ring->prod_notify);
+
+ spin_unlock_irqrestore(&ring->lock, flags);
+ return 0;
+}
+
+static bool qxl_ring_pop(struct qxl_ring *ring,
+ void *element)
+{
+ volatile struct qxl_ring_header *header = &(ring->ring->header);
+ volatile uint8_t *ring_elt;
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&ring->lock, flags);
+ if (header->cons == header->prod) {
+ header->notify_on_prod = header->cons + 1;
+ spin_unlock_irqrestore(&ring->lock, flags);
+ return false;
+ }
+
+ idx = header->cons & (ring->n_elements - 1);
+ ring_elt = ring->ring->elements + idx * ring->element_size;
+
+ memcpy(element, (void *)ring_elt, ring->element_size);
+
+ header->cons++;
+
+ spin_unlock_irqrestore(&ring->lock, flags);
+ return true;
+}
+
+int
+qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
+ uint32_t type, bool interruptible)
+{
+ struct qxl_command cmd;
+
+ cmd.type = type;
+ cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
+
+ return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
+}
+
+int
+qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
+ uint32_t type, bool interruptible)
+{
+ struct qxl_command cmd;
+
+ cmd.type = type;
+ cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
+
+ return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
+}
+
+bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
+{
+ if (!qxl_check_idle(qdev->release_ring)) {
+ queue_work(qdev->gc_queue, &qdev->gc_work);
+ if (flush)
+ flush_work(&qdev->gc_work);
+ return true;
+ }
+ return false;
+}
+
+int qxl_garbage_collect(struct qxl_device *qdev)
+{
+ struct qxl_release *release;
+ uint64_t id, next_id;
+ int i = 0;
+ int ret;
+ union qxl_release_info *info;
+
+ while (qxl_ring_pop(qdev->release_ring, &id)) {
+ QXL_INFO(qdev, "popped %lld\n", id);
+ while (id) {
+ release = qxl_release_from_id_locked(qdev, id);
+ if (release == NULL)
+ break;
+
+ ret = qxl_release_reserve(qdev, release, false);
+ if (ret) {
+ qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id);
+ DRM_ERROR("failed to reserve release %lld\n", id);
+ }
+
+ info = qxl_release_map(qdev, release);
+ next_id = info->next;
+ qxl_release_unmap(qdev, release, info);
+
+ qxl_release_unreserve(qdev, release);
+ QXL_INFO(qdev, "popped %lld, next %lld\n", id,
+ next_id);
+
+ switch (release->type) {
+ case QXL_RELEASE_DRAWABLE:
+ case QXL_RELEASE_SURFACE_CMD:
+ case QXL_RELEASE_CURSOR_CMD:
+ break;
+ default:
+ DRM_ERROR("unexpected release type\n");
+ break;
+ }
+ id = next_id;
+
+ qxl_release_free(qdev, release);
+ ++i;
+ }
+ }
+
+ QXL_INFO(qdev, "%s: %lld\n", __func__, i);
+
+ return i;
+}
+
+int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
+ struct qxl_bo **_bo)
+{
+ struct qxl_bo *bo;
+ int ret;
+
+ ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
+ QXL_GEM_DOMAIN_VRAM, NULL, &bo);
+ if (ret) {
+ DRM_ERROR("failed to allocate VRAM BO\n");
+ return ret;
+ }
+ ret = qxl_bo_reserve(bo, false);
+ if (unlikely(ret != 0))
+ goto out_unref;
+
+ *_bo = bo;
+ return 0;
+out_unref:
+ qxl_bo_unref(&bo);
+ return ret;
+}
+
+static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port)
+{
+ int irq_num;
+ long addr = qdev->io_base + port;
+ int ret;
+
+ mutex_lock(&qdev->async_io_mutex);
+ irq_num = atomic_read(&qdev->irq_received_io_cmd);
+
+
+ if (qdev->last_sent_io_cmd > irq_num) {
+ ret = wait_event_interruptible(qdev->io_cmd_event,
+ atomic_read(&qdev->irq_received_io_cmd) > irq_num);
+ if (ret)
+ goto out;
+ irq_num = atomic_read(&qdev->irq_received_io_cmd);
+ }
+ outb(val, addr);
+ qdev->last_sent_io_cmd = irq_num + 1;
+ ret = wait_event_interruptible(qdev->io_cmd_event,
+ atomic_read(&qdev->irq_received_io_cmd) > irq_num);
+out:
+ mutex_unlock(&qdev->async_io_mutex);
+ return ret;
+}
+
+static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
+{
+ int ret;
+
+restart:
+ ret = wait_for_io_cmd_user(qdev, val, port);
+ if (ret == -ERESTARTSYS)
+ goto restart;
+}
+
+int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
+ const struct qxl_rect *area)
+{
+ int surface_id;
+ uint32_t surface_width, surface_height;
+ int ret;
+
+ if (!surf->hw_surf_alloc)
+ DRM_ERROR("got io update area with no hw surface\n");
+
+ if (surf->is_primary)
+ surface_id = 0;
+ else
+ surface_id = surf->surface_id;
+ surface_width = surf->surf.width;
+ surface_height = surf->surf.height;
+
+ if (area->left < 0 || area->top < 0 ||
+ area->right > surface_width || area->bottom > surface_height) {
+ qxl_io_log(qdev, "%s: not doing area update for "
+ "%d, (%d,%d,%d,%d) (%d,%d)\n", __func__, surface_id, area->left,
+ area->top, area->right, area->bottom, surface_width, surface_height);
+ return -EINVAL;
+ }
+ mutex_lock(&qdev->update_area_mutex);
+ qdev->ram_header->update_area = *area;
+ qdev->ram_header->update_surface = surface_id;
+ ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC);
+ mutex_unlock(&qdev->update_area_mutex);
+ return ret;
+}
+
+void qxl_io_notify_oom(struct qxl_device *qdev)
+{
+ outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
+}
+
+void qxl_io_flush_release(struct qxl_device *qdev)
+{
+ outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
+}
+
+void qxl_io_flush_surfaces(struct qxl_device *qdev)
+{
+ wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
+}
+
+
+void qxl_io_destroy_primary(struct qxl_device *qdev)
+{
+ wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
+}
+
+void qxl_io_create_primary(struct qxl_device *qdev, unsigned width,
+ unsigned height, unsigned offset, struct qxl_bo *bo)
+{
+ struct qxl_surface_create *create;
+
+ QXL_INFO(qdev, "%s: qdev %p, ram_header %p\n", __func__, qdev,
+ qdev->ram_header);
+ create = &qdev->ram_header->create_surface;
+ create->format = bo->surf.format;
+ create->width = width;
+ create->height = height;
+ create->stride = bo->surf.stride;
+ create->mem = qxl_bo_physical_address(qdev, bo, offset);
+
+ QXL_INFO(qdev, "%s: mem = %llx, from %p\n", __func__, create->mem,
+ bo->kptr);
+
+ create->flags = QXL_SURF_FLAG_KEEP_DATA;
+ create->type = QXL_SURF_TYPE_PRIMARY;
+
+ wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
+}
+
+void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
+{
+ QXL_INFO(qdev, "qxl_memslot_add %d\n", id);
+ wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
+}
+
+void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ vsnprintf(qdev->ram_header->log_buf, QXL_LOG_BUF_SIZE, fmt, args);
+ va_end(args);
+ /*
+ * DO not do a DRM output here - this will call printk, which will
+ * call back into qxl for rendering (qxl_fb)
+ */
+ outb(0, qdev->io_base + QXL_IO_LOG);
+}
+
+void qxl_io_reset(struct qxl_device *qdev)
+{
+ outb(0, qdev->io_base + QXL_IO_RESET);
+}
+
+void qxl_io_monitors_config(struct qxl_device *qdev)
+{
+ qxl_io_log(qdev, "%s: %d [%dx%d+%d+%d]\n", __func__,
+ qdev->monitors_config ?
+ qdev->monitors_config->count : -1,
+ qdev->monitors_config && qdev->monitors_config->count ?
+ qdev->monitors_config->heads[0].width : -1,
+ qdev->monitors_config && qdev->monitors_config->count ?
+ qdev->monitors_config->heads[0].height : -1,
+ qdev->monitors_config && qdev->monitors_config->count ?
+ qdev->monitors_config->heads[0].x : -1,
+ qdev->monitors_config && qdev->monitors_config->count ?
+ qdev->monitors_config->heads[0].y : -1
+ );
+
+ wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
+}
+
+int qxl_surface_id_alloc(struct qxl_device *qdev,
+ struct qxl_bo *surf)
+{
+ uint32_t handle;
+ int idr_ret;
+ int count = 0;
+again:
+ idr_preload(GFP_ATOMIC);
+ spin_lock(&qdev->surf_id_idr_lock);
+ idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
+ spin_unlock(&qdev->surf_id_idr_lock);
+ idr_preload_end();
+ if (idr_ret < 0)
+ return idr_ret;
+ handle = idr_ret;
+
+ if (handle >= qdev->rom->n_surfaces) {
+ count++;
+ spin_lock(&qdev->surf_id_idr_lock);
+ idr_remove(&qdev->surf_id_idr, handle);
+ spin_unlock(&qdev->surf_id_idr_lock);
+ qxl_reap_surface_id(qdev, 2);
+ goto again;
+ }
+ surf->surface_id = handle;
+
+ spin_lock(&qdev->surf_id_idr_lock);
+ qdev->last_alloced_surf_id = handle;
+ spin_unlock(&qdev->surf_id_idr_lock);
+ return 0;
+}
+
+void qxl_surface_id_dealloc(struct qxl_device *qdev,
+ uint32_t surface_id)
+{
+ spin_lock(&qdev->surf_id_idr_lock);
+ idr_remove(&qdev->surf_id_idr, surface_id);
+ spin_unlock(&qdev->surf_id_idr_lock);
+}
+
+int qxl_hw_surface_alloc(struct qxl_device *qdev,
+ struct qxl_bo *surf,
+ struct ttm_mem_reg *new_mem)
+{
+ struct qxl_surface_cmd *cmd;
+ struct qxl_release *release;
+ int ret;
+
+ if (surf->hw_surf_alloc)
+ return 0;
+
+ ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
+ NULL,
+ &release);
+ if (ret)
+ return ret;
+
+ cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
+ cmd->type = QXL_SURFACE_CMD_CREATE;
+ cmd->u.surface_create.format = surf->surf.format;
+ cmd->u.surface_create.width = surf->surf.width;
+ cmd->u.surface_create.height = surf->surf.height;
+ cmd->u.surface_create.stride = surf->surf.stride;
+ if (new_mem) {
+ int slot_id = surf->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
+ struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);
+
+ /* TODO - need to hold one of the locks to read tbo.offset */
+ cmd->u.surface_create.data = slot->high_bits;
+
+ cmd->u.surface_create.data |= (new_mem->start << PAGE_SHIFT) + surf->tbo.bdev->man[new_mem->mem_type].gpu_offset;
+ } else
+ cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
+ cmd->surface_id = surf->surface_id;
+ qxl_release_unmap(qdev, release, &cmd->release_info);
+
+ surf->surf_create = release;
+
+ /* no need to add a release to the fence for this bo,
+ since it is only released when we ask to destroy the surface
+ and it would never signal otherwise */
+ qxl_fence_releaseable(qdev, release);
+
+ qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
+
+ qxl_release_unreserve(qdev, release);
+
+ surf->hw_surf_alloc = true;
+ spin_lock(&qdev->surf_id_idr_lock);
+ idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
+ spin_unlock(&qdev->surf_id_idr_lock);
+ return 0;
+}
+
+int qxl_hw_surface_dealloc(struct qxl_device *qdev,
+ struct qxl_bo *surf)
+{
+ struct qxl_surface_cmd *cmd;
+ struct qxl_release *release;
+ int ret;
+ int id;
+
+ if (!surf->hw_surf_alloc)
+ return 0;
+
+ ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
+ surf->surf_create,
+ &release);
+ if (ret)
+ return ret;
+
+ surf->surf_create = NULL;
+ /* remove the surface from the idr, but not the surface id yet */
+ spin_lock(&qdev->surf_id_idr_lock);
+ idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
+ spin_unlock(&qdev->surf_id_idr_lock);
+ surf->hw_surf_alloc = false;
+
+ id = surf->surface_id;
+ surf->surface_id = 0;
+
+ release->surface_release_id = id;
+ cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
+ cmd->type = QXL_SURFACE_CMD_DESTROY;
+ cmd->surface_id = id;
+ qxl_release_unmap(qdev, release, &cmd->release_info);
+
+ qxl_fence_releaseable(qdev, release);
+
+ qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
+
+ qxl_release_unreserve(qdev, release);
+
+
+ return 0;
+}
+
+int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
+{
+ struct qxl_rect rect;
+ int ret;
+
+ /* if we are evicting, we need to make sure the surface is up
+ to date */
+ rect.left = 0;
+ rect.right = surf->surf.width;
+ rect.top = 0;
+ rect.bottom = surf->surf.height;
+retry:
+ ret = qxl_io_update_area(qdev, surf, &rect);
+ if (ret == -ERESTARTSYS)
+ goto retry;
+ return ret;
+}
+
+static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
+{
+ /* no need to update area if we are just freeing the surface normally */
+ if (do_update_area)
+ qxl_update_surface(qdev, surf);
+
+ /* nuke the surface id at the hw */
+ qxl_hw_surface_dealloc(qdev, surf);
+}
+
+void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
+{
+ mutex_lock(&qdev->surf_evict_mutex);
+ qxl_surface_evict_locked(qdev, surf, do_update_area);
+ mutex_unlock(&qdev->surf_evict_mutex);
+}
+
+static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
+{
+ int ret;
+
+ ret = qxl_bo_reserve(surf, false);
+ if (ret == -EBUSY)
+ return -EBUSY;
+
+ if (surf->fence.num_active_releases > 0 && stall == false) {
+ qxl_bo_unreserve(surf);
+ return -EBUSY;
+ }
+
+ if (stall)
+ mutex_unlock(&qdev->surf_evict_mutex);
+
+ spin_lock(&surf->tbo.bdev->fence_lock);
+ ret = ttm_bo_wait(&surf->tbo, true, true, !stall);
+ spin_unlock(&surf->tbo.bdev->fence_lock);
+
+ if (stall)
+ mutex_lock(&qdev->surf_evict_mutex);
+ if (ret == -EBUSY) {
+ qxl_bo_unreserve(surf);
+ return -EBUSY;
+ }
+
+ qxl_surface_evict_locked(qdev, surf, true);
+ qxl_bo_unreserve(surf);
+ return 0;
+}
+
+static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
+{
+ int num_reaped = 0;
+ int i, ret;
+ bool stall = false;
+ int start = 0;
+
+ mutex_lock(&qdev->surf_evict_mutex);
+again:
+
+ spin_lock(&qdev->surf_id_idr_lock);
+ start = qdev->last_alloced_surf_id + 1;
+ spin_unlock(&qdev->surf_id_idr_lock);
+
+ for (i = start; i < start + qdev->rom->n_surfaces; i++) {
+ void *objptr;
+ int surfid = i % qdev->rom->n_surfaces;
+
+ /* this avoids the case where the object is in the
+ idr but has been evicted half way - it makes
+ the idr lookup atomic with the eviction */
+ spin_lock(&qdev->surf_id_idr_lock);
+ objptr = idr_find(&qdev->surf_id_idr, surfid);
+ spin_unlock(&qdev->surf_id_idr_lock);
+
+ if (!objptr)
+ continue;
+
+ ret = qxl_reap_surf(qdev, objptr, stall);
+ if (ret == 0)
+ num_reaped++;
+ if (num_reaped >= max_to_reap)
+ break;
+ }
+ if (num_reaped == 0 && stall == false) {
+ stall = true;
+ goto again;
+ }
+
+ mutex_unlock(&qdev->surf_evict_mutex);
+ if (num_reaped) {
+ usleep_range(500, 1000);
+ qxl_queue_garbage_collect(qdev, true);
+ }
+
+ return 0;
+}
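An illustrative userspace sketch, not part of the patch, modelling only the counter bookkeeping behind wait_for_io_cmd_user() above: the driver remembers how many async commands it has issued versus how many completion interrupts it has seen, and only writes the next command to the I/O port once the previous one has completed. The names io_state, device_complete_one() and send_async_cmd() are invented for the example and do not exist in the driver.

#include <stdio.h>

struct io_state {
    int last_sent;     /* async commands written to the I/O port */
    int irq_received;  /* completion interrupts observed so far  */
};

/* stand-in for the device completing one outstanding async command */
static void device_complete_one(struct io_state *s)
{
    if (s->irq_received < s->last_sent)
        s->irq_received++;
}

/* mirrors the "qdev->last_sent_io_cmd > irq_num" test in the driver */
static int previous_cmd_pending(const struct io_state *s)
{
    return s->last_sent > s->irq_received;
}

static void send_async_cmd(struct io_state *s, const char *name)
{
    while (previous_cmd_pending(s)) {
        printf("previous command still pending, waiting\n");
        device_complete_one(s);   /* in the driver this happens in the IRQ handler */
    }
    printf("sending %s\n", name);
    s->last_sent++;               /* outb(val, addr) would go here */
}

int main(void)
{
    struct io_state s = { 0, 0 };

    send_async_cmd(&s, "QXL_IO_CREATE_PRIMARY_ASYNC");
    send_async_cmd(&s, "QXL_IO_MONITORS_CONFIG_ASYNC");
    device_complete_one(&s);
    printf("outstanding commands: %d\n", s.last_sent - s.irq_received);
    return 0;
}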
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
new file mode 100644
index 0000000..c3c2bbd
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ * Alon Levy <alevy@redhat.com>
+ */
+
+#include <linux/debugfs.h>
+
+#include "drmP.h"
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+
+#if defined(CONFIG_DEBUG_FS)
+static int
+qxl_debugfs_irq_received(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct qxl_device *qdev = node->minor->dev->dev_private;
+
+ seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
+ seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
+ seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
+ seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
+ seq_printf(m, "%d\n", qdev->irq_received_error);
+ return 0;
+}
+
+static int
+qxl_debugfs_buffers_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct qxl_device *qdev = node->minor->dev->dev_private;
+ struct qxl_bo *bo;
+
+ list_for_each_entry(bo, &qdev->gem.objects, list) {
+ seq_printf(m, "size %ld, pc %d, sync obj %p, num releases %d\n",
+ (unsigned long)bo->gem_base.size, bo->pin_count,
+ bo->tbo.sync_obj, bo->fence.num_active_releases);
+ }
+ return 0;
+}
+
+static struct drm_info_list qxl_debugfs_list[] = {
+ { "irq_received", qxl_debugfs_irq_received, 0, NULL },
+ { "qxl_buffers", qxl_debugfs_buffers_info, 0, NULL },
+};
+#define QXL_DEBUGFS_ENTRIES ARRAY_SIZE(qxl_debugfs_list)
+#endif
+
+int
+qxl_debugfs_init(struct drm_minor *minor)
+{
+#if defined(CONFIG_DEBUG_FS)
+ drm_debugfs_create_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
+ minor->debugfs_root, minor);
+#endif
+ return 0;
+}
+
+void
+qxl_debugfs_takedown(struct drm_minor *minor)
+{
+#if defined(CONFIG_DEBUG_FS)
+ drm_debugfs_remove_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
+ minor);
+#endif
+}
+
+int qxl_debugfs_add_files(struct qxl_device *qdev,
+ struct drm_info_list *files,
+ unsigned nfiles)
+{
+ unsigned i;
+
+ for (i = 0; i < qdev->debugfs_count; i++) {
+ if (qdev->debugfs[i].files == files) {
+ /* Already registered */
+ return 0;
+ }
+ }
+
+ i = qdev->debugfs_count + 1;
+ if (i > QXL_DEBUGFS_MAX_COMPONENTS) {
+ DRM_ERROR("Reached maximum number of debugfs components.\n");
+ DRM_ERROR("Report so we increase QXL_DEBUGFS_MAX_COMPONENTS.\n");
+ return -EINVAL;
+ }
+ qdev->debugfs[qdev->debugfs_count].files = files;
+ qdev->debugfs[qdev->debugfs_count].num_files = nfiles;
+ qdev->debugfs_count = i;
+#if defined(CONFIG_DEBUG_FS)
+ drm_debugfs_create_files(files, nfiles,
+ qdev->ddev->control->debugfs_root,
+ qdev->ddev->control);
+ drm_debugfs_create_files(files, nfiles,
+ qdev->ddev->primary->debugfs_root,
+ qdev->ddev->primary);
+#endif
+ return 0;
+}
+
+void qxl_debugfs_remove_files(struct qxl_device *qdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ unsigned i;
+
+ for (i = 0; i < qdev->debugfs_count; i++) {
+ drm_debugfs_remove_files(qdev->debugfs[i].files,
+ qdev->debugfs[i].num_files,
+ qdev->ddev->control);
+ drm_debugfs_remove_files(qdev->debugfs[i].files,
+ qdev->debugfs[i].num_files,
+ qdev->ddev->primary);
+ }
+#endif
+}
diff --git a/drivers/gpu/drm/qxl/qxl_dev.h b/drivers/gpu/drm/qxl/qxl_dev.h
new file mode 100644
index 0000000..94c5aec
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_dev.h
@@ -0,0 +1,879 @@
+/*
+ Copyright (C) 2009 Red Hat, Inc.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS
+ IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+
+#ifndef H_QXL_DEV
+#define H_QXL_DEV
+
+#include <linux/types.h>
+
+/*
+ * from spice-protocol
+ * Release 0.10.0
+ */
+
+/* enums.h */
+
+enum SpiceImageType {
+ SPICE_IMAGE_TYPE_BITMAP,
+ SPICE_IMAGE_TYPE_QUIC,
+ SPICE_IMAGE_TYPE_RESERVED,
+ SPICE_IMAGE_TYPE_LZ_PLT = 100,
+ SPICE_IMAGE_TYPE_LZ_RGB,
+ SPICE_IMAGE_TYPE_GLZ_RGB,
+ SPICE_IMAGE_TYPE_FROM_CACHE,
+ SPICE_IMAGE_TYPE_SURFACE,
+ SPICE_IMAGE_TYPE_JPEG,
+ SPICE_IMAGE_TYPE_FROM_CACHE_LOSSLESS,
+ SPICE_IMAGE_TYPE_ZLIB_GLZ_RGB,
+ SPICE_IMAGE_TYPE_JPEG_ALPHA,
+
+ SPICE_IMAGE_TYPE_ENUM_END
+};
+
+enum SpiceBitmapFmt {
+ SPICE_BITMAP_FMT_INVALID,
+ SPICE_BITMAP_FMT_1BIT_LE,
+ SPICE_BITMAP_FMT_1BIT_BE,
+ SPICE_BITMAP_FMT_4BIT_LE,
+ SPICE_BITMAP_FMT_4BIT_BE,
+ SPICE_BITMAP_FMT_8BIT,
+ SPICE_BITMAP_FMT_16BIT,
+ SPICE_BITMAP_FMT_24BIT,
+ SPICE_BITMAP_FMT_32BIT,
+ SPICE_BITMAP_FMT_RGBA,
+
+ SPICE_BITMAP_FMT_ENUM_END
+};
+
+enum SpiceSurfaceFmt {
+ SPICE_SURFACE_FMT_INVALID,
+ SPICE_SURFACE_FMT_1_A,
+ SPICE_SURFACE_FMT_8_A = 8,
+ SPICE_SURFACE_FMT_16_555 = 16,
+ SPICE_SURFACE_FMT_32_xRGB = 32,
+ SPICE_SURFACE_FMT_16_565 = 80,
+ SPICE_SURFACE_FMT_32_ARGB = 96,
+
+ SPICE_SURFACE_FMT_ENUM_END
+};
+
+enum SpiceClipType {
+ SPICE_CLIP_TYPE_NONE,
+ SPICE_CLIP_TYPE_RECTS,
+
+ SPICE_CLIP_TYPE_ENUM_END
+};
+
+enum SpiceRopd {
+ SPICE_ROPD_INVERS_SRC = (1 << 0),
+ SPICE_ROPD_INVERS_BRUSH = (1 << 1),
+ SPICE_ROPD_INVERS_DEST = (1 << 2),
+ SPICE_ROPD_OP_PUT = (1 << 3),
+ SPICE_ROPD_OP_OR = (1 << 4),
+ SPICE_ROPD_OP_AND = (1 << 5),
+ SPICE_ROPD_OP_XOR = (1 << 6),
+ SPICE_ROPD_OP_BLACKNESS = (1 << 7),
+ SPICE_ROPD_OP_WHITENESS = (1 << 8),
+ SPICE_ROPD_OP_INVERS = (1 << 9),
+ SPICE_ROPD_INVERS_RES = (1 << 10),
+
+ SPICE_ROPD_MASK = 0x7ff
+};
+
+enum SpiceBrushType {
+ SPICE_BRUSH_TYPE_NONE,
+ SPICE_BRUSH_TYPE_SOLID,
+ SPICE_BRUSH_TYPE_PATTERN,
+
+ SPICE_BRUSH_TYPE_ENUM_END
+};
+
+enum SpiceCursorType {
+ SPICE_CURSOR_TYPE_ALPHA,
+ SPICE_CURSOR_TYPE_MONO,
+ SPICE_CURSOR_TYPE_COLOR4,
+ SPICE_CURSOR_TYPE_COLOR8,
+ SPICE_CURSOR_TYPE_COLOR16,
+ SPICE_CURSOR_TYPE_COLOR24,
+ SPICE_CURSOR_TYPE_COLOR32,
+
+ SPICE_CURSOR_TYPE_ENUM_END
+};
+
+/* qxl_dev.h */
+
+#pragma pack(push, 1)
+
+#define REDHAT_PCI_VENDOR_ID 0x1b36
+
+/* 0x100-0x11f reserved for spice, 0x1ff used for unstable work */
+#define QXL_DEVICE_ID_STABLE 0x0100
+
+enum {
+ QXL_REVISION_STABLE_V04 = 0x01,
+ QXL_REVISION_STABLE_V06 = 0x02,
+ QXL_REVISION_STABLE_V10 = 0x03,
+ QXL_REVISION_STABLE_V12 = 0x04,
+};
+
+#define QXL_DEVICE_ID_DEVEL 0x01ff
+#define QXL_REVISION_DEVEL 0x01
+
+#define QXL_ROM_MAGIC (*(uint32_t *)"QXRO")
+#define QXL_RAM_MAGIC (*(uint32_t *)"QXRA")
+
+enum {
+ QXL_RAM_RANGE_INDEX,
+ QXL_VRAM_RANGE_INDEX,
+ QXL_ROM_RANGE_INDEX,
+ QXL_IO_RANGE_INDEX,
+
+ QXL_PCI_RANGES
+};
+
+/* qxl-1 compat: append only */
+enum {
+ QXL_IO_NOTIFY_CMD,
+ QXL_IO_NOTIFY_CURSOR,
+ QXL_IO_UPDATE_AREA,
+ QXL_IO_UPDATE_IRQ,
+ QXL_IO_NOTIFY_OOM,
+ QXL_IO_RESET,
+ QXL_IO_SET_MODE, /* qxl-1 */
+ QXL_IO_LOG,
+ /* appended for qxl-2 */
+ QXL_IO_MEMSLOT_ADD,
+ QXL_IO_MEMSLOT_DEL,
+ QXL_IO_DETACH_PRIMARY,
+ QXL_IO_ATTACH_PRIMARY,
+ QXL_IO_CREATE_PRIMARY,
+ QXL_IO_DESTROY_PRIMARY,
+ QXL_IO_DESTROY_SURFACE_WAIT,
+ QXL_IO_DESTROY_ALL_SURFACES,
+ /* appended for qxl-3 */
+ QXL_IO_UPDATE_AREA_ASYNC,
+ QXL_IO_MEMSLOT_ADD_ASYNC,
+ QXL_IO_CREATE_PRIMARY_ASYNC,
+ QXL_IO_DESTROY_PRIMARY_ASYNC,
+ QXL_IO_DESTROY_SURFACE_ASYNC,
+ QXL_IO_DESTROY_ALL_SURFACES_ASYNC,
+ QXL_IO_FLUSH_SURFACES_ASYNC,
+ QXL_IO_FLUSH_RELEASE,
+ /* appended for qxl-4 */
+ QXL_IO_MONITORS_CONFIG_ASYNC,
+
+ QXL_IO_RANGE_SIZE
+};
+
+typedef uint64_t QXLPHYSICAL;
+typedef int32_t QXLFIXED; /* fixed 28.4 */
+
+struct qxl_point_fix {
+ QXLFIXED x;
+ QXLFIXED y;
+};
+
+struct qxl_point {
+ int32_t x;
+ int32_t y;
+};
+
+struct qxl_point_1_6 {
+ int16_t x;
+ int16_t y;
+};
+
+struct qxl_rect {
+ int32_t top;
+ int32_t left;
+ int32_t bottom;
+ int32_t right;
+};
+
+struct qxl_urect {
+ uint32_t top;
+ uint32_t left;
+ uint32_t bottom;
+ uint32_t right;
+};
+
+/* qxl-1 compat: append only */
+struct qxl_rom {
+ uint32_t magic;
+ uint32_t id;
+ uint32_t update_id;
+ uint32_t compression_level;
+ uint32_t log_level;
+ uint32_t mode; /* qxl-1 */
+ uint32_t modes_offset;
+ uint32_t num_io_pages;
+ uint32_t pages_offset; /* qxl-1 */
+ uint32_t draw_area_offset; /* qxl-1 */
+ uint32_t surface0_area_size; /* qxl-1 name: draw_area_size */
+ uint32_t ram_header_offset;
+ uint32_t mm_clock;
+ /* appended for qxl-2 */
+ uint32_t n_surfaces;
+ uint64_t flags;
+ uint8_t slots_start;
+ uint8_t slots_end;
+ uint8_t slot_gen_bits;
+ uint8_t slot_id_bits;
+ uint8_t slot_generation;
+ /* appended for qxl-4 */
+ uint8_t client_present;
+ uint8_t client_capabilities[58];
+ uint32_t client_monitors_config_crc;
+ struct {
+ uint16_t count;
+ uint16_t padding;
+ struct qxl_urect heads[64];
+ } client_monitors_config;
+};
+
+/* qxl-1 compat: fixed */
+struct qxl_mode {
+ uint32_t id;
+ uint32_t x_res;
+ uint32_t y_res;
+ uint32_t bits;
+ uint32_t stride;
+ uint32_t x_mili;
+ uint32_t y_mili;
+ uint32_t orientation;
+};
+
+/* qxl-1 compat: fixed */
+struct qxl_modes {
+ uint32_t n_modes;
+ struct qxl_mode modes[0];
+};
+
+/* qxl-1 compat: append only */
+enum qxl_cmd_type {
+ QXL_CMD_NOP,
+ QXL_CMD_DRAW,
+ QXL_CMD_UPDATE,
+ QXL_CMD_CURSOR,
+ QXL_CMD_MESSAGE,
+ QXL_CMD_SURFACE,
+};
+
+/* qxl-1 compat: fixed */
+struct qxl_command {
+ QXLPHYSICAL data;
+ uint32_t type;
+ uint32_t padding;
+};
+
+#define QXL_COMMAND_FLAG_COMPAT (1<<0)
+#define QXL_COMMAND_FLAG_COMPAT_16BPP (2<<0)
+
+struct qxl_command_ext {
+ struct qxl_command cmd;
+ uint32_t group_id;
+ uint32_t flags;
+};
+
+struct qxl_mem_slot {
+ uint64_t mem_start;
+ uint64_t mem_end;
+};
+
+#define QXL_SURF_TYPE_PRIMARY 0
+
+#define QXL_SURF_FLAG_KEEP_DATA (1 << 0)
+
+struct qxl_surface_create {
+ uint32_t width;
+ uint32_t height;
+ int32_t stride;
+ uint32_t format;
+ uint32_t position;
+ uint32_t mouse_mode;
+ uint32_t flags;
+ uint32_t type;
+ QXLPHYSICAL mem;
+};
+
+#define QXL_COMMAND_RING_SIZE 32
+#define QXL_CURSOR_RING_SIZE 32
+#define QXL_RELEASE_RING_SIZE 8
+
+#define QXL_LOG_BUF_SIZE 4096
+
+#define QXL_INTERRUPT_DISPLAY (1 << 0)
+#define QXL_INTERRUPT_CURSOR (1 << 1)
+#define QXL_INTERRUPT_IO_CMD (1 << 2)
+#define QXL_INTERRUPT_ERROR (1 << 3)
+#define QXL_INTERRUPT_CLIENT (1 << 4)
+#define QXL_INTERRUPT_CLIENT_MONITORS_CONFIG (1 << 5)
+
+struct qxl_ring_header {
+ uint32_t num_items;
+ uint32_t prod;
+ uint32_t notify_on_prod;
+ uint32_t cons;
+ uint32_t notify_on_cons;
+};
+
+/* qxl-1 compat: append only */
+struct qxl_ram_header {
+ uint32_t magic;
+ uint32_t int_pending;
+ uint32_t int_mask;
+ uint8_t log_buf[QXL_LOG_BUF_SIZE];
+ struct qxl_ring_header cmd_ring_hdr;
+ struct qxl_command cmd_ring[QXL_COMMAND_RING_SIZE];
+ struct qxl_ring_header cursor_ring_hdr;
+ struct qxl_command cursor_ring[QXL_CURSOR_RING_SIZE];
+ struct qxl_ring_header release_ring_hdr;
+ uint64_t release_ring[QXL_RELEASE_RING_SIZE];
+ struct qxl_rect update_area;
+ /* appended for qxl-2 */
+ uint32_t update_surface;
+ struct qxl_mem_slot mem_slot;
+ struct qxl_surface_create create_surface;
+ uint64_t flags;
+
+ /* appended for qxl-4 */
+
+ /* used by QXL_IO_MONITORS_CONFIG_ASYNC */
+ QXLPHYSICAL monitors_config;
+ uint8_t guest_capabilities[64];
+};
+
+union qxl_release_info {
+ uint64_t id; /* in */
+ uint64_t next; /* out */
+};
+
+struct qxl_release_info_ext {
+ union qxl_release_info *info;
+ uint32_t group_id;
+};
+
+struct qxl_data_chunk {
+ uint32_t data_size;
+ QXLPHYSICAL prev_chunk;
+ QXLPHYSICAL next_chunk;
+ uint8_t data[0];
+};
+
+struct qxl_message {
+ union qxl_release_info release_info;
+ uint8_t data[0];
+};
+
+struct qxl_compat_update_cmd {
+ union qxl_release_info release_info;
+ struct qxl_rect area;
+ uint32_t update_id;
+};
+
+struct qxl_update_cmd {
+ union qxl_release_info release_info;
+ struct qxl_rect area;
+ uint32_t update_id;
+ uint32_t surface_id;
+};
+
+struct qxl_cursor_header {
+ uint64_t unique;
+ uint16_t type;
+ uint16_t width;
+ uint16_t height;
+ uint16_t hot_spot_x;
+ uint16_t hot_spot_y;
+};
+
+struct qxl_cursor {
+ struct qxl_cursor_header header;
+ uint32_t data_size;
+ struct qxl_data_chunk chunk;
+};
+
+enum {
+ QXL_CURSOR_SET,
+ QXL_CURSOR_MOVE,
+ QXL_CURSOR_HIDE,
+ QXL_CURSOR_TRAIL,
+};
+
+#define QXL_CURSOR_DEVICE_DATA_SIZE 128
+
+struct qxl_cursor_cmd {
+ union qxl_release_info release_info;
+ uint8_t type;
+ union {
+ struct {
+ struct qxl_point_1_6 position;
+ uint8_t visible;
+ QXLPHYSICAL shape;
+ } set;
+ struct {
+ uint16_t length;
+ uint16_t frequency;
+ } trail;
+ struct qxl_point_1_6 position;
+ } u;
+ /* todo: dynamic size from rom */
+ uint8_t device_data[QXL_CURSOR_DEVICE_DATA_SIZE];
+};
+
+enum {
+ QXL_DRAW_NOP,
+ QXL_DRAW_FILL,
+ QXL_DRAW_OPAQUE,
+ QXL_DRAW_COPY,
+ QXL_COPY_BITS,
+ QXL_DRAW_BLEND,
+ QXL_DRAW_BLACKNESS,
+ QXL_DRAW_WHITENESS,
+ QXL_DRAW_INVERS,
+ QXL_DRAW_ROP3,
+ QXL_DRAW_STROKE,
+ QXL_DRAW_TEXT,
+ QXL_DRAW_TRANSPARENT,
+ QXL_DRAW_ALPHA_BLEND,
+ QXL_DRAW_COMPOSITE
+};
+
+struct qxl_raster_glyph {
+ struct qxl_point render_pos;
+ struct qxl_point glyph_origin;
+ uint16_t width;
+ uint16_t height;
+ uint8_t data[0];
+};
+
+struct qxl_string {
+ uint32_t data_size;
+ uint16_t length;
+ uint16_t flags;
+ struct qxl_data_chunk chunk;
+};
+
+struct qxl_copy_bits {
+ struct qxl_point src_pos;
+};
+
+enum qxl_effect_type {
+ QXL_EFFECT_BLEND = 0,
+ QXL_EFFECT_OPAQUE = 1,
+ QXL_EFFECT_REVERT_ON_DUP = 2,
+ QXL_EFFECT_BLACKNESS_ON_DUP = 3,
+ QXL_EFFECT_WHITENESS_ON_DUP = 4,
+ QXL_EFFECT_NOP_ON_DUP = 5,
+ QXL_EFFECT_NOP = 6,
+ QXL_EFFECT_OPAQUE_BRUSH = 7
+};
+
+struct qxl_pattern {
+ QXLPHYSICAL pat;
+ struct qxl_point pos;
+};
+
+struct qxl_brush {
+ uint32_t type;
+ union {
+ uint32_t color;
+ struct qxl_pattern pattern;
+ } u;
+};
+
+struct qxl_q_mask {
+ uint8_t flags;
+ struct qxl_point pos;
+ QXLPHYSICAL bitmap;
+};
+
+struct qxl_fill {
+ struct qxl_brush brush;
+ uint16_t rop_descriptor;
+ struct qxl_q_mask mask;
+};
+
+struct qxl_opaque {
+ QXLPHYSICAL src_bitmap;
+ struct qxl_rect src_area;
+ struct qxl_brush brush;
+ uint16_t rop_descriptor;
+ uint8_t scale_mode;
+ struct qxl_q_mask mask;
+};
+
+struct qxl_copy {
+ QXLPHYSICAL src_bitmap;
+ struct qxl_rect src_area;
+ uint16_t rop_descriptor;
+ uint8_t scale_mode;
+ struct qxl_q_mask mask;
+};
+
+struct qxl_transparent {
+ QXLPHYSICAL src_bitmap;
+ struct qxl_rect src_area;
+ uint32_t src_color;
+ uint32_t true_color;
+};
+
+struct qxl_alpha_blend {
+ uint16_t alpha_flags;
+ uint8_t alpha;
+ QXLPHYSICAL src_bitmap;
+ struct qxl_rect src_area;
+};
+
+struct qxl_compat_alpha_blend {
+ uint8_t alpha;
+ QXLPHYSICAL src_bitmap;
+ struct qxl_rect src_area;
+};
+
+struct qxl_rop_3 {
+ QXLPHYSICAL src_bitmap;
+ struct qxl_rect src_area;
+ struct qxl_brush brush;
+ uint8_t rop3;
+ uint8_t scale_mode;
+ struct qxl_q_mask mask;
+};
+
+struct qxl_line_attr {
+ uint8_t flags;
+ uint8_t join_style;
+ uint8_t end_style;
+ uint8_t style_nseg;
+ QXLFIXED width;
+ QXLFIXED miter_limit;
+ QXLPHYSICAL style;
+};
+
+struct qxl_stroke {
+ QXLPHYSICAL path;
+ struct qxl_line_attr attr;
+ struct qxl_brush brush;
+ uint16_t fore_mode;
+ uint16_t back_mode;
+};
+
+struct qxl_text {
+ QXLPHYSICAL str;
+ struct qxl_rect back_area;
+ struct qxl_brush fore_brush;
+ struct qxl_brush back_brush;
+ uint16_t fore_mode;
+ uint16_t back_mode;
+};
+
+struct qxl_mask {
+ struct qxl_q_mask mask;
+};
+
+struct qxl_clip {
+ uint32_t type;
+ QXLPHYSICAL data;
+};
+
+enum qxl_operator {
+ QXL_OP_CLEAR = 0x00,
+ QXL_OP_SOURCE = 0x01,
+ QXL_OP_DST = 0x02,
+ QXL_OP_OVER = 0x03,
+ QXL_OP_OVER_REVERSE = 0x04,
+ QXL_OP_IN = 0x05,
+ QXL_OP_IN_REVERSE = 0x06,
+ QXL_OP_OUT = 0x07,
+ QXL_OP_OUT_REVERSE = 0x08,
+ QXL_OP_ATOP = 0x09,
+ QXL_OP_ATOP_REVERSE = 0x0a,
+ QXL_OP_XOR = 0x0b,
+ QXL_OP_ADD = 0x0c,
+ QXL_OP_SATURATE = 0x0d,
+ /* Note the jump here from 0x0d to 0x30 */
+ QXL_OP_MULTIPLY = 0x30,
+ QXL_OP_SCREEN = 0x31,
+ QXL_OP_OVERLAY = 0x32,
+ QXL_OP_DARKEN = 0x33,
+ QXL_OP_LIGHTEN = 0x34,
+ QXL_OP_COLOR_DODGE = 0x35,
+ QXL_OP_COLOR_BURN = 0x36,
+ QXL_OP_HARD_LIGHT = 0x37,
+ QXL_OP_SOFT_LIGHT = 0x38,
+ QXL_OP_DIFFERENCE = 0x39,
+ QXL_OP_EXCLUSION = 0x3a,
+ QXL_OP_HSL_HUE = 0x3b,
+ QXL_OP_HSL_SATURATION = 0x3c,
+ QXL_OP_HSL_COLOR = 0x3d,
+ QXL_OP_HSL_LUMINOSITY = 0x3e
+};
+
+struct qxl_transform {
+ uint32_t t00;
+ uint32_t t01;
+ uint32_t t02;
+ uint32_t t10;
+ uint32_t t11;
+ uint32_t t12;
+};
+
+/* The flags field has the following bit fields:
+ *
+ * operator: [ 0 - 7 ]
+ * src_filter: [ 8 - 10 ]
+ * mask_filter: [ 11 - 13 ]
+ * src_repeat: [ 14 - 15 ]
+ * mask_repeat: [ 16 - 17 ]
+ * component_alpha: [ 18 - 18 ]
+ * reserved: [ 19 - 31 ]
+ *
+ * The repeat and filter values are those of pixman:
+ * REPEAT_NONE = 0
+ * REPEAT_NORMAL = 1
+ * REPEAT_PAD = 2
+ * REPEAT_REFLECT = 3
+ *
+ * The filter values are:
+ * FILTER_NEAREST = 0
+ * FILTER_BILINEAR = 1
+ */
+struct qxl_composite {
+ uint32_t flags;
+
+ QXLPHYSICAL src;
+ QXLPHYSICAL src_transform; /* May be NULL */
+ QXLPHYSICAL mask; /* May be NULL */
+ QXLPHYSICAL mask_transform; /* May be NULL */
+ struct qxl_point_1_6 src_origin;
+ struct qxl_point_1_6 mask_origin;
+};
+
+struct qxl_compat_drawable {
+ union qxl_release_info release_info;
+ uint8_t effect;
+ uint8_t type;
+ uint16_t bitmap_offset;
+ struct qxl_rect bitmap_area;
+ struct qxl_rect bbox;
+ struct qxl_clip clip;
+ uint32_t mm_time;
+ union {
+ struct qxl_fill fill;
+ struct qxl_opaque opaque;
+ struct qxl_copy copy;
+ struct qxl_transparent transparent;
+ struct qxl_compat_alpha_blend alpha_blend;
+ struct qxl_copy_bits copy_bits;
+ struct qxl_copy blend;
+ struct qxl_rop_3 rop3;
+ struct qxl_stroke stroke;
+ struct qxl_text text;
+ struct qxl_mask blackness;
+ struct qxl_mask invers;
+ struct qxl_mask whiteness;
+ } u;
+};
+
+struct qxl_drawable {
+ union qxl_release_info release_info;
+ uint32_t surface_id;
+ uint8_t effect;
+ uint8_t type;
+ uint8_t self_bitmap;
+ struct qxl_rect self_bitmap_area;
+ struct qxl_rect bbox;
+ struct qxl_clip clip;
+ uint32_t mm_time;
+ int32_t surfaces_dest[3];
+ struct qxl_rect surfaces_rects[3];
+ union {
+ struct qxl_fill fill;
+ struct qxl_opaque opaque;
+ struct qxl_copy copy;
+ struct qxl_transparent transparent;
+ struct qxl_alpha_blend alpha_blend;
+ struct qxl_copy_bits copy_bits;
+ struct qxl_copy blend;
+ struct qxl_rop_3 rop3;
+ struct qxl_stroke stroke;
+ struct qxl_text text;
+ struct qxl_mask blackness;
+ struct qxl_mask invers;
+ struct qxl_mask whiteness;
+ struct qxl_composite composite;
+ } u;
+};
+
+enum qxl_surface_cmd_type {
+ QXL_SURFACE_CMD_CREATE,
+ QXL_SURFACE_CMD_DESTROY,
+};
+
+struct qxl_surface {
+ uint32_t format;
+ uint32_t width;
+ uint32_t height;
+ int32_t stride;
+ QXLPHYSICAL data;
+};
+
+struct qxl_surface_cmd {
+ union qxl_release_info release_info;
+ uint32_t surface_id;
+ uint8_t type;
+ uint32_t flags;
+ union {
+ struct qxl_surface surface_create;
+ } u;
+};
+
+struct qxl_clip_rects {
+ uint32_t num_rects;
+ struct qxl_data_chunk chunk;
+};
+
+enum {
+ QXL_PATH_BEGIN = (1 << 0),
+ QXL_PATH_END = (1 << 1),
+ QXL_PATH_CLOSE = (1 << 3),
+ QXL_PATH_BEZIER = (1 << 4),
+};
+
+struct qxl_path_seg {
+ uint32_t flags;
+ uint32_t count;
+ struct qxl_point_fix points[0];
+};
+
+struct qxl_path {
+ uint32_t data_size;
+ struct qxl_data_chunk chunk;
+};
+
+enum {
+ QXL_IMAGE_GROUP_DRIVER,
+ QXL_IMAGE_GROUP_DEVICE,
+ QXL_IMAGE_GROUP_RED,
+ QXL_IMAGE_GROUP_DRIVER_DONT_CACHE,
+};
+
+struct qxl_image_id {
+ uint32_t group;
+ uint32_t unique;
+};
+
+union qxl_image_id_union {
+ struct qxl_image_id id;
+ uint64_t value;
+};
+
+enum qxl_image_flags {
+ QXL_IMAGE_CACHE = (1 << 0),
+ QXL_IMAGE_HIGH_BITS_SET = (1 << 1),
+};
+
+enum qxl_bitmap_flags {
+ QXL_BITMAP_DIRECT = (1 << 0),
+ QXL_BITMAP_UNSTABLE = (1 << 1),
+ QXL_BITMAP_TOP_DOWN = (1 << 2), /* == SPICE_BITMAP_FLAGS_TOP_DOWN */
+};
+
+#define QXL_SET_IMAGE_ID(image, _group, _unique) { \
+ (image)->descriptor.id = (((uint64_t)_unique) << 32) | _group; \
+}
+
+struct qxl_image_descriptor {
+ uint64_t id;
+ uint8_t type;
+ uint8_t flags;
+ uint32_t width;
+ uint32_t height;
+};
+
+struct qxl_palette {
+ uint64_t unique;
+ uint16_t num_ents;
+ uint32_t ents[0];
+};
+
+struct qxl_bitmap {
+ uint8_t format;
+ uint8_t flags;
+ uint32_t x;
+ uint32_t y;
+ uint32_t stride;
+ QXLPHYSICAL palette;
+ QXLPHYSICAL data; /* data[0] ? */
+};
+
+struct qxl_surface_id {
+ uint32_t surface_id;
+};
+
+struct qxl_encoder_data {
+ uint32_t data_size;
+ uint8_t data[0];
+};
+
+struct qxl_image {
+ struct qxl_image_descriptor descriptor;
+ union { /* variable length */
+ struct qxl_bitmap bitmap;
+ struct qxl_encoder_data quic;
+ struct qxl_surface_id surface_image;
+ } u;
+};
+
+/* A QXLHead is a single monitor output backed by a QXLSurface.
+ * x and y offsets are unsigned since they are relative to the given
+ * surface, not to the x, y coordinates in the guest screen reference
+ * frame. */
+struct qxl_head {
+ uint32_t id;
+ uint32_t surface_id;
+ uint32_t width;
+ uint32_t height;
+ uint32_t x;
+ uint32_t y;
+ uint32_t flags;
+};
+
+struct qxl_monitors_config {
+ uint16_t count;
+ uint16_t max_allowed; /* If it is 0 no fixed limit is given by the
+ driver */
+ struct qxl_head heads[0];
+};
+
+#pragma pack(pop)
+
+#endif /* H_QXL_DEV */
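A small standalone sketch of the bit layouts documented above: the qxl_composite flags word (operator in bits 0-7, src_filter in 8-10, mask_filter in 11-13, src_repeat in 14-15, mask_repeat in 16-17, component_alpha in bit 18) and the QXL_SET_IMAGE_ID packing (unique id in the high 32 bits, image group in the low bits). The helpers pack_composite_flags() and make_image_id() are illustrative only and are not part of this header.

#include <stdint.h>
#include <stdio.h>

/* pack a qxl_composite flags word according to the layout above */
static uint32_t pack_composite_flags(uint32_t op, uint32_t src_filter,
                                     uint32_t mask_filter, uint32_t src_repeat,
                                     uint32_t mask_repeat, int component_alpha)
{
    return (op & 0xff) |
           ((src_filter  & 0x7) << 8)  |
           ((mask_filter & 0x7) << 11) |
           ((src_repeat  & 0x3) << 14) |
           ((mask_repeat & 0x3) << 16) |
           ((uint32_t)!!component_alpha << 18);
}

/* same packing as the QXL_SET_IMAGE_ID macro */
static uint64_t make_image_id(uint32_t group, uint32_t unique)
{
    return ((uint64_t)unique << 32) | group;
}

int main(void)
{
    /* QXL_OP_OVER (0x03), bilinear src filter (1), pad src repeat (2) */
    uint32_t flags = pack_composite_flags(0x03, 1, 0, 2, 0, 0);
    /* QXL_IMAGE_GROUP_DRIVER (0), unique id 42 */
    uint64_t id = make_image_id(0, 42);

    printf("composite flags = 0x%08x\n", flags);
    printf("image id        = 0x%016llx\n", (unsigned long long)id);
    return 0;
}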
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
new file mode 100644
index 0000000..fcfd443
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -0,0 +1,982 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alon Levy
+ */
+
+
+#include "linux/crc32.h"
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+#include "drm_crtc_helper.h"
+
+static void qxl_crtc_set_to_mode(struct qxl_device *qdev,
+ struct drm_connector *connector,
+ struct qxl_head *head)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode, *t;
+ int width = head->width;
+ int height = head->height;
+
+ if (width < 320 || height < 240) {
+ qxl_io_log(qdev, "%s: bad head: %dx%d", width, height);
+ width = 1024;
+ height = 768;
+ }
+ if (width * height * 4 > 16*1024*1024) {
+ width = 1024;
+ height = 768;
+ }
+ /* TODO: go over regular modes and removed preferred? */
+ list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
+ drm_mode_remove(connector, mode);
+ mode = drm_cvt_mode(dev, width, height, 60, false, false, false);
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ mode->status = MODE_OK;
+ drm_mode_probed_add(connector, mode);
+ qxl_io_log(qdev, "%s: %d x %d\n", __func__, width, height);
+}
+
+void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev)
+{
+ struct drm_connector *connector;
+ int i;
+ struct drm_device *dev = qdev->ddev;
+
+ i = 0;
+ qxl_io_log(qdev, "%s: %d, %d\n", __func__,
+ dev->mode_config.num_connector,
+ qdev->monitors_config->count);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (i >= qdev->monitors_config->count) {
+ /* crtc will be reported as disabled */
+ continue;
+ }
+ qxl_crtc_set_to_mode(qdev, connector,
+ &qdev->monitors_config->heads[i]);
+ ++i;
+ }
+}
+
+void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
+{
+ if (qdev->client_monitors_config &&
+ count > qdev->client_monitors_config->count) {
+ kfree(qdev->client_monitors_config);
+ qdev->client_monitors_config = NULL;
+ }
+ if (!qdev->client_monitors_config) {
+ qdev->client_monitors_config = kzalloc(
+ sizeof(struct qxl_monitors_config) +
+ sizeof(struct qxl_head) * count, GFP_KERNEL);
+ if (!qdev->client_monitors_config) {
+ qxl_io_log(qdev,
+ "%s: allocation failure for %u heads\n",
+ __func__, count);
+ return;
+ }
+ }
+ qdev->client_monitors_config->count = count;
+}
+
+static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
+{
+ int i;
+ int num_monitors;
+ uint32_t crc;
+
+ BUG_ON(!qdev->monitors_config);
+ num_monitors = qdev->rom->client_monitors_config.count;
+ crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config,
+ sizeof(qdev->rom->client_monitors_config));
+ if (crc != qdev->rom->client_monitors_config_crc) {
+ qxl_io_log(qdev, "crc mismatch: have %X (%d) != %X\n", crc,
+ sizeof(qdev->rom->client_monitors_config),
+ qdev->rom->client_monitors_config_crc);
+ return 1;
+ }
+ if (num_monitors > qdev->monitors_config->max_allowed) {
+ DRM_INFO("client monitors list will be truncated: %d < %d\n",
+ qdev->monitors_config->max_allowed, num_monitors);
+ num_monitors = qdev->monitors_config->max_allowed;
+ } else {
+ num_monitors = qdev->rom->client_monitors_config.count;
+ }
+ qxl_alloc_client_monitors_config(qdev, num_monitors);
+ /* we copy max from the client but it isn't used */
+ qdev->client_monitors_config->max_allowed =
+ qdev->monitors_config->max_allowed;
+ for (i = 0 ; i < qdev->client_monitors_config->count ; ++i) {
+ struct qxl_urect *c_rect =
+ &qdev->rom->client_monitors_config.heads[i];
+ struct qxl_head *client_head =
+ &qdev->client_monitors_config->heads[i];
+ struct qxl_head *head = &qdev->monitors_config->heads[i];
+ client_head->x = head->x = c_rect->left;
+ client_head->y = head->y = c_rect->top;
+ client_head->width = head->width =
+ c_rect->right - c_rect->left;
+ client_head->height = head->height =
+ c_rect->bottom - c_rect->top;
+ client_head->surface_id = head->surface_id = 0;
+ client_head->id = head->id = i;
+ client_head->flags = head->flags = 0;
+ QXL_DEBUG(qdev, "read %dx%d+%d+%d\n", head->width, head->height,
+ head->x, head->y);
+ }
+ return 0;
+}
+
+void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
+{
+
+ while (qxl_display_copy_rom_client_monitors_config(qdev)) {
+ qxl_io_log(qdev, "failed crc check for client_monitors_config,"
+ " retrying\n");
+ }
+ qxl_crtc_set_from_monitors_config(qdev);
+ /* fire off a uevent and let userspace tell us what to do */
+ qxl_io_log(qdev, "calling drm_sysfs_hotplug_event\n");
+ drm_sysfs_hotplug_event(qdev->ddev);
+}
+
+static int qxl_add_monitors_config_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct qxl_device *qdev = dev->dev_private;
+ struct qxl_output *output = drm_connector_to_qxl_output(connector);
+ int h = output->index;
+ struct drm_display_mode *mode = NULL;
+ struct qxl_head *head;
+
+ if (!qdev->monitors_config)
+ return 0;
+ head = &qdev->monitors_config->heads[h];
+
+ mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false,
+ false);
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+ return 1;
+}
+
+static int qxl_add_common_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode = NULL;
+ int i;
+ struct mode_size {
+ int w;
+ int h;
+ } common_modes[] = {
+ { 640, 480},
+ { 720, 480},
+ { 800, 600},
+ { 848, 480},
+ {1024, 768},
+ {1152, 768},
+ {1280, 720},
+ {1280, 800},
+ {1280, 854},
+ {1280, 960},
+ {1280, 1024},
+ {1440, 900},
+ {1400, 1050},
+ {1680, 1050},
+ {1600, 1200},
+ {1920, 1080},
+ {1920, 1200}
+ };
+
+ for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
+ if (common_modes[i].w < 320 || common_modes[i].h < 200)
+ continue;
+
+ mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
+ 60, false, false, false);
+ if (common_modes[i].w == 1024 && common_modes[i].h == 768)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+ }
+ return i - 1;
+}
+
+static void qxl_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+ /* TODO */
+}
+
+static void qxl_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(qxl_crtc);
+}
+
+static void
+qxl_hide_cursor(struct qxl_device *qdev)
+{
+ struct qxl_release *release;
+ struct qxl_cursor_cmd *cmd;
+ int ret;
+
+ ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
+ &release, NULL);
+
+ cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+ cmd->type = QXL_CURSOR_HIDE;
+ qxl_release_unmap(qdev, release, &cmd->release_info);
+
+ qxl_fence_releaseable(qdev, release);
+ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+ qxl_release_unreserve(qdev, release);
+}
+
+static int qxl_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height)
+{
+ struct drm_device *dev = crtc->dev;
+ struct qxl_device *qdev = dev->dev_private;
+ struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
+ struct drm_gem_object *obj;
+ struct qxl_cursor *cursor;
+ struct qxl_cursor_cmd *cmd;
+ struct qxl_bo *cursor_bo, *user_bo;
+ struct qxl_release *release;
+ void *user_ptr;
+
+ int size = 64*64*4;
+ int ret = 0;
+ if (!handle) {
+ qxl_hide_cursor(qdev);
+ return 0;
+ }
+
+ obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+ if (!obj) {
+ DRM_ERROR("cannot find cursor object\n");
+ return -ENOENT;
+ }
+
+ user_bo = gem_to_qxl_bo(obj);
+
+ ret = qxl_bo_reserve(user_bo, false);
+ if (ret)
+ goto out_unref;
+
+ ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
+ if (ret)
+ goto out_unreserve;
+
+ ret = qxl_bo_kmap(user_bo, &user_ptr);
+ if (ret)
+ goto out_unpin;
+
+ ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
+ QXL_RELEASE_CURSOR_CMD,
+ &release, NULL);
+ if (ret)
+ goto out_kunmap;
+ ret = qxl_alloc_bo_reserved(qdev, sizeof(struct qxl_cursor) + size,
+ &cursor_bo);
+ if (ret)
+ goto out_free_release;
+ ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
+ if (ret)
+ goto out_free_bo;
+
+ cursor->header.unique = 0;
+ cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
+ cursor->header.width = 64;
+ cursor->header.height = 64;
+ cursor->header.hot_spot_x = 0;
+ cursor->header.hot_spot_y = 0;
+ cursor->data_size = size;
+ cursor->chunk.next_chunk = 0;
+ cursor->chunk.prev_chunk = 0;
+ cursor->chunk.data_size = size;
+
+ memcpy(cursor->chunk.data, user_ptr, size);
+
+ qxl_bo_kunmap(cursor_bo);
+
+ /* finish with the userspace bo */
+ qxl_bo_kunmap(user_bo);
+ qxl_bo_unpin(user_bo);
+ qxl_bo_unreserve(user_bo);
+ drm_gem_object_unreference_unlocked(obj);
+
+ cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+ cmd->type = QXL_CURSOR_SET;
+ cmd->u.set.position.x = qcrtc->cur_x;
+ cmd->u.set.position.y = qcrtc->cur_y;
+
+ cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
+ qxl_release_add_res(qdev, release, cursor_bo);
+
+ cmd->u.set.visible = 1;
+ qxl_release_unmap(qdev, release, &cmd->release_info);
+
+ qxl_fence_releaseable(qdev, release);
+ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+ qxl_release_unreserve(qdev, release);
+
+ qxl_bo_unreserve(cursor_bo);
+ qxl_bo_unref(&cursor_bo);
+
+ return ret;
+out_free_bo:
+ qxl_bo_unref(&cursor_bo);
+out_free_release:
+ qxl_release_unreserve(qdev, release);
+ qxl_release_free(qdev, release);
+out_kunmap:
+ qxl_bo_kunmap(user_bo);
+out_unpin:
+ qxl_bo_unpin(user_bo);
+out_unreserve:
+ qxl_bo_unreserve(user_bo);
+out_unref:
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+}
+
+static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
+ int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct qxl_device *qdev = dev->dev_private;
+ struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
+ struct qxl_release *release;
+ struct qxl_cursor_cmd *cmd;
+ int ret;
+
+ ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
+ &release, NULL);
+
+ qcrtc->cur_x = x;
+ qcrtc->cur_y = y;
+
+ cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+ cmd->type = QXL_CURSOR_MOVE;
+ cmd->u.position.x = qcrtc->cur_x;
+ cmd->u.position.y = qcrtc->cur_y;
+ qxl_release_unmap(qdev, release, &cmd->release_info);
+
+ qxl_fence_releaseable(qdev, release);
+ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+ qxl_release_unreserve(qdev, release);
+ return 0;
+}
+
+
+static const struct drm_crtc_funcs qxl_crtc_funcs = {
+ .cursor_set = qxl_crtc_cursor_set,
+ .cursor_move = qxl_crtc_cursor_move,
+ .gamma_set = qxl_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = qxl_crtc_destroy,
+};
+
+static void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
+
+ if (qxl_fb->obj)
+ drm_gem_object_unreference_unlocked(qxl_fb->obj);
+ drm_framebuffer_cleanup(fb);
+ kfree(qxl_fb);
+}
+
+static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips)
+{
+ /* TODO: vmwgfx where this was cribbed from had locking. Why? */
+ struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
+ struct qxl_device *qdev = qxl_fb->base.dev->dev_private;
+ struct drm_clip_rect norect;
+ struct qxl_bo *qobj;
+ int inc = 1;
+
+ qobj = gem_to_qxl_bo(qxl_fb->obj);
+ if (qxl_fb != qdev->active_user_framebuffer) {
+ DRM_INFO("%s: qxl_fb 0x%p != qdev->active_user_framebuffer 0x%p\n",
+ __func__, qxl_fb, qdev->active_user_framebuffer);
+ }
+ if (!num_clips) {
+ num_clips = 1;
+ clips = &norect;
+ norect.x1 = norect.y1 = 0;
+ norect.x2 = fb->width;
+ norect.y2 = fb->height;
+ } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+ num_clips /= 2;
+ inc = 2; /* skip source rects */
+ }
+
+ qxl_draw_dirty_fb(qdev, qxl_fb, qobj, flags, color,
+ clips, num_clips, inc);
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs qxl_fb_funcs = {
+ .destroy = qxl_user_framebuffer_destroy,
+ .dirty = qxl_framebuffer_surface_dirty,
+/* TODO?
+ * .create_handle = qxl_user_framebuffer_create_handle, */
+};
+
+int
+qxl_framebuffer_init(struct drm_device *dev,
+ struct qxl_framebuffer *qfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ qfb->obj = obj;
+ ret = drm_framebuffer_init(dev, &qfb->base, &qxl_fb_funcs);
+ if (ret) {
+ qfb->obj = NULL;
+ return ret;
+ }
+ drm_helper_mode_fill_fb_struct(&qfb->base, mode_cmd);
+ return 0;
+}
+
+static void qxl_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct qxl_device *qdev = dev->dev_private;
+
+ qxl_io_log(qdev, "%s: (%d,%d) => (%d,%d)\n",
+ __func__,
+ mode->hdisplay, mode->vdisplay,
+ adjusted_mode->hdisplay,
+ adjusted_mode->vdisplay);
+ return true;
+}
+
+void
+qxl_send_monitors_config(struct qxl_device *qdev)
+{
+ int i;
+
+ BUG_ON(!qdev->ram_header->monitors_config);
+
+ if (qdev->monitors_config->count == 0) {
+ qxl_io_log(qdev, "%s: 0 monitors??\n", __func__);
+ return;
+ }
+ for (i = 0 ; i < qdev->monitors_config->count ; ++i) {
+ struct qxl_head *head = &qdev->monitors_config->heads[i];
+
+ if (head->y > 8192 || head->x > 8192 ||
+ head->width > 8192 || head->height > 8192) {
+ DRM_ERROR("head %d wrong: %dx%d+%d+%d\n",
+ i, head->width, head->height,
+ head->x, head->y);
+ return;
+ }
+ }
+ qxl_io_monitors_config(qdev);
+}
+
+static void qxl_monitors_config_set_single(struct qxl_device *qdev,
+ unsigned x, unsigned y,
+ unsigned width, unsigned height)
+{
+ DRM_DEBUG("%dx%d+%d+%d\n", width, height, x, y);
+ qdev->monitors_config->count = 1;
+ qdev->monitors_config->heads[0].x = x;
+ qdev->monitors_config->heads[0].y = y;
+ qdev->monitors_config->heads[0].width = width;
+ qdev->monitors_config->heads[0].height = height;
+}
+
+static int qxl_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct qxl_device *qdev = dev->dev_private;
+ struct qxl_mode *m = (void *)mode->private;
+ struct qxl_framebuffer *qfb;
+ struct qxl_bo *bo, *old_bo = NULL;
+ uint32_t width, height, base_offset;
+ bool recreate_primary = false;
+ int ret;
+
+ if (!crtc->fb) {
+ DRM_DEBUG_KMS("No FB bound\n");
+ return 0;
+ }
+
+ if (old_fb) {
+ qfb = to_qxl_framebuffer(old_fb);
+ old_bo = gem_to_qxl_bo(qfb->obj);
+ }
+ qfb = to_qxl_framebuffer(crtc->fb);
+ bo = gem_to_qxl_bo(qfb->obj);
+ if (!m)
+ /* and do we care? */
+ DRM_DEBUG("%dx%d: not a native mode\n", x, y);
+ else
+ DRM_DEBUG("%dx%d: qxl id %d\n",
+ mode->hdisplay, mode->vdisplay, m->id);
+ DRM_DEBUG("+%d+%d (%d,%d) => (%d,%d)\n",
+ x, y,
+ mode->hdisplay, mode->vdisplay,
+ adjusted_mode->hdisplay,
+ adjusted_mode->vdisplay);
+
+ recreate_primary = true;
+
+ width = mode->hdisplay;
+ height = mode->vdisplay;
+ base_offset = 0;
+
+ ret = qxl_bo_reserve(bo, false);
+ if (ret != 0)
+ return ret;
+ ret = qxl_bo_pin(bo, bo->type, NULL);
+ if (ret != 0) {
+ qxl_bo_unreserve(bo);
+ return -EINVAL;
+ }
+ qxl_bo_unreserve(bo);
+ if (recreate_primary) {
+ qxl_io_destroy_primary(qdev);
+ qxl_io_log(qdev,
+ "recreate primary: %dx%d (was %dx%d,%d,%d)\n",
+ width, height, bo->surf.width,
+ bo->surf.height, bo->surf.stride, bo->surf.format);
+ qxl_io_create_primary(qdev, width, height, base_offset, bo);
+ bo->is_primary = true;
+ }
+
+ if (old_bo && old_bo != bo) {
+ old_bo->is_primary = false;
+ ret = qxl_bo_reserve(old_bo, false);
+ qxl_bo_unpin(old_bo);
+ qxl_bo_unreserve(old_bo);
+ }
+
+ if (qdev->monitors_config->count == 0) {
+ qxl_monitors_config_set_single(qdev, x, y,
+ mode->hdisplay,
+ mode->vdisplay);
+ }
+ qdev->mode_set = true;
+ return 0;
+}
+
+static void qxl_crtc_prepare(struct drm_crtc *crtc)
+{
+ DRM_DEBUG("current: %dx%d+%d+%d (%d).\n",
+ crtc->mode.hdisplay, crtc->mode.vdisplay,
+ crtc->x, crtc->y, crtc->enabled);
+}
+
+static void qxl_crtc_commit(struct drm_crtc *crtc)
+{
+ DRM_DEBUG("\n");
+}
+
+static void qxl_crtc_load_lut(struct drm_crtc *crtc)
+{
+ DRM_DEBUG("\n");
+}
+
+static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
+ .dpms = qxl_crtc_dpms,
+ .mode_fixup = qxl_crtc_mode_fixup,
+ .mode_set = qxl_crtc_mode_set,
+ .prepare = qxl_crtc_prepare,
+ .commit = qxl_crtc_commit,
+ .load_lut = qxl_crtc_load_lut,
+};
+
+static int qdev_crtc_init(struct drm_device *dev, int num_crtc)
+{
+ struct qxl_crtc *qxl_crtc;
+
+ qxl_crtc = kzalloc(sizeof(struct qxl_crtc), GFP_KERNEL);
+ if (!qxl_crtc)
+ return -ENOMEM;
+
+ drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&qxl_crtc->base, 256);
+ drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs);
+ return 0;
+}
+
+static void qxl_enc_dpms(struct drm_encoder *encoder, int mode)
+{
+ DRM_DEBUG("\n");
+}
+
+static bool qxl_enc_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ DRM_DEBUG("\n");
+ return true;
+}
+
+static void qxl_enc_prepare(struct drm_encoder *encoder)
+{
+ DRM_DEBUG("\n");
+}
+
+static void qxl_write_monitors_config_for_encoder(struct qxl_device *qdev,
+ struct drm_encoder *encoder)
+{
+ int i;
+ struct qxl_head *head;
+ struct drm_display_mode *mode;
+
+ BUG_ON(!encoder);
+ /* TODO: ugly, do better */
+ for (i = 0 ; i < 32 && (encoder->possible_crtcs != (1 << i)); ++i)
+ ;
+ if (encoder->possible_crtcs != (1 << i)) {
+ DRM_ERROR("encoder has wrong possible_crtcs: %x\n",
+ encoder->possible_crtcs);
+ return;
+ }
+ if (!qdev->monitors_config ||
+ qdev->monitors_config->max_allowed <= i) {
+ DRM_ERROR(
+ "head number too large or missing monitors config: %p, %d",
+ qdev->monitors_config,
+ qdev->monitors_config ?
+ qdev->monitors_config->max_allowed : -1);
+ return;
+ }
+ if (!encoder->crtc) {
+ DRM_ERROR("missing crtc on encoder %p\n", encoder);
+ return;
+ }
+ if (i != 0)
+ DRM_DEBUG("missing for multiple monitors: no head holes\n");
+ head = &qdev->monitors_config->heads[i];
+ head->id = i;
+ head->surface_id = 0;
+ if (encoder->crtc->enabled) {
+ mode = &encoder->crtc->mode;
+ head->width = mode->hdisplay;
+ head->height = mode->vdisplay;
+ head->x = encoder->crtc->x;
+ head->y = encoder->crtc->y;
+ if (qdev->monitors_config->count < i + 1)
+ qdev->monitors_config->count = i + 1;
+ } else {
+ head->width = 0;
+ head->height = 0;
+ head->x = 0;
+ head->y = 0;
+ }
+ DRM_DEBUG("setting head %d to +%d+%d %dx%d\n",
+ i, head->x, head->y, head->width, head->height);
+ head->flags = 0;
+ /* TODO - somewhere else to call this for multiple monitors
+ * (config_commit?) */
+ qxl_send_monitors_config(qdev);
+}
+
+static void qxl_enc_commit(struct drm_encoder *encoder)
+{
+ struct qxl_device *qdev = encoder->dev->dev_private;
+
+ qxl_write_monitors_config_for_encoder(qdev, encoder);
+ DRM_DEBUG("\n");
+}
+
+static void qxl_enc_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ DRM_DEBUG("\n");
+}
+
+static int qxl_conn_get_modes(struct drm_connector *connector)
+{
+ int ret = 0;
+ struct qxl_device *qdev = connector->dev->dev_private;
+
+ DRM_DEBUG_KMS("monitors_config=%p\n", qdev->monitors_config);
+ /* TODO: what should we do here? only show the configured modes for the
+ * device, or allow the full list, or both? */
+ if (qdev->monitors_config && qdev->monitors_config->count) {
+ ret = qxl_add_monitors_config_modes(connector);
+ if (ret < 0)
+ return ret;
+ }
+ ret += qxl_add_common_modes(connector);
+ return ret;
+}
+
+static int qxl_conn_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* TODO: is this called for user defined modes? (xrandr --add-mode)
+ * TODO: check that the mode fits in the framebuffer */
+ DRM_DEBUG("%s: %dx%d status=%d\n", mode->name, mode->hdisplay,
+ mode->vdisplay, mode->status);
+ return MODE_OK;
+}
+
+static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
+{
+ struct qxl_output *qxl_output =
+ drm_connector_to_qxl_output(connector);
+
+ DRM_DEBUG("\n");
+ return &qxl_output->enc;
+}
+
+
+static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = {
+ .dpms = qxl_enc_dpms,
+ .mode_fixup = qxl_enc_mode_fixup,
+ .prepare = qxl_enc_prepare,
+ .mode_set = qxl_enc_mode_set,
+ .commit = qxl_enc_commit,
+};
+
+static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = {
+ .get_modes = qxl_conn_get_modes,
+ .mode_valid = qxl_conn_mode_valid,
+ .best_encoder = qxl_best_encoder,
+};
+
+static void qxl_conn_save(struct drm_connector *connector)
+{
+ DRM_DEBUG("\n");
+}
+
+static void qxl_conn_restore(struct drm_connector *connector)
+{
+ DRM_DEBUG("\n");
+}
+
+static enum drm_connector_status qxl_conn_detect(
+ struct drm_connector *connector,
+ bool force)
+{
+ struct qxl_output *output =
+ drm_connector_to_qxl_output(connector);
+ struct drm_device *ddev = connector->dev;
+ struct qxl_device *qdev = ddev->dev_private;
+ int connected;
+
+ /* The first monitor is always connected */
+ connected = (output->index == 0) ||
+ (qdev->monitors_config &&
+ qdev->monitors_config->count > output->index);
+
+ DRM_DEBUG("\n");
+ return connected ? connector_status_connected
+ : connector_status_disconnected;
+}
+
+static int qxl_conn_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ DRM_DEBUG("\n");
+ return 0;
+}
+
+static void qxl_conn_destroy(struct drm_connector *connector)
+{
+ struct qxl_output *qxl_output =
+ drm_connector_to_qxl_output(connector);
+
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(qxl_output);
+}
+
+static const struct drm_connector_funcs qxl_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .save = qxl_conn_save,
+ .restore = qxl_conn_restore,
+ .detect = qxl_conn_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = qxl_conn_set_property,
+ .destroy = qxl_conn_destroy,
+};
+
+static void qxl_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs qxl_enc_funcs = {
+ .destroy = qxl_enc_destroy,
+};
+
+static int qdev_output_init(struct drm_device *dev, int num_output)
+{
+ struct qxl_output *qxl_output;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ qxl_output = kzalloc(sizeof(struct qxl_output), GFP_KERNEL);
+ if (!qxl_output)
+ return -ENOMEM;
+
+ qxl_output->index = num_output;
+
+ connector = &qxl_output->base;
+ encoder = &qxl_output->enc;
+ drm_connector_init(dev, &qxl_output->base,
+ &qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
+
+ drm_encoder_init(dev, &qxl_output->enc, &qxl_enc_funcs,
+ DRM_MODE_ENCODER_VIRTUAL);
+
+ encoder->possible_crtcs = 1 << num_output;
+ drm_mode_connector_attach_encoder(&qxl_output->base,
+ &qxl_output->enc);
+ drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs);
+ drm_connector_helper_add(connector, &qxl_connector_helper_funcs);
+
+ drm_sysfs_connector_add(connector);
+ return 0;
+}
+
+static struct drm_framebuffer *
+qxl_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct qxl_framebuffer *qxl_fb;
+ struct qxl_device *qdev = dev->dev_private;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+ if (!obj)
+ return NULL;
+
+ qxl_fb = kzalloc(sizeof(*qxl_fb), GFP_KERNEL);
+ if (qxl_fb == NULL) {
+ drm_gem_object_unreference_unlocked(obj);
+ return NULL;
+ }
+
+ ret = qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj);
+ if (ret) {
+ kfree(qxl_fb);
+ drm_gem_object_unreference_unlocked(obj);
+ return NULL;
+ }
+
+ if (qdev->active_user_framebuffer) {
+ DRM_INFO("%s: active_user_framebuffer %p -> %p\n",
+ __func__,
+ qdev->active_user_framebuffer, qxl_fb);
+ }
+ qdev->active_user_framebuffer = qxl_fb;
+
+ return &qxl_fb->base;
+}
+
+static const struct drm_mode_config_funcs qxl_mode_funcs = {
+ .fb_create = qxl_user_framebuffer_create,
+};
+
+int qxl_modeset_init(struct qxl_device *qdev)
+{
+ int i;
+ int ret;
+ struct drm_gem_object *gobj;
+ int max_allowed = QXL_NUM_OUTPUTS;
+ int monitors_config_size = sizeof(struct qxl_monitors_config) +
+ max_allowed * sizeof(struct qxl_head);
+
+ drm_mode_config_init(qdev->ddev);
+ ret = qxl_gem_object_create(qdev, monitors_config_size, 0,
+ QXL_GEM_DOMAIN_VRAM,
+ false, false, NULL, &gobj);
+ if (ret) {
+ DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret);
+ return -ENOMEM;
+ }
+ qdev->monitors_config_bo = gem_to_qxl_bo(gobj);
+ qxl_bo_kmap(qdev->monitors_config_bo, NULL);
+ qdev->monitors_config = qdev->monitors_config_bo->kptr;
+ qdev->ram_header->monitors_config =
+ qxl_bo_physical_address(qdev, qdev->monitors_config_bo, 0);
+
+ memset(qdev->monitors_config, 0, monitors_config_size);
+ qdev->monitors_config->max_allowed = max_allowed;
+
+ qdev->ddev->mode_config.funcs = (void *)&qxl_mode_funcs;
+
+ /* modes will be validated against the framebuffer size */
+ qdev->ddev->mode_config.min_width = 320;
+ qdev->ddev->mode_config.min_height = 200;
+ qdev->ddev->mode_config.max_width = 8192;
+ qdev->ddev->mode_config.max_height = 8192;
+
+ qdev->ddev->mode_config.fb_base = qdev->vram_base;
+ for (i = 0 ; i < QXL_NUM_OUTPUTS; ++i) {
+ qdev_crtc_init(qdev->ddev, i);
+ qdev_output_init(qdev->ddev, i);
+ }
+
+ qdev->mode_info.mode_config_initialized = true;
+
+ /* primary surface must be created by this point, to allow
+ * issuing command queue commands and having them read by
+ * spice server. */
+ qxl_fbdev_init(qdev);
+ return 0;
+}
+
+void qxl_modeset_fini(struct qxl_device *qdev)
+{
+ qxl_fbdev_fini(qdev);
+ if (qdev->mode_info.mode_config_initialized) {
+ drm_mode_config_cleanup(qdev->ddev);
+ qdev->mode_info.mode_config_initialized = false;
+ }
+}
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
new file mode 100644
index 0000000..3c8c3db
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -0,0 +1,390 @@
+/*
+ * Copyright 2011 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+/* returns a pointer to the already allocated qxl_rect array inside
+ * the qxl_clip_rects. This is *not* the same as the memory allocated
+ * on the device, it is offset to qxl_clip_rects.chunk.data */
+static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
+ struct qxl_drawable *drawable,
+ unsigned num_clips,
+ struct qxl_bo **clips_bo,
+ struct qxl_release *release)
+{
+ struct qxl_clip_rects *dev_clips;
+ int ret;
+	int size = sizeof(*dev_clips) + sizeof(struct qxl_rect) * num_clips;
+
+	ret = qxl_alloc_bo_reserved(qdev, size, clips_bo);
+ if (ret)
+ return NULL;
+
+ ret = qxl_bo_kmap(*clips_bo, (void **)&dev_clips);
+ if (ret) {
+ qxl_bo_unref(clips_bo);
+ return NULL;
+ }
+ dev_clips->num_rects = num_clips;
+ dev_clips->chunk.next_chunk = 0;
+ dev_clips->chunk.prev_chunk = 0;
+ dev_clips->chunk.data_size = sizeof(struct qxl_rect) * num_clips;
+ return (struct qxl_rect *)dev_clips->chunk.data;
+}
+
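+/* allocate a drawable command tracked by a release object and fill in
+ * conservative defaults; callers patch the type-specific fields before the
+ * command is pushed to the device ring */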
+static int
+make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
+ const struct qxl_rect *rect,
+ struct qxl_release **release)
+{
+ struct qxl_drawable *drawable;
+ int i, ret;
+
+ ret = qxl_alloc_release_reserved(qdev, sizeof(*drawable),
+ QXL_RELEASE_DRAWABLE, release,
+ NULL);
+ if (ret)
+ return ret;
+
+ drawable = (struct qxl_drawable *)qxl_release_map(qdev, *release);
+ drawable->type = type;
+
+ drawable->surface_id = surface; /* Only primary for now */
+ drawable->effect = QXL_EFFECT_OPAQUE;
+ drawable->self_bitmap = 0;
+ drawable->self_bitmap_area.top = 0;
+ drawable->self_bitmap_area.left = 0;
+ drawable->self_bitmap_area.bottom = 0;
+ drawable->self_bitmap_area.right = 0;
+ /* FIXME: add clipping */
+ drawable->clip.type = SPICE_CLIP_TYPE_NONE;
+
+ /*
+ * surfaces_dest[i] should apparently be filled out with the
+ * surfaces that we depend on, and surface_rects should be
+ * filled with the rectangles of those surfaces that we
+ * are going to use.
+ */
+ for (i = 0; i < 3; ++i)
+ drawable->surfaces_dest[i] = -1;
+
+ if (rect)
+ drawable->bbox = *rect;
+
+ drawable->mm_time = qdev->rom->mm_clock;
+ qxl_release_unmap(qdev, *release, &drawable->release_info);
+ return 0;
+}
+
+static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
+ const struct qxl_fb_image *qxl_fb_image)
+{
+ struct qxl_device *qdev = qxl_fb_image->qdev;
+ const struct fb_image *fb_image = &qxl_fb_image->fb_image;
+ uint32_t visual = qxl_fb_image->visual;
+ const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette;
+ struct qxl_palette *pal;
+ int ret;
+ uint32_t fgcolor, bgcolor;
+ static uint64_t unique; /* we make no attempt to actually set this
+					 * correctly globally, since that would require
+ * tracking all of our palettes. */
+
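+	/* two-entry palette for mono images: index 0 holds the background
+	 * color, index 1 the foreground */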
+ ret = qxl_alloc_bo_reserved(qdev,
+ sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
+ palette_bo);
+
+ ret = qxl_bo_kmap(*palette_bo, (void **)&pal);
+ pal->num_ents = 2;
+ pal->unique = unique++;
+ if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
+ /* NB: this is the only used branch currently. */
+ fgcolor = pseudo_palette[fb_image->fg_color];
+ bgcolor = pseudo_palette[fb_image->bg_color];
+ } else {
+ fgcolor = fb_image->fg_color;
+ bgcolor = fb_image->bg_color;
+ }
+ pal->ents[0] = bgcolor;
+ pal->ents[1] = fgcolor;
+ qxl_bo_kunmap(*palette_bo);
+ return 0;
+}
+
+void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
+ int stride /* filled in if 0 */)
+{
+ struct qxl_device *qdev = qxl_fb_image->qdev;
+ struct qxl_drawable *drawable;
+ struct qxl_rect rect;
+ const struct fb_image *fb_image = &qxl_fb_image->fb_image;
+ int x = fb_image->dx;
+ int y = fb_image->dy;
+ int width = fb_image->width;
+ int height = fb_image->height;
+ const char *src = fb_image->data;
+ int depth = fb_image->depth;
+ struct qxl_release *release;
+ struct qxl_bo *image_bo;
+ struct qxl_image *image;
+ int ret;
+
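+	/* derive a tightly packed stride when the caller passes 0 */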
+ if (stride == 0)
+ stride = depth * width / 8;
+
+ rect.left = x;
+ rect.right = x + width;
+ rect.top = y;
+ rect.bottom = y + height;
+
+ ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, &release);
+ if (ret)
+ return;
+
+ ret = qxl_image_create(qdev, release, &image_bo,
+ (const uint8_t *)src, 0, 0,
+ width, height, depth, stride);
+ if (ret) {
+ qxl_release_unreserve(qdev, release);
+ qxl_release_free(qdev, release);
+ return;
+ }
+
+ if (depth == 1) {
+ struct qxl_bo *palette_bo;
+ void *ptr;
+ ret = qxl_palette_create_1bit(&palette_bo, qxl_fb_image);
+ qxl_release_add_res(qdev, release, palette_bo);
+
+ ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
+ image = ptr;
+ image->u.bitmap.palette =
+ qxl_bo_physical_address(qdev, palette_bo, 0);
+ qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
+ qxl_bo_unreserve(palette_bo);
+ qxl_bo_unref(&palette_bo);
+ }
+
+ drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+
+ drawable->u.copy.src_area.top = 0;
+ drawable->u.copy.src_area.bottom = height;
+ drawable->u.copy.src_area.left = 0;
+ drawable->u.copy.src_area.right = width;
+
+ drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT;
+ drawable->u.copy.scale_mode = 0;
+ drawable->u.copy.mask.flags = 0;
+ drawable->u.copy.mask.pos.x = 0;
+ drawable->u.copy.mask.pos.y = 0;
+ drawable->u.copy.mask.bitmap = 0;
+
+ drawable->u.copy.src_bitmap =
+ qxl_bo_physical_address(qdev, image_bo, 0);
+ qxl_release_unmap(qdev, release, &drawable->release_info);
+
+ qxl_release_add_res(qdev, release, image_bo);
+ qxl_bo_unreserve(image_bo);
+ qxl_bo_unref(&image_bo);
+
+ qxl_fence_releaseable(qdev, release);
+ qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+ qxl_release_unreserve(qdev, release);
+}
+
+/* push a draw command using the given clipping rectangles as
+ * the sources from the shadow framebuffer.
+ *
+ * Right now implementing with a single draw and a clip list. Clip
+ * lists are known to be a problem performance wise, this can be solved
+ * by treating them differently in the server.
+ */
+void qxl_draw_dirty_fb(struct qxl_device *qdev,
+ struct qxl_framebuffer *qxl_fb,
+ struct qxl_bo *bo,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips, int inc)
+{
+ /*
+ * TODO: if flags & DRM_MODE_FB_DIRTY_ANNOTATE_FILL then we should
+ * send a fill command instead, much cheaper.
+ *
+ * See include/drm/drm_mode.h
+ */
+ struct drm_clip_rect *clips_ptr;
+ int i;
+ int left, right, top, bottom;
+ int width, height;
+ struct qxl_drawable *drawable;
+ struct qxl_rect drawable_rect;
+ struct qxl_rect *rects;
+ int stride = qxl_fb->base.pitches[0];
+ /* depth is not actually interesting, we don't mask with it */
+ int depth = qxl_fb->base.bits_per_pixel;
+ uint8_t *surface_base;
+ struct qxl_release *release;
+ struct qxl_bo *image_bo;
+ struct qxl_bo *clips_bo;
+ int ret;
+
+ left = clips->x1;
+ right = clips->x2;
+ top = clips->y1;
+ bottom = clips->y2;
+
+	/* compute the bounding box of all clip rects; the first rect seeds it,
+	 * so the loop starts at the second */
+ for (i = 1, clips_ptr = clips + inc;
+ i < num_clips; i++, clips_ptr += inc) {
+ left = min_t(int, left, (int)clips_ptr->x1);
+ right = max_t(int, right, (int)clips_ptr->x2);
+ top = min_t(int, top, (int)clips_ptr->y1);
+ bottom = max_t(int, bottom, (int)clips_ptr->y2);
+ }
+
+ width = right - left;
+ height = bottom - top;
+ drawable_rect.left = left;
+ drawable_rect.right = right;
+ drawable_rect.top = top;
+ drawable_rect.bottom = bottom;
+ ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect,
+ &release);
+ if (ret)
+ return;
+
+ ret = qxl_bo_kmap(bo, (void **)&surface_base);
+ if (ret)
+ goto out_unref;
+
+ ret = qxl_image_create(qdev, release, &image_bo, surface_base,
+ left, top, width, height, depth, stride);
+ qxl_bo_kunmap(bo);
+ if (ret)
+ goto out_unref;
+
+ rects = drawable_set_clipping(qdev, drawable, num_clips, &clips_bo, release);
+ if (!rects) {
+ qxl_bo_unref(&image_bo);
+ goto out_unref;
+ }
+ drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+
+ drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
+ drawable->clip.data = qxl_bo_physical_address(qdev,
+ clips_bo, 0);
+ qxl_release_add_res(qdev, release, clips_bo);
+
+ drawable->u.copy.src_area.top = 0;
+ drawable->u.copy.src_area.bottom = height;
+ drawable->u.copy.src_area.left = 0;
+ drawable->u.copy.src_area.right = width;
+
+ drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT;
+ drawable->u.copy.scale_mode = 0;
+ drawable->u.copy.mask.flags = 0;
+ drawable->u.copy.mask.pos.x = 0;
+ drawable->u.copy.mask.pos.y = 0;
+ drawable->u.copy.mask.bitmap = 0;
+
+ drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, image_bo, 0);
+ qxl_release_unmap(qdev, release, &drawable->release_info);
+ qxl_release_add_res(qdev, release, image_bo);
+ qxl_bo_unreserve(image_bo);
+ qxl_bo_unref(&image_bo);
+ clips_ptr = clips;
+ for (i = 0; i < num_clips; i++, clips_ptr += inc) {
+ rects[i].left = clips_ptr->x1;
+ rects[i].right = clips_ptr->x2;
+ rects[i].top = clips_ptr->y1;
+ rects[i].bottom = clips_ptr->y2;
+ }
+ qxl_bo_kunmap(clips_bo);
+ qxl_bo_unreserve(clips_bo);
+ qxl_bo_unref(&clips_bo);
+
+ qxl_fence_releaseable(qdev, release);
+ qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+ qxl_release_unreserve(qdev, release);
+ return;
+
+out_unref:
+ qxl_release_unreserve(qdev, release);
+ qxl_release_free(qdev, release);
+}
+
+void qxl_draw_copyarea(struct qxl_device *qdev,
+ u32 width, u32 height,
+ u32 sx, u32 sy,
+ u32 dx, u32 dy)
+{
+ struct qxl_drawable *drawable;
+ struct qxl_rect rect;
+ struct qxl_release *release;
+ int ret;
+
+ rect.left = dx;
+ rect.top = dy;
+ rect.right = dx + width;
+ rect.bottom = dy + height;
+ ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, &release);
+ if (ret)
+ return;
+
+ drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+ drawable->u.copy_bits.src_pos.x = sx;
+ drawable->u.copy_bits.src_pos.y = sy;
+
+ qxl_release_unmap(qdev, release, &drawable->release_info);
+ qxl_fence_releaseable(qdev, release);
+ qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+ qxl_release_unreserve(qdev, release);
+}
+
+void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
+{
+ struct qxl_device *qdev = qxl_draw_fill_rec->qdev;
+ struct qxl_rect rect = qxl_draw_fill_rec->rect;
+ uint32_t color = qxl_draw_fill_rec->color;
+ uint16_t rop = qxl_draw_fill_rec->rop;
+ struct qxl_drawable *drawable;
+ struct qxl_release *release;
+ int ret;
+
+ ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, &release);
+ if (ret)
+ return;
+
+ drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+ drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID;
+ drawable->u.fill.brush.u.color = color;
+ drawable->u.fill.rop_descriptor = rop;
+ drawable->u.fill.mask.flags = 0;
+ drawable->u.fill.mask.pos.x = 0;
+ drawable->u.fill.mask.pos.y = 0;
+ drawable->u.fill.mask.bitmap = 0;
+
+ qxl_release_unmap(qdev, release, &drawable->release_info);
+ qxl_fence_releaseable(qdev, release);
+ qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+ qxl_release_unreserve(qdev, release);
+}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
new file mode 100644
index 0000000..aa291d8
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -0,0 +1,145 @@
+/* vim: set ts=8 sw=8 tw=78 ai noexpandtab */
+/* qxl_drv.c -- QXL driver -*- linux-c -*-
+ *
+ * Copyright 2011 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlie@redhat.com>
+ * Alon Levy <alevy@redhat.com>
+ */
+
+#include <linux/module.h>
+#include <linux/console.h>
+
+#include "drmP.h"
+#include "drm/drm.h"
+
+#include "qxl_drv.h"
+
+extern int qxl_max_ioctls;
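+
+/* the QXL PCI device (1b36:0100), whether exposed as a VGA-class or as a
+ * secondary (non-VGA) display controller */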
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+ { 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8,
+ 0xffff00, 0 },
+ { 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_OTHER << 8,
+ 0xffff00, 0 },
+ { 0, 0, 0 },
+};
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static int qxl_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, qxl_modeset, int, 0400);
+
+static struct drm_driver qxl_driver;
+static struct pci_driver qxl_pci_driver;
+
+static int
+qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ if (pdev->revision < 4) {
+ DRM_ERROR("qxl too old, doesn't support client_monitors_config,"
+ " use xf86-video-qxl in user mode");
+ return -EINVAL; /* TODO: ENODEV ? */
+ }
+ return drm_get_pci_dev(pdev, ent, &qxl_driver);
+}
+
+static void
+qxl_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static struct pci_driver qxl_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = qxl_pci_probe,
+ .remove = qxl_pci_remove,
+};
+
+static const struct file_operations qxl_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .mmap = qxl_mmap,
+};
+
+static struct drm_driver qxl_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET |
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
+ .dev_priv_size = 0,
+ .load = qxl_driver_load,
+ .unload = qxl_driver_unload,
+
+ .dumb_create = qxl_mode_dumb_create,
+ .dumb_map_offset = qxl_mode_dumb_mmap,
+ .dumb_destroy = qxl_mode_dumb_destroy,
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = qxl_debugfs_init,
+ .debugfs_cleanup = qxl_debugfs_takedown,
+#endif
+ .gem_init_object = qxl_gem_object_init,
+ .gem_free_object = qxl_gem_object_free,
+ .gem_open_object = qxl_gem_object_open,
+ .gem_close_object = qxl_gem_object_close,
+ .fops = &qxl_fops,
+ .ioctls = qxl_ioctls,
+ .irq_handler = qxl_irq_handler,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = 0,
+ .minor = 1,
+ .patchlevel = 0,
+};
+
+static int __init qxl_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && qxl_modeset == -1)
+ return -EINVAL;
+#endif
+
+ if (qxl_modeset == 0)
+ return -EINVAL;
+ qxl_driver.num_ioctls = qxl_max_ioctls;
+ return drm_pci_init(&qxl_driver, &qxl_pci_driver);
+}
+
+static void __exit qxl_exit(void)
+{
+ drm_pci_exit(&qxl_driver, &qxl_pci_driver);
+}
+
+module_init(qxl_init);
+module_exit(qxl_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
new file mode 100644
index 0000000..52b582c
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -0,0 +1,566 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alon Levy
+ */
+
+
+#ifndef QXL_DRV_H
+#define QXL_DRV_H
+
+/*
+ * Definitions taken from spice-protocol, plus kernel driver specific bits.
+ */
+
+#include <linux/workqueue.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+
+#include "drmP.h"
+#include "drm_crtc.h"
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+
+#include <drm/qxl_drm.h>
+#include "qxl_dev.h"
+
+#define DRIVER_AUTHOR "Dave Airlie"
+
+#define DRIVER_NAME "qxl"
+#define DRIVER_DESC "RH QXL"
+#define DRIVER_DATE "20120117"
+
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 1
+#define DRIVER_PATCHLEVEL 0
+
+#define QXL_NUM_OUTPUTS 1
+
+#define QXL_DEBUGFS_MAX_COMPONENTS 32
+
+extern int qxl_log_level;
+
+enum {
+ QXL_INFO_LEVEL = 1,
+ QXL_DEBUG_LEVEL = 2,
+};
+
+#define QXL_INFO(qdev, fmt, ...) do { \
+ if (qxl_log_level >= QXL_INFO_LEVEL) { \
+ qxl_io_log(qdev, fmt, __VA_ARGS__); \
+ } \
+ } while (0)
+#define QXL_DEBUG(qdev, fmt, ...) do { \
+ if (qxl_log_level >= QXL_DEBUG_LEVEL) { \
+ qxl_io_log(qdev, fmt, __VA_ARGS__); \
+ } \
+ } while (0)
+#define QXL_INFO_ONCE(qdev, fmt, ...) do { \
+ static int done; \
+ if (!done) { \
+ done = 1; \
+ QXL_INFO(qdev, fmt, __VA_ARGS__); \
+ } \
+ } while (0)
+
+#define DRM_FILE_OFFSET 0x100000000ULL
+#define DRM_FILE_PAGE_OFFSET (DRM_FILE_OFFSET >> PAGE_SHIFT)
+
+#define QXL_INTERRUPT_MASK (\
+ QXL_INTERRUPT_DISPLAY |\
+ QXL_INTERRUPT_CURSOR |\
+ QXL_INTERRUPT_IO_CMD |\
+ QXL_INTERRUPT_CLIENT_MONITORS_CONFIG)
+
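+/* per-BO fence bookkeeping: the set of release ids the device has not yet
+ * returned on the release ring for this BO */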
+struct qxl_fence {
+ struct qxl_device *qdev;
+ uint32_t num_active_releases;
+ uint32_t *release_ids;
+ struct radix_tree_root tree;
+};
+
+struct qxl_bo {
+ /* Protected by gem.mutex */
+ struct list_head list;
+ /* Protected by tbo.reserved */
+ u32 placements[3];
+ struct ttm_placement placement;
+ struct ttm_buffer_object tbo;
+ struct ttm_bo_kmap_obj kmap;
+ unsigned pin_count;
+ void *kptr;
+ int type;
+ /* Constant after initialization */
+ struct drm_gem_object gem_base;
+ bool is_primary; /* is this now a primary surface */
+ bool hw_surf_alloc;
+ struct qxl_surface surf;
+ uint32_t surface_id;
+ struct qxl_fence fence; /* per bo fence - list of releases */
+ struct qxl_release *surf_create;
+ atomic_t reserve_count;
+};
+#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)
+
+struct qxl_gem {
+ struct mutex mutex;
+ struct list_head objects;
+};
+
+struct qxl_bo_list {
+ struct list_head lhead;
+ struct qxl_bo *bo;
+};
+
+struct qxl_reloc_list {
+ struct list_head bos;
+};
+
+struct qxl_crtc {
+ struct drm_crtc base;
+ int cur_x;
+ int cur_y;
+};
+
+struct qxl_output {
+ int index;
+ struct drm_connector base;
+ struct drm_encoder enc;
+};
+
+struct qxl_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+#define to_qxl_crtc(x) container_of(x, struct qxl_crtc, base)
+#define drm_connector_to_qxl_output(x) container_of(x, struct qxl_output, base)
+#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, base)
+#define to_qxl_framebuffer(x) container_of(x, struct qxl_framebuffer, base)
+
+struct qxl_mman {
+ struct ttm_bo_global_ref bo_global_ref;
+ struct drm_global_reference mem_global_ref;
+ bool mem_global_referenced;
+ struct ttm_bo_device bdev;
+};
+
+struct qxl_mode_info {
+ int num_modes;
+ struct qxl_mode *modes;
+ bool mode_config_initialized;
+
+ /* pointer to fbdev info structure */
+ struct qxl_fbdev *qfbdev;
+};
+
+
+struct qxl_memslot {
+ uint8_t generation;
+ uint64_t start_phys_addr;
+ uint64_t end_phys_addr;
+ uint64_t high_bits;
+};
+
+enum {
+ QXL_RELEASE_DRAWABLE,
+ QXL_RELEASE_SURFACE_CMD,
+ QXL_RELEASE_CURSOR_CMD,
+};
+
+/* kernel-side bookkeeping for a command handed to the device, kept distinct
+ * from the device-visible qxl_release_info in spice-protocol/qxl_dev.h; the
+ * device hands the release id back on the release ring when it is done with
+ * the command */
+#define QXL_MAX_RES 96
+struct qxl_release {
+ int id;
+ int type;
+ int bo_count;
+ uint32_t release_offset;
+ uint32_t surface_release_id;
+ struct qxl_bo *bos[QXL_MAX_RES];
+};
+
+struct qxl_fb_image {
+ struct qxl_device *qdev;
+ uint32_t pseudo_palette[16];
+ struct fb_image fb_image;
+ uint32_t visual;
+};
+
+struct qxl_draw_fill {
+ struct qxl_device *qdev;
+ struct qxl_rect rect;
+ uint32_t color;
+ uint16_t rop;
+};
+
+/*
+ * Debugfs
+ */
+struct qxl_debugfs {
+ struct drm_info_list *files;
+ unsigned num_files;
+};
+
+int qxl_debugfs_add_files(struct qxl_device *rdev,
+ struct drm_info_list *files,
+ unsigned nfiles);
+int qxl_debugfs_fence_init(struct qxl_device *rdev);
+void qxl_debugfs_remove_files(struct qxl_device *qdev);
+
+struct qxl_device;
+
+struct qxl_device {
+ struct device *dev;
+ struct drm_device *ddev;
+ struct pci_dev *pdev;
+ unsigned long flags;
+
+ resource_size_t vram_base, vram_size;
+ resource_size_t surfaceram_base, surfaceram_size;
+ resource_size_t rom_base, rom_size;
+ struct qxl_rom *rom;
+
+ struct qxl_mode *modes;
+ struct qxl_bo *monitors_config_bo;
+ struct qxl_monitors_config *monitors_config;
+
+ /* last received client_monitors_config */
+ struct qxl_monitors_config *client_monitors_config;
+
+ int io_base;
+ void *ram;
+ struct qxl_mman mman;
+ struct qxl_gem gem;
+ struct qxl_mode_info mode_info;
+
+ /*
+ * last created framebuffer with fb_create
+ * only used by debugfs dumbppm
+ */
+ struct qxl_framebuffer *active_user_framebuffer;
+
+ struct fb_info *fbdev_info;
+ struct qxl_framebuffer *fbdev_qfb;
+ void *ram_physical;
+
+ struct qxl_ring *release_ring;
+ struct qxl_ring *command_ring;
+ struct qxl_ring *cursor_ring;
+
+ struct qxl_ram_header *ram_header;
+ bool mode_set;
+
+ bool primary_created;
+
+ struct qxl_memslot *mem_slots;
+ uint8_t n_mem_slots;
+
+ uint8_t main_mem_slot;
+ uint8_t surfaces_mem_slot;
+ uint8_t slot_id_bits;
+ uint8_t slot_gen_bits;
+ uint64_t va_slot_mask;
+
+ struct idr release_idr;
+ spinlock_t release_idr_lock;
+ struct mutex async_io_mutex;
+ unsigned int last_sent_io_cmd;
+
+ /* interrupt handling */
+ atomic_t irq_received;
+ atomic_t irq_received_display;
+ atomic_t irq_received_cursor;
+ atomic_t irq_received_io_cmd;
+ unsigned irq_received_error;
+ wait_queue_head_t display_event;
+ wait_queue_head_t cursor_event;
+ wait_queue_head_t io_cmd_event;
+ struct work_struct client_monitors_config_work;
+
+ /* debugfs */
+ struct qxl_debugfs debugfs[QXL_DEBUGFS_MAX_COMPONENTS];
+ unsigned debugfs_count;
+
+ struct mutex update_area_mutex;
+
+ struct idr surf_id_idr;
+ spinlock_t surf_id_idr_lock;
+ int last_alloced_surf_id;
+
+ struct mutex surf_evict_mutex;
+ struct io_mapping *vram_mapping;
+ struct io_mapping *surface_mapping;
+
+ /* */
+ struct mutex release_mutex;
+ struct qxl_bo *current_release_bo[3];
+ int current_release_bo_offset[3];
+
+ struct workqueue_struct *gc_queue;
+ struct work_struct gc_work;
+
+};
+
+/* forward declaration so the QXL_INFO/QXL_DEBUG macros can call qxl_io_log */
+void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...);
+
+extern struct drm_ioctl_desc qxl_ioctls[];
+extern int qxl_max_ioctls;
+
+int qxl_driver_load(struct drm_device *dev, unsigned long flags);
+int qxl_driver_unload(struct drm_device *dev);
+
+int qxl_modeset_init(struct qxl_device *qdev);
+void qxl_modeset_fini(struct qxl_device *qdev);
+
+int qxl_bo_init(struct qxl_device *qdev);
+void qxl_bo_fini(struct qxl_device *qdev);
+
+struct qxl_ring *qxl_ring_create(struct qxl_ring_header *header,
+ int element_size,
+ int n_elements,
+ int prod_notify,
+ bool set_prod_notify,
+ wait_queue_head_t *push_event);
+void qxl_ring_free(struct qxl_ring *ring);
+
+static inline void *
+qxl_fb_virtual_address(struct qxl_device *qdev, unsigned long physical)
+{
+ QXL_INFO(qdev, "not implemented (%lu)\n", physical);
+	return NULL;
+}
+
+static inline uint64_t
+qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo,
+ unsigned long offset)
+{
+ int slot_id = bo->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
+ struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);
+
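+	/* high_bits encodes the memslot id and generation in the top bits of
+	 * the address, which is the form the device expects */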
+ /* TODO - need to hold one of the locks to read tbo.offset */
+ return slot->high_bits | (bo->tbo.offset + offset);
+}
+
+/* qxl_fb.c */
+#define QXLFB_CONN_LIMIT 1
+
+int qxl_fbdev_init(struct qxl_device *qdev);
+void qxl_fbdev_fini(struct qxl_device *qdev);
+int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
+ struct drm_file *file_priv,
+ uint32_t *handle);
+
+/* qxl_display.c */
+int
+qxl_framebuffer_init(struct drm_device *dev,
+ struct qxl_framebuffer *rfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+void qxl_display_read_client_monitors_config(struct qxl_device *qdev);
+void qxl_send_monitors_config(struct qxl_device *qdev);
+
+/* used by qxl_debugfs only */
+void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev);
+void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count);
+
+/* qxl_gem.c */
+int qxl_gem_init(struct qxl_device *qdev);
+void qxl_gem_fini(struct qxl_device *qdev);
+int qxl_gem_object_create(struct qxl_device *qdev, int size,
+ int alignment, int initial_domain,
+ bool discardable, bool kernel,
+ struct qxl_surface *surf,
+ struct drm_gem_object **obj);
+int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
+ uint64_t *gpu_addr);
+void qxl_gem_object_unpin(struct drm_gem_object *obj);
+int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
+ struct drm_file *file_priv,
+ u32 domain,
+ size_t size,
+ struct qxl_surface *surf,
+ struct qxl_bo **qobj,
+ uint32_t *handle);
+int qxl_gem_object_init(struct drm_gem_object *obj);
+void qxl_gem_object_free(struct drm_gem_object *gobj);
+int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
+void qxl_gem_object_close(struct drm_gem_object *obj,
+ struct drm_file *file_priv);
+void qxl_bo_force_delete(struct qxl_device *qdev);
+int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
+
+/* qxl_dumb.c */
+int qxl_mode_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int qxl_mode_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle);
+int qxl_mode_dumb_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p);
+
+
+/* qxl ttm */
+int qxl_ttm_init(struct qxl_device *qdev);
+void qxl_ttm_fini(struct qxl_device *qdev);
+int qxl_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* qxl image */
+
+int qxl_image_create(struct qxl_device *qdev,
+ struct qxl_release *release,
+ struct qxl_bo **image_bo,
+ const uint8_t *data,
+ int x, int y, int width, int height,
+ int depth, int stride);
+void qxl_update_screen(struct qxl_device *qxl);
+
+/* qxl io operations (qxl_cmd.c) */
+
+void qxl_io_create_primary(struct qxl_device *qdev,
+ unsigned width, unsigned height, unsigned offset,
+ struct qxl_bo *bo);
+void qxl_io_destroy_primary(struct qxl_device *qdev);
+void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id);
+void qxl_io_notify_oom(struct qxl_device *qdev);
+
+int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
+ const struct qxl_rect *area);
+
+void qxl_io_reset(struct qxl_device *qdev);
+void qxl_io_monitors_config(struct qxl_device *qdev);
+int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible);
+void qxl_io_flush_release(struct qxl_device *qdev);
+void qxl_io_flush_surfaces(struct qxl_device *qdev);
+
+int qxl_release_reserve(struct qxl_device *qdev,
+ struct qxl_release *release, bool no_wait);
+void qxl_release_unreserve(struct qxl_device *qdev,
+ struct qxl_release *release);
+union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
+ struct qxl_release *release);
+void qxl_release_unmap(struct qxl_device *qdev,
+ struct qxl_release *release,
+ union qxl_release_info *info);
+/*
+ * qxl_bo_add_resource.
+ *
+ */
+void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource);
+
+int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
+ enum qxl_surface_cmd_type surface_cmd_type,
+ struct qxl_release *create_rel,
+ struct qxl_release **release);
+int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
+ int type, struct qxl_release **release,
+ struct qxl_bo **rbo);
+int qxl_fence_releaseable(struct qxl_device *qdev,
+ struct qxl_release *release);
+int
+qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
+ uint32_t type, bool interruptible);
+int
+qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
+ uint32_t type, bool interruptible);
+int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
+ struct qxl_bo **_bo);
+/* qxl drawing commands */
+
+void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
+ int stride /* filled in if 0 */);
+
+void qxl_draw_dirty_fb(struct qxl_device *qdev,
+ struct qxl_framebuffer *qxl_fb,
+ struct qxl_bo *bo,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips, int inc);
+
+void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec);
+
+void qxl_draw_copyarea(struct qxl_device *qdev,
+ u32 width, u32 height,
+ u32 sx, u32 sy,
+ u32 dx, u32 dy);
+
+uint64_t
+qxl_release_alloc(struct qxl_device *qdev, int type,
+ struct qxl_release **ret);
+
+void qxl_release_free(struct qxl_device *qdev,
+ struct qxl_release *release);
+void qxl_release_add_res(struct qxl_device *qdev,
+ struct qxl_release *release,
+ struct qxl_bo *bo);
+/* used by qxl_debugfs_release */
+struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
+ uint64_t id);
+
+bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush);
+int qxl_garbage_collect(struct qxl_device *qdev);
+
+/* debugfs */
+
+int qxl_debugfs_init(struct drm_minor *minor);
+void qxl_debugfs_takedown(struct drm_minor *minor);
+
+/* qxl_irq.c */
+int qxl_irq_init(struct qxl_device *qdev);
+irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS);
+
+/* qxl_fb.c */
+int qxl_fb_init(struct qxl_device *qdev);
+
+int qxl_debugfs_add_files(struct qxl_device *qdev,
+ struct drm_info_list *files,
+ unsigned nfiles);
+
+int qxl_surface_id_alloc(struct qxl_device *qdev,
+ struct qxl_bo *surf);
+void qxl_surface_id_dealloc(struct qxl_device *qdev,
+ uint32_t surface_id);
+int qxl_hw_surface_alloc(struct qxl_device *qdev,
+ struct qxl_bo *surf,
+ struct ttm_mem_reg *mem);
+int qxl_hw_surface_dealloc(struct qxl_device *qdev,
+ struct qxl_bo *surf);
+
+int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo);
+
+struct qxl_drv_surface *
+qxl_surface_lookup(struct drm_device *dev, int surface_id);
+void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing);
+int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
+
+/* qxl_fence.c */
+int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id);
+int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id);
+int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence);
+void qxl_fence_fini(struct qxl_fence *qfence);
+
+#endif
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
new file mode 100644
index 0000000..847c4ee
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alon Levy
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+/* dumb ioctls implementation */
+
+int qxl_mode_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct qxl_device *qdev = dev->dev_private;
+ struct qxl_bo *qobj;
+ uint32_t handle;
+ int r;
+ struct qxl_surface surf;
+ uint32_t pitch, format;
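+
+	/* only 16 and 32 bpp are accepted below, so (bpp + 1) / 8 yields the
+	 * correct bytes per pixel */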
+ pitch = args->width * ((args->bpp + 1) / 8);
+ args->size = pitch * args->height;
+ args->size = ALIGN(args->size, PAGE_SIZE);
+
+ switch (args->bpp) {
+ case 16:
+ format = SPICE_SURFACE_FMT_16_565;
+ break;
+ case 32:
+ format = SPICE_SURFACE_FMT_32_xRGB;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ surf.width = args->width;
+ surf.height = args->height;
+ surf.stride = pitch;
+ surf.format = format;
+ r = qxl_gem_object_create_with_handle(qdev, file_priv,
+ QXL_GEM_DOMAIN_VRAM,
+ args->size, &surf, &qobj,
+ &handle);
+ if (r)
+ return r;
+ args->pitch = pitch;
+ args->handle = handle;
+ return 0;
+}
+
+int qxl_mode_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file_priv, handle);
+}
+
+int qxl_mode_dumb_mmap(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p)
+{
+ struct drm_gem_object *gobj;
+ struct qxl_bo *qobj;
+
+ BUG_ON(!offset_p);
+ gobj = drm_gem_object_lookup(dev, file_priv, handle);
+ if (gobj == NULL)
+ return -ENOENT;
+ qobj = gem_to_qxl_bo(gobj);
+ *offset_p = qxl_bo_mmap_offset(qobj);
+ drm_gem_object_unreference_unlocked(gobj);
+ return 0;
+}
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
new file mode 100644
index 0000000..b3c5127
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -0,0 +1,567 @@
+/*
+ * Copyright © 2013 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * David Airlie
+ */
+#include <linux/module.h>
+#include <linux/fb.h>
+
+#include "drmP.h"
+#include "drm/drm.h"
+#include "drm/drm_crtc.h"
+#include "drm/drm_crtc_helper.h"
+#include "qxl_drv.h"
+
+#include "qxl_object.h"
+#include "drm_fb_helper.h"
+
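+/* deferred-io flush interval: coalesce fbdev dirties and push them to the
+ * device roughly 30 times per second */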
+#define QXL_DIRTY_DELAY (HZ / 30)
+
+struct qxl_fbdev {
+ struct drm_fb_helper helper;
+ struct qxl_framebuffer qfb;
+ struct list_head fbdev_list;
+ struct qxl_device *qdev;
+
+ void *shadow;
+ int size;
+
+ /* dirty memory logging */
+ struct {
+ spinlock_t lock;
+ bool active;
+ unsigned x1;
+ unsigned y1;
+ unsigned x2;
+ unsigned y2;
+ } dirty;
+};
+
+static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
+ struct qxl_device *qdev, struct fb_info *info,
+ const struct fb_image *image)
+{
+ qxl_fb_image->qdev = qdev;
+ if (info) {
+ qxl_fb_image->visual = info->fix.visual;
+ if (qxl_fb_image->visual == FB_VISUAL_TRUECOLOR ||
+ qxl_fb_image->visual == FB_VISUAL_DIRECTCOLOR)
+ memcpy(&qxl_fb_image->pseudo_palette,
+ info->pseudo_palette,
+ sizeof(qxl_fb_image->pseudo_palette));
+ } else {
+ /* fallback */
+ if (image->depth == 1)
+ qxl_fb_image->visual = FB_VISUAL_MONO10;
+ else
+ qxl_fb_image->visual = FB_VISUAL_DIRECTCOLOR;
+ }
+ if (image) {
+ memcpy(&qxl_fb_image->fb_image, image,
+ sizeof(qxl_fb_image->fb_image));
+ }
+}
+
+static void qxl_fb_dirty_flush(struct fb_info *info)
+{
+ struct qxl_fbdev *qfbdev = info->par;
+ struct qxl_device *qdev = qfbdev->qdev;
+ struct qxl_fb_image qxl_fb_image;
+ struct fb_image *image = &qxl_fb_image.fb_image;
+ u32 x1, x2, y1, y2;
+
+ /* TODO: hard coding 32 bpp */
+ int stride = qfbdev->qfb.base.pitches[0] * 4;
+
+ x1 = qfbdev->dirty.x1;
+ x2 = qfbdev->dirty.x2;
+ y1 = qfbdev->dirty.y1;
+ y2 = qfbdev->dirty.y2;
+ /*
+	 * we are drawing out of the shadow buffer at qfbdev->shadow
+ */
+ qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", x1, x2, y1, y2);
+ image->dx = x1;
+ image->dy = y1;
+ image->width = x2 - x1;
+ image->height = y2 - y1;
+ image->fg_color = 0xffffffff; /* unused, just to avoid uninitialized
+ warnings */
+ image->bg_color = 0;
+ image->depth = 32; /* TODO: take from somewhere? */
+ image->cmap.start = 0;
+ image->cmap.len = 0;
+ image->cmap.red = NULL;
+ image->cmap.green = NULL;
+ image->cmap.blue = NULL;
+ image->cmap.transp = NULL;
+ image->data = qfbdev->shadow + (x1 * 4) + (stride * y1);
+
+ qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
+ qxl_draw_opaque_fb(&qxl_fb_image, stride);
+ qfbdev->dirty.x1 = 0;
+ qfbdev->dirty.x2 = 0;
+ qfbdev->dirty.y1 = 0;
+ qfbdev->dirty.y2 = 0;
+}
+
+static void qxl_deferred_io(struct fb_info *info,
+ struct list_head *pagelist)
+{
+ struct qxl_fbdev *qfbdev = info->par;
+ unsigned long start, end, min, max;
+ struct page *page;
+ int y1, y2;
+
+ min = ULONG_MAX;
+ max = 0;
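+	/* compute the byte span covered by the dirtied pages; it is converted
+	 * to whole scanlines below */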
+ list_for_each_entry(page, pagelist, lru) {
+ start = page->index << PAGE_SHIFT;
+ end = start + PAGE_SIZE - 1;
+ min = min(min, start);
+ max = max(max, end);
+ }
+
+ if (min < max) {
+ y1 = min / info->fix.line_length;
+ y2 = (max / info->fix.line_length) + 1;
+
+ /* TODO: add spin lock? */
+ /* spin_lock_irqsave(&qfbdev->dirty.lock, flags); */
+ qfbdev->dirty.x1 = 0;
+ qfbdev->dirty.y1 = y1;
+ qfbdev->dirty.x2 = info->var.xres;
+ qfbdev->dirty.y2 = y2;
+ /* spin_unlock_irqrestore(&qfbdev->dirty.lock, flags); */
+ }
+
+ qxl_fb_dirty_flush(info);
+};
+
+
+static struct fb_deferred_io qxl_defio = {
+ .delay = QXL_DIRTY_DELAY,
+ .deferred_io = qxl_deferred_io,
+};
+
+static void qxl_fb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *fb_rect)
+{
+ struct qxl_fbdev *qfbdev = info->par;
+ struct qxl_device *qdev = qfbdev->qdev;
+ struct qxl_rect rect;
+ uint32_t color;
+ int x = fb_rect->dx;
+ int y = fb_rect->dy;
+ int width = fb_rect->width;
+ int height = fb_rect->height;
+ uint16_t rop;
+ struct qxl_draw_fill qxl_draw_fill_rec;
+
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+ color = ((u32 *) (info->pseudo_palette))[fb_rect->color];
+ else
+ color = fb_rect->color;
+ rect.left = x;
+ rect.right = x + width;
+ rect.top = y;
+ rect.bottom = y + height;
+ switch (fb_rect->rop) {
+ case ROP_XOR:
+ rop = SPICE_ROPD_OP_XOR;
+ break;
+ case ROP_COPY:
+ rop = SPICE_ROPD_OP_PUT;
+ break;
+ default:
+ pr_err("qxl_fb_fillrect(): unknown rop, "
+ "defaulting to SPICE_ROPD_OP_PUT\n");
+ rop = SPICE_ROPD_OP_PUT;
+ }
+ qxl_draw_fill_rec.qdev = qdev;
+ qxl_draw_fill_rec.rect = rect;
+ qxl_draw_fill_rec.color = color;
+ qxl_draw_fill_rec.rop = rop;
+ if (!drm_can_sleep()) {
+ qxl_io_log(qdev,
+ "%s: TODO use RCU, mysterious locks with spin_lock\n",
+ __func__);
+ return;
+ }
+ qxl_draw_fill(&qxl_draw_fill_rec);
+}
+
+static void qxl_fb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *region)
+{
+ struct qxl_fbdev *qfbdev = info->par;
+
+ qxl_draw_copyarea(qfbdev->qdev,
+ region->width, region->height,
+ region->sx, region->sy,
+ region->dx, region->dy);
+}
+
+static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image)
+{
+ qxl_draw_opaque_fb(qxl_fb_image, 0);
+}
+
+static void qxl_fb_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct qxl_fbdev *qfbdev = info->par;
+ struct qxl_device *qdev = qfbdev->qdev;
+ struct qxl_fb_image qxl_fb_image;
+
+ if (!drm_can_sleep()) {
+		/* we cannot do any ttm_bo allocation since that will fail on
+		 * ioremap_wc..__get_vm_area_node, so queue the work item
+		 * instead. This can happen from printk inside an interrupt
+		 * context, i.e.: smp_apic_timer_interrupt..check_cpu_stall */
+ qxl_io_log(qdev,
+ "%s: TODO use RCU, mysterious locks with spin_lock\n",
+ __func__);
+ return;
+ }
+
+ /* ensure proper order of rendering operations - TODO: must do this
+ * for everything. */
+ qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image);
+ qxl_fb_imageblit_safe(&qxl_fb_image);
+}
+
+int qxl_fb_init(struct qxl_device *qdev)
+{
+ return 0;
+}
+
+static struct fb_ops qxlfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
+ .fb_fillrect = qxl_fb_fillrect,
+ .fb_copyarea = qxl_fb_copyarea,
+ .fb_imageblit = qxl_fb_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj)
+{
+ struct qxl_bo *qbo = gem_to_qxl_bo(gobj);
+ int ret;
+
+ ret = qxl_bo_reserve(qbo, false);
+ if (likely(ret == 0)) {
+ qxl_bo_kunmap(qbo);
+ qxl_bo_unpin(qbo);
+ qxl_bo_unreserve(qbo);
+ }
+ drm_gem_object_unreference_unlocked(gobj);
+}
+
+int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
+ struct drm_file *file_priv,
+ uint32_t *handle)
+{
+ int r;
+ struct drm_gem_object *gobj = qdev->fbdev_qfb->obj;
+
+ BUG_ON(!gobj);
+ /* drm_get_handle_create adds a reference - good */
+ r = drm_gem_handle_create(file_priv, gobj, handle);
+ if (r)
+ return r;
+ return 0;
+}
+
+static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct qxl_device *qdev = qfbdev->qdev;
+ struct drm_gem_object *gobj = NULL;
+ struct qxl_bo *qbo = NULL;
+ int ret;
+ int aligned_size, size;
+ int height = mode_cmd->height;
+ int bpp;
+ int depth;
+
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth);
+
+ size = mode_cmd->pitches[0] * height;
+ aligned_size = ALIGN(size, PAGE_SIZE);
+	/* TODO: deallocate and reallocate surface0 for real. Hack to just
+	 * have a large enough surface0 for 1024x768 Xorg 32bpp mode */
+ ret = qxl_gem_object_create(qdev, aligned_size, 0,
+ QXL_GEM_DOMAIN_SURFACE,
+ false, /* is discardable */
+ false, /* is kernel (false means device) */
+ NULL,
+ &gobj);
+ if (ret) {
+ pr_err("failed to allocate framebuffer (%d)\n",
+ aligned_size);
+ return -ENOMEM;
+ }
+ qbo = gem_to_qxl_bo(gobj);
+
+ qbo->surf.width = mode_cmd->width;
+ qbo->surf.height = mode_cmd->height;
+ qbo->surf.stride = mode_cmd->pitches[0];
+ qbo->surf.format = SPICE_SURFACE_FMT_32_xRGB;
+ ret = qxl_bo_reserve(qbo, false);
+ if (unlikely(ret != 0))
+ goto out_unref;
+ ret = qxl_bo_pin(qbo, QXL_GEM_DOMAIN_SURFACE, NULL);
+ if (ret) {
+ qxl_bo_unreserve(qbo);
+ goto out_unref;
+ }
+ ret = qxl_bo_kmap(qbo, NULL);
+ qxl_bo_unreserve(qbo); /* unreserve, will be mmaped */
+ if (ret)
+ goto out_unref;
+
+ *gobj_p = gobj;
+ return 0;
+out_unref:
+ qxlfb_destroy_pinned_object(gobj);
+ *gobj_p = NULL;
+ return ret;
+}
+
+static int qxlfb_create(struct qxl_fbdev *qfbdev,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct qxl_device *qdev = qfbdev->qdev;
+ struct fb_info *info;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_gem_object *gobj = NULL;
+ struct qxl_bo *qbo = NULL;
+ struct device *device = &qdev->pdev->dev;
+ int ret;
+ int size;
+ int bpp = sizes->surface_bpp;
+ int depth = sizes->surface_depth;
+ void *shadow;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 1) / 8), 64);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+
+ ret = qxlfb_create_pinned_object(qfbdev, &mode_cmd, &gobj);
+ qbo = gem_to_qxl_bo(gobj);
+ QXL_INFO(qdev, "%s: %dx%d %d\n", __func__, mode_cmd.width,
+ mode_cmd.height, mode_cmd.pitches[0]);
+
+ shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height);
+ /* TODO: what's the usual response to memory allocation errors? */
+ BUG_ON(!shadow);
+ QXL_INFO(qdev,
+ "surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
+ qxl_bo_gpu_offset(qbo),
+ qxl_bo_mmap_offset(qbo),
+ qbo->kptr,
+ shadow);
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+
+ info = framebuffer_alloc(0, device);
+ if (info == NULL) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+
+ info->par = qfbdev;
+
+ qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj);
+
+ fb = &qfbdev->qfb.base;
+
+ /* setup helper with fb data */
+ qfbdev->helper.fb = fb;
+ qfbdev->helper.fbdev = info;
+ qfbdev->shadow = shadow;
+ strcpy(info->fix.id, "qxldrmfb");
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+
+ info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
+ info->fbops = &qxlfb_ops;
+
+ /*
+ * TODO: using gobj->size in various places in this function. Not sure
+ * what the difference between the different sizes is.
+ */
+ info->fix.smem_start = qdev->vram_base; /* TODO - correct? */
+ info->fix.smem_len = gobj->size;
+ info->screen_base = qfbdev->shadow;
+ info->screen_size = gobj->size;
+
+ drm_fb_helper_fill_var(info, &qfbdev->helper, sizes->fb_width,
+ sizes->fb_height);
+
+ /* setup aperture base/size for vesafb takeover */
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+ info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base;
+ info->apertures->ranges[0].size = qdev->vram_size;
+
+ info->fix.mmio_start = 0;
+ info->fix.mmio_len = 0;
+
+ if (info->screen_base == NULL) {
+ ret = -ENOSPC;
+ goto out_unref;
+ }
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+
+ info->fbdefio = &qxl_defio;
+ fb_deferred_io_init(info);
+
+ qdev->fbdev_info = info;
+ qdev->fbdev_qfb = &qfbdev->qfb;
+ DRM_INFO("fb mappable at 0x%lX, size %lu\n", info->fix.smem_start, (unsigned long)info->screen_size);
+ DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height);
+ return 0;
+
+out_unref:
+ if (qbo) {
+ ret = qxl_bo_reserve(qbo, false);
+ if (likely(ret == 0)) {
+ qxl_bo_kunmap(qbo);
+ qxl_bo_unpin(qbo);
+ qxl_bo_unreserve(qbo);
+ }
+ }
+ if (fb && ret) {
+ drm_gem_object_unreference(gobj);
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+ }
+ drm_gem_object_unreference(gobj);
+ return ret;
+}
+
+static int qxl_fb_find_or_create_single(
+ struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct qxl_fbdev *qfbdev = (struct qxl_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = qxlfb_create(qfbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
+static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
+{
+ struct fb_info *info;
+ struct qxl_framebuffer *qfb = &qfbdev->qfb;
+
+ if (qfbdev->helper.fbdev) {
+ info = qfbdev->helper.fbdev;
+
+ unregister_framebuffer(info);
+ framebuffer_release(info);
+ }
+ if (qfb->obj) {
+ qxlfb_destroy_pinned_object(qfb->obj);
+ qfb->obj = NULL;
+ }
+ drm_fb_helper_fini(&qfbdev->helper);
+ vfree(qfbdev->shadow);
+ drm_framebuffer_cleanup(&qfb->base);
+
+ return 0;
+}
+
+static struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
+ /* TODO
+ .gamma_set = qxl_crtc_fb_gamma_set,
+ .gamma_get = qxl_crtc_fb_gamma_get,
+ */
+ .fb_probe = qxl_fb_find_or_create_single,
+};
+
+int qxl_fbdev_init(struct qxl_device *qdev)
+{
+ struct qxl_fbdev *qfbdev;
+ int bpp_sel = 32; /* TODO: parameter from somewhere? */
+ int ret;
+
+ qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL);
+ if (!qfbdev)
+ return -ENOMEM;
+
+ qfbdev->qdev = qdev;
+ qdev->mode_info.qfbdev = qfbdev;
+ qfbdev->helper.funcs = &qxl_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
+ 1 /* num_crtc - QXL supports just 1 */,
+ QXLFB_CONN_LIMIT);
+ if (ret) {
+ kfree(qfbdev);
+ return ret;
+ }
+
+ drm_fb_helper_single_add_all_connectors(&qfbdev->helper);
+ drm_fb_helper_initial_config(&qfbdev->helper, bpp_sel);
+ return 0;
+}
+
+void qxl_fbdev_fini(struct qxl_device *qdev)
+{
+ if (!qdev->mode_info.qfbdev)
+ return;
+
+ qxl_fbdev_destroy(qdev->ddev, qdev->mode_info.qfbdev);
+ kfree(qdev->mode_info.qfbdev);
+ qdev->mode_info.qfbdev = NULL;
+}
+
+
diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c
new file mode 100644
index 0000000..63c6715
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_fence.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alon Levy
+ */
+
+
+#include "qxl_drv.h"
+
+/* QXL fencing-
+
+ When we submit operations to the GPU we pass a release reference to the GPU
+ with them; the release reference is then added to the release ring when
+ the GPU is finished with that particular operation and has removed it from
+ its tree.
+
+ So we can have multiple outstanding non-linear fences per object.
+
+ From a TTM POV we only care if the object has any outstanding releases on
+ it.
+
+ we wait until all outstanding releases are processed.
+
+ sync object is just a list of release ids that represent that fence on
+ that buffer.
+
+ we just add new releases onto the sync object attached to the object.
+
+ This currently uses a radix tree to store the list of release ids.
+
+ For some reason, every so often the qxl hw fails to release and things go wrong.
+*/
+
+
+int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id)
+{
+ struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
+
+ spin_lock(&bo->tbo.bdev->fence_lock);
+ radix_tree_insert(&qfence->tree, rel_id, qfence);
+ qfence->num_active_releases++;
+ spin_unlock(&bo->tbo.bdev->fence_lock);
+ return 0;
+}
+
+int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
+{
+ void *ret;
+ int retval = 0;
+ struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
+
+ spin_lock(&bo->tbo.bdev->fence_lock);
+
+ ret = radix_tree_delete(&qfence->tree, rel_id);
+ if (ret == qfence)
+ qfence->num_active_releases--;
+ else {
+ DRM_DEBUG("didn't find fence in radix tree for %d\n", rel_id);
+ retval = -ENOENT;
+ }
+ spin_unlock(&bo->tbo.bdev->fence_lock);
+ return retval;
+}
+
+
+int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence)
+{
+ qfence->qdev = qdev;
+ qfence->num_active_releases = 0;
+ INIT_RADIX_TREE(&qfence->tree, GFP_ATOMIC);
+ return 0;
+}
+
+void qxl_fence_fini(struct qxl_fence *qfence)
+{
+ kfree(qfence->release_ids);
+ qfence->num_active_releases = 0;
+}
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
new file mode 100644
index 0000000..a235693
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alon Levy
+ */
+
+#include "drmP.h"
+#include "drm/drm.h"
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+int qxl_gem_object_init(struct drm_gem_object *obj)
+{
+ /* we do nothing here */
+ return 0;
+}
+
+void qxl_gem_object_free(struct drm_gem_object *gobj)
+{
+ struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
+
+ if (qobj)
+ qxl_bo_unref(&qobj);
+}
+
+int qxl_gem_object_create(struct qxl_device *qdev, int size,
+ int alignment, int initial_domain,
+ bool discardable, bool kernel,
+ struct qxl_surface *surf,
+ struct drm_gem_object **obj)
+{
+ struct qxl_bo *qbo;
+ int r;
+
+ *obj = NULL;
+ /* At least align on page size */
+ if (alignment < PAGE_SIZE)
+ alignment = PAGE_SIZE;
+ r = qxl_bo_create(qdev, size, kernel, initial_domain, surf, &qbo);
+ if (r) {
+ if (r != -ERESTARTSYS)
+ DRM_ERROR(
+ "Failed to allocate GEM object (%d, %d, %u, %d)\n",
+ size, initial_domain, alignment, r);
+ return r;
+ }
+ *obj = &qbo->gem_base;
+
+ mutex_lock(&qdev->gem.mutex);
+ list_add_tail(&qbo->list, &qdev->gem.objects);
+ mutex_unlock(&qdev->gem.mutex);
+
+ return 0;
+}
+
+int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
+ struct drm_file *file_priv,
+ u32 domain,
+ size_t size,
+ struct qxl_surface *surf,
+ struct qxl_bo **qobj,
+ uint32_t *handle)
+{
+ struct drm_gem_object *gobj;
+ int r;
+
+ BUG_ON(!qobj);
+ BUG_ON(!handle);
+
+ r = qxl_gem_object_create(qdev, size, 0,
+ domain,
+ false, false, surf,
+ &gobj);
+ if (r)
+ return -ENOMEM;
+ r = drm_gem_handle_create(file_priv, gobj, handle);
+ if (r)
+ return r;
+ /* drop reference from allocate - handle holds it now */
+ *qobj = gem_to_qxl_bo(gobj);
+ drm_gem_object_unreference_unlocked(gobj);
+ return 0;
+}
+
+int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
+ uint64_t *gpu_addr)
+{
+ struct qxl_bo *qobj = obj->driver_private;
+ int r;
+
+ r = qxl_bo_reserve(qobj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = qxl_bo_pin(qobj, pin_domain, gpu_addr);
+ qxl_bo_unreserve(qobj);
+ return r;
+}
+
+void qxl_gem_object_unpin(struct drm_gem_object *obj)
+{
+ struct qxl_bo *qobj = obj->driver_private;
+ int r;
+
+ r = qxl_bo_reserve(qobj, false);
+ if (likely(r == 0)) {
+ qxl_bo_unpin(qobj);
+ qxl_bo_unreserve(qobj);
+ }
+}
+
+int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+{
+ return 0;
+}
+
+void qxl_gem_object_close(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
+{
+}
+
+int qxl_gem_init(struct qxl_device *qdev)
+{
+ INIT_LIST_HEAD(&qdev->gem.objects);
+ return 0;
+}
+
+void qxl_gem_fini(struct qxl_device *qdev)
+{
+ qxl_bo_force_delete(qdev);
+}
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
new file mode 100644
index 0000000..cf85620
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alon Levy
+ */
+
+#include <linux/gfp.h>
+#include <linux/slab.h>
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+static int
+qxl_image_create_helper(struct qxl_device *qdev,
+ struct qxl_release *release,
+ struct qxl_bo **image_bo,
+ const uint8_t *data,
+ int width, int height,
+ int depth, unsigned int hash,
+ int stride)
+{
+ struct qxl_image *image;
+ struct qxl_data_chunk *chunk;
+ int i;
+ int chunk_stride;
+ int linesize = width * depth / 8;
+ struct qxl_bo *chunk_bo;
+ int ret;
+ void *ptr;
+ /* Chunk */
+ /* FIXME: Check integer overflow */
+ /* TODO: variable number of chunks */
+ chunk_stride = stride; /* TODO: should use linesize, but it renders
+ wrong (check the bitmaps are sent correctly
+ first) */
+ ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride,
+ &chunk_bo);
+
+ ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
+ chunk = ptr;
+ chunk->data_size = height * chunk_stride;
+ chunk->prev_chunk = 0;
+ chunk->next_chunk = 0;
+ qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
+
+ {
+ void *k_data, *i_data;
+ int remain;
+ int page;
+ int size;
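+ /* two copy paths below: a straight linear copy when the source
+ stride matches the chunk stride, and a line-by-line copy that
+ handles page crossings when it does not */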
+ if (stride == linesize && chunk_stride == stride) {
+ remain = linesize * height;
+ page = 0;
+ i_data = (void *)data;
+
+ while (remain > 0) {
+ ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT);
+
+ if (page == 0) {
+ chunk = ptr;
+ k_data = chunk->data;
+ size = PAGE_SIZE - offsetof(struct qxl_data_chunk, data);
+ } else {
+ k_data = ptr;
+ size = PAGE_SIZE;
+ }
+ size = min(size, remain);
+
+ memcpy(k_data, i_data, size);
+
+ qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
+ i_data += size;
+ remain -= size;
+ page++;
+ }
+ } else {
+ unsigned page_base, page_offset, out_offset;
+ for (i = 0 ; i < height ; ++i) {
+ i_data = (void *)data + i * stride;
+ remain = linesize;
+ out_offset = offsetof(struct qxl_data_chunk, data) + i * chunk_stride;
+
+ while (remain > 0) {
+ page_base = out_offset & PAGE_MASK;
+ page_offset = offset_in_page(out_offset);
+
+ size = min((int)(PAGE_SIZE - page_offset), remain);
+
+ ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
+ k_data = ptr + page_offset;
+ memcpy(k_data, i_data, size);
+ qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
+ remain -= size;
+ i_data += size;
+ out_offset += size;
+ }
+ }
+ }
+ }
+
+
+ qxl_bo_kunmap(chunk_bo);
+
+ /* Image */
+ ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo);
+
+ ptr = qxl_bo_kmap_atomic_page(qdev, *image_bo, 0);
+ image = ptr;
+
+ image->descriptor.id = 0;
+ image->descriptor.type = SPICE_IMAGE_TYPE_BITMAP;
+
+ image->descriptor.flags = 0;
+ image->descriptor.width = width;
+ image->descriptor.height = height;
+
+ switch (depth) {
+ case 1:
+ /* TODO: BE? check by arch? */
+ image->u.bitmap.format = SPICE_BITMAP_FMT_1BIT_BE;
+ break;
+ case 24:
+ image->u.bitmap.format = SPICE_BITMAP_FMT_24BIT;
+ break;
+ case 32:
+ image->u.bitmap.format = SPICE_BITMAP_FMT_32BIT;
+ break;
+ default:
+ DRM_ERROR("unsupported image bit depth\n");
+ return -EINVAL; /* TODO: cleanup */
+ }
+ image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
+ image->u.bitmap.x = width;
+ image->u.bitmap.y = height;
+ image->u.bitmap.stride = chunk_stride;
+ image->u.bitmap.palette = 0;
+ image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);
+ qxl_release_add_res(qdev, release, chunk_bo);
+ qxl_bo_unreserve(chunk_bo);
+ qxl_bo_unref(&chunk_bo);
+
+ qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr);
+
+ return 0;
+}
+
+int qxl_image_create(struct qxl_device *qdev,
+ struct qxl_release *release,
+ struct qxl_bo **image_bo,
+ const uint8_t *data,
+ int x, int y, int width, int height,
+ int depth, int stride)
+{
+ data += y * stride + x * (depth / 8);
+ return qxl_image_create_helper(qdev, release, image_bo, data,
+ width, height, depth, 0, stride);
+}
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
new file mode 100644
index 0000000..04b64f9
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -0,0 +1,411 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alon Levy
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+/*
+ * TODO: allocating a new gem (in a qxl_bo) for each request.
+ * This is wasteful since bo's are page aligned.
+ */
+static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct qxl_device *qdev = dev->dev_private;
+ struct drm_qxl_alloc *qxl_alloc = data;
+ int ret;
+ struct qxl_bo *qobj;
+ uint32_t handle;
+ u32 domain = QXL_GEM_DOMAIN_VRAM;
+
+ if (qxl_alloc->size == 0) {
+ DRM_ERROR("invalid size %d\n", qxl_alloc->size);
+ return -EINVAL;
+ }
+ ret = qxl_gem_object_create_with_handle(qdev, file_priv,
+ domain,
+ qxl_alloc->size,
+ NULL,
+ &qobj, &handle);
+ if (ret) {
+ DRM_ERROR("%s: failed to create gem ret=%d\n",
+ __func__, ret);
+ return -ENOMEM;
+ }
+ qxl_alloc->handle = handle;
+ return 0;
+}
+
+static int qxl_map_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct qxl_device *qdev = dev->dev_private;
+ struct drm_qxl_map *qxl_map = data;
+
+ return qxl_mode_dumb_mmap(file_priv, qdev->ddev, qxl_map->handle,
+ &qxl_map->offset);
+}
+
+/*
+ * dst must be validated, i.e. the whole bo must be on vram/surface ram
+ * (right now all bo's are on vram).
+ * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
+ */
+static void
+apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
+ struct qxl_bo *src, uint64_t src_off)
+{
+ void *reloc_page;
+
+ reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
+ *(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
+ src, src_off);
+ qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
+}
+
+static void
+apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
+ struct qxl_bo *src)
+{
+ uint32_t id = 0;
+ void *reloc_page;
+
+ if (src && !src->is_primary)
+ id = src->surface_id;
+
+ reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
+ *(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id;
+ qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
+}
+
+/* return holding the reference to this object */
+static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
+ struct drm_file *file_priv, uint64_t handle,
+ struct qxl_reloc_list *reloc_list)
+{
+ struct drm_gem_object *gobj;
+ struct qxl_bo *qobj;
+ int ret;
+
+ gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
+ if (!gobj) {
+ DRM_ERROR("bad bo handle %lld\n", handle);
+ return NULL;
+ }
+ qobj = gem_to_qxl_bo(gobj);
+
+ ret = qxl_bo_list_add(reloc_list, qobj);
+ if (ret)
+ return NULL;
+
+ return qobj;
+}
+
+/*
+ * Usage of execbuffer:
+ * Relocations need to take into account the full QXLDrawable size.
+ * However, the command as passed from user space must *not* contain the initial
+ * QXLReleaseInfo struct (first XXX bytes)
+ */
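+/*
+ * Rough layout of each command bo as filled in below (illustrative):
+ *
+ *   [ union qxl_release_info | command bytes copied from user space ]
+ *
+ * which is why command_size is bounded by
+ * PAGE_SIZE - sizeof(union qxl_release_info).
+ */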
+static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct qxl_device *qdev = dev->dev_private;
+ struct drm_qxl_execbuffer *execbuffer = data;
+ struct drm_qxl_command user_cmd;
+ int cmd_num;
+ struct qxl_bo *reloc_src_bo;
+ struct qxl_bo *reloc_dst_bo;
+ struct drm_qxl_reloc reloc;
+ void *fb_cmd;
+ int i, ret;
+ struct qxl_reloc_list reloc_list;
+ int unwritten;
+ uint32_t reloc_dst_offset;
+ INIT_LIST_HEAD(&reloc_list.bos);
+
+ for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
+ struct qxl_release *release;
+ struct qxl_bo *cmd_bo;
+ int release_type;
+ struct drm_qxl_command *commands =
+ (struct drm_qxl_command *)execbuffer->commands;
+
+ if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
+ sizeof(user_cmd)))
+ return -EFAULT;
+ switch (user_cmd.type) {
+ case QXL_CMD_DRAW:
+ release_type = QXL_RELEASE_DRAWABLE;
+ break;
+ case QXL_CMD_SURFACE:
+ case QXL_CMD_CURSOR:
+ default:
+ DRM_DEBUG("Only draw commands in execbuffers\n");
+ return -EINVAL;
+ break;
+ }
+
+ if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info))
+ return -EINVAL;
+
+ ret = qxl_alloc_release_reserved(qdev,
+ sizeof(union qxl_release_info) +
+ user_cmd.command_size,
+ release_type,
+ &release,
+ &cmd_bo);
+ if (ret)
+ return ret;
+
+ /* TODO copy slow path code from i915 */
+ fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size);
+ qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
+ if (unwritten) {
+ DRM_ERROR("got unwritten %d\n", unwritten);
+ qxl_release_unreserve(qdev, release);
+ qxl_release_free(qdev, release);
+ return -EFAULT;
+ }
+
+ for (i = 0 ; i < user_cmd.relocs_num; ++i) {
+ if (DRM_COPY_FROM_USER(&reloc,
+ &((struct drm_qxl_reloc *)user_cmd.relocs)[i],
+ sizeof(reloc))) {
+ qxl_bo_list_unreserve(&reloc_list, true);
+ qxl_release_unreserve(qdev, release);
+ qxl_release_free(qdev, release);
+ return -EFAULT;
+ }
+
+ /* add the bos to the list of bos to validate -
+ need to validate first then process relocs? */
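+ /* a dst_handle of 0 means the reloc patches the command bo
+ itself, at release_offset + dst_offset */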
+ if (reloc.dst_handle) {
+ reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
+ reloc.dst_handle, &reloc_list);
+ if (!reloc_dst_bo) {
+ qxl_bo_list_unreserve(&reloc_list, true);
+ qxl_release_unreserve(qdev, release);
+ qxl_release_free(qdev, release);
+ return -EINVAL;
+ }
+ reloc_dst_offset = 0;
+ } else {
+ reloc_dst_bo = cmd_bo;
+ reloc_dst_offset = release->release_offset;
+ }
+
+ /* reserve and validate the reloc dst bo */
+ if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
+ reloc_src_bo =
+ qxlhw_handle_to_bo(qdev, file_priv,
+ reloc.src_handle, &reloc_list);
+ if (!reloc_src_bo) {
+ if (reloc_dst_bo != cmd_bo)
+ drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
+ qxl_bo_list_unreserve(&reloc_list, true);
+ qxl_release_unreserve(qdev, release);
+ qxl_release_free(qdev, release);
+ return -EINVAL;
+ }
+ } else
+ reloc_src_bo = NULL;
+ if (reloc.reloc_type == QXL_RELOC_TYPE_BO) {
+ apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset,
+ reloc_src_bo, reloc.src_offset);
+ } else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) {
+ apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo);
+ } else {
+ DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type);
+ return -EINVAL;
+ }
+
+ if (reloc_src_bo && reloc_src_bo != cmd_bo) {
+ qxl_release_add_res(qdev, release, reloc_src_bo);
+ drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base);
+ }
+
+ if (reloc_dst_bo != cmd_bo)
+ drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
+ }
+ qxl_fence_releaseable(qdev, release);
+
+ ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true);
+ if (ret == -ERESTARTSYS) {
+ qxl_release_unreserve(qdev, release);
+ qxl_release_free(qdev, release);
+ qxl_bo_list_unreserve(&reloc_list, true);
+ return ret;
+ }
+ qxl_release_unreserve(qdev, release);
+ }
+ qxl_bo_list_unreserve(&reloc_list, 0);
+ return 0;
+}
+
+static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct qxl_device *qdev = dev->dev_private;
+ struct drm_qxl_update_area *update_area = data;
+ struct qxl_rect area = {.left = update_area->left,
+ .top = update_area->top,
+ .right = update_area->right,
+ .bottom = update_area->bottom};
+ int ret;
+ struct drm_gem_object *gobj = NULL;
+ struct qxl_bo *qobj = NULL;
+
+ if (update_area->left >= update_area->right ||
+ update_area->top >= update_area->bottom)
+ return -EINVAL;
+
+ gobj = drm_gem_object_lookup(dev, file, update_area->handle);
+ if (gobj == NULL)
+ return -ENOENT;
+
+ qobj = gem_to_qxl_bo(gobj);
+
+ ret = qxl_bo_reserve(qobj, false);
+ if (ret)
+ goto out;
+
+ if (!qobj->pin_count) {
+ ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
+ true, false);
+ if (unlikely(ret))
+ goto out;
+ }
+
+ ret = qxl_bo_check_id(qdev, qobj);
+ if (ret)
+ goto out2;
+ if (!qobj->surface_id)
+ DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
+ ret = qxl_io_update_area(qdev, qobj, &area);
+
+out2:
+ qxl_bo_unreserve(qobj);
+
+out:
+ drm_gem_object_unreference_unlocked(gobj);
+ return ret;
+}
+
+static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct qxl_device *qdev = dev->dev_private;
+ struct drm_qxl_getparam *param = data;
+
+ switch (param->param) {
+ case QXL_PARAM_NUM_SURFACES:
+ param->value = qdev->rom->n_surfaces;
+ break;
+ case QXL_PARAM_MAX_RELOCS:
+ param->value = QXL_MAX_RES;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct qxl_device *qdev = dev->dev_private;
+ struct drm_qxl_clientcap *param = data;
+ int byte, idx;
+
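+ /* each capability is a single bit in the 58-byte
+ rom->client_capabilities array; index selects the byte and bit */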
+ byte = param->index / 8;
+ idx = param->index % 8;
+
+ if (qdev->pdev->revision < 4)
+ return -ENOSYS;
+
+ if (byte >= 58)
+ return -ENOSYS;
+
+ if (qdev->rom->client_capabilities[byte] & (1 << idx))
+ return 0;
+ return -ENOSYS;
+}
+
+static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct qxl_device *qdev = dev->dev_private;
+ struct drm_qxl_alloc_surf *param = data;
+ struct qxl_bo *qobj;
+ int handle;
+ int ret;
+ int size, actual_stride;
+ struct qxl_surface surf;
+
+ /* work out the size and allocate a bo with a handle */
+ actual_stride = param->stride < 0 ? -param->stride : param->stride;
+ size = actual_stride * param->height + actual_stride;
+
+ surf.format = param->format;
+ surf.width = param->width;
+ surf.height = param->height;
+ surf.stride = param->stride;
+ surf.data = 0;
+
+ ret = qxl_gem_object_create_with_handle(qdev, file,
+ QXL_GEM_DOMAIN_SURFACE,
+ size,
+ &surf,
+ &qobj, &handle);
+ if (ret) {
+ DRM_ERROR("%s: failed to create gem ret=%d\n",
+ __func__, ret);
+ return -ENOMEM;
+ } else
+ param->handle = handle;
+ return ret;
+}
+
+struct drm_ioctl_desc qxl_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
+ DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
+ DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
+ DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
+ DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
+ DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
+ DRM_AUTH|DRM_UNLOCKED),
+
+ DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
+ DRM_AUTH|DRM_UNLOCKED),
+};
+
+int qxl_max_ioctls = DRM_ARRAY_SIZE(qxl_ioctls);
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
new file mode 100644
index 0000000..21393dc
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alon Levy
+ */
+
+#include "qxl_drv.h"
+
+irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *) arg;
+ struct qxl_device *qdev = (struct qxl_device *)dev->dev_private;
+ uint32_t pending;
+
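+ /* atomically read and clear the interrupt bits shared with the device */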
+ pending = xchg(&qdev->ram_header->int_pending, 0);
+
+ atomic_inc(&qdev->irq_received);
+
+ if (pending & QXL_INTERRUPT_DISPLAY) {
+ atomic_inc(&qdev->irq_received_display);
+ wake_up_all(&qdev->display_event);
+ qxl_queue_garbage_collect(qdev, false);
+ }
+ if (pending & QXL_INTERRUPT_CURSOR) {
+ atomic_inc(&qdev->irq_received_cursor);
+ wake_up_all(&qdev->cursor_event);
+ }
+ if (pending & QXL_INTERRUPT_IO_CMD) {
+ atomic_inc(&qdev->irq_received_io_cmd);
+ wake_up_all(&qdev->io_cmd_event);
+ }
+ if (pending & QXL_INTERRUPT_ERROR) {
+ /* TODO: log it, reset device (only way to exit this condition)
+ * (do it a certain number of times, afterwards admit defeat,
+ * to avoid endless loops).
+ */
+ qdev->irq_received_error++;
+ qxl_io_log(qdev, "%s: driver is in bug mode.\n", __func__);
+ }
+ if (pending & QXL_INTERRUPT_CLIENT_MONITORS_CONFIG) {
+ qxl_io_log(qdev, "QXL_INTERRUPT_CLIENT_MONITORS_CONFIG\n");
+ schedule_work(&qdev->client_monitors_config_work);
+ }
+ qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
+ outb(0, qdev->io_base + QXL_IO_UPDATE_IRQ);
+ return IRQ_HANDLED;
+}
+
+static void qxl_client_monitors_config_work_func(struct work_struct *work)
+{
+ struct qxl_device *qdev = container_of(work, struct qxl_device,
+ client_monitors_config_work);
+
+ qxl_display_read_client_monitors_config(qdev);
+}
+
+int qxl_irq_init(struct qxl_device *qdev)
+{
+ int ret;
+
+ init_waitqueue_head(&qdev->display_event);
+ init_waitqueue_head(&qdev->cursor_event);
+ init_waitqueue_head(&qdev->io_cmd_event);
+ INIT_WORK(&qdev->client_monitors_config_work,
+ qxl_client_monitors_config_work_func);
+ atomic_set(&qdev->irq_received, 0);
+ atomic_set(&qdev->irq_received_display, 0);
+ atomic_set(&qdev->irq_received_cursor, 0);
+ atomic_set(&qdev->irq_received_io_cmd, 0);
+ qdev->irq_received_error = 0;
+ ret = drm_irq_install(qdev->ddev);
+ qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed installing irq: %d\n", ret);
+ return 1;
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
new file mode 100644
index 0000000..85127ed
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -0,0 +1,302 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alon Levy
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+#include <linux/io-mapping.h>
+
+int qxl_log_level;
+
+static void qxl_dump_mode(struct qxl_device *qdev, void *p)
+{
+ struct qxl_mode *m = p;
+ DRM_DEBUG_KMS("%d: %dx%d %d bits, stride %d, %dmm x %dmm, orientation %d\n",
+ m->id, m->x_res, m->y_res, m->bits, m->stride, m->x_mili,
+ m->y_mili, m->orientation);
+}
+
+static bool qxl_check_device(struct qxl_device *qdev)
+{
+ struct qxl_rom *rom = qdev->rom;
+ int mode_offset;
+ int i;
+
+ if (rom->magic != 0x4f525851) {
+ DRM_ERROR("bad rom signature %x\n", rom->magic);
+ return false;
+ }
+
+ DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
+ DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
+ rom->log_level);
+ DRM_INFO("Currently using mode #%d, list at 0x%x\n",
+ rom->mode, rom->modes_offset);
+ DRM_INFO("%d io pages at offset 0x%x\n",
+ rom->num_io_pages, rom->pages_offset);
+ DRM_INFO("%d byte draw area at offset 0x%x\n",
+ rom->surface0_area_size, rom->draw_area_offset);
+
+ qdev->vram_size = rom->surface0_area_size;
+ DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);
+
+ mode_offset = rom->modes_offset / 4;
+ qdev->mode_info.num_modes = ((u32 *)rom)[mode_offset];
+ DRM_INFO("rom modes offset 0x%x for %d modes\n", rom->modes_offset,
+ qdev->mode_info.num_modes);
+ qdev->mode_info.modes = (void *)((uint32_t *)rom + mode_offset + 1);
+ for (i = 0; i < qdev->mode_info.num_modes; i++)
+ qxl_dump_mode(qdev, qdev->mode_info.modes + i);
+ return true;
+}
+
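+/*
+ * Worked example (hypothetical numbers): with slot_gen_bits == 8 and
+ * slot_id_bits == 8, slot index 1 with generation 1 gives
+ * high_bits == ((1 << 8) | 1) << (64 - 16); qxl_bo_physical_address() is
+ * then expected to OR these high bits with an object's offset inside the
+ * slot to form the address handed to the device.
+ */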
+static uint8_t setup_slot(struct qxl_device *qdev, uint8_t slot_index_offset,
+ unsigned long start_phys_addr, unsigned long end_phys_addr)
+{
+ uint64_t high_bits;
+ struct qxl_memslot *slot;
+ uint8_t slot_index;
+ struct qxl_ram_header *ram_header = qdev->ram_header;
+
+ slot_index = qdev->rom->slots_start + slot_index_offset;
+ slot = &qdev->mem_slots[slot_index];
+ slot->start_phys_addr = start_phys_addr;
+ slot->end_phys_addr = end_phys_addr;
+ ram_header->mem_slot.mem_start = slot->start_phys_addr;
+ ram_header->mem_slot.mem_end = slot->end_phys_addr;
+ qxl_io_memslot_add(qdev, slot_index);
+ slot->generation = qdev->rom->slot_generation;
+ high_bits = slot_index << qdev->slot_gen_bits;
+ high_bits |= slot->generation;
+ high_bits <<= (64 - (qdev->slot_gen_bits + qdev->slot_id_bits));
+ slot->high_bits = high_bits;
+ return slot_index;
+}
+
+static void qxl_gc_work(struct work_struct *work)
+{
+ struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
+ qxl_garbage_collect(qdev);
+}
+
+int qxl_device_init(struct qxl_device *qdev,
+ struct drm_device *ddev,
+ struct pci_dev *pdev,
+ unsigned long flags)
+{
+ int r;
+
+ qdev->dev = &pdev->dev;
+ qdev->ddev = ddev;
+ qdev->pdev = pdev;
+ qdev->flags = flags;
+
+ mutex_init(&qdev->gem.mutex);
+ mutex_init(&qdev->update_area_mutex);
+ mutex_init(&qdev->release_mutex);
+ mutex_init(&qdev->surf_evict_mutex);
+ INIT_LIST_HEAD(&qdev->gem.objects);
+
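+ /* PCI BARs as used below: 0 = vram, 1 = surface ram, 2 = rom, 3 = io */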
+ qdev->rom_base = pci_resource_start(pdev, 2);
+ qdev->rom_size = pci_resource_len(pdev, 2);
+ qdev->vram_base = pci_resource_start(pdev, 0);
+ qdev->surfaceram_base = pci_resource_start(pdev, 1);
+ qdev->surfaceram_size = pci_resource_len(pdev, 1);
+ qdev->io_base = pci_resource_start(pdev, 3);
+
+ qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
+ qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size);
+ DRM_DEBUG_KMS("qxl: vram %p-%p(%dM %dk), surface %p-%p(%dM %dk)\n",
+ (void *)qdev->vram_base, (void *)pci_resource_end(pdev, 0),
+ (int)pci_resource_len(pdev, 0) / 1024 / 1024,
+ (int)pci_resource_len(pdev, 0) / 1024,
+ (void *)qdev->surfaceram_base,
+ (void *)pci_resource_end(pdev, 1),
+ (int)qdev->surfaceram_size / 1024 / 1024,
+ (int)qdev->surfaceram_size / 1024);
+
+ qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
+ if (!qdev->rom) {
+ pr_err("Unable to ioremap ROM\n");
+ return -ENOMEM;
+ }
+
+ qxl_check_device(qdev);
+
+ r = qxl_bo_init(qdev);
+ if (r) {
+ DRM_ERROR("bo init failed %d\n", r);
+ return r;
+ }
+
+ qdev->ram_header = ioremap(qdev->vram_base +
+ qdev->rom->ram_header_offset,
+ sizeof(*qdev->ram_header));
+
+ qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
+ sizeof(struct qxl_command),
+ QXL_COMMAND_RING_SIZE,
+ qdev->io_base + QXL_IO_NOTIFY_CMD,
+ false,
+ &qdev->display_event);
+
+ qdev->cursor_ring = qxl_ring_create(
+ &(qdev->ram_header->cursor_ring_hdr),
+ sizeof(struct qxl_command),
+ QXL_CURSOR_RING_SIZE,
+ qdev->io_base + QXL_IO_NOTIFY_CMD,
+ false,
+ &qdev->cursor_event);
+
+ qdev->release_ring = qxl_ring_create(
+ &(qdev->ram_header->release_ring_hdr),
+ sizeof(uint64_t),
+ QXL_RELEASE_RING_SIZE, 0, true,
+ NULL);
+
+ /* TODO - slot initialization should happen on reset. where is our
+ * reset handler? */
+ qdev->n_mem_slots = qdev->rom->slots_end;
+ qdev->slot_gen_bits = qdev->rom->slot_gen_bits;
+ qdev->slot_id_bits = qdev->rom->slot_id_bits;
+ qdev->va_slot_mask =
+ (~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits);
+
+ qdev->mem_slots =
+ kmalloc(qdev->n_mem_slots * sizeof(struct qxl_memslot),
+ GFP_KERNEL);
+
+ idr_init(&qdev->release_idr);
+ spin_lock_init(&qdev->release_idr_lock);
+
+ idr_init(&qdev->surf_id_idr);
+ spin_lock_init(&qdev->surf_id_idr_lock);
+
+ mutex_init(&qdev->async_io_mutex);
+
+ /* reset the device into a known state - no memslots, no primary
+ * created, no surfaces. */
+ qxl_io_reset(qdev);
+
+ /* must initialize irq before first async io - slot creation */
+ r = qxl_irq_init(qdev);
+ if (r)
+ return r;
+
+ /*
+ * Note that virtual is surface0. We rely on the single ioremap done
+ * before.
+ */
+ qdev->main_mem_slot = setup_slot(qdev, 0,
+ (unsigned long)qdev->vram_base,
+ (unsigned long)qdev->vram_base + qdev->rom->ram_header_offset);
+ qdev->surfaces_mem_slot = setup_slot(qdev, 1,
+ (unsigned long)qdev->surfaceram_base,
+ (unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
+ DRM_INFO("main mem slot %d [%lx,%x)\n",
+ qdev->main_mem_slot,
+ (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
+
+
+ qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
+ INIT_WORK(&qdev->gc_work, qxl_gc_work);
+
+ r = qxl_fb_init(qdev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static void qxl_device_fini(struct qxl_device *qdev)
+{
+ if (qdev->current_release_bo[0])
+ qxl_bo_unref(&qdev->current_release_bo[0]);
+ if (qdev->current_release_bo[1])
+ qxl_bo_unref(&qdev->current_release_bo[1]);
+ flush_workqueue(qdev->gc_queue);
+ destroy_workqueue(qdev->gc_queue);
+ qdev->gc_queue = NULL;
+
+ qxl_ring_free(qdev->command_ring);
+ qxl_ring_free(qdev->cursor_ring);
+ qxl_ring_free(qdev->release_ring);
+ qxl_bo_fini(qdev);
+ io_mapping_free(qdev->surface_mapping);
+ io_mapping_free(qdev->vram_mapping);
+ iounmap(qdev->ram_header);
+ iounmap(qdev->rom);
+ qdev->rom = NULL;
+ qdev->mode_info.modes = NULL;
+ qdev->mode_info.num_modes = 0;
+ qxl_debugfs_remove_files(qdev);
+}
+
+int qxl_driver_unload(struct drm_device *dev)
+{
+ struct qxl_device *qdev = dev->dev_private;
+
+ if (qdev == NULL)
+ return 0;
+ qxl_modeset_fini(qdev);
+ qxl_device_fini(qdev);
+
+ kfree(qdev);
+ dev->dev_private = NULL;
+ return 0;
+}
+
+int qxl_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct qxl_device *qdev;
+ int r;
+
+ /* require kms */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
+ if (qdev == NULL)
+ return -ENOMEM;
+
+ dev->dev_private = qdev;
+
+ r = qxl_device_init(qdev, dev, dev->pdev, flags);
+ if (r)
+ goto out;
+
+ r = qxl_modeset_init(qdev);
+ if (r) {
+ qxl_driver_unload(dev);
+ goto out;
+ }
+
+ return 0;
+out:
+ kfree(qdev);
+ return r;
+}
+
+
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
new file mode 100644
index 0000000..d9b12e7
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -0,0 +1,365 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alon Levy
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+#include <linux/io-mapping.h>
+static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+{
+ struct qxl_bo *bo;
+ struct qxl_device *qdev;
+
+ bo = container_of(tbo, struct qxl_bo, tbo);
+ qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+
+ qxl_surface_evict(qdev, bo, false);
+ qxl_fence_fini(&bo->fence);
+ mutex_lock(&qdev->gem.mutex);
+ list_del_init(&bo->list);
+ mutex_unlock(&qdev->gem.mutex);
+ drm_gem_object_release(&bo->gem_base);
+ kfree(bo);
+}
+
+bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &qxl_ttm_bo_destroy)
+ return true;
+ return false;
+}
+
+void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
+{
+ u32 c = 0;
+
+ qbo->placement.fpfn = 0;
+ qbo->placement.lpfn = 0;
+ qbo->placement.placement = qbo->placements;
+ qbo->placement.busy_placement = qbo->placements;
+ if (domain == QXL_GEM_DOMAIN_VRAM)
+ qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM;
+ if (domain == QXL_GEM_DOMAIN_SURFACE)
+ qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0;
+ if (domain == QXL_GEM_DOMAIN_CPU)
+ qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (!c)
+ qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ qbo->placement.num_placement = c;
+ qbo->placement.num_busy_placement = c;
+}
+
+
+int qxl_bo_create(struct qxl_device *qdev,
+ unsigned long size, bool kernel, u32 domain,
+ struct qxl_surface *surf,
+ struct qxl_bo **bo_ptr)
+{
+ struct qxl_bo *bo;
+ enum ttm_bo_type type;
+ int r;
+
+ if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
+ qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
+ if (kernel)
+ type = ttm_bo_type_kernel;
+ else
+ type = ttm_bo_type_device;
+ *bo_ptr = NULL;
+ bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
+ if (bo == NULL)
+ return -ENOMEM;
+ size = roundup(size, PAGE_SIZE);
+ r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size);
+ if (unlikely(r)) {
+ kfree(bo);
+ return r;
+ }
+ bo->gem_base.driver_private = NULL;
+ bo->type = domain;
+ bo->pin_count = 0;
+ bo->surface_id = 0;
+ qxl_fence_init(qdev, &bo->fence);
+ INIT_LIST_HEAD(&bo->list);
+ atomic_set(&bo->reserve_count, 0);
+ if (surf)
+ bo->surf = *surf;
+
+ qxl_ttm_placement_from_domain(bo, domain);
+
+ r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
+ &bo->placement, 0, !kernel, NULL, size,
+ NULL, &qxl_ttm_bo_destroy);
+ if (unlikely(r != 0)) {
+ if (r != -ERESTARTSYS)
+ dev_err(qdev->dev,
+ "object_init failed for (%lu, 0x%08X)\n",
+ size, domain);
+ return r;
+ }
+ *bo_ptr = bo;
+ return 0;
+}
+
+int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
+{
+ bool is_iomem;
+ int r;
+
+ if (bo->kptr) {
+ if (ptr)
+ *ptr = bo->kptr;
+ return 0;
+ }
+ r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+ if (r)
+ return r;
+ bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+ if (ptr)
+ *ptr = bo->kptr;
+ return 0;
+}
+
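+/* map a single page of the bo without setting up a persistent kernel
+ * mapping: VRAM and surface RAM go through the per-device io_mapping,
+ * anything else falls back to a full qxl_bo_kmap() */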
+void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
+ struct qxl_bo *bo, int page_offset)
+{
+ struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
+ void *rptr;
+ int ret;
+ struct io_mapping *map;
+
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ map = qdev->vram_mapping;
+ else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
+ map = qdev->surface_mapping;
+ else
+ goto fallback;
+
+ (void) ttm_mem_io_lock(man, false);
+ ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
+ ttm_mem_io_unlock(man);
+
+ return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
+fallback:
+ if (bo->kptr) {
+ rptr = bo->kptr + (page_offset * PAGE_SIZE);
+ return rptr;
+ }
+
+ ret = qxl_bo_kmap(bo, &rptr);
+ if (ret)
+ return NULL;
+
+ rptr += page_offset * PAGE_SIZE;
+ return rptr;
+}
+
+void qxl_bo_kunmap(struct qxl_bo *bo)
+{
+ if (bo->kptr == NULL)
+ return;
+ bo->kptr = NULL;
+ ttm_bo_kunmap(&bo->kmap);
+}
+
+void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
+ struct qxl_bo *bo, void *pmap)
+{
+ struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
+ struct io_mapping *map;
+
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ map = qdev->vram_mapping;
+ else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
+ map = qdev->surface_mapping;
+ else
+ goto fallback;
+
+ io_mapping_unmap_atomic(pmap);
+
+ (void) ttm_mem_io_lock(man, false);
+ ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
+ ttm_mem_io_unlock(man);
+ return ;
+ fallback:
+ qxl_bo_kunmap(bo);
+}
+
+void qxl_bo_unref(struct qxl_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+ tbo = &((*bo)->tbo);
+ ttm_bo_unref(&tbo);
+ if (tbo == NULL)
+ *bo = NULL;
+}
+
+struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
+{
+ ttm_bo_reference(&bo->tbo);
+ return bo;
+}
+
+int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
+{
+ struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+ int r, i;
+
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = qxl_bo_gpu_offset(bo);
+ return 0;
+ }
+ qxl_ttm_placement_from_domain(bo, domain);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (likely(r == 0)) {
+ bo->pin_count = 1;
+ if (gpu_addr != NULL)
+ *gpu_addr = qxl_bo_gpu_offset(bo);
+ }
+ if (unlikely(r != 0))
+ dev_err(qdev->dev, "%p pin failed\n", bo);
+ return r;
+}
+
+int qxl_bo_unpin(struct qxl_bo *bo)
+{
+ struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+ int r, i;
+
+ if (!bo->pin_count) {
+ dev_warn(qdev->dev, "%p unpin not necessary\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (unlikely(r != 0))
+ dev_err(qdev->dev, "%p validate failed for unpin\n", bo);
+ return r;
+}
+
+void qxl_bo_force_delete(struct qxl_device *qdev)
+{
+ struct qxl_bo *bo, *n;
+
+ if (list_empty(&qdev->gem.objects))
+ return;
+ dev_err(qdev->dev, "Userspace still has active objects !\n");
+ list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
+ mutex_lock(&qdev->ddev->struct_mutex);
+ dev_err(qdev->dev, "%p %p %lu %lu force free\n",
+ &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
+ *((unsigned long *)&bo->gem_base.refcount));
+ mutex_lock(&qdev->gem.mutex);
+ list_del_init(&bo->list);
+ mutex_unlock(&qdev->gem.mutex);
+ /* this should unref the ttm bo */
+ drm_gem_object_unreference(&bo->gem_base);
+ mutex_unlock(&qdev->ddev->struct_mutex);
+ }
+}
+
+int qxl_bo_init(struct qxl_device *qdev)
+{
+ return qxl_ttm_init(qdev);
+}
+
+void qxl_bo_fini(struct qxl_device *qdev)
+{
+ qxl_ttm_fini(qdev);
+}
+
+int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
+{
+ int ret;
+ if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
+ /* allocate a surface id for this surface now */
+ ret = qxl_surface_id_alloc(qdev, bo);
+ if (ret)
+ return ret;
+
+ ret = qxl_hw_surface_alloc(qdev, bo, NULL);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed)
+{
+ struct qxl_bo_list *entry, *sf;
+
+ list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) {
+ qxl_bo_unreserve(entry->bo);
+ list_del(&entry->lhead);
+ kfree(entry);
+ }
+}
+
+int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo)
+{
+ struct qxl_bo_list *entry;
+ int ret;
+
+ list_for_each_entry(entry, &reloc_list->bos, lhead) {
+ if (entry->bo == bo)
+ return 0;
+ }
+
+ entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->bo = bo;
+ list_add(&entry->lhead, &reloc_list->bos);
+
+ ret = qxl_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ if (!bo->pin_count) {
+ qxl_ttm_placement_from_domain(bo, bo->type);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement,
+ true, false);
+ if (ret)
+ return ret;
+ }
+
+ /* allocate a surface for reserved + validated buffers */
+ ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
+ if (ret)
+ return ret;
+ return 0;
+}
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
new file mode 100644
index 0000000..b4fd89f
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alon Levy
+ */
+#ifndef QXL_OBJECT_H
+#define QXL_OBJECT_H
+
+#include "qxl_drv.h"
+
+static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
+{
+ int r;
+
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ if (unlikely(r != 0)) {
+ if (r != -ERESTARTSYS) {
+ struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+ dev_err(qdev->dev, "%p reserve failed\n", bo);
+ }
+ return r;
+ }
+ return 0;
+}
+
+static inline void qxl_bo_unreserve(struct qxl_bo *bo)
+{
+ ttm_bo_unreserve(&bo->tbo);
+}
+
+static inline u64 qxl_bo_gpu_offset(struct qxl_bo *bo)
+{
+ return bo->tbo.offset;
+}
+
+static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
+{
+ return bo->tbo.num_pages << PAGE_SHIFT;
+}
+
+static inline bool qxl_bo_is_reserved(struct qxl_bo *bo)
+{
+ return !!atomic_read(&bo->tbo.reserved);
+}
+
+static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
+{
+ return bo->tbo.addr_space_offset;
+}
+
+static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
+ bool no_wait)
+{
+ int r;
+
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ if (unlikely(r != 0)) {
+ if (r != -ERESTARTSYS) {
+ struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+ dev_err(qdev->dev, "%p reserve failed for wait\n",
+ bo);
+ }
+ return r;
+ }
+ spin_lock(&bo->tbo.bdev->fence_lock);
+ if (mem_type)
+ *mem_type = bo->tbo.mem.mem_type;
+ if (bo->tbo.sync_obj)
+ r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+ spin_unlock(&bo->tbo.bdev->fence_lock);
+ ttm_bo_unreserve(&bo->tbo);
+ return r;
+}
+
+extern int qxl_bo_create(struct qxl_device *qdev,
+ unsigned long size,
+ bool kernel, u32 domain,
+ struct qxl_surface *surf,
+ struct qxl_bo **bo_ptr);
+extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
+extern void qxl_bo_kunmap(struct qxl_bo *bo);
+void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset);
+void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
+extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
+extern void qxl_bo_unref(struct qxl_bo **bo);
+extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
+extern int qxl_bo_unpin(struct qxl_bo *bo);
+extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain);
+extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
+
+extern int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo);
+extern void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed);
+#endif
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
new file mode 100644
index 0000000..b443d67
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright 2011 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+/*
+ * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
+ * into 256 byte chunks for now - gives 16 cmds per page.
+ *
+ * use an ida to index into the chunks?
+ */
+/* manage releasables */
+/* stack them 16 high for now - a drawable object is 191 bytes */
+#define RELEASE_SIZE 256
+#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
+/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
+#define SURFACE_RELEASE_SIZE 128
+#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)
+
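+/* indexed by the cur_idx chosen in qxl_alloc_release_reserved():
+ 0 = drawable, 1 = surface cmd, 2 = cursor cmd */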
+static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
+static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
+uint64_t
+qxl_release_alloc(struct qxl_device *qdev, int type,
+ struct qxl_release **ret)
+{
+ struct qxl_release *release;
+ int handle;
+ size_t size = sizeof(*release);
+ int idr_ret;
+
+ release = kmalloc(size, GFP_KERNEL);
+ if (!release) {
+ DRM_ERROR("Out of memory\n");
+ return 0;
+ }
+ release->type = type;
+ release->bo_count = 0;
+ release->release_offset = 0;
+ release->surface_release_id = 0;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock(&qdev->release_idr_lock);
+ idr_ret = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
+ spin_unlock(&qdev->release_idr_lock);
+ idr_preload_end();
+ handle = idr_ret;
+ if (idr_ret < 0)
+ goto release_fail;
+ *ret = release;
+ QXL_INFO(qdev, "allocated release %lld\n", handle);
+ release->id = handle;
+release_fail:
+
+ return handle;
+}
+
+void
+qxl_release_free(struct qxl_device *qdev,
+ struct qxl_release *release)
+{
+ int i;
+
+ QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id,
+ release->type, release->bo_count);
+
+ if (release->surface_release_id)
+ qxl_surface_id_dealloc(qdev, release->surface_release_id);
+
+ for (i = 0 ; i < release->bo_count; ++i) {
+ QXL_INFO(qdev, "release %llx\n",
+ release->bos[i]->tbo.addr_space_offset
+ - DRM_FILE_OFFSET);
+ qxl_fence_remove_release(&release->bos[i]->fence, release->id);
+ qxl_bo_unref(&release->bos[i]);
+ }
+ spin_lock(&qdev->release_idr_lock);
+ idr_remove(&qdev->release_idr, release->id);
+ spin_unlock(&qdev->release_idr_lock);
+ kfree(release);
+}
+
+void
+qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release,
+ struct qxl_bo *bo)
+{
+ int i;
+ for (i = 0; i < release->bo_count; i++)
+ if (release->bos[i] == bo)
+ return;
+
+ if (release->bo_count >= QXL_MAX_RES) {
+ DRM_ERROR("exceeded max resource on a qxl_release item\n");
+ return;
+ }
+ release->bos[release->bo_count++] = qxl_bo_ref(bo);
+}
+
+static int qxl_release_bo_alloc(struct qxl_device *qdev,
+ struct qxl_bo **bo)
+{
+ int ret;
+ ret = qxl_bo_create(qdev, PAGE_SIZE, false, QXL_GEM_DOMAIN_VRAM, NULL,
+ bo);
+ return ret;
+}
+
+int qxl_release_reserve(struct qxl_device *qdev,
+ struct qxl_release *release, bool no_wait)
+{
+ int ret;
+ if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) {
+ ret = qxl_bo_reserve(release->bos[0], no_wait);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+void qxl_release_unreserve(struct qxl_device *qdev,
+ struct qxl_release *release)
+{
+ if (atomic_dec_and_test(&release->bos[0]->reserve_count))
+ qxl_bo_unreserve(release->bos[0]);
+}
+
+int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
+ enum qxl_surface_cmd_type surface_cmd_type,
+ struct qxl_release *create_rel,
+ struct qxl_release **release)
+{
+ int ret;
+
+ if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
+ int idr_ret;
+ struct qxl_bo *bo;
+ union qxl_release_info *info;
+
+ /* stash the release after the create command */
+ idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
+ bo = qxl_bo_ref(create_rel->bos[0]);
+
+ (*release)->release_offset = create_rel->release_offset + 64;
+
+ qxl_release_add_res(qdev, *release, bo);
+
+ ret = qxl_release_reserve(qdev, *release, false);
+ if (ret) {
+ DRM_ERROR("release reserve failed\n");
+ goto out_unref;
+ }
+ info = qxl_release_map(qdev, *release);
+ info->id = idr_ret;
+ qxl_release_unmap(qdev, *release, info);
+
+
+out_unref:
+ qxl_bo_unref(&bo);
+ return ret;
+ }
+
+ return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
+ QXL_RELEASE_SURFACE_CMD, release, NULL);
+}
+
+int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
+ int type, struct qxl_release **release,
+ struct qxl_bo **rbo)
+{
+ struct qxl_bo *bo;
+ int idr_ret;
+ int ret;
+ union qxl_release_info *info;
+ int cur_idx;
+
+ if (type == QXL_RELEASE_DRAWABLE)
+ cur_idx = 0;
+ else if (type == QXL_RELEASE_SURFACE_CMD)
+ cur_idx = 1;
+ else if (type == QXL_RELEASE_CURSOR_CMD)
+ cur_idx = 2;
+ else {
+ DRM_ERROR("got illegal type: %d\n", type);
+ return -EINVAL;
+ }
+
+ idr_ret = qxl_release_alloc(qdev, type, release);
+
+ mutex_lock(&qdev->release_mutex);
+ if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
+ qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
+ qdev->current_release_bo_offset[cur_idx] = 0;
+ qdev->current_release_bo[cur_idx] = NULL;
+ }
+ if (!qdev->current_release_bo[cur_idx]) {
+ ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
+ if (ret) {
+ mutex_unlock(&qdev->release_mutex);
+ return ret;
+ }
+
+ /* pin release bo's - they are too messy to evict */
+ ret = qxl_bo_reserve(qdev->current_release_bo[cur_idx], false);
+ qxl_bo_pin(qdev->current_release_bo[cur_idx], QXL_GEM_DOMAIN_VRAM, NULL);
+ qxl_bo_unreserve(qdev->current_release_bo[cur_idx]);
+ }
+
+ bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
+
+ (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
+ qdev->current_release_bo_offset[cur_idx]++;
+
+ if (rbo)
+ *rbo = bo;
+
+ qxl_release_add_res(qdev, *release, bo);
+
+ ret = qxl_release_reserve(qdev, *release, false);
+ mutex_unlock(&qdev->release_mutex);
+ if (ret)
+ goto out_unref;
+
+ info = qxl_release_map(qdev, *release);
+ info->id = idr_ret;
+ qxl_release_unmap(qdev, *release, info);
+
+out_unref:
+ qxl_bo_unref(&bo);
+ return ret;
+}
+
+int qxl_fence_releaseable(struct qxl_device *qdev,
+ struct qxl_release *release)
+{
+ int i, ret;
+ for (i = 0; i < release->bo_count; i++) {
+ if (!release->bos[i]->tbo.sync_obj)
+ release->bos[i]->tbo.sync_obj = &release->bos[i]->fence;
+ ret = qxl_fence_add_release(&release->bos[i]->fence, release->id);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
+ uint64_t id)
+{
+ struct qxl_release *release;
+
+ spin_lock(&qdev->release_idr_lock);
+ release = idr_find(&qdev->release_idr, id);
+ spin_unlock(&qdev->release_idr_lock);
+ if (!release) {
+ DRM_ERROR("failed to find id in release_idr\n");
+ return NULL;
+ }
+ if (release->bo_count < 1) {
+ DRM_ERROR("read a released resource with 0 bos\n");
+ return NULL;
+ }
+ return release;
+}
+
+union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
+ struct qxl_release *release)
+{
+ void *ptr;
+ union qxl_release_info *info;
+ struct qxl_bo *bo = release->bos[0];
+
+ ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
+ info = ptr + (release->release_offset & ~PAGE_SIZE);
+ return info;
+}
+
+void qxl_release_unmap(struct qxl_device *qdev,
+ struct qxl_release *release,
+ union qxl_release_info *info)
+{
+ struct qxl_bo *bo = release->bos[0];
+ void *ptr;
+
+ ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
+ qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
+}
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
new file mode 100644
index 0000000..489cb8c
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -0,0 +1,581 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alon Levy
+ */
+
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_page_alloc.h>
+#include <ttm/ttm_module.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/qxl_drm.h>
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+#include <linux/delay.h>
+static int qxl_ttm_debugfs_init(struct qxl_device *qdev);
+
+static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
+{
+ struct qxl_mman *mman;
+ struct qxl_device *qdev;
+
+ mman = container_of(bdev, struct qxl_mman, bdev);
+ qdev = container_of(mman, struct qxl_device, mman);
+ return qdev;
+}
+
+static int qxl_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void qxl_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int qxl_ttm_global_init(struct qxl_device *qdev)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ qdev->mman.mem_global_referenced = false;
+ global_ref = &qdev->mman.mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &qxl_ttm_mem_global_init;
+ global_ref->release = &qxl_ttm_mem_global_release;
+
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM memory accounting "
+ "subsystem.\n");
+ return r;
+ }
+
+ qdev->mman.bo_global_ref.mem_glob =
+ qdev->mman.mem_global_ref.object;
+ global_ref = &qdev->mman.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ drm_global_item_unref(&qdev->mman.mem_global_ref);
+ return r;
+ }
+
+ qdev->mman.mem_global_referenced = true;
+ return 0;
+}
+
+static void qxl_ttm_global_fini(struct qxl_device *qdev)
+{
+ if (qdev->mman.mem_global_referenced) {
+ drm_global_item_unref(&qdev->mman.bo_global_ref.ref);
+ drm_global_item_unref(&qdev->mman.mem_global_ref);
+ qdev->mman.mem_global_referenced = false;
+ }
+}
+
+static struct vm_operations_struct qxl_ttm_vm_ops;
+static const struct vm_operations_struct *ttm_vm_ops;
+
+static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct ttm_buffer_object *bo;
+ struct qxl_device *qdev;
+ int r;
+
+ bo = (struct ttm_buffer_object *)vma->vm_private_data;
+ if (bo == NULL)
+ return VM_FAULT_NOPAGE;
+ qdev = qxl_get_qdev(bo->bdev);
+ r = ttm_vm_ops->fault(vma, vmf);
+ return r;
+}
+
+int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct qxl_device *qdev;
+ int r;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
+ pr_info("%s: vma->vm_pgoff (%ld) < DRM_FILE_PAGE_OFFSET\n",
+ __func__, vma->vm_pgoff);
+ return drm_mmap(filp, vma);
+ }
+
+ file_priv = filp->private_data;
+ qdev = file_priv->minor->dev->dev_private;
+ if (qdev == NULL) {
+ DRM_ERROR(
+ "filp->private_data->minor->dev->dev_private == NULL\n");
+ return -EINVAL;
+ }
+ QXL_INFO(qdev, "%s: filp->private_data = 0x%p, vma->vm_pgoff = %lx\n",
+ __func__, filp->private_data, vma->vm_pgoff);
+
+ r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev);
+ if (unlikely(r != 0))
+ return r;
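+ /* On the first mmap, capture TTM's default vm_ops and reuse them with
+ * our own fault handler substituted. */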
+ if (unlikely(ttm_vm_ops == NULL)) {
+ ttm_vm_ops = vma->vm_ops;
+ qxl_ttm_vm_ops = *ttm_vm_ops;
+ qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
+ }
+ vma->vm_ops = &qxl_ttm_vm_ops;
+ return 0;
+}
+
+static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+{
+ return 0;
+}
+
+static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ struct qxl_device *qdev;
+
+ qdev = qxl_get_qdev(bdev);
+
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ /* System memory */
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ case TTM_PL_PRIV0:
+ /* "On-card" video ram */
+ man->func = &ttm_bo_manager_func;
+ man->gpu_offset = 0;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void qxl_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ struct qxl_bo *qbo;
+ static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+
+ if (!qxl_ttm_bo_is_qxl_bo(bo)) {
+ placement->fpfn = 0;
+ placement->lpfn = 0;
+ placement->placement = &placements;
+ placement->busy_placement = &placements;
+ placement->num_placement = 1;
+ placement->num_busy_placement = 1;
+ return;
+ }
+ qbo = container_of(bo, struct qxl_bo, tbo);
+ qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU);
+ *placement = qbo->placement;
+}
+
+static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+ return 0;
+}
+
+static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct qxl_device *qdev = qxl_get_qdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.is_iomem = true;
+ mem->bus.base = qdev->vram_base;
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ break;
+ case TTM_PL_PRIV0:
+ mem->bus.is_iomem = true;
+ mem->bus.base = qdev->surfaceram_base;
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void qxl_ttm_io_mem_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+}
+
+/*
+ * TTM backend functions.
+ */
+struct qxl_ttm_tt {
+ struct ttm_dma_tt ttm;
+ struct qxl_device *qdev;
+ u64 offset;
+};
+
+static int qxl_ttm_backend_bind(struct ttm_tt *ttm,
+ struct ttm_mem_reg *bo_mem)
+{
+ struct qxl_ttm_tt *gtt = (void *)ttm;
+
+ gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+ if (!ttm->num_pages) {
+ WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+ ttm->num_pages, bo_mem, ttm);
+ }
+ /* Not implemented */
+ return -1;
+}
+
+static int qxl_ttm_backend_unbind(struct ttm_tt *ttm)
+{
+ /* Not implemented */
+ return -1;
+}
+
+static void qxl_ttm_backend_destroy(struct ttm_tt *ttm)
+{
+ struct qxl_ttm_tt *gtt = (void *)ttm;
+
+ ttm_dma_tt_fini(&gtt->ttm);
+ kfree(gtt);
+}
+
+static struct ttm_backend_func qxl_backend_func = {
+ .bind = &qxl_ttm_backend_bind,
+ .unbind = &qxl_ttm_backend_unbind,
+ .destroy = &qxl_ttm_backend_destroy,
+};
+
+static int qxl_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ int r;
+
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ r = ttm_pool_populate(ttm);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static void qxl_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+static struct ttm_tt *qxl_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct qxl_device *qdev;
+ struct qxl_ttm_tt *gtt;
+
+ qdev = qxl_get_qdev(bdev);
+ gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL);
+ if (gtt == NULL)
+ return NULL;
+ gtt->ttm.ttm.func = &qxl_backend_func;
+ gtt->qdev = qdev;
+ if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags,
+ dummy_read_page)) {
+ kfree(gtt);
+ return NULL;
+ }
+ return &gtt->ttm.ttm;
+}
+
+static void qxl_move_null(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *new_mem)
+{
+ struct ttm_mem_reg *old_mem = &bo->mem;
+
+ BUG_ON(old_mem->mm_node != NULL);
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+}
+
+static int qxl_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ struct ttm_mem_reg *old_mem = &bo->mem;
+ if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+ qxl_move_null(bo, new_mem);
+ return 0;
+ }
+ return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+}
+
+
+static int qxl_sync_obj_wait(void *sync_obj,
+ bool lazy, bool interruptible)
+{
+ struct qxl_fence *qfence = (struct qxl_fence *)sync_obj;
+ int count = 0, sc = 0;
+ struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
+
+ if (qfence->num_active_releases == 0)
+ return 0;
+
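+ /* Escalate on each retry: first flush a surface bo to the device, then
+ * signal out-of-memory so the device frees resources, polling its
+ * garbage collection in between and backing off while drawable releases
+ * remain outstanding. */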
+retry:
+ if (sc == 0) {
+ if (bo->type == QXL_GEM_DOMAIN_SURFACE)
+ qxl_update_surface(qfence->qdev, bo);
+ } else if (sc >= 1) {
+ qxl_io_notify_oom(qfence->qdev);
+ }
+
+ sc++;
+
+ for (count = 0; count < 10; count++) {
+ bool ret;
+ ret = qxl_queue_garbage_collect(qfence->qdev, true);
+ if (ret == false)
+ break;
+
+ if (qfence->num_active_releases == 0)
+ return 0;
+ }
+
+ if (qfence->num_active_releases) {
+ bool have_drawable_releases = false;
+ void **slot;
+ struct radix_tree_iter iter;
+ int release_id;
+
+ radix_tree_for_each_slot(slot, &qfence->tree, &iter, 0) {
+ struct qxl_release *release;
+
+ release_id = iter.index;
+ release = qxl_release_from_id_locked(qfence->qdev, release_id);
+ if (release == NULL)
+ continue;
+
+ if (release->type == QXL_RELEASE_DRAWABLE)
+ have_drawable_releases = true;
+ }
+
+ qxl_queue_garbage_collect(qfence->qdev, true);
+
+ if (have_drawable_releases || sc < 4) {
+ if (sc > 2)
+ /* back off */
+ usleep_range(500, 1000);
+ if (have_drawable_releases && sc > 300) {
+ WARN(1, "sync obj %d still has outstanding releases %d %d %d %ld %d\n", sc, bo->surface_id, bo->is_primary, bo->pin_count, (unsigned long)bo->gem_base.size, qfence->num_active_releases);
+ return -EBUSY;
+ }
+ goto retry;
+ }
+ }
+ return 0;
+}
+
+static int qxl_sync_obj_flush(void *sync_obj)
+{
+ return 0;
+}
+
+static void qxl_sync_obj_unref(void **sync_obj)
+{
+}
+
+static void *qxl_sync_obj_ref(void *sync_obj)
+{
+ return sync_obj;
+}
+
+static bool qxl_sync_obj_signaled(void *sync_obj)
+{
+ struct qxl_fence *qfence = (struct qxl_fence *)sync_obj;
+ return (qfence->num_active_releases == 0);
+}
+
+static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *new_mem)
+{
+ struct qxl_bo *qbo;
+ struct qxl_device *qdev;
+
+ if (!qxl_ttm_bo_is_qxl_bo(bo))
+ return;
+ qbo = container_of(bo, struct qxl_bo, tbo);
+ qdev = qbo->gem_base.dev->dev_private;
+
+ if (bo->mem.mem_type == TTM_PL_PRIV0 && qbo->surface_id)
+ qxl_surface_evict(qdev, qbo, new_mem ? true : false);
+}
+
+static struct ttm_bo_driver qxl_bo_driver = {
+ .ttm_tt_create = &qxl_ttm_tt_create,
+ .ttm_tt_populate = &qxl_ttm_tt_populate,
+ .ttm_tt_unpopulate = &qxl_ttm_tt_unpopulate,
+ .invalidate_caches = &qxl_invalidate_caches,
+ .init_mem_type = &qxl_init_mem_type,
+ .evict_flags = &qxl_evict_flags,
+ .move = &qxl_bo_move,
+ .verify_access = &qxl_verify_access,
+ .io_mem_reserve = &qxl_ttm_io_mem_reserve,
+ .io_mem_free = &qxl_ttm_io_mem_free,
+ .sync_obj_signaled = &qxl_sync_obj_signaled,
+ .sync_obj_wait = &qxl_sync_obj_wait,
+ .sync_obj_flush = &qxl_sync_obj_flush,
+ .sync_obj_unref = &qxl_sync_obj_unref,
+ .sync_obj_ref = &qxl_sync_obj_ref,
+ .move_notify = &qxl_bo_move_notify,
+};
+
+
+
+int qxl_ttm_init(struct qxl_device *qdev)
+{
+ int r;
+ int num_io_pages; /* != rom->num_io_pages, we include surface0 */
+
+ r = qxl_ttm_global_init(qdev);
+ if (r)
+ return r;
+ /* No other users of the address space, so set it to 0 */
+ r = ttm_bo_device_init(&qdev->mman.bdev,
+ qdev->mman.bo_global_ref.ref.object,
+ &qxl_bo_driver, DRM_FILE_PAGE_OFFSET, 0);
+ if (r) {
+ DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
+ return r;
+ }
+ /* NOTE: this includes the framebuffer (aka surface 0) */
+ num_io_pages = qdev->rom->ram_header_offset / PAGE_SIZE;
+ r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_VRAM,
+ num_io_pages);
+ if (r) {
+ DRM_ERROR("Failed initializing VRAM heap.\n");
+ return r;
+ }
+ r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV0,
+ qdev->surfaceram_size / PAGE_SIZE);
+ if (r) {
+ DRM_ERROR("Failed initializing Surfaces heap.\n");
+ return r;
+ }
+ DRM_INFO("qxl: %uM of VRAM memory size\n",
+ (unsigned)qdev->vram_size / (1024 * 1024));
+ DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
+ ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
+ if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
+ qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
+ r = qxl_ttm_debugfs_init(qdev);
+ if (r) {
+ DRM_ERROR("Failed to init debugfs\n");
+ return r;
+ }
+ return 0;
+}
+
+void qxl_ttm_fini(struct qxl_device *qdev)
+{
+ ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
+ ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
+ ttm_bo_device_release(&qdev->mman.bdev);
+ qxl_ttm_global_fini(qdev);
+ DRM_INFO("qxl: ttm finalized\n");
+}
+
+
+#define QXL_DEBUGFS_MEM_TYPES 2
+
+#if defined(CONFIG_DEBUG_FS)
+static int qxl_mm_dump_table(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
+ struct drm_device *dev = node->minor->dev;
+ struct qxl_device *rdev = dev->dev_private;
+ int ret;
+ struct ttm_bo_global *glob = rdev->mman.bdev.glob;
+
+ spin_lock(&glob->lru_lock);
+ ret = drm_mm_dump_table(m, mm);
+ spin_unlock(&glob->lru_lock);
+ return ret;
+}
+#endif
+
+static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
+ static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
+ unsigned i;
+
+ for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
+ if (i == 0)
+ sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
+ else
+ sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
+ qxl_mem_types_list[i].name = qxl_mem_types_names[i];
+ qxl_mem_types_list[i].show = &qxl_mm_dump_table;
+ qxl_mem_types_list[i].driver_features = 0;
+ if (i == 0)
+ qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
+ else
+ qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
+
+ }
+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
+#else
+ return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index bf17252..86c5e36 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -76,7 +76,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
- si_blit_shaders.o radeon_prime.o
+ si_blit_shaders.o radeon_prime.o radeon_uvd.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 46a9c37..fb441a7 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1394,10 +1394,10 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)
firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
DRM_DEBUG("atom firmware requested %08x %dkb\n",
- firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
- firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
+ le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
+ le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
- usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+ usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
}
ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 4b04ba3..0ee5737 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -458,6 +458,7 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
union
{
ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
+ ULONG ulClockParams; //ULONG access for BE
ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter
};
UCHAR ucRefDiv; //Output Parameter
@@ -490,6 +491,7 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
union
{
ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
+ ULONG ulClockParams; //ULONG access for BE
ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter
};
UCHAR ucRefDiv; //Output Parameter
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 21a892c..6d6fdb3 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -557,6 +557,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
/* use frac fb div on APUs */
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ /* use frac fb div on RS780/RS880 */
+ if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
} else {
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 4552d4a..44a7da6 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -2150,13 +2150,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
atombios_apply_encoder_quirks(encoder, adjusted_mode);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
- r600_hdmi_enable(encoder);
- if (ASIC_IS_DCE6(rdev))
- ; /* TODO (use pointers instead of if-s?) */
- else if (ASIC_IS_DCE4(rdev))
- evergreen_hdmi_setmode(encoder, adjusted_mode);
- else
- r600_hdmi_setmode(encoder, adjusted_mode);
+ if (rdev->asic->display.hdmi_enable)
+ radeon_hdmi_enable(rdev, encoder, true);
+ if (rdev->asic->display.hdmi_setmode)
+ radeon_hdmi_setmode(rdev, encoder, adjusted_mode);
}
}
@@ -2413,8 +2410,10 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
disable_done:
if (radeon_encoder_is_digital(encoder)) {
- if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
- r600_hdmi_disable(encoder);
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+ if (rdev->asic->display.hdmi_enable)
+ radeon_hdmi_enable(rdev, encoder, false);
+ }
dig = radeon_encoder->enc_priv;
dig->dig_encoder = -1;
}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 305a657..105bafb 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -53,6 +53,864 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
int ring, u32 cp_int_cntl);
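+/* The tables below are (register offset, mask, value) triples applied by
+ * radeon_program_register_sequence() from evergreen_init_golden_registers(). */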
+static const u32 evergreen_golden_registers[] =
+{
+ 0x3f90, 0xffff0000, 0xff000000,
+ 0x9148, 0xffff0000, 0xff000000,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0x9b7c, 0xffffffff, 0x00000000,
+ 0x8a14, 0xffffffff, 0x00000007,
+ 0x8b10, 0xffffffff, 0x00000000,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x88c4, 0xffffffff, 0x000000c2,
+ 0x88d4, 0xffffffff, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000,
+ 0xc78, 0x00000080, 0x00000080,
+ 0x5eb4, 0xffffffff, 0x00000002,
+ 0x5e78, 0xffffffff, 0x001000f0,
+ 0x6104, 0x01000300, 0x00000000,
+ 0x5bc0, 0x00300000, 0x00000000,
+ 0x7030, 0xffffffff, 0x00000011,
+ 0x7c30, 0xffffffff, 0x00000011,
+ 0x10830, 0xffffffff, 0x00000011,
+ 0x11430, 0xffffffff, 0x00000011,
+ 0x12030, 0xffffffff, 0x00000011,
+ 0x12c30, 0xffffffff, 0x00000011,
+ 0xd02c, 0xffffffff, 0x08421000,
+ 0x240c, 0xffffffff, 0x00000380,
+ 0x8b24, 0xffffffff, 0x00ff0fff,
+ 0x28a4c, 0x06000000, 0x06000000,
+ 0x10c, 0x00000001, 0x00000001,
+ 0x8d00, 0xffffffff, 0x100e4848,
+ 0x8d04, 0xffffffff, 0x00164745,
+ 0x8c00, 0xffffffff, 0xe4000003,
+ 0x8c04, 0xffffffff, 0x40600060,
+ 0x8c08, 0xffffffff, 0x001c001c,
+ 0x8cf0, 0xffffffff, 0x08e00620,
+ 0x8c20, 0xffffffff, 0x00800080,
+ 0x8c24, 0xffffffff, 0x00800080,
+ 0x8c18, 0xffffffff, 0x20202078,
+ 0x8c1c, 0xffffffff, 0x00001010,
+ 0x28350, 0xffffffff, 0x00000000,
+ 0xa008, 0xffffffff, 0x00010000,
+ 0x5cc, 0xffffffff, 0x00000001,
+ 0x9508, 0xffffffff, 0x00000002,
+ 0x913c, 0x0000000f, 0x0000000a
+};
+
+static const u32 evergreen_golden_registers2[] =
+{
+ 0x2f4c, 0xffffffff, 0x00000000,
+ 0x54f4, 0xffffffff, 0x00000000,
+ 0x54f0, 0xffffffff, 0x00000000,
+ 0x5498, 0xffffffff, 0x00000000,
+ 0x549c, 0xffffffff, 0x00000000,
+ 0x5494, 0xffffffff, 0x00000000,
+ 0x53cc, 0xffffffff, 0x00000000,
+ 0x53c8, 0xffffffff, 0x00000000,
+ 0x53c4, 0xffffffff, 0x00000000,
+ 0x53c0, 0xffffffff, 0x00000000,
+ 0x53bc, 0xffffffff, 0x00000000,
+ 0x53b8, 0xffffffff, 0x00000000,
+ 0x53b4, 0xffffffff, 0x00000000,
+ 0x53b0, 0xffffffff, 0x00000000
+};
+
+static const u32 cypress_mgcg_init[] =
+{
+ 0x802c, 0xffffffff, 0xc0000000,
+ 0x5448, 0xffffffff, 0x00000100,
+ 0x55e4, 0xffffffff, 0x00000100,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x5644, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x8d58, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x9654, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0x9040, 0xffffffff, 0x00000100,
+ 0xa200, 0xffffffff, 0x00000100,
+ 0xa204, 0xffffffff, 0x00000100,
+ 0xa208, 0xffffffff, 0x00000100,
+ 0xa20c, 0xffffffff, 0x00000100,
+ 0x971c, 0xffffffff, 0x00000100,
+ 0x977c, 0xffffffff, 0x00000100,
+ 0x3f80, 0xffffffff, 0x00000100,
+ 0xa210, 0xffffffff, 0x00000100,
+ 0xa214, 0xffffffff, 0x00000100,
+ 0x4d8, 0xffffffff, 0x00000100,
+ 0x9784, 0xffffffff, 0x00000100,
+ 0x9698, 0xffffffff, 0x00000100,
+ 0x4d4, 0xffffffff, 0x00000200,
+ 0x30cc, 0xffffffff, 0x00000100,
+ 0xd0c0, 0xffffffff, 0xff000100,
+ 0x802c, 0xffffffff, 0x40000000,
+ 0x915c, 0xffffffff, 0x00010000,
+ 0x9160, 0xffffffff, 0x00030002,
+ 0x9178, 0xffffffff, 0x00070000,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x9180, 0xffffffff, 0x00050004,
+ 0x918c, 0xffffffff, 0x00010006,
+ 0x9190, 0xffffffff, 0x00090008,
+ 0x9194, 0xffffffff, 0x00070000,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x919c, 0xffffffff, 0x00050004,
+ 0x91a8, 0xffffffff, 0x00010006,
+ 0x91ac, 0xffffffff, 0x00090008,
+ 0x91b0, 0xffffffff, 0x00070000,
+ 0x91b4, 0xffffffff, 0x00030002,
+ 0x91b8, 0xffffffff, 0x00050004,
+ 0x91c4, 0xffffffff, 0x00010006,
+ 0x91c8, 0xffffffff, 0x00090008,
+ 0x91cc, 0xffffffff, 0x00070000,
+ 0x91d0, 0xffffffff, 0x00030002,
+ 0x91d4, 0xffffffff, 0x00050004,
+ 0x91e0, 0xffffffff, 0x00010006,
+ 0x91e4, 0xffffffff, 0x00090008,
+ 0x91e8, 0xffffffff, 0x00000000,
+ 0x91ec, 0xffffffff, 0x00070000,
+ 0x91f0, 0xffffffff, 0x00030002,
+ 0x91f4, 0xffffffff, 0x00050004,
+ 0x9200, 0xffffffff, 0x00010006,
+ 0x9204, 0xffffffff, 0x00090008,
+ 0x9208, 0xffffffff, 0x00070000,
+ 0x920c, 0xffffffff, 0x00030002,
+ 0x9210, 0xffffffff, 0x00050004,
+ 0x921c, 0xffffffff, 0x00010006,
+ 0x9220, 0xffffffff, 0x00090008,
+ 0x9224, 0xffffffff, 0x00070000,
+ 0x9228, 0xffffffff, 0x00030002,
+ 0x922c, 0xffffffff, 0x00050004,
+ 0x9238, 0xffffffff, 0x00010006,
+ 0x923c, 0xffffffff, 0x00090008,
+ 0x9240, 0xffffffff, 0x00070000,
+ 0x9244, 0xffffffff, 0x00030002,
+ 0x9248, 0xffffffff, 0x00050004,
+ 0x9254, 0xffffffff, 0x00010006,
+ 0x9258, 0xffffffff, 0x00090008,
+ 0x925c, 0xffffffff, 0x00070000,
+ 0x9260, 0xffffffff, 0x00030002,
+ 0x9264, 0xffffffff, 0x00050004,
+ 0x9270, 0xffffffff, 0x00010006,
+ 0x9274, 0xffffffff, 0x00090008,
+ 0x9278, 0xffffffff, 0x00070000,
+ 0x927c, 0xffffffff, 0x00030002,
+ 0x9280, 0xffffffff, 0x00050004,
+ 0x928c, 0xffffffff, 0x00010006,
+ 0x9290, 0xffffffff, 0x00090008,
+ 0x9294, 0xffffffff, 0x00000000,
+ 0x929c, 0xffffffff, 0x00000001,
+ 0x802c, 0xffffffff, 0x40010000,
+ 0x915c, 0xffffffff, 0x00010000,
+ 0x9160, 0xffffffff, 0x00030002,
+ 0x9178, 0xffffffff, 0x00070000,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x9180, 0xffffffff, 0x00050004,
+ 0x918c, 0xffffffff, 0x00010006,
+ 0x9190, 0xffffffff, 0x00090008,
+ 0x9194, 0xffffffff, 0x00070000,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x919c, 0xffffffff, 0x00050004,
+ 0x91a8, 0xffffffff, 0x00010006,
+ 0x91ac, 0xffffffff, 0x00090008,
+ 0x91b0, 0xffffffff, 0x00070000,
+ 0x91b4, 0xffffffff, 0x00030002,
+ 0x91b8, 0xffffffff, 0x00050004,
+ 0x91c4, 0xffffffff, 0x00010006,
+ 0x91c8, 0xffffffff, 0x00090008,
+ 0x91cc, 0xffffffff, 0x00070000,
+ 0x91d0, 0xffffffff, 0x00030002,
+ 0x91d4, 0xffffffff, 0x00050004,
+ 0x91e0, 0xffffffff, 0x00010006,
+ 0x91e4, 0xffffffff, 0x00090008,
+ 0x91e8, 0xffffffff, 0x00000000,
+ 0x91ec, 0xffffffff, 0x00070000,
+ 0x91f0, 0xffffffff, 0x00030002,
+ 0x91f4, 0xffffffff, 0x00050004,
+ 0x9200, 0xffffffff, 0x00010006,
+ 0x9204, 0xffffffff, 0x00090008,
+ 0x9208, 0xffffffff, 0x00070000,
+ 0x920c, 0xffffffff, 0x00030002,
+ 0x9210, 0xffffffff, 0x00050004,
+ 0x921c, 0xffffffff, 0x00010006,
+ 0x9220, 0xffffffff, 0x00090008,
+ 0x9224, 0xffffffff, 0x00070000,
+ 0x9228, 0xffffffff, 0x00030002,
+ 0x922c, 0xffffffff, 0x00050004,
+ 0x9238, 0xffffffff, 0x00010006,
+ 0x923c, 0xffffffff, 0x00090008,
+ 0x9240, 0xffffffff, 0x00070000,
+ 0x9244, 0xffffffff, 0x00030002,
+ 0x9248, 0xffffffff, 0x00050004,
+ 0x9254, 0xffffffff, 0x00010006,
+ 0x9258, 0xffffffff, 0x00090008,
+ 0x925c, 0xffffffff, 0x00070000,
+ 0x9260, 0xffffffff, 0x00030002,
+ 0x9264, 0xffffffff, 0x00050004,
+ 0x9270, 0xffffffff, 0x00010006,
+ 0x9274, 0xffffffff, 0x00090008,
+ 0x9278, 0xffffffff, 0x00070000,
+ 0x927c, 0xffffffff, 0x00030002,
+ 0x9280, 0xffffffff, 0x00050004,
+ 0x928c, 0xffffffff, 0x00010006,
+ 0x9290, 0xffffffff, 0x00090008,
+ 0x9294, 0xffffffff, 0x00000000,
+ 0x929c, 0xffffffff, 0x00000001,
+ 0x802c, 0xffffffff, 0xc0000000
+};
+
+static const u32 redwood_mgcg_init[] =
+{
+ 0x802c, 0xffffffff, 0xc0000000,
+ 0x5448, 0xffffffff, 0x00000100,
+ 0x55e4, 0xffffffff, 0x00000100,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x5644, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x8d58, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x9654, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0x9040, 0xffffffff, 0x00000100,
+ 0xa200, 0xffffffff, 0x00000100,
+ 0xa204, 0xffffffff, 0x00000100,
+ 0xa208, 0xffffffff, 0x00000100,
+ 0xa20c, 0xffffffff, 0x00000100,
+ 0x971c, 0xffffffff, 0x00000100,
+ 0x977c, 0xffffffff, 0x00000100,
+ 0x3f80, 0xffffffff, 0x00000100,
+ 0xa210, 0xffffffff, 0x00000100,
+ 0xa214, 0xffffffff, 0x00000100,
+ 0x4d8, 0xffffffff, 0x00000100,
+ 0x9784, 0xffffffff, 0x00000100,
+ 0x9698, 0xffffffff, 0x00000100,
+ 0x4d4, 0xffffffff, 0x00000200,
+ 0x30cc, 0xffffffff, 0x00000100,
+ 0xd0c0, 0xffffffff, 0xff000100,
+ 0x802c, 0xffffffff, 0x40000000,
+ 0x915c, 0xffffffff, 0x00010000,
+ 0x9160, 0xffffffff, 0x00030002,
+ 0x9178, 0xffffffff, 0x00070000,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x9180, 0xffffffff, 0x00050004,
+ 0x918c, 0xffffffff, 0x00010006,
+ 0x9190, 0xffffffff, 0x00090008,
+ 0x9194, 0xffffffff, 0x00070000,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x919c, 0xffffffff, 0x00050004,
+ 0x91a8, 0xffffffff, 0x00010006,
+ 0x91ac, 0xffffffff, 0x00090008,
+ 0x91b0, 0xffffffff, 0x00070000,
+ 0x91b4, 0xffffffff, 0x00030002,
+ 0x91b8, 0xffffffff, 0x00050004,
+ 0x91c4, 0xffffffff, 0x00010006,
+ 0x91c8, 0xffffffff, 0x00090008,
+ 0x91cc, 0xffffffff, 0x00070000,
+ 0x91d0, 0xffffffff, 0x00030002,
+ 0x91d4, 0xffffffff, 0x00050004,
+ 0x91e0, 0xffffffff, 0x00010006,
+ 0x91e4, 0xffffffff, 0x00090008,
+ 0x91e8, 0xffffffff, 0x00000000,
+ 0x91ec, 0xffffffff, 0x00070000,
+ 0x91f0, 0xffffffff, 0x00030002,
+ 0x91f4, 0xffffffff, 0x00050004,
+ 0x9200, 0xffffffff, 0x00010006,
+ 0x9204, 0xffffffff, 0x00090008,
+ 0x9294, 0xffffffff, 0x00000000,
+ 0x929c, 0xffffffff, 0x00000001,
+ 0x802c, 0xffffffff, 0xc0000000
+};
+
+static const u32 cedar_golden_registers[] =
+{
+ 0x3f90, 0xffff0000, 0xff000000,
+ 0x9148, 0xffff0000, 0xff000000,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0x9b7c, 0xffffffff, 0x00000000,
+ 0x8a14, 0xffffffff, 0x00000007,
+ 0x8b10, 0xffffffff, 0x00000000,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x88c4, 0xffffffff, 0x000000c2,
+ 0x88d4, 0xffffffff, 0x00000000,
+ 0x8974, 0xffffffff, 0x00000000,
+ 0xc78, 0x00000080, 0x00000080,
+ 0x5eb4, 0xffffffff, 0x00000002,
+ 0x5e78, 0xffffffff, 0x001000f0,
+ 0x6104, 0x01000300, 0x00000000,
+ 0x5bc0, 0x00300000, 0x00000000,
+ 0x7030, 0xffffffff, 0x00000011,
+ 0x7c30, 0xffffffff, 0x00000011,
+ 0x10830, 0xffffffff, 0x00000011,
+ 0x11430, 0xffffffff, 0x00000011,
+ 0xd02c, 0xffffffff, 0x08421000,
+ 0x240c, 0xffffffff, 0x00000380,
+ 0x8b24, 0xffffffff, 0x00ff0fff,
+ 0x28a4c, 0x06000000, 0x06000000,
+ 0x10c, 0x00000001, 0x00000001,
+ 0x8d00, 0xffffffff, 0x100e4848,
+ 0x8d04, 0xffffffff, 0x00164745,
+ 0x8c00, 0xffffffff, 0xe4000003,
+ 0x8c04, 0xffffffff, 0x40600060,
+ 0x8c08, 0xffffffff, 0x001c001c,
+ 0x8cf0, 0xffffffff, 0x08e00410,
+ 0x8c20, 0xffffffff, 0x00800080,
+ 0x8c24, 0xffffffff, 0x00800080,
+ 0x8c18, 0xffffffff, 0x20202078,
+ 0x8c1c, 0xffffffff, 0x00001010,
+ 0x28350, 0xffffffff, 0x00000000,
+ 0xa008, 0xffffffff, 0x00010000,
+ 0x5cc, 0xffffffff, 0x00000001,
+ 0x9508, 0xffffffff, 0x00000002
+};
+
+static const u32 cedar_mgcg_init[] =
+{
+ 0x802c, 0xffffffff, 0xc0000000,
+ 0x5448, 0xffffffff, 0x00000100,
+ 0x55e4, 0xffffffff, 0x00000100,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x5644, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x8d58, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x9654, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0x9040, 0xffffffff, 0x00000100,
+ 0xa200, 0xffffffff, 0x00000100,
+ 0xa204, 0xffffffff, 0x00000100,
+ 0xa208, 0xffffffff, 0x00000100,
+ 0xa20c, 0xffffffff, 0x00000100,
+ 0x971c, 0xffffffff, 0x00000100,
+ 0x977c, 0xffffffff, 0x00000100,
+ 0x3f80, 0xffffffff, 0x00000100,
+ 0xa210, 0xffffffff, 0x00000100,
+ 0xa214, 0xffffffff, 0x00000100,
+ 0x4d8, 0xffffffff, 0x00000100,
+ 0x9784, 0xffffffff, 0x00000100,
+ 0x9698, 0xffffffff, 0x00000100,
+ 0x4d4, 0xffffffff, 0x00000200,
+ 0x30cc, 0xffffffff, 0x00000100,
+ 0xd0c0, 0xffffffff, 0xff000100,
+ 0x802c, 0xffffffff, 0x40000000,
+ 0x915c, 0xffffffff, 0x00010000,
+ 0x9178, 0xffffffff, 0x00050000,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x918c, 0xffffffff, 0x00010004,
+ 0x9190, 0xffffffff, 0x00070006,
+ 0x9194, 0xffffffff, 0x00050000,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x91a8, 0xffffffff, 0x00010004,
+ 0x91ac, 0xffffffff, 0x00070006,
+ 0x91e8, 0xffffffff, 0x00000000,
+ 0x9294, 0xffffffff, 0x00000000,
+ 0x929c, 0xffffffff, 0x00000001,
+ 0x802c, 0xffffffff, 0xc0000000
+};
+
+static const u32 juniper_mgcg_init[] =
+{
+ 0x802c, 0xffffffff, 0xc0000000,
+ 0x5448, 0xffffffff, 0x00000100,
+ 0x55e4, 0xffffffff, 0x00000100,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x5644, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x8d58, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x9654, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0x9040, 0xffffffff, 0x00000100,
+ 0xa200, 0xffffffff, 0x00000100,
+ 0xa204, 0xffffffff, 0x00000100,
+ 0xa208, 0xffffffff, 0x00000100,
+ 0xa20c, 0xffffffff, 0x00000100,
+ 0x971c, 0xffffffff, 0x00000100,
+ 0xd0c0, 0xffffffff, 0xff000100,
+ 0x802c, 0xffffffff, 0x40000000,
+ 0x915c, 0xffffffff, 0x00010000,
+ 0x9160, 0xffffffff, 0x00030002,
+ 0x9178, 0xffffffff, 0x00070000,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x9180, 0xffffffff, 0x00050004,
+ 0x918c, 0xffffffff, 0x00010006,
+ 0x9190, 0xffffffff, 0x00090008,
+ 0x9194, 0xffffffff, 0x00070000,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x919c, 0xffffffff, 0x00050004,
+ 0x91a8, 0xffffffff, 0x00010006,
+ 0x91ac, 0xffffffff, 0x00090008,
+ 0x91b0, 0xffffffff, 0x00070000,
+ 0x91b4, 0xffffffff, 0x00030002,
+ 0x91b8, 0xffffffff, 0x00050004,
+ 0x91c4, 0xffffffff, 0x00010006,
+ 0x91c8, 0xffffffff, 0x00090008,
+ 0x91cc, 0xffffffff, 0x00070000,
+ 0x91d0, 0xffffffff, 0x00030002,
+ 0x91d4, 0xffffffff, 0x00050004,
+ 0x91e0, 0xffffffff, 0x00010006,
+ 0x91e4, 0xffffffff, 0x00090008,
+ 0x91e8, 0xffffffff, 0x00000000,
+ 0x91ec, 0xffffffff, 0x00070000,
+ 0x91f0, 0xffffffff, 0x00030002,
+ 0x91f4, 0xffffffff, 0x00050004,
+ 0x9200, 0xffffffff, 0x00010006,
+ 0x9204, 0xffffffff, 0x00090008,
+ 0x9208, 0xffffffff, 0x00070000,
+ 0x920c, 0xffffffff, 0x00030002,
+ 0x9210, 0xffffffff, 0x00050004,
+ 0x921c, 0xffffffff, 0x00010006,
+ 0x9220, 0xffffffff, 0x00090008,
+ 0x9224, 0xffffffff, 0x00070000,
+ 0x9228, 0xffffffff, 0x00030002,
+ 0x922c, 0xffffffff, 0x00050004,
+ 0x9238, 0xffffffff, 0x00010006,
+ 0x923c, 0xffffffff, 0x00090008,
+ 0x9240, 0xffffffff, 0x00070000,
+ 0x9244, 0xffffffff, 0x00030002,
+ 0x9248, 0xffffffff, 0x00050004,
+ 0x9254, 0xffffffff, 0x00010006,
+ 0x9258, 0xffffffff, 0x00090008,
+ 0x925c, 0xffffffff, 0x00070000,
+ 0x9260, 0xffffffff, 0x00030002,
+ 0x9264, 0xffffffff, 0x00050004,
+ 0x9270, 0xffffffff, 0x00010006,
+ 0x9274, 0xffffffff, 0x00090008,
+ 0x9278, 0xffffffff, 0x00070000,
+ 0x927c, 0xffffffff, 0x00030002,
+ 0x9280, 0xffffffff, 0x00050004,
+ 0x928c, 0xffffffff, 0x00010006,
+ 0x9290, 0xffffffff, 0x00090008,
+ 0x9294, 0xffffffff, 0x00000000,
+ 0x929c, 0xffffffff, 0x00000001,
+ 0x802c, 0xffffffff, 0xc0000000,
+ 0x977c, 0xffffffff, 0x00000100,
+ 0x3f80, 0xffffffff, 0x00000100,
+ 0xa210, 0xffffffff, 0x00000100,
+ 0xa214, 0xffffffff, 0x00000100,
+ 0x4d8, 0xffffffff, 0x00000100,
+ 0x9784, 0xffffffff, 0x00000100,
+ 0x9698, 0xffffffff, 0x00000100,
+ 0x4d4, 0xffffffff, 0x00000200,
+ 0x30cc, 0xffffffff, 0x00000100,
+ 0x802c, 0xffffffff, 0xc0000000
+};
+
+static const u32 supersumo_golden_registers[] =
+{
+ 0x5eb4, 0xffffffff, 0x00000002,
+ 0x5cc, 0xffffffff, 0x00000001,
+ 0x7030, 0xffffffff, 0x00000011,
+ 0x7c30, 0xffffffff, 0x00000011,
+ 0x6104, 0x01000300, 0x00000000,
+ 0x5bc0, 0x00300000, 0x00000000,
+ 0x8c04, 0xffffffff, 0x40600060,
+ 0x8c08, 0xffffffff, 0x001c001c,
+ 0x8c20, 0xffffffff, 0x00800080,
+ 0x8c24, 0xffffffff, 0x00800080,
+ 0x8c18, 0xffffffff, 0x20202078,
+ 0x8c1c, 0xffffffff, 0x00001010,
+ 0x918c, 0xffffffff, 0x00010006,
+ 0x91a8, 0xffffffff, 0x00010006,
+ 0x91c4, 0xffffffff, 0x00010006,
+ 0x91e0, 0xffffffff, 0x00010006,
+ 0x9200, 0xffffffff, 0x00010006,
+ 0x9150, 0xffffffff, 0x6e944040,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x9180, 0xffffffff, 0x00050004,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x919c, 0xffffffff, 0x00050004,
+ 0x91b4, 0xffffffff, 0x00030002,
+ 0x91b8, 0xffffffff, 0x00050004,
+ 0x91d0, 0xffffffff, 0x00030002,
+ 0x91d4, 0xffffffff, 0x00050004,
+ 0x91f0, 0xffffffff, 0x00030002,
+ 0x91f4, 0xffffffff, 0x00050004,
+ 0x915c, 0xffffffff, 0x00010000,
+ 0x9160, 0xffffffff, 0x00030002,
+ 0x3f90, 0xffff0000, 0xff000000,
+ 0x9178, 0xffffffff, 0x00070000,
+ 0x9194, 0xffffffff, 0x00070000,
+ 0x91b0, 0xffffffff, 0x00070000,
+ 0x91cc, 0xffffffff, 0x00070000,
+ 0x91ec, 0xffffffff, 0x00070000,
+ 0x9148, 0xffff0000, 0xff000000,
+ 0x9190, 0xffffffff, 0x00090008,
+ 0x91ac, 0xffffffff, 0x00090008,
+ 0x91c8, 0xffffffff, 0x00090008,
+ 0x91e4, 0xffffffff, 0x00090008,
+ 0x9204, 0xffffffff, 0x00090008,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0x929c, 0xffffffff, 0x00000001,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x5644, 0xffffffff, 0x00000100,
+ 0x9b7c, 0xffffffff, 0x00000000,
+ 0x8030, 0xffffffff, 0x0000100a,
+ 0x8a14, 0xffffffff, 0x00000007,
+ 0x8b24, 0xffffffff, 0x00ff0fff,
+ 0x8b10, 0xffffffff, 0x00000000,
+ 0x28a4c, 0x06000000, 0x06000000,
+ 0x4d8, 0xffffffff, 0x00000100,
+ 0x913c, 0xffff000f, 0x0100000a,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x88c4, 0xffffffff, 0x000000c2,
+ 0x88d4, 0xffffffff, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000,
+ 0xc78, 0x00000080, 0x00000080,
+ 0x5e78, 0xffffffff, 0x001000f0,
+ 0xd02c, 0xffffffff, 0x08421000,
+ 0xa008, 0xffffffff, 0x00010000,
+ 0x8d00, 0xffffffff, 0x100e4848,
+ 0x8d04, 0xffffffff, 0x00164745,
+ 0x8c00, 0xffffffff, 0xe4000003,
+ 0x8cf0, 0x1fffffff, 0x08e00620,
+ 0x28350, 0xffffffff, 0x00000000,
+ 0x9508, 0xffffffff, 0x00000002
+};
+
+static const u32 sumo_golden_registers[] =
+{
+ 0x900c, 0x00ffffff, 0x0017071f,
+ 0x8c18, 0xffffffff, 0x10101060,
+ 0x8c1c, 0xffffffff, 0x00001010,
+ 0x8c30, 0x0000000f, 0x00000005,
+ 0x9688, 0x0000000f, 0x00000007
+};
+
+static const u32 wrestler_golden_registers[] =
+{
+ 0x5eb4, 0xffffffff, 0x00000002,
+ 0x5cc, 0xffffffff, 0x00000001,
+ 0x7030, 0xffffffff, 0x00000011,
+ 0x7c30, 0xffffffff, 0x00000011,
+ 0x6104, 0x01000300, 0x00000000,
+ 0x5bc0, 0x00300000, 0x00000000,
+ 0x918c, 0xffffffff, 0x00010006,
+ 0x91a8, 0xffffffff, 0x00010006,
+ 0x9150, 0xffffffff, 0x6e944040,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x915c, 0xffffffff, 0x00010000,
+ 0x3f90, 0xffff0000, 0xff000000,
+ 0x9178, 0xffffffff, 0x00070000,
+ 0x9194, 0xffffffff, 0x00070000,
+ 0x9148, 0xffff0000, 0xff000000,
+ 0x9190, 0xffffffff, 0x00090008,
+ 0x91ac, 0xffffffff, 0x00090008,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0x929c, 0xffffffff, 0x00000001,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x9b7c, 0xffffffff, 0x00000000,
+ 0x8030, 0xffffffff, 0x0000100a,
+ 0x8a14, 0xffffffff, 0x00000001,
+ 0x8b24, 0xffffffff, 0x00ff0fff,
+ 0x8b10, 0xffffffff, 0x00000000,
+ 0x28a4c, 0x06000000, 0x06000000,
+ 0x4d8, 0xffffffff, 0x00000100,
+ 0x913c, 0xffff000f, 0x0100000a,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x88c4, 0xffffffff, 0x000000c2,
+ 0x88d4, 0xffffffff, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000,
+ 0xc78, 0x00000080, 0x00000080,
+ 0x5e78, 0xffffffff, 0x001000f0,
+ 0xd02c, 0xffffffff, 0x08421000,
+ 0xa008, 0xffffffff, 0x00010000,
+ 0x8d00, 0xffffffff, 0x100e4848,
+ 0x8d04, 0xffffffff, 0x00164745,
+ 0x8c00, 0xffffffff, 0xe4000003,
+ 0x8cf0, 0x1fffffff, 0x08e00410,
+ 0x28350, 0xffffffff, 0x00000000,
+ 0x9508, 0xffffffff, 0x00000002,
+ 0x900c, 0xffffffff, 0x0017071f,
+ 0x8c18, 0xffffffff, 0x10101060,
+ 0x8c1c, 0xffffffff, 0x00001010
+};
+
+static const u32 barts_golden_registers[] =
+{
+ 0x5eb4, 0xffffffff, 0x00000002,
+ 0x5e78, 0x8f311ff1, 0x001000f0,
+ 0x3f90, 0xffff0000, 0xff000000,
+ 0x9148, 0xffff0000, 0xff000000,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0xc78, 0x00000080, 0x00000080,
+ 0xbd4, 0x70073777, 0x00010001,
+ 0xd02c, 0xbfffff1f, 0x08421000,
+ 0xd0b8, 0x03773777, 0x02011003,
+ 0x5bc0, 0x00200000, 0x50100000,
+ 0x98f8, 0x33773777, 0x02011003,
+ 0x98fc, 0xffffffff, 0x76543210,
+ 0x7030, 0x31000311, 0x00000011,
+ 0x2f48, 0x00000007, 0x02011003,
+ 0x6b28, 0x00000010, 0x00000012,
+ 0x7728, 0x00000010, 0x00000012,
+ 0x10328, 0x00000010, 0x00000012,
+ 0x10f28, 0x00000010, 0x00000012,
+ 0x11b28, 0x00000010, 0x00000012,
+ 0x12728, 0x00000010, 0x00000012,
+ 0x240c, 0x000007ff, 0x00000380,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8b24, 0x3fff3fff, 0x00ff0fff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x06000000,
+ 0x10c, 0x00000001, 0x00010003,
+ 0xa02c, 0xffffffff, 0x0000009b,
+ 0x913c, 0x0000000f, 0x0100000a,
+ 0x8d00, 0xffff7f7f, 0x100e4848,
+ 0x8d04, 0x00ffffff, 0x00164745,
+ 0x8c00, 0xfffc0003, 0xe4000003,
+ 0x8c04, 0xf8ff00ff, 0x40600060,
+ 0x8c08, 0x00ff00ff, 0x001c001c,
+ 0x8cf0, 0x1fff1fff, 0x08e00620,
+ 0x8c20, 0x0fff0fff, 0x00800080,
+ 0x8c24, 0x0fff0fff, 0x00800080,
+ 0x8c18, 0xffffffff, 0x20202078,
+ 0x8c1c, 0x0000ffff, 0x00001010,
+ 0x28350, 0x00000f01, 0x00000000,
+ 0x9508, 0x3700001f, 0x00000002,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x88c4, 0x001f3ae3, 0x000000c2,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000
+};
+
+static const u32 turks_golden_registers[] =
+{
+ 0x5eb4, 0xffffffff, 0x00000002,
+ 0x5e78, 0x8f311ff1, 0x001000f0,
+ 0x8c8, 0x00003000, 0x00001070,
+ 0x8cc, 0x000fffff, 0x00040035,
+ 0x3f90, 0xffff0000, 0xfff00000,
+ 0x9148, 0xffff0000, 0xfff00000,
+ 0x3f94, 0xffff0000, 0xfff00000,
+ 0x914c, 0xffff0000, 0xfff00000,
+ 0xc78, 0x00000080, 0x00000080,
+ 0xbd4, 0x00073007, 0x00010002,
+ 0xd02c, 0xbfffff1f, 0x08421000,
+ 0xd0b8, 0x03773777, 0x02010002,
+ 0x5bc0, 0x00200000, 0x50100000,
+ 0x98f8, 0x33773777, 0x00010002,
+ 0x98fc, 0xffffffff, 0x33221100,
+ 0x7030, 0x31000311, 0x00000011,
+ 0x2f48, 0x33773777, 0x00010002,
+ 0x6b28, 0x00000010, 0x00000012,
+ 0x7728, 0x00000010, 0x00000012,
+ 0x10328, 0x00000010, 0x00000012,
+ 0x10f28, 0x00000010, 0x00000012,
+ 0x11b28, 0x00000010, 0x00000012,
+ 0x12728, 0x00000010, 0x00000012,
+ 0x240c, 0x000007ff, 0x00000380,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8b24, 0x3fff3fff, 0x00ff0fff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x06000000,
+ 0x10c, 0x00000001, 0x00010003,
+ 0xa02c, 0xffffffff, 0x0000009b,
+ 0x913c, 0x0000000f, 0x0100000a,
+ 0x8d00, 0xffff7f7f, 0x100e4848,
+ 0x8d04, 0x00ffffff, 0x00164745,
+ 0x8c00, 0xfffc0003, 0xe4000003,
+ 0x8c04, 0xf8ff00ff, 0x40600060,
+ 0x8c08, 0x00ff00ff, 0x001c001c,
+ 0x8cf0, 0x1fff1fff, 0x08e00410,
+ 0x8c20, 0x0fff0fff, 0x00800080,
+ 0x8c24, 0x0fff0fff, 0x00800080,
+ 0x8c18, 0xffffffff, 0x20202078,
+ 0x8c1c, 0x0000ffff, 0x00001010,
+ 0x28350, 0x00000f01, 0x00000000,
+ 0x9508, 0x3700001f, 0x00000002,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x88c4, 0x001f3ae3, 0x000000c2,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000
+};
+
+static const u32 caicos_golden_registers[] =
+{
+ 0x5eb4, 0xffffffff, 0x00000002,
+ 0x5e78, 0x8f311ff1, 0x001000f0,
+ 0x8c8, 0x00003420, 0x00001450,
+ 0x8cc, 0x000fffff, 0x00040035,
+ 0x3f90, 0xffff0000, 0xfffc0000,
+ 0x9148, 0xffff0000, 0xfffc0000,
+ 0x3f94, 0xffff0000, 0xfffc0000,
+ 0x914c, 0xffff0000, 0xfffc0000,
+ 0xc78, 0x00000080, 0x00000080,
+ 0xbd4, 0x00073007, 0x00010001,
+ 0xd02c, 0xbfffff1f, 0x08421000,
+ 0xd0b8, 0x03773777, 0x02010001,
+ 0x5bc0, 0x00200000, 0x50100000,
+ 0x98f8, 0x33773777, 0x02010001,
+ 0x98fc, 0xffffffff, 0x33221100,
+ 0x7030, 0x31000311, 0x00000011,
+ 0x2f48, 0x33773777, 0x02010001,
+ 0x6b28, 0x00000010, 0x00000012,
+ 0x7728, 0x00000010, 0x00000012,
+ 0x10328, 0x00000010, 0x00000012,
+ 0x10f28, 0x00000010, 0x00000012,
+ 0x11b28, 0x00000010, 0x00000012,
+ 0x12728, 0x00000010, 0x00000012,
+ 0x240c, 0x000007ff, 0x00000380,
+ 0x8a14, 0xf000001f, 0x00000001,
+ 0x8b24, 0x3fff3fff, 0x00ff0fff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x06000000,
+ 0x10c, 0x00000001, 0x00010003,
+ 0xa02c, 0xffffffff, 0x0000009b,
+ 0x913c, 0x0000000f, 0x0100000a,
+ 0x8d00, 0xffff7f7f, 0x100e4848,
+ 0x8d04, 0x00ffffff, 0x00164745,
+ 0x8c00, 0xfffc0003, 0xe4000003,
+ 0x8c04, 0xf8ff00ff, 0x40600060,
+ 0x8c08, 0x00ff00ff, 0x001c001c,
+ 0x8cf0, 0x1fff1fff, 0x08e00410,
+ 0x8c20, 0x0fff0fff, 0x00800080,
+ 0x8c24, 0x0fff0fff, 0x00800080,
+ 0x8c18, 0xffffffff, 0x20202078,
+ 0x8c1c, 0x0000ffff, 0x00001010,
+ 0x28350, 0x00000f01, 0x00000000,
+ 0x9508, 0x3700001f, 0x00000002,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x88c4, 0x001f3ae3, 0x000000c2,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000
+};
+
+static void evergreen_init_golden_registers(struct radeon_device *rdev)
+{
+ switch (rdev->family) {
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ radeon_program_register_sequence(rdev,
+ evergreen_golden_registers,
+ (const u32)ARRAY_SIZE(evergreen_golden_registers));
+ radeon_program_register_sequence(rdev,
+ evergreen_golden_registers2,
+ (const u32)ARRAY_SIZE(evergreen_golden_registers2));
+ radeon_program_register_sequence(rdev,
+ cypress_mgcg_init,
+ (const u32)ARRAY_SIZE(cypress_mgcg_init));
+ break;
+ case CHIP_JUNIPER:
+ radeon_program_register_sequence(rdev,
+ evergreen_golden_registers,
+ (const u32)ARRAY_SIZE(evergreen_golden_registers));
+ radeon_program_register_sequence(rdev,
+ evergreen_golden_registers2,
+ (const u32)ARRAY_SIZE(evergreen_golden_registers2));
+ radeon_program_register_sequence(rdev,
+ juniper_mgcg_init,
+ (const u32)ARRAY_SIZE(juniper_mgcg_init));
+ break;
+ case CHIP_REDWOOD:
+ radeon_program_register_sequence(rdev,
+ evergreen_golden_registers,
+ (const u32)ARRAY_SIZE(evergreen_golden_registers));
+ radeon_program_register_sequence(rdev,
+ evergreen_golden_registers2,
+ (const u32)ARRAY_SIZE(evergreen_golden_registers2));
+ radeon_program_register_sequence(rdev,
+ redwood_mgcg_init,
+ (const u32)ARRAY_SIZE(redwood_mgcg_init));
+ break;
+ case CHIP_CEDAR:
+ radeon_program_register_sequence(rdev,
+ cedar_golden_registers,
+ (const u32)ARRAY_SIZE(cedar_golden_registers));
+ radeon_program_register_sequence(rdev,
+ evergreen_golden_registers2,
+ (const u32)ARRAY_SIZE(evergreen_golden_registers2));
+ radeon_program_register_sequence(rdev,
+ cedar_mgcg_init,
+ (const u32)ARRAY_SIZE(cedar_mgcg_init));
+ break;
+ case CHIP_PALM:
+ radeon_program_register_sequence(rdev,
+ wrestler_golden_registers,
+ (const u32)ARRAY_SIZE(wrestler_golden_registers));
+ break;
+ case CHIP_SUMO:
+ radeon_program_register_sequence(rdev,
+ supersumo_golden_registers,
+ (const u32)ARRAY_SIZE(supersumo_golden_registers));
+ break;
+ case CHIP_SUMO2:
+ radeon_program_register_sequence(rdev,
+ supersumo_golden_registers,
+ (const u32)ARRAY_SIZE(supersumo_golden_registers));
+ radeon_program_register_sequence(rdev,
+ sumo_golden_registers,
+ (const u32)ARRAY_SIZE(sumo_golden_registers));
+ break;
+ case CHIP_BARTS:
+ radeon_program_register_sequence(rdev,
+ barts_golden_registers,
+ (const u32)ARRAY_SIZE(barts_golden_registers));
+ break;
+ case CHIP_TURKS:
+ radeon_program_register_sequence(rdev,
+ turks_golden_registers,
+ (const u32)ARRAY_SIZE(turks_golden_registers));
+ break;
+ case CHIP_CAICOS:
+ radeon_program_register_sequence(rdev,
+ caicos_golden_registers,
+ (const u32)ARRAY_SIZE(caicos_golden_registers));
+ break;
+ default:
+ break;
+ }
+}
+
void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
unsigned *bankh, unsigned *mtaspect,
unsigned *tile_split)
@@ -84,6 +942,142 @@ void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
}
}
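+/* Program a single UVD clock (vclk or dclk) through the atom clock
+ * dividers and wait for the matching status bit to report the switch. */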
+static int sumo_set_uvd_clock(struct radeon_device *rdev, u32 clock,
+ u32 cntl_reg, u32 status_reg)
+{
+ int r, i;
+ struct atom_clock_dividers dividers;
+
+ r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ clock, false, &dividers);
+ if (r)
+ return r;
+
+ WREG32_P(cntl_reg, dividers.post_div, ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK));
+
+ for (i = 0; i < 100; i++) {
+ if (RREG32(status_reg) & DCLK_STATUS)
+ break;
+ mdelay(10);
+ }
+ if (i == 100)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+ int r = 0;
+ u32 cg_scratch = RREG32(CG_SCRATCH1);
+
+ r = sumo_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
+ if (r)
+ goto done;
+ cg_scratch &= 0xffff0000;
+ cg_scratch |= vclk / 100; /* Mhz */
+
+ r = sumo_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
+ if (r)
+ goto done;
+ cg_scratch &= 0x0000ffff;
+ cg_scratch |= (dclk / 100) << 16; /* Mhz */
+
+done:
+ WREG32(CG_SCRATCH1, cg_scratch);
+
+ return r;
+}
+
+int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+ /* start off with something large */
+ unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
+ int r;
+
+ /* bypass vclk and dclk with bclk */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+ ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+ /* put PLL in bypass mode */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
+
+ if (!vclk || !dclk) {
+ /* keep the Bypass mode, put PLL to sleep */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+ return 0;
+ }
+
+ r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
+ 16384, 0x03FFFFFF, 0, 128, 5,
+ &fb_div, &vclk_div, &dclk_div);
+ if (r)
+ return r;
+
+ /* set VCO_MODE to 1 */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
+
+ /* toggle UPLL_SLEEP to 1 then back to 0 */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
+
+ /* deassert UPLL_RESET */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+ mdelay(1);
+
+ r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+ if (r)
+ return r;
+
+ /* assert UPLL_RESET again */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+
+ /* disable spread spectrum. */
+ WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
+
+ /* set feedback divider */
+ WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
+
+ /* set ref divider to 0 */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
+
+ if (fb_div < 307200)
+ WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
+ else
+ WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
+
+ /* set PDIV_A and PDIV_B */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
+ ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
+
+ /* give the PLL some time to settle */
+ mdelay(15);
+
+ /* deassert PLL_RESET */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+ mdelay(15);
+
+ /* switch from bypass mode to normal mode */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+
+ r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+ if (r)
+ return r;
+
+ /* switch VCLK and DCLK selection */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+ ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+ mdelay(100);
+
+ return 0;
+}
+
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
u16 ctl, v;
@@ -105,6 +1099,27 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
}
}
+static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+ if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
+ return true;
+ else
+ return false;
+}
+
+static bool dce4_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+ u32 pos1, pos2;
+
+ pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+ pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+ if (pos1 != pos2)
+ return true;
+ else
+ return false;
+}
+
/**
* dce4_wait_for_vblank - vblank wait asic callback.
*
@@ -115,21 +1130,28 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
*/
void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
- int i;
+ unsigned i = 0;
if (crtc >= rdev->num_crtc)
return;
- if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
+ if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
+ return;
+
+ /* depending on when we hit vblank, we may be close to active; if so,
+ * wait for another frame.
+ */
+ while (dce4_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!dce4_is_counter_moving(rdev, crtc))
break;
- udelay(1);
}
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
+ }
+
+ while (!dce4_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!dce4_is_counter_moving(rdev, crtc))
break;
- udelay(1);
}
}
}
@@ -608,6 +1630,16 @@ void evergreen_hpd_init(struct radeon_device *rdev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ /* don't try to enable hpd on eDP or LVDS; this avoids breaking the
+ * aux dp channel on imac and helps (but does not completely fix)
+ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+ * and also avoids interrupt storms during dpms.
+ */
+ continue;
+ }
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, tmp);
@@ -1325,17 +2357,16 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
radeon_wait_for_vblank(rdev, i);
- tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
radeon_wait_for_vblank(rdev, i);
- tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
@@ -1347,6 +2378,15 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
break;
udelay(1);
}
+
+ /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~EVERGREEN_CRTC_MASTER_EN;
+ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ save->crtc_enabled[i] = false;
+ /* ***** */
} else {
save->crtc_enabled[i] = false;
}
@@ -1364,6 +2404,22 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
}
/* wait for the MC to settle */
udelay(100);
+
+ /* lock double buffered regs */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) {
+ tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (!(tmp & 1)) {
+ tmp |= 1;
+ WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ }
+ }
}
void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -1385,6 +2441,33 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+ /* unlock regs and wait for update */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
+ if ((tmp & 0x3) != 0) {
+ tmp &= ~0x3;
+ WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
+ tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (tmp & 1) {
+ tmp &= ~1;
+ WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
+ break;
+ udelay(1);
+ }
+ }
+ }
+
/* unblackout the MC */
tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
tmp &= ~BLACKOUT_MODE_MASK;
@@ -2050,6 +3133,14 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
}
/* enabled rb are just the one not disabled :) */
disabled_rb_mask = tmp;
+ tmp = 0;
+ for (i = 0; i < rdev->config.evergreen.max_backends; i++)
+ tmp |= (1 << i);
+ /* if all the backends are disabled, fix it up here */
+ if ((disabled_rb_mask & tmp) == tmp) {
+ for (i = 0; i < rdev->config.evergreen.max_backends; i++)
+ disabled_rb_mask &= ~(1 << i);
+ }
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
@@ -2058,6 +3149,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
WREG32(DMA_TILING_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
if ((rdev->config.evergreen.max_backends == 1) &&
(rdev->flags & RADEON_IS_IGP)) {
@@ -3360,6 +4454,9 @@ restart_ih:
DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
+ case 124: /* UVD */
+ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
break;
case 146:
case 147:
@@ -3571,7 +4668,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
static int evergreen_startup(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ struct radeon_ring *ring;
int r;
/* enable pcie gen2 link */
@@ -3638,6 +4735,17 @@ static int evergreen_startup(struct radeon_device *rdev)
return r;
}
+ r = rv770_uvd_resume(rdev);
+ if (!r) {
+ r = radeon_fence_driver_start_ring(rdev,
+ R600_RING_TYPE_UVD_INDEX);
+ if (r)
+ dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+ }
+
+ if (r)
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -3647,6 +4755,7 @@ static int evergreen_startup(struct radeon_device *rdev)
}
evergreen_irq_set(rdev);
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
@@ -3670,6 +4779,19 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ if (ring->ring_size) {
+ r = radeon_ring_init(rdev, ring, ring->ring_size,
+ R600_WB_UVD_RPTR_OFFSET,
+ UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
+ if (!r)
+ r = r600_uvd_init(rdev);
+
+ if (r)
+ DRM_ERROR("radeon: error initializing UVD (%d).\n", r);
+ }
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -3701,6 +4823,9 @@ int evergreen_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ /* init golden registers */
+ evergreen_init_golden_registers(rdev);
+
rdev->accel_working = true;
r = evergreen_startup(rdev);
if (r) {
@@ -3716,8 +4841,10 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
+ radeon_uvd_suspend(rdev);
r700_cp_stop(rdev);
r600_dma_stop(rdev);
+ r600_uvd_rbc_stop(rdev);
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
@@ -3762,6 +4889,8 @@ int evergreen_init(struct radeon_device *rdev)
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
+ /* init golden registers */
+ evergreen_init_golden_registers(rdev);
/* Initialize scratch registers */
r600_scratch_init(rdev);
/* Initialize surface registers */
@@ -3797,6 +4926,13 @@ int evergreen_init(struct radeon_device *rdev)
rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+ r = radeon_uvd_init(rdev);
+ if (!r) {
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX],
+ 4096);
+ }
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -3843,6 +4979,7 @@ void evergreen_fini(struct radeon_device *rdev)
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
+ radeon_uvd_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
@@ -3878,7 +5015,7 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
if (!(mask & DRM_PCIE_SPEED_50))
return;
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
if (speed_cntl & LC_CURRENT_DATA_RATE) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
@@ -3889,33 +5026,33 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
(speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
- link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
- WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_GEN2_EN_STRAP;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
} else {
- link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
if (1)
link_width_cntl |= LC_UPCONFIGURE_DIS;
else
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
- WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
}
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 4fdecc2..b4ab8ce 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -54,6 +54,68 @@ static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t cloc
WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
}
+static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ struct cea_sad *sads;
+ int i, sad_count;
+
+ static const u16 eld_reg_to_type[][2] = {
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
+ };
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ radeon_connector = to_radeon_connector(connector);
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+ if (sad_count < 0) {
+ DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ return;
+ }
+ BUG_ON(!sads);
+
+ for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
+ u32 value = 0;
+ int j;
+
+ for (j = 0; j < sad_count; j++) {
+ struct cea_sad *sad = &sads[j];
+
+ if (sad->format == eld_reg_to_type[i][1]) {
+ value = MAX_CHANNELS(sad->channels) |
+ DESCRIPTOR_BYTE_2(sad->byte2) |
+ SUPPORTED_FREQUENCIES(sad->freq);
+ if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
+ value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
+ break;
+ }
+ }
+ WREG32(eld_reg_to_type[i][0], value);
+ }
+
+ kfree(sads);
+}
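For context on the registers written above: each cea_sad returned by drm_edid_to_sad() is a Short Audio Descriptor parsed from the display's EDID CEA block. The field summary below is a sketch of the assumed struct layout (include/drm/drm_edid.h), not something introduced by this patch:
/* struct cea_sad, as used by evergreen_hdmi_write_sad_regs() (assumed layout):
 *   u8 format;   CEA audio coding type, matched against eld_reg_to_type[i][1]
 *   u8 channels; maximum channel count minus one
 *   u8 freq;     bitmask of supported sample rates
 *   u8 byte2;    format dependent (e.g. supported sample sizes for PCM)
 * One AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTORn register is programmed per
 * coding type, from the first matching SAD, or left 0 if the display lists none.
 */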
+
/*
* build a HDMI Video Info Frame
*/
@@ -85,6 +147,30 @@ static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
frame[0xC] | (frame[0xD] << 8));
}
+static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ u32 base_rate = 48000;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ /* XXX: properly calculate this */
+ /* XXX two dtos; generally use dto0 for hdmi */
+ /* Express [24MHz / target pixel clock] as an exact rational
+ * number (a quotient of two integers). DCCG_AUDIO_DTOx_PHASE
+ * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
+ */
+ WREG32(DCCG_AUDIO_DTO0_PHASE, (base_rate*50) & 0xffffff);
+ WREG32(DCCG_AUDIO_DTO0_MODULE, (clock*100) & 0xffffff);
+ WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
+}
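A worked example of the phase/module ratio programmed above, assuming mode->clock is passed in kHz as elsewhere in the driver (illustrative only):
/* e.g. 1080p60, clock = 148500:
 *   DCCG_AUDIO_DTO0_PHASE  = 48000  * 50  =  2400000  (24.000 MHz)
 *   DCCG_AUDIO_DTO0_MODULE = 148500 * 100 = 14850000  (148.500 MHz)
 *   phase / module = 24 MHz / 148.5 MHz, i.e. [24MHz / target pixel clock]
 */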
+
+
/*
* update the info frames with the data from the current display mode
*/
@@ -104,33 +190,19 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
return;
offset = dig->afmt->offset;
- r600_audio_set_clock(encoder, mode->clock);
+ evergreen_audio_set_dto(encoder, mode->clock);
WREG32(HDMI_VBI_PACKET_CONTROL + offset,
HDMI_NULL_SEND); /* send null packets when required */
WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
- WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
- HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
- HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
-
- WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
- AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
- AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
-
- WREG32(HDMI_ACR_PACKET_CONTROL + offset,
- HDMI_ACR_AUTO_SEND | /* allow hw to sent ACR packets when required */
- HDMI_ACR_SOURCE); /* select SW CTS value */
-
WREG32(HDMI_VBI_PACKET_CONTROL + offset,
HDMI_NULL_SEND | /* send null packets when required */
HDMI_GC_SEND | /* send general control packets */
HDMI_GC_CONT); /* send general control packets every frame */
WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
- HDMI_AVI_INFO_SEND | /* enable AVI info frames */
- HDMI_AVI_INFO_CONT | /* send AVI info frames every frame/field */
HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
@@ -138,11 +210,47 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
- HDMI_AVI_INFO_LINE(2) | /* anything other than 0 */
HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
+ WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
+ HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
+ HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+
+ /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
+
+ WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+ HDMI_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
+ HDMI_ACR_SOURCE); /* select SW CTS value */
+
+ evergreen_hdmi_update_ACR(encoder, mode->clock);
+
+ WREG32(AFMT_60958_0 + offset,
+ AFMT_60958_CS_CHANNEL_NUMBER_L(1));
+
+ WREG32(AFMT_60958_1 + offset,
+ AFMT_60958_CS_CHANNEL_NUMBER_R(2));
+
+ WREG32(AFMT_60958_2 + offset,
+ AFMT_60958_CS_CHANNEL_NUMBER_2(3) |
+ AFMT_60958_CS_CHANNEL_NUMBER_3(4) |
+ AFMT_60958_CS_CHANNEL_NUMBER_4(5) |
+ AFMT_60958_CS_CHANNEL_NUMBER_5(6) |
+ AFMT_60958_CS_CHANNEL_NUMBER_6(7) |
+ AFMT_60958_CS_CHANNEL_NUMBER_7(8));
+
+ /* fglrx sets 0x0001005f | (x & 0x00fc0000) in 0x5f78 here */
+
+ WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset,
+ AFMT_AUDIO_CHANNEL_ENABLE(0xff));
+
+ /* fglrx sets 0x40 in 0x5f80 here */
+ evergreen_hdmi_write_sad_regs(encoder);
+
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
@@ -156,7 +264,17 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
}
evergreen_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
- evergreen_hdmi_update_ACR(encoder, mode->clock);
+
+ WREG32_OR(HDMI_INFOFRAME_CONTROL0 + offset,
+ HDMI_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */
+
+ WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset,
+ HDMI_AVI_INFO_LINE(2), /* anything other than 0 */
+ ~HDMI_AVI_INFO_LINE_MASK);
+
+ WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_AUDIO_SAMPLE_SEND); /* send audio packets */
/* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
WREG32(AFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
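The WREG32_OR and WREG32_P helpers used in this hunk are read-modify-write wrappers around RREG32/WREG32; the expansion below is an assumed sketch of the macros in radeon.h, shown only to make the masking readable:
/* assumed semantics:
 *   WREG32_P(reg, val, mask):  tmp = RREG32(reg); tmp &= (mask);
 *                              tmp |= ((val) & ~(mask)); WREG32(reg, tmp);
 *   WREG32_OR(reg, or):        WREG32_P(reg, (or), ~(or))    set bits
 *   WREG32_AND(reg, and):      WREG32_P(reg, 0, (and))       clear bits
 * so the HDMI_AVI_INFO_LINE(2) write above only touches the bits covered by
 * HDMI_AVI_INFO_LINE_MASK and leaves the audio/MPEG info line fields alone.
 */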
@@ -164,3 +282,20 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
}
+
+void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+ /* Silent, r600_hdmi_enable will raise WARN for us */
+ if (enable && dig->afmt->enabled)
+ return;
+ if (!enable && !dig->afmt->enabled)
+ return;
+
+ dig->afmt->enabled = enable;
+
+ DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
+ enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
+}
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index f585be1..881aba2 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -226,6 +226,8 @@
#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
+#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4
+#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
#define EVERGREEN_DC_GPIO_HPD_A 0x64b4
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 982d25a..75c0563 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -53,6 +53,43 @@
#define RCU_IND_INDEX 0x100
#define RCU_IND_DATA 0x104
+/* discrete uvd clocks */
+#define CG_UPLL_FUNC_CNTL 0x718
+# define UPLL_RESET_MASK 0x00000001
+# define UPLL_SLEEP_MASK 0x00000002
+# define UPLL_BYPASS_EN_MASK 0x00000004
+# define UPLL_CTLREQ_MASK 0x00000008
+# define UPLL_REF_DIV_MASK 0x003F0000
+# define UPLL_VCO_MODE_MASK 0x00000200
+# define UPLL_CTLACK_MASK 0x40000000
+# define UPLL_CTLACK2_MASK 0x80000000
+#define CG_UPLL_FUNC_CNTL_2 0x71c
+# define UPLL_PDIV_A(x) ((x) << 0)
+# define UPLL_PDIV_A_MASK 0x0000007F
+# define UPLL_PDIV_B(x) ((x) << 8)
+# define UPLL_PDIV_B_MASK 0x00007F00
+# define VCLK_SRC_SEL(x) ((x) << 20)
+# define VCLK_SRC_SEL_MASK 0x01F00000
+# define DCLK_SRC_SEL(x) ((x) << 25)
+# define DCLK_SRC_SEL_MASK 0x3E000000
+#define CG_UPLL_FUNC_CNTL_3 0x720
+# define UPLL_FB_DIV(x) ((x) << 0)
+# define UPLL_FB_DIV_MASK 0x01FFFFFF
+#define CG_UPLL_FUNC_CNTL_4 0x854
+# define UPLL_SPARE_ISPARE9 0x00020000
+#define CG_UPLL_SPREAD_SPECTRUM 0x79c
+# define SSEN_MASK 0x00000001
+
+/* fusion uvd clocks */
+#define CG_DCLK_CNTL 0x610
+# define DCLK_DIVIDER_MASK 0x7f
+# define DCLK_DIR_CNTL_EN (1 << 8)
+#define CG_DCLK_STATUS 0x614
+# define DCLK_STATUS (1 << 0)
+#define CG_VCLK_CNTL 0x618
+#define CG_VCLK_STATUS 0x61c
+#define CG_SCRATCH1 0x820
+
#define GRBM_GFX_INDEX 0x802C
#define INSTANCE_INDEX(x) ((x) << 0)
#define SE_INDEX(x) ((x) << 16)
@@ -197,6 +234,7 @@
# define HDMI_MPEG_INFO_CONT (1 << 9)
#define HDMI_INFOFRAME_CONTROL1 0x7048
# define HDMI_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
+# define HDMI_AVI_INFO_LINE_MASK (0x3f << 0)
# define HDMI_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
# define HDMI_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
#define HDMI_GENERIC_PACKET_CONTROL 0x704c
@@ -992,6 +1030,16 @@
# define TARGET_LINK_SPEED_MASK (0xf << 0)
# define SELECTABLE_DEEMPHASIS (1 << 6)
+
+/*
+ * UVD
+ */
+#define UVD_UDEC_ADDR_CONFIG 0xef4c
+#define UVD_UDEC_DB_ADDR_CONFIG 0xef50
+#define UVD_UDEC_DBW_ADDR_CONFIG 0xef54
+#define UVD_RBC_RB_RPTR 0xf690
+#define UVD_RBC_RB_WPTR 0xf694
+
/*
* PM4
*/
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 27769e7..7969c0c 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -78,6 +78,282 @@ MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
MODULE_FIRMWARE("radeon/ARUBA_me.bin");
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
+
+static const u32 cayman_golden_registers2[] =
+{
+ 0x3e5c, 0xffffffff, 0x00000000,
+ 0x3e48, 0xffffffff, 0x00000000,
+ 0x3e4c, 0xffffffff, 0x00000000,
+ 0x3e64, 0xffffffff, 0x00000000,
+ 0x3e50, 0xffffffff, 0x00000000,
+ 0x3e60, 0xffffffff, 0x00000000
+};
+
+static const u32 cayman_golden_registers[] =
+{
+ 0x5eb4, 0xffffffff, 0x00000002,
+ 0x5e78, 0x8f311ff1, 0x001000f0,
+ 0x3f90, 0xffff0000, 0xff000000,
+ 0x9148, 0xffff0000, 0xff000000,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0xc78, 0x00000080, 0x00000080,
+ 0xbd4, 0x70073777, 0x00011003,
+ 0xd02c, 0xbfffff1f, 0x08421000,
+ 0xd0b8, 0x73773777, 0x02011003,
+ 0x5bc0, 0x00200000, 0x50100000,
+ 0x98f8, 0x33773777, 0x02011003,
+ 0x98fc, 0xffffffff, 0x76541032,
+ 0x7030, 0x31000311, 0x00000011,
+ 0x2f48, 0x33773777, 0x42010001,
+ 0x6b28, 0x00000010, 0x00000012,
+ 0x7728, 0x00000010, 0x00000012,
+ 0x10328, 0x00000010, 0x00000012,
+ 0x10f28, 0x00000010, 0x00000012,
+ 0x11b28, 0x00000010, 0x00000012,
+ 0x12728, 0x00000010, 0x00000012,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8b24, 0x3fff3fff, 0x00ff0fff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x06000000,
+ 0x10c, 0x00000001, 0x00010003,
+ 0xa02c, 0xffffffff, 0x0000009b,
+ 0x913c, 0x0000010f, 0x01000100,
+ 0x8c04, 0xf8ff00ff, 0x40600060,
+ 0x28350, 0x00000f01, 0x00000000,
+ 0x9508, 0x3700001f, 0x00000002,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x88c4, 0x001f3ae3, 0x00000082,
+ 0x88d0, 0xffffffff, 0x0f40df40,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000
+};
+
+static const u32 dvst_golden_registers2[] =
+{
+ 0x8f8, 0xffffffff, 0,
+ 0x8fc, 0x00380000, 0,
+ 0x8f8, 0xffffffff, 1,
+ 0x8fc, 0x0e000000, 0
+};
+
+static const u32 dvst_golden_registers[] =
+{
+ 0x690, 0x3fff3fff, 0x20c00033,
+ 0x918c, 0x0fff0fff, 0x00010006,
+ 0x91a8, 0x0fff0fff, 0x00010006,
+ 0x9150, 0xffffdfff, 0x6e944040,
+ 0x917c, 0x0fff0fff, 0x00030002,
+ 0x9198, 0x0fff0fff, 0x00030002,
+ 0x915c, 0x0fff0fff, 0x00010000,
+ 0x3f90, 0xffff0001, 0xff000000,
+ 0x9178, 0x0fff0fff, 0x00070000,
+ 0x9194, 0x0fff0fff, 0x00070000,
+ 0x9148, 0xffff0001, 0xff000000,
+ 0x9190, 0x0fff0fff, 0x00090008,
+ 0x91ac, 0x0fff0fff, 0x00090008,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0x929c, 0x00000fff, 0x00000001,
+ 0x55e4, 0xff607fff, 0xfc000100,
+ 0x8a18, 0xff000fff, 0x00000100,
+ 0x8b28, 0xff000fff, 0x00000100,
+ 0x9144, 0xfffc0fff, 0x00000100,
+ 0x6ed8, 0x00010101, 0x00010000,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9838, 0xfffffffe, 0x00000000,
+ 0xd0c0, 0xff000fff, 0x00000100,
+ 0xd02c, 0xbfffff1f, 0x08421000,
+ 0xd0b8, 0x73773777, 0x12010001,
+ 0x5bb0, 0x000000f0, 0x00000070,
+ 0x98f8, 0x73773777, 0x12010001,
+ 0x98fc, 0xffffffff, 0x00000010,
+ 0x9b7c, 0x00ff0000, 0x00fc0000,
+ 0x8030, 0x00001f0f, 0x0000100a,
+ 0x2f48, 0x73773777, 0x12010001,
+ 0x2408, 0x00030000, 0x000c007f,
+ 0x8a14, 0xf000003f, 0x00000007,
+ 0x8b24, 0x3fff3fff, 0x00ff0fff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x06000000,
+ 0x4d8, 0x00000fff, 0x00000100,
+ 0xa008, 0xffffffff, 0x00010000,
+ 0x913c, 0xffff03ff, 0x01000100,
+ 0x8c00, 0x000000ff, 0x00000003,
+ 0x8c04, 0xf8ff00ff, 0x40600060,
+ 0x8cf0, 0x1fff1fff, 0x08e00410,
+ 0x28350, 0x00000f01, 0x00000000,
+ 0x9508, 0xf700071f, 0x00000002,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x20ef8, 0x01ff01ff, 0x00000002,
+ 0x20e98, 0xfffffbff, 0x00200000,
+ 0x2015c, 0xffffffff, 0x00000f40,
+ 0x88c4, 0x001f3ae3, 0x00000082,
+ 0x8978, 0x3fffffff, 0x04050140,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000
+};
+
+static const u32 scrapper_golden_registers[] =
+{
+ 0x690, 0x3fff3fff, 0x20c00033,
+ 0x918c, 0x0fff0fff, 0x00010006,
+ 0x918c, 0x0fff0fff, 0x00010006,
+ 0x91a8, 0x0fff0fff, 0x00010006,
+ 0x91a8, 0x0fff0fff, 0x00010006,
+ 0x9150, 0xffffdfff, 0x6e944040,
+ 0x9150, 0xffffdfff, 0x6e944040,
+ 0x917c, 0x0fff0fff, 0x00030002,
+ 0x917c, 0x0fff0fff, 0x00030002,
+ 0x9198, 0x0fff0fff, 0x00030002,
+ 0x9198, 0x0fff0fff, 0x00030002,
+ 0x915c, 0x0fff0fff, 0x00010000,
+ 0x915c, 0x0fff0fff, 0x00010000,
+ 0x3f90, 0xffff0001, 0xff000000,
+ 0x3f90, 0xffff0001, 0xff000000,
+ 0x9178, 0x0fff0fff, 0x00070000,
+ 0x9178, 0x0fff0fff, 0x00070000,
+ 0x9194, 0x0fff0fff, 0x00070000,
+ 0x9194, 0x0fff0fff, 0x00070000,
+ 0x9148, 0xffff0001, 0xff000000,
+ 0x9148, 0xffff0001, 0xff000000,
+ 0x9190, 0x0fff0fff, 0x00090008,
+ 0x9190, 0x0fff0fff, 0x00090008,
+ 0x91ac, 0x0fff0fff, 0x00090008,
+ 0x91ac, 0x0fff0fff, 0x00090008,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0x929c, 0x00000fff, 0x00000001,
+ 0x929c, 0x00000fff, 0x00000001,
+ 0x55e4, 0xff607fff, 0xfc000100,
+ 0x8a18, 0xff000fff, 0x00000100,
+ 0x8a18, 0xff000fff, 0x00000100,
+ 0x8b28, 0xff000fff, 0x00000100,
+ 0x8b28, 0xff000fff, 0x00000100,
+ 0x9144, 0xfffc0fff, 0x00000100,
+ 0x9144, 0xfffc0fff, 0x00000100,
+ 0x6ed8, 0x00010101, 0x00010000,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9838, 0xfffffffe, 0x00000000,
+ 0x9838, 0xfffffffe, 0x00000000,
+ 0xd0c0, 0xff000fff, 0x00000100,
+ 0xd02c, 0xbfffff1f, 0x08421000,
+ 0xd02c, 0xbfffff1f, 0x08421000,
+ 0xd0b8, 0x73773777, 0x12010001,
+ 0xd0b8, 0x73773777, 0x12010001,
+ 0x5bb0, 0x000000f0, 0x00000070,
+ 0x98f8, 0x73773777, 0x12010001,
+ 0x98f8, 0x73773777, 0x12010001,
+ 0x98fc, 0xffffffff, 0x00000010,
+ 0x98fc, 0xffffffff, 0x00000010,
+ 0x9b7c, 0x00ff0000, 0x00fc0000,
+ 0x9b7c, 0x00ff0000, 0x00fc0000,
+ 0x8030, 0x00001f0f, 0x0000100a,
+ 0x8030, 0x00001f0f, 0x0000100a,
+ 0x2f48, 0x73773777, 0x12010001,
+ 0x2f48, 0x73773777, 0x12010001,
+ 0x2408, 0x00030000, 0x000c007f,
+ 0x8a14, 0xf000003f, 0x00000007,
+ 0x8a14, 0xf000003f, 0x00000007,
+ 0x8b24, 0x3fff3fff, 0x00ff0fff,
+ 0x8b24, 0x3fff3fff, 0x00ff0fff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x06000000,
+ 0x28a4c, 0x07ffffff, 0x06000000,
+ 0x4d8, 0x00000fff, 0x00000100,
+ 0x4d8, 0x00000fff, 0x00000100,
+ 0xa008, 0xffffffff, 0x00010000,
+ 0xa008, 0xffffffff, 0x00010000,
+ 0x913c, 0xffff03ff, 0x01000100,
+ 0x913c, 0xffff03ff, 0x01000100,
+ 0x90e8, 0x001fffff, 0x010400c0,
+ 0x8c00, 0x000000ff, 0x00000003,
+ 0x8c00, 0x000000ff, 0x00000003,
+ 0x8c04, 0xf8ff00ff, 0x40600060,
+ 0x8c04, 0xf8ff00ff, 0x40600060,
+ 0x8c30, 0x0000000f, 0x00040005,
+ 0x8cf0, 0x1fff1fff, 0x08e00410,
+ 0x8cf0, 0x1fff1fff, 0x08e00410,
+ 0x900c, 0x00ffffff, 0x0017071f,
+ 0x28350, 0x00000f01, 0x00000000,
+ 0x28350, 0x00000f01, 0x00000000,
+ 0x9508, 0xf700071f, 0x00000002,
+ 0x9508, 0xf700071f, 0x00000002,
+ 0x9688, 0x00300000, 0x0017000f,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x20ef8, 0x01ff01ff, 0x00000002,
+ 0x20e98, 0xfffffbff, 0x00200000,
+ 0x2015c, 0xffffffff, 0x00000f40,
+ 0x88c4, 0x001f3ae3, 0x00000082,
+ 0x88c4, 0x001f3ae3, 0x00000082,
+ 0x8978, 0x3fffffff, 0x04050140,
+ 0x8978, 0x3fffffff, 0x04050140,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000,
+ 0x8974, 0xffffffff, 0x00000000
+};
+
+static void ni_init_golden_registers(struct radeon_device *rdev)
+{
+ switch (rdev->family) {
+ case CHIP_CAYMAN:
+ radeon_program_register_sequence(rdev,
+ cayman_golden_registers,
+ (const u32)ARRAY_SIZE(cayman_golden_registers));
+ radeon_program_register_sequence(rdev,
+ cayman_golden_registers2,
+ (const u32)ARRAY_SIZE(cayman_golden_registers2));
+ break;
+ case CHIP_ARUBA:
+ if ((rdev->pdev->device == 0x9900) ||
+ (rdev->pdev->device == 0x9901) ||
+ (rdev->pdev->device == 0x9903) ||
+ (rdev->pdev->device == 0x9904) ||
+ (rdev->pdev->device == 0x9905) ||
+ (rdev->pdev->device == 0x9906) ||
+ (rdev->pdev->device == 0x9907) ||
+ (rdev->pdev->device == 0x9908) ||
+ (rdev->pdev->device == 0x9909) ||
+ (rdev->pdev->device == 0x990A) ||
+ (rdev->pdev->device == 0x990B) ||
+ (rdev->pdev->device == 0x990C) ||
+ (rdev->pdev->device == 0x990D) ||
+ (rdev->pdev->device == 0x990E) ||
+ (rdev->pdev->device == 0x990F) ||
+ (rdev->pdev->device == 0x9910) ||
+ (rdev->pdev->device == 0x9913) ||
+ (rdev->pdev->device == 0x9917) ||
+ (rdev->pdev->device == 0x9918)) {
+ radeon_program_register_sequence(rdev,
+ dvst_golden_registers,
+ (const u32)ARRAY_SIZE(dvst_golden_registers));
+ radeon_program_register_sequence(rdev,
+ dvst_golden_registers2,
+ (const u32)ARRAY_SIZE(dvst_golden_registers2));
+ } else {
+ radeon_program_register_sequence(rdev,
+ scrapper_golden_registers,
+ (const u32)ARRAY_SIZE(scrapper_golden_registers));
+ radeon_program_register_sequence(rdev,
+ dvst_golden_registers2,
+ (const u32)ARRAY_SIZE(dvst_golden_registers2));
+ }
+ break;
+ default:
+ break;
+ }
+}
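The arrays above are {offset, and_mask, or_value} triplets; the sketch below shows how radeon_program_register_sequence() is assumed to apply them (the helper itself is outside this diff):
/* assumed per-triplet behaviour of radeon_program_register_sequence():
 *   reg = seq[i]; and_mask = seq[i + 1]; or_mask = seq[i + 2];
 *   if (and_mask == 0xffffffff)
 *           tmp = or_mask;                              full register write
 *   else
 *           tmp = (RREG32(reg) & ~and_mask) | or_mask;  masked update
 *   WREG32(reg, tmp);
 */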
+
#define BTC_IO_MC_REGS_SIZE 29
static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
@@ -473,7 +749,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x990F) ||
(rdev->pdev->device == 0x9910) ||
(rdev->pdev->device == 0x9917) ||
- (rdev->pdev->device == 0x9999)) {
+ (rdev->pdev->device == 0x9999) ||
+ (rdev->pdev->device == 0x999C)) {
rdev->config.cayman.max_simds_per_se = 6;
rdev->config.cayman.max_backends_per_se = 2;
} else if ((rdev->pdev->device == 0x9903) ||
@@ -482,7 +759,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x990D) ||
(rdev->pdev->device == 0x990E) ||
(rdev->pdev->device == 0x9913) ||
- (rdev->pdev->device == 0x9918)) {
+ (rdev->pdev->device == 0x9918) ||
+ (rdev->pdev->device == 0x999D)) {
rdev->config.cayman.max_simds_per_se = 4;
rdev->config.cayman.max_backends_per_se = 2;
} else if ((rdev->pdev->device == 0x9919) ||
@@ -615,15 +893,28 @@ static void cayman_gpu_init(struct radeon_device *rdev)
}
/* enabled rb are just the one not disabled :) */
disabled_rb_mask = tmp;
+ tmp = 0;
+ for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
+ tmp |= (1 << i);
+ /* if all the backends are disabled, fix it up here */
+ if ((disabled_rb_mask & tmp) == tmp) {
+ for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
+ disabled_rb_mask &= ~(1 << i);
+ }
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ if (ASIC_IS_DCE6(rdev))
+ WREG32(DMIF_ADDR_CALC, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
+ WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
if ((rdev->config.cayman.max_backends_per_se == 1) &&
(rdev->flags & RADEON_IS_IGP)) {
@@ -931,6 +1222,23 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_ring_write(ring, 10); /* poll interval */
}
+void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ uint64_t addr = semaphore->gpu_addr;
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+ radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+ radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+ radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
+}
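A derived note on the shifts above (read from the code itself, not from the patch description): the semaphore GPU address is split across two 20-bit register fields.
/*   UVD_SEMA_ADDR_LOW  = (addr >> 3)  & 0xFFFFF   address bits  3..22
 *   UVD_SEMA_ADDR_HIGH = (addr >> 23) & 0xFFFFF   address bits 23..42
 * i.e. the address is expressed in 8-byte units; UVD_SEMA_CMD bit 0 selects
 * wait vs. signal, and 0x80 appears to mark the command as a semaphore op.
 */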
+
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
if (enable)
@@ -1682,6 +1990,16 @@ static int cayman_startup(struct radeon_device *rdev)
return r;
}
+ r = rv770_uvd_resume(rdev);
+ if (!r) {
+ r = radeon_fence_driver_start_ring(rdev,
+ R600_RING_TYPE_UVD_INDEX);
+ if (r)
+ dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+ }
+ if (r)
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+
r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
if (r) {
dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
@@ -1748,6 +2066,18 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ if (ring->ring_size) {
+ r = radeon_ring_init(rdev, ring, ring->ring_size,
+ R600_WB_UVD_RPTR_OFFSET,
+ UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
+ if (!r)
+ r = r600_uvd_init(rdev);
+ if (r)
+ DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+ }
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -1778,6 +2108,9 @@ int cayman_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ /* init golden registers */
+ ni_init_golden_registers(rdev);
+
rdev->accel_working = true;
r = cayman_startup(rdev);
if (r) {
@@ -1794,6 +2127,8 @@ int cayman_suspend(struct radeon_device *rdev)
radeon_vm_manager_fini(rdev);
cayman_cp_enable(rdev, false);
cayman_dma_stop(rdev);
+ r600_uvd_rbc_stop(rdev);
+ radeon_uvd_suspend(rdev);
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
cayman_pcie_gart_disable(rdev);
@@ -1834,6 +2169,8 @@ int cayman_init(struct radeon_device *rdev)
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
+ /* init golden registers */
+ ni_init_golden_registers(rdev);
/* Initialize scratch registers */
r600_scratch_init(rdev);
/* Initialize surface registers */
@@ -1868,6 +2205,13 @@ int cayman_init(struct radeon_device *rdev)
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 64 * 1024);
+ r = radeon_uvd_init(rdev);
+ if (!r) {
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 4096);
+ }
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -1919,6 +2263,7 @@ void cayman_fini(struct radeon_device *rdev)
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
+ radeon_uvd_fini(rdev);
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
@@ -2017,28 +2362,57 @@ void cayman_vm_set_page(struct radeon_device *rdev,
}
}
} else {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
+ if ((flags & RADEON_VM_PAGE_SYSTEM) ||
+ (count == 1)) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ }
+ }
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
+ } else {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID) {
+ if (flags & RADEON_VM_PAGE_VALID)
value = addr;
- } else {
+ else
value = 0;
- }
- addr += incr;
- value |= r600_flags;
- ib->ptr[ib->length_dw++] = value;
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
}
}
while (ib->length_dw & 0x7)
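A derived note on the new DMA_PTE_PDE_PACKET path above: rather than streaming every PTE through the IB as the WRITE packet does, it emits one fixed-size header and lets the DMA engine generate the entries.
/* WRITE path:    3 header dwords + 2 dwords per PTE carried in the IB
 * PTE_PDE path:  9 dwords total regardless of count:
 *   packet, dst addr lo/hi, flags ("mask"), 0, value lo/hi, incr, 0
 * the engine derives successive entries from value and incr, which is why
 * this path is only taken for physically contiguous (VRAM) mappings; system
 * pages keep using the WRITE packet branch above.
 */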
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 079dee2..e226faf 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -45,6 +45,10 @@
#define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001
#define DMIF_ADDR_CONFIG 0xBD4
+
+/* DCE6 only */
+#define DMIF_ADDR_CALC 0xC00
+
#define SRBM_GFX_CNTL 0x0E44
#define RINGID(x) (((x) & 0x3) << 0)
#define VMID(x) (((x) & 0x7) << 0)
@@ -486,6 +490,18 @@
# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
/*
+ * UVD
+ */
+#define UVD_SEMA_ADDR_LOW 0xEF00
+#define UVD_SEMA_ADDR_HIGH 0xEF04
+#define UVD_SEMA_CMD 0xEF08
+#define UVD_UDEC_ADDR_CONFIG 0xEF4C
+#define UVD_UDEC_DB_ADDR_CONFIG 0xEF50
+#define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54
+#define UVD_RBC_RB_RPTR 0xF690
+#define UVD_RBC_RB_WPTR 0xF694
+
+/*
* PM4
*/
#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
@@ -668,6 +684,11 @@
(((vmid) & 0xF) << 20) | \
(((n) & 0xFFFFF) << 0))
+#define DMA_PTE_PDE_PACKET(n) ((2 << 28) | \
+ (1 << 26) | \
+ (1 << 21) | \
+ (((n) & 0xFFFFF) << 0))
+
/* async DMA Packet types */
#define DMA_PACKET_WRITE 0x2
#define DMA_PACKET_COPY 0x3
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 9db5853..4973bff 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -69,6 +69,38 @@ MODULE_FIRMWARE(FIRMWARE_R520);
* and others in some cases.
*/
+static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+ if (crtc == 0) {
+ if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
+ return true;
+ else
+ return false;
+ } else {
+ if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
+ return true;
+ else
+ return false;
+ }
+}
+
+static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+ u32 vline1, vline2;
+
+ if (crtc == 0) {
+ vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ } else {
+ vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ }
+ if (vline1 != vline2)
+ return true;
+ else
+ return false;
+}
+
/**
* r100_wait_for_vblank - vblank wait asic callback.
*
@@ -79,36 +111,33 @@ MODULE_FIRMWARE(FIRMWARE_R520);
*/
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
- int i;
+ unsigned i = 0;
if (crtc >= rdev->num_crtc)
return;
if (crtc == 0) {
- if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) {
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR))
- break;
- udelay(1);
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
- break;
- udelay(1);
- }
- }
+ if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN))
+ return;
} else {
- if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN) {
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR))
- break;
- udelay(1);
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
- break;
- udelay(1);
- }
+ if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN))
+ return;
+ }
+
+ /* depending on when we hit vblank, we may be close to active; if so,
+ * wait for another frame.
+ */
+ while (r100_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!r100_is_counter_moving(rdev, crtc))
+ break;
+ }
+ }
+
+ while (!r100_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!r100_is_counter_moving(rdev, crtc))
+ break;
}
}
}
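A derived summary of the reworked wait above:
/* wait sequence (sketch):
 *   while in vblank:       wait for the active region (end of current vblank)
 *   while not in vblank:   wait for the next vblank to begin
 * every 100 iterations r100_is_counter_moving() reads CRTC_VLINE twice; if the
 * scanline counter has not advanced (CRTC disabled or hung) the loop breaks
 * instead of spinning forever, replacing the old usec_timeout bound.
 */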
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index c0dc8d3..1dd0d32 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -358,7 +358,9 @@
#define AVIVO_D1CRTC_STATUS_HV_COUNT 0x60ac
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
+#define AVIVO_D1MODE_MASTER_UPDATE_LOCK 0x60e0
#define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4
+#define AVIVO_D1CRTC_UPDATE_LOCK 0x60e8
/* master controls */
#define AVIVO_DC_CRTC_MASTER_EN 0x60f8
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 0740db3..1a08008 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1145,7 +1145,7 @@ static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc
}
if (rdev->flags & RADEON_IS_AGP) {
size_bf = mc->gtt_start;
- size_af = 0xFFFFFFFF - mc->gtt_end;
+ size_af = mc->mc_mask - mc->gtt_end;
if (size_bf > size_af) {
if (mc->mc_vram_size > size_bf) {
dev_warn(rdev->dev, "limiting VRAM\n");
@@ -2552,6 +2552,193 @@ void r600_dma_fini(struct radeon_device *rdev)
}
/*
+ * UVD
+ */
+int r600_uvd_rbc_start(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ uint64_t rptr_addr;
+ uint32_t rb_bufsz, tmp;
+ int r;
+
+ rptr_addr = rdev->wb.gpu_addr + R600_WB_UVD_RPTR_OFFSET;
+
+ if (upper_32_bits(rptr_addr) != upper_32_bits(ring->gpu_addr)) {
+ DRM_ERROR("UVD ring and rptr not in the same 4GB segment!\n");
+ return -EINVAL;
+ }
+
+ /* force RBC into idle state */
+ WREG32(UVD_RBC_RB_CNTL, 0x11010101);
+
+ /* Set the write pointer delay */
+ WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
+
+ /* set the wb address */
+ WREG32(UVD_RBC_RB_RPTR_ADDR, rptr_addr >> 2);
+
+ /* program the 4GB memory segment for rptr and ring buffer */
+ WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(rptr_addr) |
+ (0x7 << 16) | (0x1 << 31));
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(UVD_RBC_RB_RPTR, 0x0);
+
+ ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR);
+ WREG32(UVD_RBC_RB_WPTR, ring->wptr);
+
+ /* set the ring address */
+ WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
+
+ /* Set ring buffer size */
+ rb_bufsz = drm_order(ring->ring_size);
+ rb_bufsz = (0x1 << 8) | rb_bufsz;
+ WREG32(UVD_RBC_RB_CNTL, rb_bufsz);
+
+ ring->ready = true;
+ r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+
+ r = radeon_ring_lock(rdev, ring, 10);
+ if (r) {
+ DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r);
+ return r;
+ }
+
+ tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
+ radeon_ring_write(ring, tmp);
+ radeon_ring_write(ring, 0xFFFFF);
+
+ tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
+ radeon_ring_write(ring, tmp);
+ radeon_ring_write(ring, 0xFFFFF);
+
+ tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
+ radeon_ring_write(ring, tmp);
+ radeon_ring_write(ring, 0xFFFFF);
+
+ /* Clear timeout status bits */
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
+ radeon_ring_write(ring, 0x8);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
+ radeon_ring_write(ring, 3);
+
+ radeon_ring_unlock_commit(rdev, ring);
+
+ return 0;
+}
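A derived note on the -EINVAL check near the top of this function: UVD_LMI_EXT40_ADDR carries only one set of upper address bits, so the rptr write-back location and the ring buffer must share a single 4GB segment.
/* derived constraint:
 *   upper_32_bits(rptr_addr) is programmed once via UVD_LMI_EXT40_ADDR and
 *   applies to both the rptr write-back and the ring buffer base, hence the
 *   requirement that both allocations live in the same 4GB window.
 */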
+
+void r600_uvd_rbc_stop(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+
+ /* force RBC into idle state */
+ WREG32(UVD_RBC_RB_CNTL, 0x11010101);
+ ring->ready = false;
+}
+
+int r600_uvd_init(struct radeon_device *rdev)
+{
+ int i, j, r;
+
+ /* raise clocks while booting up the VCPU */
+ radeon_set_uvd_clocks(rdev, 53300, 40000);
+
+ /* disable clock gating */
+ WREG32(UVD_CGC_GATE, 0);
+
+ /* disable interrupt */
+ WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
+
+ /* put LMI, VCPU, RBC etc... into reset */
+ WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
+ LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
+ CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
+ mdelay(5);
+
+ /* take UVD block out of reset */
+ WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
+ mdelay(5);
+
+ /* initialize UVD memory controller */
+ WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
+ (1 << 21) | (1 << 9) | (1 << 20));
+
+ /* disable byte swapping */
+ WREG32(UVD_LMI_SWAP_CNTL, 0);
+ WREG32(UVD_MP_SWAP_CNTL, 0);
+
+ WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
+ WREG32(UVD_MPC_SET_MUXA1, 0x0);
+ WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
+ WREG32(UVD_MPC_SET_MUXB1, 0x0);
+ WREG32(UVD_MPC_SET_ALU, 0);
+ WREG32(UVD_MPC_SET_MUX, 0x88);
+
+ /* Stall UMC */
+ WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+ WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+
+ /* take all subblocks out of reset, except VCPU */
+ WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
+ mdelay(5);
+
+ /* enable VCPU clock */
+ WREG32(UVD_VCPU_CNTL, 1 << 9);
+
+ /* enable UMC */
+ WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+
+ /* boot up the VCPU */
+ WREG32(UVD_SOFT_RESET, 0);
+ mdelay(10);
+
+ WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
+
+ for (i = 0; i < 10; ++i) {
+ uint32_t status;
+ for (j = 0; j < 100; ++j) {
+ status = RREG32(UVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(10);
+ }
+ r = 0;
+ if (status & 2)
+ break;
+
+ DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
+ WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
+ mdelay(10);
+ WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
+ mdelay(10);
+ r = -1;
+ }
+
+ if (r) {
+ DRM_ERROR("UVD not responding, giving up!!!\n");
+ radeon_set_uvd_clocks(rdev, 0, 0);
+ return r;
+ }
+
+ /* enable interrupt */
+ WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1));
+
+ r = r600_uvd_rbc_start(rdev);
+ if (!r)
+ DRM_INFO("UVD initialized successfully.\n");
+
+ /* lower clocks again */
+ radeon_set_uvd_clocks(rdev, 0, 0);
+
+ return r;
+}
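A sketch of the boot-loop timing above, with UVD_STATUS bit 1 assumed to mean "VCPU running":
/* per attempt: poll UVD_STATUS for (status & 2) every 10 ms, up to 1 s;
 * on timeout, pulse VCPU_SOFT_RESET (10 ms asserted, 10 ms released) and
 * retry; after 10 failed attempts r remains -1, the error is reported and
 * the UVD clocks are dropped back to 0 before returning.
 */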
+
+/*
* GPU scratch registers helpers function.
*/
void r600_scratch_init(struct radeon_device *rdev)
@@ -2660,6 +2847,40 @@ int r600_dma_ring_test(struct radeon_device *rdev,
return r;
}
+int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
+ r = radeon_ring_lock(rdev, ring, 3);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
+ ring->idx, r);
+ return r;
+ }
+ radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(UVD_CONTEXT_ID);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n",
+ ring->idx, i);
+ } else {
+ DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+ return r;
+}
+
/*
* CP fences/semaphores
*/
@@ -2711,6 +2932,30 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
}
}
+void r600_uvd_fence_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ uint32_t addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+ radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
+ radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
+ radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
+ radeon_ring_write(ring, 0);
+
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
+ radeon_ring_write(ring, 2);
+ return;
+}
+
void r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
@@ -2780,6 +3025,23 @@ void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
}
+void r600_uvd_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ uint64_t addr = semaphore->gpu_addr;
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+ radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+ radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+ radeon_ring_write(ring, emit_wait ? 1 : 0);
+}
+
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
@@ -3183,6 +3445,16 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_ring_write(ring, ib->length_dw);
}
+void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
+ radeon_ring_write(ring, ib->gpu_addr);
+ radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
+ radeon_ring_write(ring, ib->length_dw);
+}
+
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
struct radeon_ib ib;
@@ -3300,6 +3572,41 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
+int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ struct radeon_fence *fence = NULL;
+ int r;
+
+ r = radeon_set_uvd_clocks(rdev, 53300, 40000);
+ if (r) {
+ DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
+ if (r) {
+ DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
+ goto error;
+ }
+
+ r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
+ if (r) {
+ DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
+ goto error;
+ }
+
+ r = radeon_fence_wait(fence, false);
+ if (r) {
+ DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+ goto error;
+ }
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+error:
+ radeon_fence_unref(&fence);
+ radeon_set_uvd_clocks(rdev, 0, 0);
+ return r;
+}
+
/**
* r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
*
@@ -4232,7 +4539,7 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
- u32 link_width_cntl, mask, target_reg;
+ u32 link_width_cntl, mask;
if (rdev->flags & RADEON_IS_IGP)
return;
@@ -4244,7 +4551,7 @@ void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
if (ASIC_IS_X2(rdev))
return;
- /* FIXME wait for idle */
+ radeon_gui_idle(rdev);
switch (lanes) {
case 0:
@@ -4263,53 +4570,24 @@ void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
break;
case 12:
+ /* not actually supported */
mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
break;
case 16:
- default:
mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
break;
- }
-
- link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
-
- if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
- (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
- return;
-
- if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
+ default:
+ DRM_ERROR("invalid pcie lane request: %d\n", lanes);
return;
+ }
- link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
- RADEON_PCIE_LC_RECONFIG_NOW |
- R600_PCIE_LC_RENEGOTIATE_EN |
- R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
- link_width_cntl |= mask;
-
- WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
-
- /* some northbridges can renegotiate the link rather than requiring
- * a complete re-config.
- * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
- */
- if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
- link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
- else
- link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;
-
- WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
- RADEON_PCIE_LC_RECONFIG_NOW));
-
- if (rdev->family >= CHIP_RV770)
- target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
- else
- target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;
-
- /* wait for lane set to complete */
- link_width_cntl = RREG32(target_reg);
- while (link_width_cntl == 0xffffffff)
- link_width_cntl = RREG32(target_reg);
+ link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
+ link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
+ link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
+ R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
+ WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
int r600_get_pcie_lanes(struct radeon_device *rdev)
@@ -4326,13 +4604,11 @@ int r600_get_pcie_lanes(struct radeon_device *rdev)
if (ASIC_IS_X2(rdev))
return 0;
- /* FIXME wait for idle */
+ radeon_gui_idle(rdev);
- link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
- case RADEON_PCIE_LC_LINK_WIDTH_X0:
- return 0;
case RADEON_PCIE_LC_LINK_WIDTH_X1:
return 1;
case RADEON_PCIE_LC_LINK_WIDTH_X2:
@@ -4341,6 +4617,10 @@ int r600_get_pcie_lanes(struct radeon_device *rdev)
return 4;
case RADEON_PCIE_LC_LINK_WIDTH_X8:
return 8;
+ case RADEON_PCIE_LC_LINK_WIDTH_X12:
+ /* not actually supported */
+ return 12;
+ case RADEON_PCIE_LC_LINK_WIDTH_X0:
case RADEON_PCIE_LC_LINK_WIDTH_X16:
default:
return 16;
@@ -4378,7 +4658,7 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
if (!(mask & DRM_PCIE_SPEED_50))
return;
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
if (speed_cntl & LC_CURRENT_DATA_RATE) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
@@ -4391,23 +4671,23 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
(rdev->family == CHIP_RV620) ||
(rdev->family == CHIP_RV635)) {
/* advertise upconfig capability */
- link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
- WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
- link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
LC_RECONFIG_ARC_MISSING_ESCAPE);
link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
- WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
} else {
link_width_cntl |= LC_UPCONFIGURE_DIS;
- WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
}
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
(speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
@@ -4428,7 +4708,7 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
tmp = RREG32(0x541c);
WREG32(0x541c, tmp | 0x8);
@@ -4442,27 +4722,27 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
if ((rdev->family == CHIP_RV670) ||
(rdev->family == CHIP_RV620) ||
(rdev->family == CHIP_RV635)) {
- training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
+ training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
training_cntl &= ~LC_POINT_7_PLUS_EN;
- WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
} else {
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
}
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_GEN2_EN_STRAP;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
} else {
- link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
if (1)
link_width_cntl |= LC_UPCONFIGURE_DIS;
else
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
- WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
}
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index cb03fe2..c92eb86 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -57,10 +57,7 @@ static bool radeon_dig_encoder(struct drm_encoder *encoder)
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
- return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE6(rdev))
- || rdev->family == CHIP_RS600
- || rdev->family == CHIP_RS690
- || rdev->family == CHIP_RS740;
+ return ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE6(rdev);
}
struct r600_audio r600_audio_status(struct radeon_device *rdev)
@@ -184,65 +181,6 @@ int r600_audio_init(struct radeon_device *rdev)
}
/*
- * atach the audio codec to the clock source of the encoder
- */
-void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- int base_rate = 48000;
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- WREG32_P(R600_AUDIO_TIMING, 0, ~0x301);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
- break;
- default:
- dev_err(rdev->dev, "Unsupported encoder type 0x%02X\n",
- radeon_encoder->encoder_id);
- return;
- }
-
- if (ASIC_IS_DCE4(rdev)) {
- /* TODO: other PLLs? */
- WREG32(EVERGREEN_AUDIO_PLL1_MUL, base_rate * 10);
- WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
- WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
-
- /* Select DTO source */
- WREG32(0x5ac, radeon_crtc->crtc_id);
- } else {
- switch (dig->dig_encoder) {
- case 0:
- WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
- WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
- WREG32(R600_AUDIO_CLK_SRCSEL, 0);
- break;
-
- case 1:
- WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
- WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
- WREG32(R600_AUDIO_CLK_SRCSEL, 1);
- break;
- default:
- dev_err(rdev->dev,
- "Unsupported DIG on encoder 0x%02X\n",
- radeon_encoder->encoder_id);
- return;
- }
- }
-}
-
-/*
* release the audio timer
* TODO: How to do this correctly on SMP systems?
*/
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 21ecc0e..47f180a 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -226,6 +226,39 @@ static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
value, ~HDMI0_AUDIO_TEST_EN);
}
+void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u32 base_rate = 48000;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT.
+ * It doesn't matter which one you use; just use the first one.
+ */
+ /* XXX: properly calculate this */
+ /* XXX two dtos; generally use dto0 for hdmi */
+ /* Express [24MHz / target pixel clock] as an exact rational
+ * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE
+ * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
+ */
+ if (ASIC_IS_DCE3(rdev)) {
+ /* according to the reg specs, this should be DCE3.2 only, but in
+ * practice it seems to cover DCE3.0 as well.
+ */
+ WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 50);
+ WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+ } else {
+ /* according to the reg specs, this should be DCE2.0 and DCE3.0 */
+ WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate * 50) |
+ AUDIO_DTO_MODULE(clock * 100));
+ }
+}
/*
* update the info frames with the data from the current display mode
@@ -246,7 +279,7 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
return;
offset = dig->afmt->offset;
- r600_audio_set_clock(encoder, mode->clock);
+ r600_audio_set_dto(encoder, mode->clock);
WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
HDMI0_NULL_SEND); /* send null packets when required */
@@ -415,114 +448,73 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
/*
* enable the HDMI engine
*/
-void r600_hdmi_enable(struct drm_encoder *encoder)
+void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- uint32_t offset;
- u32 hdmi;
-
- if (ASIC_IS_DCE6(rdev))
- return;
+ u32 hdmi = HDMI0_ERROR_ACK;
/* Silent, r600_hdmi_enable will raise WARN for us */
- if (dig->afmt->enabled)
+ if (enable && dig->afmt->enabled)
+ return;
+ if (!enable && !dig->afmt->enabled)
return;
- offset = dig->afmt->offset;
/* Older chipsets require setting HDMI and routing manually */
- if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
- hdmi = HDMI0_ERROR_ACK | HDMI0_ENABLE;
+ if (!ASIC_IS_DCE3(rdev)) {
+ if (enable)
+ hdmi |= HDMI0_ENABLE;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- WREG32_P(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN,
- ~AVIVO_TMDSA_CNTL_HDMI_EN);
- hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA);
+ if (enable) {
+ WREG32_OR(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA);
+ } else {
+ WREG32_AND(AVIVO_TMDSA_CNTL, ~AVIVO_TMDSA_CNTL_HDMI_EN);
+ }
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- WREG32_P(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN,
- ~AVIVO_LVTMA_CNTL_HDMI_EN);
- hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA);
+ if (enable) {
+ WREG32_OR(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA);
+ } else {
+ WREG32_AND(AVIVO_LVTMA_CNTL, ~AVIVO_LVTMA_CNTL_HDMI_EN);
+ }
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
- WREG32_P(DDIA_CNTL, DDIA_HDMI_EN, ~DDIA_HDMI_EN);
- hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA);
+ if (enable) {
+ WREG32_OR(DDIA_CNTL, DDIA_HDMI_EN);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA);
+ } else {
+ WREG32_AND(DDIA_CNTL, ~DDIA_HDMI_EN);
+ }
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA);
+ if (enable)
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA);
break;
default:
dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
radeon_encoder->encoder_id);
break;
}
- WREG32(HDMI0_CONTROL + offset, hdmi);
+ WREG32(HDMI0_CONTROL + dig->afmt->offset, hdmi);
}
if (rdev->irq.installed) {
/* if irq is available use it */
- radeon_irq_kms_enable_afmt(rdev, dig->afmt->id);
+ /* XXX: shouldn't need this on any asics. Double check DCE2/3 */
+ if (enable)
+ radeon_irq_kms_enable_afmt(rdev, dig->afmt->id);
+ else
+ radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
}
- dig->afmt->enabled = true;
+ dig->afmt->enabled = enable;
- DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
- offset, radeon_encoder->encoder_id);
+ DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
+ enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
}
-/*
- * disable the HDMI engine
- */
-void r600_hdmi_disable(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- uint32_t offset;
-
- if (ASIC_IS_DCE6(rdev))
- return;
-
- /* Called for ATOM_ENCODER_MODE_HDMI only */
- if (!dig || !dig->afmt) {
- return;
- }
- if (!dig->afmt->enabled)
- return;
- offset = dig->afmt->offset;
-
- DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
- offset, radeon_encoder->encoder_id);
-
- /* disable irq */
- radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
-
- /* Older chipsets not handled by AtomBIOS */
- if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- WREG32_P(AVIVO_TMDSA_CNTL, 0,
- ~AVIVO_TMDSA_CNTL_HDMI_EN);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- WREG32_P(AVIVO_LVTMA_CNTL, 0,
- ~AVIVO_LVTMA_CNTL_HDMI_EN);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DDI:
- WREG32_P(DDIA_CNTL, 0, ~DDIA_HDMI_EN);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- break;
- default:
- dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
- radeon_encoder->encoder_id);
- break;
- }
- WREG32(HDMI0_CONTROL + offset, HDMI0_ERROR_ACK);
- }
-
- dig->afmt->enabled = false;
-}
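In the new r600_audio_set_dto() above, the DCE3 branch programs the DTO with phase = base_rate * 50 and module = clock * 100; since base_rate is 48000 and mode->clock is the pixel clock in kHz, the ratio reduces to 24 MHz divided by the pixel clock, matching the comment in the function. A minimal user-space sketch of that arithmetic (the helper name and the 148.5 MHz example are illustrative, not driver code):

#include <stdio.h>
#include <stdint.h>

/* Model of the DCE3 branch in r600_audio_set_dto(): phase/module must
 * equal 24 MHz divided by the target pixel clock.  clock_khz is in kHz. */
static void audio_dto_for_pixel_clock(uint32_t clock_khz,
                                      uint32_t *phase, uint32_t *module)
{
    const uint32_t base_rate = 48000;   /* audio base rate in Hz */

    *phase  = base_rate * 50;           /* 2,400,000 == 24 MHz in 10 Hz units */
    *module = clock_khz * 100;          /* pixel clock in the same 10 Hz units */
}

int main(void)
{
    uint32_t phase, module;

    /* 148.5 MHz pixel clock (1080p60) as an example input */
    audio_dto_for_pixel_clock(148500, &phase, &module);
    printf("phase=%u module=%u ratio=%f (expect 24000/148500)\n",
           phase, module, (double)phase / module);
    return 0;
}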
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index a42ba11..acb146c 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -691,6 +691,7 @@
#define SRBM_SOFT_RESET 0xe60
# define SOFT_RESET_DMA (1 << 12)
# define SOFT_RESET_RLC (1 << 13)
+# define SOFT_RESET_UVD (1 << 18)
# define RV770_SOFT_RESET_DMA (1 << 20)
#define CP_INT_CNTL 0xc124
@@ -909,7 +910,12 @@
# define TARGET_LINK_SPEED_MASK (0xf << 0)
# define SELECTABLE_DEEMPHASIS (1 << 6)
-/* Audio clocks */
+/* Audio clocks DCE 2.0/3.0 */
+#define AUDIO_DTO 0x7340
+# define AUDIO_DTO_PHASE(x) (((x) & 0xffff) << 0)
+# define AUDIO_DTO_MODULE(x) (((x) & 0xffff) << 16)
+
+/* Audio clocks DCE 3.2 */
#define DCCG_AUDIO_DTO0_PHASE 0x0514
#define DCCG_AUDIO_DTO0_MODULE 0x0518
#define DCCG_AUDIO_DTO0_LOAD 0x051c
@@ -1143,6 +1149,70 @@
# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
/*
+ * UVD
+ */
+#define UVD_SEMA_ADDR_LOW 0xef00
+#define UVD_SEMA_ADDR_HIGH 0xef04
+#define UVD_SEMA_CMD 0xef08
+
+#define UVD_GPCOM_VCPU_CMD 0xef0c
+#define UVD_GPCOM_VCPU_DATA0 0xef10
+#define UVD_GPCOM_VCPU_DATA1 0xef14
+#define UVD_ENGINE_CNTL 0xef18
+
+#define UVD_SEMA_CNTL 0xf400
+#define UVD_RB_ARB_CTRL 0xf480
+
+#define UVD_LMI_EXT40_ADDR 0xf498
+#define UVD_CGC_GATE 0xf4a8
+#define UVD_LMI_CTRL2 0xf4f4
+#define UVD_MASTINT_EN 0xf500
+#define UVD_LMI_ADDR_EXT 0xf594
+#define UVD_LMI_CTRL 0xf598
+#define UVD_LMI_SWAP_CNTL 0xf5b4
+#define UVD_MP_SWAP_CNTL 0xf5bC
+#define UVD_MPC_CNTL 0xf5dC
+#define UVD_MPC_SET_MUXA0 0xf5e4
+#define UVD_MPC_SET_MUXA1 0xf5e8
+#define UVD_MPC_SET_MUXB0 0xf5eC
+#define UVD_MPC_SET_MUXB1 0xf5f0
+#define UVD_MPC_SET_MUX 0xf5f4
+#define UVD_MPC_SET_ALU 0xf5f8
+
+#define UVD_VCPU_CNTL 0xf660
+#define UVD_SOFT_RESET 0xf680
+#define RBC_SOFT_RESET (1<<0)
+#define LBSI_SOFT_RESET (1<<1)
+#define LMI_SOFT_RESET (1<<2)
+#define VCPU_SOFT_RESET (1<<3)
+#define CSM_SOFT_RESET (1<<5)
+#define CXW_SOFT_RESET (1<<6)
+#define TAP_SOFT_RESET (1<<7)
+#define LMI_UMC_SOFT_RESET (1<<13)
+#define UVD_RBC_IB_BASE 0xf684
+#define UVD_RBC_IB_SIZE 0xf688
+#define UVD_RBC_RB_BASE 0xf68c
+#define UVD_RBC_RB_RPTR 0xf690
+#define UVD_RBC_RB_WPTR 0xf694
+#define UVD_RBC_RB_WPTR_CNTL 0xf698
+
+#define UVD_STATUS 0xf6bc
+
+#define UVD_SEMA_TIMEOUT_STATUS 0xf6c0
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL 0xf6c4
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL 0xf6c8
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL 0xf6cc
+
+#define UVD_RBC_RB_CNTL 0xf6a4
+#define UVD_RBC_RB_RPTR_ADDR 0xf6a8
+
+#define UVD_CONTEXT_ID 0xf6f4
+
+# define UPLL_CTLREQ_MASK 0x00000008
+# define UPLL_CTLACK_MASK 0x40000000
+# define UPLL_CTLACK2_MASK 0x80000000
+
+/*
* PM4
*/
#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
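The AUDIO_DTO_PHASE()/AUDIO_DTO_MODULE() macros added above pack two 16-bit fields into the single DCE2/3 AUDIO_DTO register. A small stand-alone model of that packing, with the register write replaced by a plain variable and arbitrary field values chosen only to show the layout:

#include <stdio.h>
#include <stdint.h>

#define AUDIO_DTO_PHASE(x)   (((x) & 0xffff) << 0)    /* bits 15:0  */
#define AUDIO_DTO_MODULE(x)  (((x) & 0xffff) << 16)   /* bits 31:16 */

int main(void)
{
    /* arbitrary illustrative field values; only the packing matters here */
    uint32_t reg = AUDIO_DTO_PHASE(0x1234) | AUDIO_DTO_MODULE(0xabcd);

    printf("AUDIO_DTO = 0x%08x (phase=0x%04x, module=0x%04x)\n",
           reg, reg & 0xffff, reg >> 16);
    return 0;
}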
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8263af3..1442ce7 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -95,6 +95,7 @@ extern int radeon_hw_i2c;
extern int radeon_pcie_gen2;
extern int radeon_msi;
extern int radeon_lockup_timeout;
+extern int radeon_fastfb;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -109,24 +110,27 @@ extern int radeon_lockup_timeout;
#define RADEON_BIOS_NUM_SCRATCH 8
/* max number of rings */
-#define RADEON_NUM_RINGS 5
+#define RADEON_NUM_RINGS 6
/* fence seq are set to this number when signaled */
#define RADEON_FENCE_SIGNALED_SEQ 0LL
/* internal ring indices */
/* r1xx+ has gfx CP ring */
-#define RADEON_RING_TYPE_GFX_INDEX 0
+#define RADEON_RING_TYPE_GFX_INDEX 0
/* cayman has 2 compute CP rings */
-#define CAYMAN_RING_TYPE_CP1_INDEX 1
-#define CAYMAN_RING_TYPE_CP2_INDEX 2
+#define CAYMAN_RING_TYPE_CP1_INDEX 1
+#define CAYMAN_RING_TYPE_CP2_INDEX 2
/* R600+ has an async dma ring */
#define R600_RING_TYPE_DMA_INDEX 3
/* cayman add a second async dma ring */
#define CAYMAN_RING_TYPE_DMA1_INDEX 4
+/* R600+ */
+#define R600_RING_TYPE_UVD_INDEX 5
+
/* hardcode those limit for now */
#define RADEON_VA_IB_OFFSET (1 << 20)
#define RADEON_VA_RESERVED_SIZE (8 << 20)
@@ -202,6 +206,11 @@ void radeon_pm_suspend(struct radeon_device *rdev);
void radeon_pm_resume(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
+int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
+ u8 clock_type,
+ u32 clock,
+ bool strobe_mode,
+ struct atom_clock_dividers *dividers);
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
void rs690_pm_info(struct radeon_device *rdev);
extern int rv6xx_get_temp(struct radeon_device *rdev);
@@ -349,7 +358,8 @@ struct radeon_bo {
struct radeon_device *rdev;
struct drm_gem_object gem_base;
- struct ttm_bo_kmap_obj dma_buf_vmap;
+ struct ttm_bo_kmap_obj dma_buf_vmap;
+ pid_t pid;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
@@ -357,11 +367,14 @@ struct radeon_bo_list {
struct ttm_validate_buffer tv;
struct radeon_bo *bo;
uint64_t gpu_offset;
- unsigned rdomain;
- unsigned wdomain;
+ bool written;
+ unsigned domain;
+ unsigned alt_domain;
u32 tiling_flags;
};
+int radeon_gem_debugfs_init(struct radeon_device *rdev);
+
/* sub-allocation manager, it has to be protected by another lock.
* By conception this is an helper for other part of the driver
* like the indirect buffer or semaphore, which both have their
@@ -517,6 +530,7 @@ struct radeon_mc {
bool vram_is_ddr;
bool igp_sideport_enabled;
u64 gtt_base_align;
+ u64 mc_mask;
};
bool radeon_combios_sideport_present(struct radeon_device *rdev);
@@ -918,6 +932,7 @@ struct radeon_wb {
#define R600_WB_DMA_RPTR_OFFSET 1792
#define R600_WB_IH_WPTR_OFFSET 2048
#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304
+#define R600_WB_UVD_RPTR_OFFSET 2560
#define R600_WB_EVENT_OFFSET 3072
/**
@@ -1118,6 +1133,46 @@ struct radeon_pm {
int radeon_pm_get_type_index(struct radeon_device *rdev,
enum radeon_pm_state_type ps_type,
int instance);
+/*
+ * UVD
+ */
+#define RADEON_MAX_UVD_HANDLES 10
+#define RADEON_UVD_STACK_SIZE (1024*1024)
+#define RADEON_UVD_HEAP_SIZE (1024*1024)
+
+struct radeon_uvd {
+ struct radeon_bo *vcpu_bo;
+ void *cpu_addr;
+ uint64_t gpu_addr;
+ atomic_t handles[RADEON_MAX_UVD_HANDLES];
+ struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
+ struct delayed_work idle_work;
+};
+
+int radeon_uvd_init(struct radeon_device *rdev);
+void radeon_uvd_fini(struct radeon_device *rdev);
+int radeon_uvd_suspend(struct radeon_device *rdev);
+int radeon_uvd_resume(struct radeon_device *rdev);
+int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
+ uint32_t handle, struct radeon_fence **fence);
+int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
+ uint32_t handle, struct radeon_fence **fence);
+void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo);
+void radeon_uvd_free_handles(struct radeon_device *rdev,
+ struct drm_file *filp);
+int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
+void radeon_uvd_note_usage(struct radeon_device *rdev);
+int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
+ unsigned vclk, unsigned dclk,
+ unsigned vco_min, unsigned vco_max,
+ unsigned fb_factor, unsigned fb_mask,
+ unsigned pd_min, unsigned pd_max,
+ unsigned pd_even,
+ unsigned *optimal_fb_div,
+ unsigned *optimal_vclk_div,
+ unsigned *optimal_dclk_div);
+int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
+ unsigned cg_upll_func_cntl);
struct r600_audio {
int channels;
@@ -1229,6 +1284,9 @@ struct radeon_asic {
void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
/* get backlight level */
u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
+ /* audio callbacks */
+ void (*hdmi_enable)(struct drm_encoder *encoder, bool enable);
+ void (*hdmi_setmode)(struct drm_encoder *encoder, struct drm_display_mode *mode);
} display;
/* copy functions for bo handling */
struct {
@@ -1281,6 +1339,7 @@ struct radeon_asic {
int (*get_pcie_lanes)(struct radeon_device *rdev);
void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
void (*set_clock_gating)(struct radeon_device *rdev, int enable);
+ int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk);
} pm;
/* pageflipping */
struct {
@@ -1443,6 +1502,7 @@ struct si_asic {
unsigned multi_gpu_tile_size;
unsigned tile_config;
+ uint32_t tile_mode_array[32];
};
union radeon_asic_config {
@@ -1608,6 +1668,7 @@ struct radeon_device {
struct radeon_asic *asic;
struct radeon_gem gem;
struct radeon_pm pm;
+ struct radeon_uvd uvd;
uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
struct radeon_wb wb;
struct radeon_dummy_page dummy_page;
@@ -1615,12 +1676,14 @@ struct radeon_device {
bool suspend;
bool need_dma32;
bool accel_working;
+ bool fastfb_working; /* IGP feature*/
struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
const struct firmware *me_fw; /* all family ME firmware */
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
const struct firmware *rlc_fw; /* r6/700 RLC firmware */
const struct firmware *mc_fw; /* NI MC firmware */
const struct firmware *ce_fw; /* SI CE firmware */
+ const struct firmware *uvd_fw; /* UVD firmware */
struct r600_blit r600_blit;
struct r600_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
@@ -1688,8 +1751,8 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
-#define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg))
-#define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
+#define RREG32_PCIE_PORT(reg) rdev->pciep_rreg(rdev, (reg))
+#define WREG32_PCIE_PORT(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
#define WREG32_P(reg, val, mask) \
do { \
uint32_t tmp_ = RREG32(reg); \
@@ -1697,6 +1760,8 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
tmp_ |= ((val) & ~(mask)); \
WREG32(reg, tmp_); \
} while (0)
+#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
+#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or)
#define WREG32_PLL_P(reg, val, mask) \
do { \
uint32_t tmp_ = RREG32_PLL(reg); \
@@ -1830,6 +1895,8 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
#define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l))
#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
+#define radeon_hdmi_enable(rdev, e, b) (rdev)->asic->display.hdmi_enable((e), (b))
+#define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
@@ -1845,6 +1912,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
+#define radeon_set_uvd_clocks(rdev, v, d) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
@@ -1892,6 +1960,9 @@ extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc
extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
+extern void radeon_program_register_sequence(struct radeon_device *rdev,
+ const u32 *registers,
+ const u32 array_size);
/*
* vm
@@ -1964,9 +2035,6 @@ struct radeon_hdmi_acr {
extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
-extern void r600_hdmi_enable(struct drm_encoder *encoder);
-extern void r600_hdmi_disable(struct drm_encoder *encoder);
-extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 tiling_pipe_num,
u32 max_rb_num,
@@ -1977,8 +2045,6 @@ extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
* evergreen functions used by radeon_encoder.c
*/
-extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
-
extern int ni_init_microcode(struct radeon_device *rdev);
extern int ni_mc_load_microcode(struct radeon_device *rdev);
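The new WREG32_AND()/WREG32_OR() helpers in the radeon.h hunk above are thin wrappers around the existing WREG32_P() read-modify-write macro: bits set in the mask argument are preserved from the current register value, the rest are taken from val. A user-space model with the MMIO aperture replaced by a small array (the register file, indices and main() are illustrative only):

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

/* Stand-in for the MMIO aperture: a tiny register file indexed by offset. */
static uint32_t regs[16];
#define RREG32(reg)      (regs[(reg)])
#define WREG32(reg, v)   (regs[(reg)] = (v))

/* Same shape as the macros in radeon.h: keep the bits set in mask,
 * take the remaining bits from val. */
#define WREG32_P(reg, val, mask)            \
    do {                                    \
        uint32_t tmp_ = RREG32(reg);        \
        tmp_ &= (mask);                     \
        tmp_ |= ((val) & ~(mask));          \
        WREG32(reg, tmp_);                  \
    } while (0)
#define WREG32_AND(reg, and)  WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or)    WREG32_P(reg, or, ~(or))

int main(void)
{
    regs[1] = 0xf0f0f0f0;

    WREG32_OR(1, 0x0000000f);     /* set the low nibble */
    assert(regs[1] == 0xf0f0f0ff);

    WREG32_AND(1, ~0xf0000000u);  /* clear the top nibble */
    assert(regs[1] == 0x00f0f0ff);

    printf("final value 0x%08x\n", regs[1]);
    return 0;
}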
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index aba0a89..6417132 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -656,6 +656,8 @@ static struct radeon_asic rs600_asic = {
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &r600_hdmi_enable,
+ .hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
.blit = &r100_copy_blit,
@@ -732,6 +734,8 @@ static struct radeon_asic rs690_asic = {
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &r600_hdmi_enable,
+ .hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
.blit = &r100_copy_blit,
@@ -970,6 +974,8 @@ static struct radeon_asic r600_asic = {
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &r600_hdmi_enable,
+ .hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_blit,
@@ -1056,6 +1062,8 @@ static struct radeon_asic rs780_asic = {
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &r600_hdmi_enable,
+ .hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_blit,
@@ -1130,6 +1138,15 @@ static struct radeon_asic rv770_asic = {
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
.is_lockup = &r600_dma_is_lockup,
+ },
+ [R600_RING_TYPE_UVD_INDEX] = {
+ .ib_execute = &r600_uvd_ib_execute,
+ .emit_fence = &r600_uvd_fence_emit,
+ .emit_semaphore = &r600_uvd_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &r600_uvd_ring_test,
+ .ib_test = &r600_uvd_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
}
},
.irq = {
@@ -1142,6 +1159,8 @@ static struct radeon_asic rv770_asic = {
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &r600_hdmi_enable,
+ .hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_blit,
@@ -1174,6 +1193,7 @@ static struct radeon_asic rv770_asic = {
.get_pcie_lanes = &r600_get_pcie_lanes,
.set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_uvd_clocks = &rv770_set_uvd_clocks,
},
.pflip = {
.pre_page_flip = &rs600_pre_page_flip,
@@ -1216,6 +1236,15 @@ static struct radeon_asic evergreen_asic = {
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
.is_lockup = &evergreen_dma_is_lockup,
+ },
+ [R600_RING_TYPE_UVD_INDEX] = {
+ .ib_execute = &r600_uvd_ib_execute,
+ .emit_fence = &r600_uvd_fence_emit,
+ .emit_semaphore = &r600_uvd_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &r600_uvd_ring_test,
+ .ib_test = &r600_uvd_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
}
},
.irq = {
@@ -1228,6 +1257,8 @@ static struct radeon_asic evergreen_asic = {
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_blit,
@@ -1260,6 +1291,7 @@ static struct radeon_asic evergreen_asic = {
.get_pcie_lanes = &r600_get_pcie_lanes,
.set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
+ .set_uvd_clocks = &evergreen_set_uvd_clocks,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@@ -1302,6 +1334,15 @@ static struct radeon_asic sumo_asic = {
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
.is_lockup = &evergreen_dma_is_lockup,
+ },
+ [R600_RING_TYPE_UVD_INDEX] = {
+ .ib_execute = &r600_uvd_ib_execute,
+ .emit_fence = &r600_uvd_fence_emit,
+ .emit_semaphore = &r600_uvd_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &r600_uvd_ring_test,
+ .ib_test = &r600_uvd_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
}
},
.irq = {
@@ -1314,6 +1355,8 @@ static struct radeon_asic sumo_asic = {
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_blit,
@@ -1346,6 +1389,7 @@ static struct radeon_asic sumo_asic = {
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
+ .set_uvd_clocks = &sumo_set_uvd_clocks,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@@ -1388,6 +1432,15 @@ static struct radeon_asic btc_asic = {
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
.is_lockup = &evergreen_dma_is_lockup,
+ },
+ [R600_RING_TYPE_UVD_INDEX] = {
+ .ib_execute = &r600_uvd_ib_execute,
+ .emit_fence = &r600_uvd_fence_emit,
+ .emit_semaphore = &r600_uvd_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &r600_uvd_ring_test,
+ .ib_test = &r600_uvd_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
}
},
.irq = {
@@ -1400,6 +1453,8 @@ static struct radeon_asic btc_asic = {
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_blit,
@@ -1429,9 +1484,10 @@ static struct radeon_asic btc_asic = {
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
+ .set_uvd_clocks = &evergreen_set_uvd_clocks,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@@ -1517,6 +1573,15 @@ static struct radeon_asic cayman_asic = {
.ib_test = &r600_dma_ib_test,
.is_lockup = &cayman_dma_is_lockup,
.vm_flush = &cayman_dma_vm_flush,
+ },
+ [R600_RING_TYPE_UVD_INDEX] = {
+ .ib_execute = &r600_uvd_ib_execute,
+ .emit_fence = &r600_uvd_fence_emit,
+ .emit_semaphore = &cayman_uvd_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &r600_uvd_ring_test,
+ .ib_test = &r600_uvd_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
}
},
.irq = {
@@ -1529,6 +1594,8 @@ static struct radeon_asic cayman_asic = {
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_blit,
@@ -1558,9 +1625,10 @@ static struct radeon_asic cayman_asic = {
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
+ .set_uvd_clocks = &evergreen_set_uvd_clocks,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@@ -1646,6 +1714,15 @@ static struct radeon_asic trinity_asic = {
.ib_test = &r600_dma_ib_test,
.is_lockup = &cayman_dma_is_lockup,
.vm_flush = &cayman_dma_vm_flush,
+ },
+ [R600_RING_TYPE_UVD_INDEX] = {
+ .ib_execute = &r600_uvd_ib_execute,
+ .emit_fence = &r600_uvd_fence_emit,
+ .emit_semaphore = &cayman_uvd_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &r600_uvd_ring_test,
+ .ib_test = &r600_uvd_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
}
},
.irq = {
@@ -1690,6 +1767,7 @@ static struct radeon_asic trinity_asic = {
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
+ .set_uvd_clocks = &sumo_set_uvd_clocks,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@@ -1775,6 +1853,15 @@ static struct radeon_asic si_asic = {
.ib_test = &r600_dma_ib_test,
.is_lockup = &si_dma_is_lockup,
.vm_flush = &si_dma_vm_flush,
+ },
+ [R600_RING_TYPE_UVD_INDEX] = {
+ .ib_execute = &r600_uvd_ib_execute,
+ .emit_fence = &r600_uvd_fence_emit,
+ .emit_semaphore = &cayman_uvd_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &r600_uvd_ring_test,
+ .ib_test = &r600_uvd_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
}
},
.irq = {
@@ -1816,9 +1903,10 @@ static struct radeon_asic si_asic = {
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
+ .set_uvd_clocks = &si_set_uvd_clocks,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 3535f73..2c87365 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -330,6 +330,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages, struct radeon_fence **fence);
@@ -373,11 +374,12 @@ void r600_disable_interrupts(struct radeon_device *rdev);
void r600_rlc_stop(struct radeon_device *rdev);
/* r600 audio */
int r600_audio_init(struct radeon_device *rdev);
-void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
struct r600_audio r600_audio_status(struct radeon_device *rdev);
void r600_audio_fini(struct radeon_device *rdev);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
+void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
+void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
/* r600 blit */
int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
struct radeon_fence **fence, struct radeon_sa_bo **vb,
@@ -392,6 +394,19 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
u32 r600_get_xclk(struct radeon_device *rdev);
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
+/* uvd */
+int r600_uvd_init(struct radeon_device *rdev);
+int r600_uvd_rbc_start(struct radeon_device *rdev);
+void r600_uvd_rbc_stop(struct radeon_device *rdev);
+int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+void r600_uvd_fence_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+void r600_uvd_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
+void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+
/*
* rv770,rv730,rv710,rv740
*/
@@ -409,6 +424,8 @@ int rv770_copy_dma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_fence **fence);
u32 rv770_get_xclk(struct radeon_device *rdev);
+int rv770_uvd_resume(struct radeon_device *rdev);
+int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
/*
* evergreen
@@ -444,6 +461,8 @@ extern void evergreen_pm_prepare(struct radeon_device *rdev);
extern void evergreen_pm_finish(struct radeon_device *rdev);
extern void sumo_pm_init_profile(struct radeon_device *rdev);
extern void btc_pm_init_profile(struct radeon_device *rdev);
+int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
@@ -459,12 +478,18 @@ int evergreen_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
+void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
+void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
/*
* cayman
*/
void cayman_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
+void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
int cayman_init(struct radeon_device *rdev);
void cayman_fini(struct radeon_device *rdev);
@@ -524,5 +549,6 @@ int si_copy_dma(struct radeon_device *rdev,
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
u32 si_get_xclk(struct radeon_device *rdev);
uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
+int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f22eb57..dea6f63c 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2028,6 +2028,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
num_modes = power_info->info.ucNumOfPowerModeEntries;
if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+ if (num_modes == 0)
+ return state_index;
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
if (!rdev->pm.power_state)
return state_index;
@@ -2307,7 +2309,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
- if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
+ if ((rdev->family >= CHIP_BARTS) && !(rdev->flags & RADEON_IS_IGP)) {
/* NI chips post without MC ucode, so default clocks are strobe mode only */
rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
@@ -2345,7 +2347,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
}
- } else if (ASIC_IS_DCE6(rdev)) {
+ } else if (rdev->family >= CHIP_TAHITI) {
sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
sclk |= clock_info->si.ucEngineClockHigh << 16;
mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
@@ -2358,7 +2360,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
le16_to_cpu(clock_info->si.usVDDC);
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
le16_to_cpu(clock_info->si.usVDDCI);
- } else if (ASIC_IS_DCE4(rdev)) {
+ } else if (rdev->family >= CHIP_CEDAR) {
sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
@@ -2432,6 +2434,8 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+ if (power_info->pplib.ucNumStates == 0)
+ return state_index;
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
power_info->pplib.ucNumStates, GFP_KERNEL);
if (!rdev->pm.power_state)
@@ -2514,6 +2518,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
u16 data_offset;
u8 frev, crev;
+ u8 *power_state_offset;
if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset))
@@ -2530,15 +2535,17 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
non_clock_info_array = (struct _NonClockInfoArray *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+ if (state_array->ucNumEntries == 0)
+ return state_index;
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
state_array->ucNumEntries, GFP_KERNEL);
if (!rdev->pm.power_state)
return state_index;
+ power_state_offset = (u8 *)state_array->states;
for (i = 0; i < state_array->ucNumEntries; i++) {
mode_index = 0;
- power_state = (union pplib_power_state *)&state_array->states[i];
- /* XXX this might be an inagua bug... */
- non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
+ power_state = (union pplib_power_state *)power_state_offset;
+ non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
@@ -2550,9 +2557,6 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
if (power_state->v2.ucNumDPMLevels) {
for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
clock_array_index = power_state->v2.clockInfoIndex[j];
- /* XXX this might be an inagua bug... */
- if (clock_array_index >= clock_info_array->ucNumEntries)
- continue;
clock_info = (union pplib_clock_info *)
&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
valid = radeon_atombios_parse_pplib_clock_info(rdev,
@@ -2574,6 +2578,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
non_clock_info);
state_index++;
}
+ power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
}
/* if multiple clock modes, mark the lowest as no display */
for (i = 0; i < state_index; i++) {
@@ -2620,7 +2625,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
default:
break;
}
- } else {
+ }
+
+ if (state_index == 0) {
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
if (rdev->pm.power_state) {
rdev->pm.power_state[0].clock_info =
@@ -2654,6 +2661,111 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.current_vddc = 0;
}
+union get_clock_dividers {
+ struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS v1;
+ struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 v2;
+ struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 v3;
+ struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 v4;
+ struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 v5;
+};
+
+int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
+ u8 clock_type,
+ u32 clock,
+ bool strobe_mode,
+ struct atom_clock_dividers *dividers)
+{
+ union get_clock_dividers args;
+ int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL);
+ u8 frev, crev;
+
+ memset(&args, 0, sizeof(args));
+ memset(dividers, 0, sizeof(struct atom_clock_dividers));
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return -EINVAL;
+
+ switch (crev) {
+ case 1:
+ /* r4xx, r5xx */
+ args.v1.ucAction = clock_type;
+ args.v1.ulClock = cpu_to_le32(clock); /* 10 khz */
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ dividers->post_div = args.v1.ucPostDiv;
+ dividers->fb_div = args.v1.ucFbDiv;
+ dividers->enable_post_div = true;
+ break;
+ case 2:
+ case 3:
+ /* r6xx, r7xx, evergreen, ni */
+ if (rdev->family <= CHIP_RV770) {
+ args.v2.ucAction = clock_type;
+ args.v2.ulClock = cpu_to_le32(clock); /* 10 khz */
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ dividers->post_div = args.v2.ucPostDiv;
+ dividers->fb_div = le16_to_cpu(args.v2.usFbDiv);
+ dividers->ref_div = args.v2.ucAction;
+ if (rdev->family == CHIP_RV770) {
+ dividers->enable_post_div = (le32_to_cpu(args.v2.ulClock) & (1 << 24)) ?
+ true : false;
+ dividers->vco_mode = (le32_to_cpu(args.v2.ulClock) & (1 << 25)) ? 1 : 0;
+ } else
+ dividers->enable_post_div = (dividers->fb_div & 1) ? true : false;
+ } else {
+ if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
+ args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ dividers->post_div = args.v3.ucPostDiv;
+ dividers->enable_post_div = (args.v3.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+ dividers->enable_dithen = (args.v3.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+ dividers->fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
+ dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
+ dividers->ref_div = args.v3.ucRefDiv;
+ dividers->vco_mode = (args.v3.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+ } else {
+ args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+ if (strobe_mode)
+ args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ dividers->post_div = args.v5.ucPostDiv;
+ dividers->enable_post_div = (args.v5.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+ dividers->enable_dithen = (args.v5.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+ dividers->whole_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDiv);
+ dividers->frac_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDivFrac);
+ dividers->ref_div = args.v5.ucRefDiv;
+ dividers->vco_mode = (args.v5.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+ }
+ }
+ break;
+ case 4:
+ /* fusion */
+ args.v4.ulClock = cpu_to_le32(clock); /* 10 khz */
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ dividers->post_div = args.v4.ucPostDiv;
+ dividers->real_clock = le32_to_cpu(args.v4.ulClock);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
{
DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
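The radeon_atombios_parse_power_table_6() changes above stop indexing state_array->states[] as if entries were fixed-size and instead advance a byte pointer by 2 + ucNumDPMLevels per state, which matches a layout where each v2 state entry is a two-byte header followed by one clock-info index byte per DPM level. A stand-alone sketch of that walk over a packed buffer; the struct shape and the sample data are assumptions for illustration, not the exact ATOM table layout:

#include <stdio.h>
#include <stdint.h>

/* Assumed shape of one pplib v2 state entry: a two-byte header followed by
 * ucNumDPMLevels one-byte clock-info indices (variable length). */
struct pplib_state_v2 {
    uint8_t ucNumDPMLevels;
    uint8_t nonClockInfoIndex;
    uint8_t clockInfoIndex[];     /* ucNumDPMLevels entries */
};

int main(void)
{
    /* two packed states: one with 3 DPM levels, one with 1 */
    uint8_t table[] = { 3, 7, 10, 11, 12,   1, 8, 20 };
    unsigned num_states = 2, i, j;
    uint8_t *offset = table;      /* mirrors power_state_offset in the patch */

    for (i = 0; i < num_states; i++) {
        struct pplib_state_v2 *state = (struct pplib_state_v2 *)offset;

        printf("state %u: non-clock index %u, clock indices:", i,
               state->nonClockInfoIndex);
        for (j = 0; j < state->ucNumDPMLevels; j++)
            printf(" %u", state->clockInfoIndex[j]);
        printf("\n");

        /* variable-size entry: header (2 bytes) + one byte per DPM level */
        offset += 2 + state->ucNumDPMLevels;
    }
    return 0;
}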
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 70d3824..7e265a5 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -63,30 +63,50 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
break;
}
}
- if (!duplicate) {
- p->relocs[i].gobj = drm_gem_object_lookup(ddev,
- p->filp,
- r->handle);
- if (p->relocs[i].gobj == NULL) {
- DRM_ERROR("gem object lookup failed 0x%x\n",
- r->handle);
- return -ENOENT;
- }
- p->relocs_ptr[i] = &p->relocs[i];
- p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
- p->relocs[i].lobj.bo = p->relocs[i].robj;
- p->relocs[i].lobj.wdomain = r->write_domain;
- p->relocs[i].lobj.rdomain = r->read_domains;
- p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
- p->relocs[i].handle = r->handle;
- p->relocs[i].flags = r->flags;
- radeon_bo_list_add_object(&p->relocs[i].lobj,
- &p->validated);
-
- } else
+ if (duplicate) {
p->relocs[i].handle = 0;
+ continue;
+ }
+
+ p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
+ r->handle);
+ if (p->relocs[i].gobj == NULL) {
+ DRM_ERROR("gem object lookup failed 0x%x\n",
+ r->handle);
+ return -ENOENT;
+ }
+ p->relocs_ptr[i] = &p->relocs[i];
+ p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
+ p->relocs[i].lobj.bo = p->relocs[i].robj;
+ p->relocs[i].lobj.written = !!r->write_domain;
+
+ /* the first reloc of a UVD job is the
+ message and it must be placed in VRAM */
+ if (p->ring == R600_RING_TYPE_UVD_INDEX && i == 0) {
+ /* TODO: is this still needed for NI+ ? */
+ p->relocs[i].lobj.domain =
+ RADEON_GEM_DOMAIN_VRAM;
+
+ p->relocs[i].lobj.alt_domain =
+ RADEON_GEM_DOMAIN_VRAM;
+
+ } else {
+ uint32_t domain = r->write_domain ?
+ r->write_domain : r->read_domains;
+
+ p->relocs[i].lobj.domain = domain;
+ if (domain == RADEON_GEM_DOMAIN_VRAM)
+ domain |= RADEON_GEM_DOMAIN_GTT;
+ p->relocs[i].lobj.alt_domain = domain;
+ }
+
+ p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
+ p->relocs[i].handle = r->handle;
+
+ radeon_bo_list_add_object(&p->relocs[i].lobj,
+ &p->validated);
}
- return radeon_bo_list_validate(&p->validated);
+ return radeon_bo_list_validate(&p->validated, p->ring);
}
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
@@ -121,6 +141,9 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
return -EINVAL;
}
break;
+ case RADEON_CS_RING_UVD:
+ p->ring = R600_RING_TYPE_UVD_INDEX;
+ break;
}
return 0;
}
@@ -241,15 +264,15 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EINVAL;
}
- /* we only support VM on SI+ */
- if ((p->rdev->family >= CHIP_TAHITI) &&
- ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
- DRM_ERROR("VM required on SI+!\n");
+ if (radeon_cs_get_ring(p, ring, priority))
return -EINVAL;
- }
- if (radeon_cs_get_ring(p, ring, priority))
+ /* we only support VM on some SI+ rings */
+ if ((p->rdev->asic->ring[p->ring].cs_parse == NULL) &&
+ ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
+ DRM_ERROR("Ring %d requires VM!\n", p->ring);
return -EINVAL;
+ }
}
/* deal with non-vm */
@@ -526,6 +549,10 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = radeon_cs_handle_lockup(rdev, r);
return r;
}
+
+ if (parser.ring == R600_RING_TYPE_UVD_INDEX)
+ radeon_uvd_note_usage(rdev);
+
r = radeon_cs_ib_chunk(rdev, &parser);
if (r) {
goto out;
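The reworked relocation loop in the radeon_cs.c hunk above records one preferred domain plus a more permissive alt_domain per BO instead of separate read/write domains: the first relocation of a UVD submission is pinned to VRAM, and other VRAM placements are allowed to fall back to GTT during validation. A small model of just that decision; the helper name is invented and only the UAPI domain bit values are taken from the driver:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* same bit values as the radeon UAPI domains */
#define RADEON_GEM_DOMAIN_CPU   0x1
#define RADEON_GEM_DOMAIN_GTT   0x2
#define RADEON_GEM_DOMAIN_VRAM  0x4

struct placement { uint32_t domain, alt_domain; };

/* Mirrors the per-reloc domain choice made in radeon_cs_parser_relocs(). */
static struct placement choose_domains(bool uvd_ring, int reloc_index,
                                       uint32_t write_domain, uint32_t read_domains)
{
    struct placement p;

    if (uvd_ring && reloc_index == 0) {
        /* the UVD message buffer must stay in VRAM */
        p.domain = p.alt_domain = RADEON_GEM_DOMAIN_VRAM;
        return p;
    }

    p.domain = write_domain ? write_domain : read_domains;
    p.alt_domain = p.domain;
    if (p.domain == RADEON_GEM_DOMAIN_VRAM)
        p.alt_domain |= RADEON_GEM_DOMAIN_GTT;   /* allow fallback to GTT */
    return p;
}

int main(void)
{
    struct placement a = choose_domains(true, 0, 0, RADEON_GEM_DOMAIN_GTT);
    struct placement b = choose_domains(false, 3, RADEON_GEM_DOMAIN_VRAM, 0);

    printf("uvd msg: domain=0x%x alt=0x%x\n", a.domain, a.alt_domain);
    printf("vram bo: domain=0x%x alt=0x%x\n", b.domain, b.alt_domain);
    return 0;
}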
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 44b8034..a8f6089 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -98,6 +98,42 @@ static const char radeon_family_name[][16] = {
};
/**
+ * radeon_program_register_sequence - program an array of registers.
+ *
+ * @rdev: radeon_device pointer
+ * @registers: pointer to the register array
+ * @array_size: size of the register array
+ *
+ * Programs an array of registers with AND and OR masks.
+ * This is a helper for setting golden registers.
+ */
+void radeon_program_register_sequence(struct radeon_device *rdev,
+ const u32 *registers,
+ const u32 array_size)
+{
+ u32 tmp, reg, and_mask, or_mask;
+ int i;
+
+ if (array_size % 3)
+ return;
+
+ for (i = 0; i < array_size; i += 3) {
+ reg = registers[i + 0];
+ and_mask = registers[i + 1];
+ or_mask = registers[i + 2];
+
+ if (and_mask == 0xffffffff) {
+ tmp = or_mask;
+ } else {
+ tmp = RREG32(reg);
+ tmp &= ~and_mask;
+ tmp |= or_mask;
+ }
+ WREG32(reg, tmp);
+ }
+}
+
+/**
* radeon_surface_init - Clear GPU surface registers.
*
* @rdev: radeon_device pointer
@@ -359,7 +395,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
uint64_t limit = (uint64_t)radeon_vram_limit << 20;
mc->vram_start = base;
- if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
+ if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
mc->real_vram_size = mc->aper_size;
mc->mc_vram_size = mc->aper_size;
@@ -394,7 +430,7 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
u64 size_af, size_bf;
- size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
+ size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
size_bf = mc->vram_start & ~mc->gtt_base_align;
if (size_bf > size_af) {
if (mc->gtt_size > size_bf) {
@@ -1068,6 +1104,17 @@ int radeon_device_init(struct radeon_device *rdev,
radeon_agp_disable(rdev);
}
+ /* Set the internal MC address mask
+ * This is the max address of the GPU's
+ * internal address space.
+ */
+ if (rdev->family >= CHIP_CAYMAN)
+ rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
+ else if (rdev->family >= CHIP_CEDAR)
+ rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
+ else
+ rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
+
/* set DMA mask + need_dma32 flags.
* PCIE - can handle 40-bits.
* IGP - can handle 40-bits
@@ -1131,6 +1178,11 @@ int radeon_device_init(struct radeon_device *rdev,
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
+ r = radeon_gem_debugfs_init(rdev);
+ if (r) {
+ DRM_ERROR("registering gem debugfs failed (%d).\n", r);
+ }
+
if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
/* Acceleration not working on AGP card try again
* with fallback to PCI or PCIE GART
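radeon_program_register_sequence() introduced above walks a flat array of (offset, and_mask, or_mask) triplets: an and_mask of 0xffffffff means overwrite the register with or_mask, anything else means read-modify-write. A user-space model with MMIO replaced by an array; the sample "golden" triplets are made up purely for illustration:

#include <stdio.h>
#include <stdint.h>

static uint32_t regs[0x100];
#define RREG32(reg)     (regs[(reg)])
#define WREG32(reg, v)  (regs[(reg)] = (v))

/* Same triplet walk as radeon_program_register_sequence(). */
static void program_register_sequence(const uint32_t *registers, uint32_t array_size)
{
    uint32_t tmp, reg, and_mask, or_mask;
    uint32_t i;

    if (array_size % 3)
        return;                       /* must be whole triplets */

    for (i = 0; i < array_size; i += 3) {
        reg      = registers[i + 0];
        and_mask = registers[i + 1];
        or_mask  = registers[i + 2];

        if (and_mask == 0xffffffff) {
            tmp = or_mask;            /* full overwrite */
        } else {
            tmp = RREG32(reg);        /* read-modify-write */
            tmp &= ~and_mask;
            tmp |= or_mask;
        }
        WREG32(reg, tmp);
    }
}

int main(void)
{
    /* made-up "golden register" triplets: offset, and_mask, or_mask */
    static const uint32_t golden[] = {
        0x10, 0xffffffff, 0x12345678,   /* overwrite reg 0x10           */
        0x20, 0x000000ff, 0x00000042,   /* replace low byte of reg 0x20 */
    };

    regs[0x20] = 0xdeadbe00;
    program_register_sequence(golden, sizeof(golden) / sizeof(golden[0]));
    printf("reg 0x10 = 0x%08x, reg 0x20 = 0x%08x\n", regs[0x10], regs[0x20]);
    return 0;
}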
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 66a7f0f..d33f484 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -71,9 +71,12 @@
* 2.28.0 - r600-eg: Add MEM_WRITE packet support
* 2.29.0 - R500 FP16 color clear registers
* 2.30.0 - fix for FMASK texturing
+ * 2.31.0 - Add fastfb support for rs690
+ * 2.32.0 - new info request for rings working
+ * 2.33.0 - Add SI tiling mode array query
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 30
+#define KMS_DRIVER_MINOR 33
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -160,6 +163,7 @@ int radeon_hw_i2c = 0;
int radeon_pcie_gen2 = -1;
int radeon_msi = -1;
int radeon_lockup_timeout = 10000;
+int radeon_fastfb = 0;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -212,6 +216,9 @@ module_param_named(msi, radeon_msi, int, 0444);
MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)");
module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
+MODULE_PARM_DESC(fastfb, "Direct FB access for IGP chips (0 = disable, 1 = enable)");
+module_param_named(fastfb, radeon_fastfb, int, 0444);
+
static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 3435625..5b937df 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -31,9 +31,9 @@
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
-#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
+#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
@@ -768,7 +768,19 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
rdev->fence_drv[ring].scratch_reg = 0;
- index = R600_WB_EVENT_OFFSET + ring * 4;
+ if (ring != R600_RING_TYPE_UVD_INDEX) {
+ index = R600_WB_EVENT_OFFSET + ring * 4;
+ rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
+ rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
+ index;
+
+ } else {
+ /* put fence directly behind firmware */
+ index = ALIGN(rdev->uvd_fw->size, 8);
+ rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
+ rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
+ }
+
} else {
r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
if (r) {
@@ -778,9 +790,9 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
index = RADEON_WB_SCRATCH_OFFSET +
rdev->fence_drv[ring].scratch_reg -
rdev->scratch.reg_base;
+ rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
+ rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
}
- rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
- rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
rdev->fence_drv[ring].initialized = true;
dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
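For the UVD ring, the radeon_fence.c hunk above stops using a writeback slot and instead keeps the fence value inside the UVD BO, at index = ALIGN(rdev->uvd_fw->size, 8), i.e. the first 8-byte-aligned offset after the firmware image. A short sketch of that round-up using the usual power-of-two ALIGN formula; the sample firmware size is invented:

#include <stdio.h>
#include <stdint.h>

/* round x up to the next multiple of a (a must be a power of two),
 * same idea as the kernel's ALIGN() macro */
#define ALIGN(x, a)  (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
    uint64_t fw_size = 212043;                 /* invented firmware size in bytes */
    uint64_t fence_offset = ALIGN(fw_size, 8); /* first 8-byte slot after the fw */

    printf("firmware ends at %llu, fence at offset %llu\n",
           (unsigned long long)fw_size, (unsigned long long)fence_offset);
    return 0;
}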
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index fe5c1f6..aa79603 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -84,6 +84,7 @@ retry:
return r;
}
*obj = &robj->gem_base;
+ robj->pid = task_pid_nr(current);
mutex_lock(&rdev->gem.mutex);
list_add_tail(&robj->list, &rdev->gem.objects);
@@ -575,3 +576,52 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
{
return drm_gem_handle_delete(file_priv, handle);
}
+
+#if defined(CONFIG_DEBUG_FS)
+static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_bo *rbo;
+ unsigned i = 0;
+
+ mutex_lock(&rdev->gem.mutex);
+ list_for_each_entry(rbo, &rdev->gem.objects, list) {
+ unsigned domain;
+ const char *placement;
+
+ domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
+ switch (domain) {
+ case RADEON_GEM_DOMAIN_VRAM:
+ placement = "VRAM";
+ break;
+ case RADEON_GEM_DOMAIN_GTT:
+ placement = " GTT";
+ break;
+ case RADEON_GEM_DOMAIN_CPU:
+ default:
+ placement = " CPU";
+ break;
+ }
+ seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
+ i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
+ placement, (unsigned long)rbo->pid);
+ i++;
+ }
+ mutex_unlock(&rdev->gem.mutex);
+ return 0;
+}
+
+static struct drm_info_list radeon_debugfs_gem_list[] = {
+ {"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
+};
+#endif
+
+int radeon_gem_debugfs_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
+#endif
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index c75cb2c..4f2d4f4 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -50,9 +50,13 @@ int radeon_driver_unload_kms(struct drm_device *dev)
if (rdev == NULL)
return 0;
+ if (rdev->rmmio == NULL)
+ goto done_free;
radeon_acpi_fini(rdev);
radeon_modeset_fini(rdev);
radeon_device_fini(rdev);
+
+done_free:
kfree(rdev);
dev->dev_private = NULL;
return 0;
@@ -176,80 +180,65 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_info *info = data;
struct radeon_mode_info *minfo = &rdev->mode_info;
- uint32_t value, *value_ptr;
- uint64_t value64, *value_ptr64;
+ uint32_t *value, value_tmp, *value_ptr, value_size;
+ uint64_t value64;
struct drm_crtc *crtc;
int i, found;
- /* TIMESTAMP is a 64-bit value, needs special handling. */
- if (info->request == RADEON_INFO_TIMESTAMP) {
- if (rdev->family >= CHIP_R600) {
- value_ptr64 = (uint64_t*)((unsigned long)info->value);
- value64 = radeon_get_gpu_clock_counter(rdev);
-
- if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) {
- DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
- return -EFAULT;
- }
- return 0;
- } else {
- DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
- return -EINVAL;
- }
- }
-
value_ptr = (uint32_t *)((unsigned long)info->value);
- if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) {
- DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
- return -EFAULT;
- }
+ value = &value_tmp;
+ value_size = sizeof(uint32_t);
switch (info->request) {
case RADEON_INFO_DEVICE_ID:
- value = dev->pci_device;
+ *value = dev->pci_device;
break;
case RADEON_INFO_NUM_GB_PIPES:
- value = rdev->num_gb_pipes;
+ *value = rdev->num_gb_pipes;
break;
case RADEON_INFO_NUM_Z_PIPES:
- value = rdev->num_z_pipes;
+ *value = rdev->num_z_pipes;
break;
case RADEON_INFO_ACCEL_WORKING:
/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
- value = false;
+ *value = false;
else
- value = rdev->accel_working;
+ *value = rdev->accel_working;
break;
case RADEON_INFO_CRTC_FROM_ID:
+ if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+ DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+ return -EFAULT;
+ }
for (i = 0, found = 0; i < rdev->num_crtc; i++) {
crtc = (struct drm_crtc *)minfo->crtcs[i];
- if (crtc && crtc->base.id == value) {
+ if (crtc && crtc->base.id == *value) {
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- value = radeon_crtc->crtc_id;
+ *value = radeon_crtc->crtc_id;
found = 1;
break;
}
}
if (!found) {
- DRM_DEBUG_KMS("unknown crtc id %d\n", value);
+ DRM_DEBUG_KMS("unknown crtc id %d\n", *value);
return -EINVAL;
}
break;
case RADEON_INFO_ACCEL_WORKING2:
- value = rdev->accel_working;
+ *value = rdev->accel_working;
break;
case RADEON_INFO_TILING_CONFIG:
if (rdev->family >= CHIP_TAHITI)
- value = rdev->config.si.tile_config;
+ *value = rdev->config.si.tile_config;
else if (rdev->family >= CHIP_CAYMAN)
- value = rdev->config.cayman.tile_config;
+ *value = rdev->config.cayman.tile_config;
else if (rdev->family >= CHIP_CEDAR)
- value = rdev->config.evergreen.tile_config;
+ *value = rdev->config.evergreen.tile_config;
else if (rdev->family >= CHIP_RV770)
- value = rdev->config.rv770.tile_config;
+ *value = rdev->config.rv770.tile_config;
else if (rdev->family >= CHIP_R600)
- value = rdev->config.r600.tile_config;
+ *value = rdev->config.r600.tile_config;
else {
DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
return -EINVAL;
@@ -262,73 +251,81 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
*
* When returning, the value is 1 if filp owns hyper-z access,
* 0 otherwise. */
- if (value >= 2) {
- DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value);
+ if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+ DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ if (*value >= 2) {
+ DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value);
return -EINVAL;
}
- radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value);
+ radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value);
break;
case RADEON_INFO_WANT_CMASK:
/* The same logic as Hyper-Z. */
- if (value >= 2) {
- DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value);
+ if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+ DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ if (*value >= 2) {
+ DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value);
return -EINVAL;
}
- radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
+ radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value);
break;
case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
/* return clock value in KHz */
if (rdev->asic->get_xclk)
- value = radeon_get_xclk(rdev) * 10;
+ *value = radeon_get_xclk(rdev) * 10;
else
- value = rdev->clock.spll.reference_freq * 10;
+ *value = rdev->clock.spll.reference_freq * 10;
break;
case RADEON_INFO_NUM_BACKENDS:
if (rdev->family >= CHIP_TAHITI)
- value = rdev->config.si.max_backends_per_se *
+ *value = rdev->config.si.max_backends_per_se *
rdev->config.si.max_shader_engines;
else if (rdev->family >= CHIP_CAYMAN)
- value = rdev->config.cayman.max_backends_per_se *
+ *value = rdev->config.cayman.max_backends_per_se *
rdev->config.cayman.max_shader_engines;
else if (rdev->family >= CHIP_CEDAR)
- value = rdev->config.evergreen.max_backends;
+ *value = rdev->config.evergreen.max_backends;
else if (rdev->family >= CHIP_RV770)
- value = rdev->config.rv770.max_backends;
+ *value = rdev->config.rv770.max_backends;
else if (rdev->family >= CHIP_R600)
- value = rdev->config.r600.max_backends;
+ *value = rdev->config.r600.max_backends;
else {
return -EINVAL;
}
break;
case RADEON_INFO_NUM_TILE_PIPES:
if (rdev->family >= CHIP_TAHITI)
- value = rdev->config.si.max_tile_pipes;
+ *value = rdev->config.si.max_tile_pipes;
else if (rdev->family >= CHIP_CAYMAN)
- value = rdev->config.cayman.max_tile_pipes;
+ *value = rdev->config.cayman.max_tile_pipes;
else if (rdev->family >= CHIP_CEDAR)
- value = rdev->config.evergreen.max_tile_pipes;
+ *value = rdev->config.evergreen.max_tile_pipes;
else if (rdev->family >= CHIP_RV770)
- value = rdev->config.rv770.max_tile_pipes;
+ *value = rdev->config.rv770.max_tile_pipes;
else if (rdev->family >= CHIP_R600)
- value = rdev->config.r600.max_tile_pipes;
+ *value = rdev->config.r600.max_tile_pipes;
else {
return -EINVAL;
}
break;
case RADEON_INFO_FUSION_GART_WORKING:
- value = 1;
+ *value = 1;
break;
case RADEON_INFO_BACKEND_MAP:
if (rdev->family >= CHIP_TAHITI)
- value = rdev->config.si.backend_map;
+ *value = rdev->config.si.backend_map;
else if (rdev->family >= CHIP_CAYMAN)
- value = rdev->config.cayman.backend_map;
+ *value = rdev->config.cayman.backend_map;
else if (rdev->family >= CHIP_CEDAR)
- value = rdev->config.evergreen.backend_map;
+ *value = rdev->config.evergreen.backend_map;
else if (rdev->family >= CHIP_RV770)
- value = rdev->config.rv770.backend_map;
+ *value = rdev->config.rv770.backend_map;
else if (rdev->family >= CHIP_R600)
- value = rdev->config.r600.backend_map;
+ *value = rdev->config.r600.backend_map;
else {
return -EINVAL;
}
@@ -337,50 +334,91 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
/* this is where we report if vm is supported or not */
if (rdev->family < CHIP_CAYMAN)
return -EINVAL;
- value = RADEON_VA_RESERVED_SIZE;
+ *value = RADEON_VA_RESERVED_SIZE;
break;
case RADEON_INFO_IB_VM_MAX_SIZE:
/* this is where we report if vm is supported or not */
if (rdev->family < CHIP_CAYMAN)
return -EINVAL;
- value = RADEON_IB_VM_MAX_SIZE;
+ *value = RADEON_IB_VM_MAX_SIZE;
break;
case RADEON_INFO_MAX_PIPES:
if (rdev->family >= CHIP_TAHITI)
- value = rdev->config.si.max_cu_per_sh;
+ *value = rdev->config.si.max_cu_per_sh;
else if (rdev->family >= CHIP_CAYMAN)
- value = rdev->config.cayman.max_pipes_per_simd;
+ *value = rdev->config.cayman.max_pipes_per_simd;
else if (rdev->family >= CHIP_CEDAR)
- value = rdev->config.evergreen.max_pipes;
+ *value = rdev->config.evergreen.max_pipes;
else if (rdev->family >= CHIP_RV770)
- value = rdev->config.rv770.max_pipes;
+ *value = rdev->config.rv770.max_pipes;
else if (rdev->family >= CHIP_R600)
- value = rdev->config.r600.max_pipes;
+ *value = rdev->config.r600.max_pipes;
else {
return -EINVAL;
}
break;
+ case RADEON_INFO_TIMESTAMP:
+ if (rdev->family < CHIP_R600) {
+ DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
+ return -EINVAL;
+ }
+ value = (uint32_t*)&value64;
+ value_size = sizeof(uint64_t);
+ value64 = radeon_get_gpu_clock_counter(rdev);
+ break;
case RADEON_INFO_MAX_SE:
if (rdev->family >= CHIP_TAHITI)
- value = rdev->config.si.max_shader_engines;
+ *value = rdev->config.si.max_shader_engines;
else if (rdev->family >= CHIP_CAYMAN)
- value = rdev->config.cayman.max_shader_engines;
+ *value = rdev->config.cayman.max_shader_engines;
else if (rdev->family >= CHIP_CEDAR)
- value = rdev->config.evergreen.num_ses;
+ *value = rdev->config.evergreen.num_ses;
else
- value = 1;
+ *value = 1;
break;
case RADEON_INFO_MAX_SH_PER_SE:
if (rdev->family >= CHIP_TAHITI)
- value = rdev->config.si.max_sh_per_se;
+ *value = rdev->config.si.max_sh_per_se;
else
return -EINVAL;
break;
+ case RADEON_INFO_FASTFB_WORKING:
+ *value = rdev->fastfb_working;
+ break;
+ case RADEON_INFO_RING_WORKING:
+ if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+ DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ switch (*value) {
+ case RADEON_CS_RING_GFX:
+ case RADEON_CS_RING_COMPUTE:
+ *value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready;
+ break;
+ case RADEON_CS_RING_DMA:
+ *value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready;
+ *value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready;
+ break;
+ case RADEON_CS_RING_UVD:
+ *value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case RADEON_INFO_SI_TILE_MODE_ARRAY:
+ if (rdev->family < CHIP_TAHITI) {
+ DRM_DEBUG_KMS("tile mode array is si only!\n");
+ return -EINVAL;
+ }
+ value = rdev->config.si.tile_mode_array;
+ value_size = sizeof(uint32_t)*32;
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
}
- if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
+ if (DRM_COPY_TO_USER(value_ptr, (char*)value, value_size)) {
DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
return -EFAULT;
}
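The reworked ioctl above funnels every request through a single copy-out: "value" normally points at the on-stack value_tmp and value_size stays at one dword, while requests with larger answers (the 64-bit timestamp, the 32-entry SI tile mode array) repoint "value" and bump value_size; requests that also consume input do an explicit DRM_COPY_FROM_USER first. A minimal standalone sketch of the same pattern follows; the request numbers and the plain memcpy standing in for DRM_COPY_TO_USER are assumptions for illustration only.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Sketch (not kernel code) of the value/value_size pattern used by the
 * reworked radeon_info_ioctl: every request fills its answer through
 * "value", and one copy at the end honours "value_size". */
static int fake_info(unsigned int request, void *user_ptr)
{
	uint32_t value_tmp, *value = &value_tmp;	/* default: one dword */
	uint64_t value64;
	size_t value_size = sizeof(uint32_t);

	switch (request) {
	case 0:				/* ordinary 32-bit answer */
		*value = 42;
		break;
	case 1:				/* 64-bit answer, like the timestamp */
		value64 = 0x123456789abcdefULL;
		value = (uint32_t *)&value64;
		value_size = sizeof(uint64_t);
		break;
	default:
		return -1;
	}
	memcpy(user_ptr, value, value_size);	/* single copy-out at the end */
	return 0;
}

int main(void)
{
	uint64_t out = 0;
	fake_info(1, &out);
	printf("0x%llx\n", (unsigned long long)out);
	return 0;
}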
@@ -513,6 +551,7 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
rdev->hyperz_filp = NULL;
if (rdev->cmask_filp == file_priv)
rdev->cmask_filp = NULL;
+ radeon_uvd_free_handles(rdev, file_priv);
}
/*
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 4003f5a..44e579e 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -492,6 +492,29 @@ struct radeon_framebuffer {
#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
((em) == ATOM_ENCODER_MODE_DP_MST))
+struct atom_clock_dividers {
+ u32 post_div;
+ union {
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 reserved : 6;
+ u32 whole_fb_div : 12;
+ u32 frac_fb_div : 14;
+#else
+ u32 frac_fb_div : 14;
+ u32 whole_fb_div : 12;
+ u32 reserved : 6;
+#endif
+ };
+ u32 fb_div;
+ };
+ u32 ref_div;
+ bool enable_post_div;
+ bool enable_dithen;
+ u32 vco_mode;
+ u32 real_clock;
+};
+
extern enum radeon_tv_std
radeon_combios_get_tv_info(struct radeon_device *rdev);
extern enum radeon_tv_std
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d3aface..1424ccd 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -321,8 +321,10 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
int radeon_bo_init(struct radeon_device *rdev)
{
/* Add an MTRR for the VRAM */
- rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
+ if (!rdev->fastfb_working) {
+ rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
MTRR_TYPE_WRCOMB, 1);
+ }
DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
rdev->mc.mc_vram_size >> 20,
(unsigned long long)rdev->mc.aper_size >> 20);
@@ -339,14 +341,14 @@ void radeon_bo_fini(struct radeon_device *rdev)
void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
struct list_head *head)
{
- if (lobj->wdomain) {
+ if (lobj->written) {
list_add(&lobj->tv.head, head);
} else {
list_add_tail(&lobj->tv.head, head);
}
}
-int radeon_bo_list_validate(struct list_head *head)
+int radeon_bo_list_validate(struct list_head *head, int ring)
{
struct radeon_bo_list *lobj;
struct radeon_bo *bo;
@@ -360,15 +362,17 @@ int radeon_bo_list_validate(struct list_head *head)
list_for_each_entry(lobj, head, tv.head) {
bo = lobj->bo;
if (!bo->pin_count) {
- domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
+ domain = lobj->domain;
retry:
radeon_ttm_placement_from_domain(bo, domain);
+ if (ring == R600_RING_TYPE_UVD_INDEX)
+ radeon_uvd_force_into_uvd_segment(bo);
r = ttm_bo_validate(&bo->tbo, &bo->placement,
true, false);
if (unlikely(r)) {
- if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
- domain |= RADEON_GEM_DOMAIN_GTT;
+ if (r != -ERESTARTSYS && domain != lobj->alt_domain) {
+ domain = lobj->alt_domain;
goto retry;
}
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 5fc86b0..e2cb80a 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -128,7 +128,7 @@ extern int radeon_bo_init(struct radeon_device *rdev);
extern void radeon_bo_fini(struct radeon_device *rdev);
extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
struct list_head *head);
-extern int radeon_bo_list_validate(struct list_head *head);
+extern int radeon_bo_list_validate(struct list_head *head, int ring);
extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
struct vm_area_struct *vma);
extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 338fd6a..788c64c 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -843,7 +843,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct radeon_device *rdev = dev->dev_private;
seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
- seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+ /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
+ if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
+ seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
+ else
+ seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
if (rdev->asic->pm.get_memory_clock)
seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 8d58e26..e17faa7 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -180,7 +180,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
radeon_semaphore_free(rdev, &ib->semaphore, NULL);
}
/* if we can't remember our last VM flush then flush now! */
- if (ib->vm && !ib->vm->last_flush) {
+ /* XXX figure out why we have to flush for every IB */
+ if (ib->vm /*&& !ib->vm->last_flush*/) {
radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
}
if (const_ib) {
@@ -368,7 +369,7 @@ void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 rptr;
- if (rdev->wb.enabled)
+ if (rdev->wb.enabled && ring != &rdev->ring[R600_RING_TYPE_UVD_INDEX])
rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
else
rptr = RREG32(ring->rptr_reg);
@@ -821,18 +822,20 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
return 0;
}
-static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
-static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
-static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
-static int radeon_ring_type_dma1_index = R600_RING_TYPE_DMA_INDEX;
-static int radeon_ring_type_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
+static int radeon_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
+static int cayman_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
+static int cayman_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
+static int radeon_dma1_index = R600_RING_TYPE_DMA_INDEX;
+static int radeon_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
+static int r600_uvd_index = R600_RING_TYPE_UVD_INDEX;
static struct drm_info_list radeon_debugfs_ring_info_list[] = {
- {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
- {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
- {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
- {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma1_index},
- {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma2_index},
+ {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_gfx_index},
+ {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_cp1_index},
+ {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_cp2_index},
+ {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_dma1_index},
+ {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_dma2_index},
+ {"radeon_ring_uvd", radeon_debugfs_ring_info, 0, &r600_uvd_index},
};
static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index cb80099..0abe5a9 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -64,7 +64,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
}
r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_CPU, NULL, &sa_manager->bo);
+ domain, NULL, &sa_manager->bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index fda09c9..bbed4af 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -252,6 +252,36 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
}
+static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_fence **fence)
+{
+ int r;
+
+ if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
+ r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
+ if (r) {
+ DRM_ERROR("Failed to get dummy create msg\n");
+ return r;
+ }
+
+ r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, fence);
+ if (r) {
+ DRM_ERROR("Failed to get dummy destroy msg\n");
+ return r;
+ }
+ } else {
+ r = radeon_ring_lock(rdev, ring, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
+ return r;
+ }
+ radeon_fence_emit(rdev, fence, ring->idx);
+ radeon_ring_unlock_commit(rdev, ring);
+ }
+ return 0;
+}
+
void radeon_test_ring_sync(struct radeon_device *rdev,
struct radeon_ring *ringA,
struct radeon_ring *ringB)
@@ -272,21 +302,24 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
- r = radeon_fence_emit(rdev, &fence1, ringA->idx);
- if (r) {
- DRM_ERROR("Failed to emit fence 1\n");
- radeon_ring_unlock_undo(rdev, ringA);
+ radeon_ring_unlock_commit(rdev, ringA);
+
+ r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
+ if (r)
goto out_cleanup;
- }
- radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
- r = radeon_fence_emit(rdev, &fence2, ringA->idx);
+
+ r = radeon_ring_lock(rdev, ringA, 64);
if (r) {
- DRM_ERROR("Failed to emit fence 2\n");
- radeon_ring_unlock_undo(rdev, ringA);
+ DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
goto out_cleanup;
}
+ radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
radeon_ring_unlock_commit(rdev, ringA);
+ r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
+ if (r)
+ goto out_cleanup;
+
mdelay(1000);
if (radeon_fence_signaled(fence1)) {
@@ -364,27 +397,22 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
- r = radeon_fence_emit(rdev, &fenceA, ringA->idx);
- if (r) {
- DRM_ERROR("Failed to emit sync fence 1\n");
- radeon_ring_unlock_undo(rdev, ringA);
- goto out_cleanup;
- }
radeon_ring_unlock_commit(rdev, ringA);
+ r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
+ if (r)
+ goto out_cleanup;
+
r = radeon_ring_lock(rdev, ringB, 64);
if (r) {
DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
goto out_cleanup;
}
radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
- r = radeon_fence_emit(rdev, &fenceB, ringB->idx);
- if (r) {
- DRM_ERROR("Failed to create sync fence 2\n");
- radeon_ring_unlock_undo(rdev, ringB);
- goto out_cleanup;
- }
radeon_ring_unlock_commit(rdev, ringB);
+ r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
+ if (r)
+ goto out_cleanup;
mdelay(1000);
@@ -393,7 +421,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
goto out_cleanup;
}
if (radeon_fence_signaled(fenceB)) {
- DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
+ DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
new file mode 100644
index 0000000..906e5c0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -0,0 +1,831 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ * Christian König <deathsimple@vodafone.de>
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "radeon.h"
+#include "r600d.h"
+
+/* 1 second timeout */
+#define UVD_IDLE_TIMEOUT_MS 1000
+
+/* Firmware Names */
+#define FIRMWARE_RV710 "radeon/RV710_uvd.bin"
+#define FIRMWARE_CYPRESS "radeon/CYPRESS_uvd.bin"
+#define FIRMWARE_SUMO "radeon/SUMO_uvd.bin"
+#define FIRMWARE_TAHITI "radeon/TAHITI_uvd.bin"
+
+MODULE_FIRMWARE(FIRMWARE_RV710);
+MODULE_FIRMWARE(FIRMWARE_CYPRESS);
+MODULE_FIRMWARE(FIRMWARE_SUMO);
+MODULE_FIRMWARE(FIRMWARE_TAHITI);
+
+static void radeon_uvd_idle_work_handler(struct work_struct *work);
+
+int radeon_uvd_init(struct radeon_device *rdev)
+{
+ struct platform_device *pdev;
+ unsigned long bo_size;
+ const char *fw_name;
+ int i, r;
+
+ INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);
+
+ pdev = platform_device_register_simple("radeon_uvd", 0, NULL, 0);
+ r = IS_ERR(pdev);
+ if (r) {
+ dev_err(rdev->dev, "radeon_uvd: Failed to register firmware\n");
+ return -EINVAL;
+ }
+
+ switch (rdev->family) {
+ case CHIP_RV710:
+ case CHIP_RV730:
+ case CHIP_RV740:
+ fw_name = FIRMWARE_RV710;
+ break;
+
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ case CHIP_JUNIPER:
+ case CHIP_REDWOOD:
+ case CHIP_CEDAR:
+ fw_name = FIRMWARE_CYPRESS;
+ break;
+
+ case CHIP_SUMO:
+ case CHIP_SUMO2:
+ case CHIP_PALM:
+ case CHIP_CAYMAN:
+ case CHIP_BARTS:
+ case CHIP_TURKS:
+ case CHIP_CAICOS:
+ fw_name = FIRMWARE_SUMO;
+ break;
+
+ case CHIP_TAHITI:
+ case CHIP_VERDE:
+ case CHIP_PITCAIRN:
+ case CHIP_ARUBA:
+ fw_name = FIRMWARE_TAHITI;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ r = request_firmware(&rdev->uvd_fw, fw_name, &pdev->dev);
+ if (r) {
+ dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
+ fw_name);
+ platform_device_unregister(pdev);
+ return r;
+ }
+
+ platform_device_unregister(pdev);
+
+ bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
+ RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
+ r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
+ if (r) {
+ dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
+ return r;
+ }
+
+ r = radeon_uvd_resume(rdev);
+ if (r)
+ return r;
+
+ memset(rdev->uvd.cpu_addr, 0, bo_size);
+ memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+
+ r = radeon_uvd_suspend(rdev);
+ if (r)
+ return r;
+
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ atomic_set(&rdev->uvd.handles[i], 0);
+ rdev->uvd.filp[i] = NULL;
+ }
+
+ return 0;
+}
+
+void radeon_uvd_fini(struct radeon_device *rdev)
+{
+ radeon_uvd_suspend(rdev);
+ radeon_bo_unref(&rdev->uvd.vcpu_bo);
+}
+
+int radeon_uvd_suspend(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->uvd.vcpu_bo == NULL)
+ return 0;
+
+ r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
+ if (!r) {
+ radeon_bo_kunmap(rdev->uvd.vcpu_bo);
+ radeon_bo_unpin(rdev->uvd.vcpu_bo);
+ radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+ }
+ return r;
+}
+
+int radeon_uvd_resume(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->uvd.vcpu_bo == NULL)
+ return -EINVAL;
+
+ r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
+ if (r) {
+ radeon_bo_unref(&rdev->uvd.vcpu_bo);
+ dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
+ return r;
+ }
+
+ r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->uvd.gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+ radeon_bo_unref(&rdev->uvd.vcpu_bo);
+ dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
+ return r;
+ }
+
+ r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
+ if (r) {
+ dev_err(rdev->dev, "(%d) UVD map failed\n", r);
+ return r;
+ }
+
+ radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+
+ return 0;
+}
+
+void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
+{
+ rbo->placement.fpfn = 0 >> PAGE_SHIFT;
+ rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
+}
+
+void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
+{
+ int i, r;
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ if (rdev->uvd.filp[i] == filp) {
+ uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+ struct radeon_fence *fence;
+
+ r = radeon_uvd_get_destroy_msg(rdev,
+ R600_RING_TYPE_UVD_INDEX, handle, &fence);
+ if (r) {
+ DRM_ERROR("Error destroying UVD (%d)!\n", r);
+ continue;
+ }
+
+ radeon_fence_wait(fence, false);
+ radeon_fence_unref(&fence);
+
+ rdev->uvd.filp[i] = NULL;
+ atomic_set(&rdev->uvd.handles[i], 0);
+ }
+ }
+}
+
+static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
+{
+ unsigned stream_type = msg[4];
+ unsigned width = msg[6];
+ unsigned height = msg[7];
+ unsigned dpb_size = msg[9];
+ unsigned pitch = msg[28];
+
+ unsigned width_in_mb = width / 16;
+ unsigned height_in_mb = ALIGN(height / 16, 2);
+
+ unsigned image_size, tmp, min_dpb_size;
+
+ image_size = width * height;
+ image_size += image_size / 2;
+ image_size = ALIGN(image_size, 1024);
+
+ switch (stream_type) {
+ case 0: /* H264 */
+
+ /* reference picture buffer */
+ min_dpb_size = image_size * 17;
+
+ /* macroblock context buffer */
+ min_dpb_size += width_in_mb * height_in_mb * 17 * 192;
+
+ /* IT surface buffer */
+ min_dpb_size += width_in_mb * height_in_mb * 32;
+ break;
+
+ case 1: /* VC1 */
+
+ /* reference picture buffer */
+ min_dpb_size = image_size * 3;
+
+ /* CONTEXT_BUFFER */
+ min_dpb_size += width_in_mb * height_in_mb * 128;
+
+ /* IT surface buffer */
+ min_dpb_size += width_in_mb * 64;
+
+ /* DB surface buffer */
+ min_dpb_size += width_in_mb * 128;
+
+ /* BP */
+ tmp = max(width_in_mb, height_in_mb);
+ min_dpb_size += ALIGN(tmp * 7 * 16, 64);
+ break;
+
+ case 3: /* MPEG2 */
+
+ /* reference picture buffer */
+ min_dpb_size = image_size * 3;
+ break;
+
+ case 4: /* MPEG4 */
+
+ /* reference picture buffer */
+ min_dpb_size = image_size * 3;
+
+ /* CM */
+ min_dpb_size += width_in_mb * height_in_mb * 64;
+
+ /* IT surface buffer */
+ min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
+ break;
+
+ default:
+ DRM_ERROR("UVD codec not handled %d!\n", stream_type);
+ return -EINVAL;
+ }
+
+ if (width > pitch) {
+ DRM_ERROR("Invalid UVD decoding target pitch!\n");
+ return -EINVAL;
+ }
+
+ if (dpb_size < min_dpb_size) {
+ DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
+ dpb_size, min_dpb_size);
+ return -EINVAL;
+ }
+
+ buf_sizes[0x1] = dpb_size;
+ buf_sizes[0x2] = image_size;
+ return 0;
+}
+
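+As a rough sanity check of the H264 branch above, a 1920x1088 frame works out to a minimum DPB of about 76 MiB. The small host-side program below simply re-runs that arithmetic and is not driver code.
+
+#include <stdio.h>
+
+#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))	/* power-of-two align */
+
+int main(void)
+{
+	unsigned width = 1920, height = 1088;
+	unsigned width_in_mb = width / 16;		/* 120 */
+	unsigned height_in_mb = ALIGN(height / 16, 2);	/* 68  */
+	unsigned image_size, min_dpb_size;
+
+	image_size = width * height;
+	image_size += image_size / 2;		/* luma + half-size chroma */
+	image_size = ALIGN(image_size, 1024);	/* 3133440 */
+
+	min_dpb_size  = image_size * 17;			/* reference pictures */
+	min_dpb_size += width_in_mb * height_in_mb * 17 * 192;	/* macroblock context */
+	min_dpb_size += width_in_mb * height_in_mb * 32;	/* IT surface */
+
+	printf("min_dpb_size = %u bytes (~%u MiB)\n",
+	       min_dpb_size, min_dpb_size >> 20);	/* 80163840 bytes, ~76 MiB */
+	return 0;
+}
+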
+static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
+ unsigned offset, unsigned buf_sizes[])
+{
+ int32_t *msg, msg_type, handle;
+ void *ptr;
+
+ int i, r;
+
+ if (offset & 0x3F) {
+ DRM_ERROR("UVD messages must be 64 byte aligned!\n");
+ return -EINVAL;
+ }
+
+ r = radeon_bo_kmap(bo, &ptr);
+ if (r)
+ return r;
+
+ msg = ptr + offset;
+
+ msg_type = msg[1];
+ handle = msg[2];
+
+ if (handle == 0) {
+ DRM_ERROR("Invalid UVD handle!\n");
+ return -EINVAL;
+ }
+
+ if (msg_type == 1) {
+ /* it's a decode msg, calc buffer sizes */
+ r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
+ radeon_bo_kunmap(bo);
+ if (r)
+ return r;
+
+ } else if (msg_type == 2) {
+ /* it's a destroy msg, free the handle */
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
+ atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
+ radeon_bo_kunmap(bo);
+ return 0;
+ } else {
+ /* it's a create msg, no special handling needed */
+ radeon_bo_kunmap(bo);
+ }
+
+ /* create or decode, validate the handle */
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
+ return 0;
+ }
+
+	/* handle not found, try to alloc a new one */
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
+ p->rdev->uvd.filp[i] = p->filp;
+ return 0;
+ }
+ }
+
+ DRM_ERROR("No more free UVD handles!\n");
+ return -EINVAL;
+}
+
+static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
+ int data0, int data1,
+ unsigned buf_sizes[])
+{
+ struct radeon_cs_chunk *relocs_chunk;
+ struct radeon_cs_reloc *reloc;
+ unsigned idx, cmd, offset;
+ uint64_t start, end;
+ int r;
+
+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ offset = radeon_get_ib_value(p, data0);
+ idx = radeon_get_ib_value(p, data1);
+ if (idx >= relocs_chunk->length_dw) {
+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+ idx, relocs_chunk->length_dw);
+ return -EINVAL;
+ }
+
+ reloc = p->relocs_ptr[(idx / 4)];
+ start = reloc->lobj.gpu_offset;
+ end = start + radeon_bo_size(reloc->robj);
+ start += offset;
+
+ p->ib.ptr[data0] = start & 0xFFFFFFFF;
+ p->ib.ptr[data1] = start >> 32;
+
+ cmd = radeon_get_ib_value(p, p->idx) >> 1;
+
+ if (cmd < 0x4) {
+ if ((end - start) < buf_sizes[cmd]) {
+			DRM_ERROR("buffer too small (%d / %d)!\n",
+ (unsigned)(end - start), buf_sizes[cmd]);
+ return -EINVAL;
+ }
+
+ } else if (cmd != 0x100) {
+ DRM_ERROR("invalid UVD command %X!\n", cmd);
+ return -EINVAL;
+ }
+
+ if ((start >> 28) != (end >> 28)) {
+ DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
+ start, end);
+ return -EINVAL;
+ }
+
+ /* TODO: is this still necessary on NI+ ? */
+ if ((cmd == 0 || cmd == 0x3) &&
+ (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
+ DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
+ start, end);
+ return -EINVAL;
+ }
+
+ if (cmd == 0) {
+ r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ int *data0, int *data1,
+ unsigned buf_sizes[])
+{
+ int i, r;
+
+ p->idx++;
+ for (i = 0; i <= pkt->count; ++i) {
+ switch (pkt->reg + i*4) {
+ case UVD_GPCOM_VCPU_DATA0:
+ *data0 = p->idx;
+ break;
+ case UVD_GPCOM_VCPU_DATA1:
+ *data1 = p->idx;
+ break;
+ case UVD_GPCOM_VCPU_CMD:
+ r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes);
+ if (r)
+ return r;
+ break;
+ case UVD_ENGINE_CNTL:
+ break;
+ default:
+ DRM_ERROR("Invalid reg 0x%X!\n",
+ pkt->reg + i*4);
+ return -EINVAL;
+ }
+ p->idx++;
+ }
+ return 0;
+}
+
+int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet pkt;
+ int r, data0 = 0, data1 = 0;
+
+ /* minimum buffer sizes */
+ unsigned buf_sizes[] = {
+ [0x00000000] = 2048,
+ [0x00000001] = 32 * 1024 * 1024,
+ [0x00000002] = 2048 * 1152 * 3,
+ [0x00000003] = 2048,
+ };
+
+ if (p->chunks[p->chunk_ib_idx].length_dw % 16) {
+ DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
+ p->chunks[p->chunk_ib_idx].length_dw);
+ return -EINVAL;
+ }
+
+ if (p->chunk_relocs_idx == -1) {
+ DRM_ERROR("No relocation chunk !\n");
+ return -EINVAL;
+ }
+
+
+ do {
+ r = radeon_cs_packet_parse(p, &pkt, p->idx);
+ if (r)
+ return r;
+ switch (pkt.type) {
+ case RADEON_PACKET_TYPE0:
+ r = radeon_uvd_cs_reg(p, &pkt, &data0,
+ &data1, buf_sizes);
+ if (r)
+ return r;
+ break;
+ case RADEON_PACKET_TYPE2:
+ p->idx += pkt.count + 2;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+ return -EINVAL;
+ }
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ return 0;
+}
+
+static int radeon_uvd_send_msg(struct radeon_device *rdev,
+ int ring, struct radeon_bo *bo,
+ struct radeon_fence **fence)
+{
+ struct ttm_validate_buffer tv;
+ struct list_head head;
+ struct radeon_ib ib;
+ uint64_t addr;
+ int i, r;
+
+ memset(&tv, 0, sizeof(tv));
+ tv.bo = &bo->tbo;
+
+ INIT_LIST_HEAD(&head);
+ list_add(&tv.head, &head);
+
+ r = ttm_eu_reserve_buffers(&head);
+ if (r)
+ return r;
+
+ radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM);
+ radeon_uvd_force_into_uvd_segment(bo);
+
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ if (r) {
+ ttm_eu_backoff_reservation(&head);
+ return r;
+ }
+
+ r = radeon_ib_get(rdev, ring, &ib, NULL, 16);
+ if (r) {
+ ttm_eu_backoff_reservation(&head);
+ return r;
+ }
+
+ addr = radeon_bo_gpu_offset(bo);
+ ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
+ ib.ptr[1] = addr;
+ ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
+ ib.ptr[3] = addr >> 32;
+ ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
+ ib.ptr[5] = 0;
+ for (i = 6; i < 16; ++i)
+ ib.ptr[i] = PACKET2(0);
+ ib.length_dw = 16;
+
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r) {
+ ttm_eu_backoff_reservation(&head);
+ return r;
+ }
+ ttm_eu_fence_buffer_objects(&head, ib.fence);
+
+ if (fence)
+ *fence = radeon_fence_ref(ib.fence);
+
+ radeon_ib_free(rdev, &ib);
+ radeon_bo_unref(&bo);
+ return 0;
+}
+
+/* multiple fence commands without any stream commands in between can
+ crash the vcpu so just try to emit a dummy create/destroy msg to
+ avoid this */
+int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
+ uint32_t handle, struct radeon_fence **fence)
+{
+ struct radeon_bo *bo;
+ uint32_t *msg;
+ int r, i;
+
+ r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
+ if (r)
+ return r;
+
+ r = radeon_bo_reserve(bo, false);
+ if (r) {
+ radeon_bo_unref(&bo);
+ return r;
+ }
+
+ r = radeon_bo_kmap(bo, (void **)&msg);
+ if (r) {
+ radeon_bo_unreserve(bo);
+ radeon_bo_unref(&bo);
+ return r;
+ }
+
+	/* stitch together a UVD create msg */
+ msg[0] = 0x00000de4;
+ msg[1] = 0x00000000;
+ msg[2] = handle;
+ msg[3] = 0x00000000;
+ msg[4] = 0x00000000;
+ msg[5] = 0x00000000;
+ msg[6] = 0x00000000;
+ msg[7] = 0x00000780;
+ msg[8] = 0x00000440;
+ msg[9] = 0x00000000;
+ msg[10] = 0x01b37000;
+ for (i = 11; i < 1024; ++i)
+ msg[i] = 0x0;
+
+ radeon_bo_kunmap(bo);
+ radeon_bo_unreserve(bo);
+
+ return radeon_uvd_send_msg(rdev, ring, bo, fence);
+}
+
+int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
+ uint32_t handle, struct radeon_fence **fence)
+{
+ struct radeon_bo *bo;
+ uint32_t *msg;
+ int r, i;
+
+ r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
+ if (r)
+ return r;
+
+ r = radeon_bo_reserve(bo, false);
+ if (r) {
+ radeon_bo_unref(&bo);
+ return r;
+ }
+
+ r = radeon_bo_kmap(bo, (void **)&msg);
+ if (r) {
+ radeon_bo_unreserve(bo);
+ radeon_bo_unref(&bo);
+ return r;
+ }
+
+	/* stitch together a UVD destroy msg */
+ msg[0] = 0x00000de4;
+ msg[1] = 0x00000002;
+ msg[2] = handle;
+ msg[3] = 0x00000000;
+ for (i = 4; i < 1024; ++i)
+ msg[i] = 0x0;
+
+ radeon_bo_kunmap(bo);
+ radeon_bo_unreserve(bo);
+
+ return radeon_uvd_send_msg(rdev, ring, bo, fence);
+}
+
+static void radeon_uvd_idle_work_handler(struct work_struct *work)
+{
+ struct radeon_device *rdev =
+ container_of(work, struct radeon_device, uvd.idle_work.work);
+
+ if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0)
+ radeon_set_uvd_clocks(rdev, 0, 0);
+ else
+ schedule_delayed_work(&rdev->uvd.idle_work,
+ msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+}
+
+void radeon_uvd_note_usage(struct radeon_device *rdev)
+{
+ bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
+ set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
+ msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+ if (set_clocks)
+ radeon_set_uvd_clocks(rdev, 53300, 40000);
+}
+
+static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
+ unsigned target_freq,
+ unsigned pd_min,
+ unsigned pd_even)
+{
+ unsigned post_div = vco_freq / target_freq;
+
+ /* adjust to post divider minimum value */
+ if (post_div < pd_min)
+ post_div = pd_min;
+
+	/* we always need a frequency less than or equal to the target */
+ if ((vco_freq / post_div) > target_freq)
+ post_div += 1;
+
+ /* post dividers above a certain value must be even */
+ if (post_div > pd_even && post_div % 2)
+ post_div += 1;
+
+ return post_div;
+}
+
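+The helper above applies three rounding rules: never go below pd_min, round up so the divided VCO never exceeds the target, and force dividers above pd_even to be even. A host-side copy with made-up example frequencies (assuming the driver's 10 kHz units) shows the effect; it is illustrative only.
+
+#include <stdio.h>
+
+/* Host-side copy of radeon_uvd_calc_upll_post_div, to show the rounding rules. */
+static unsigned calc_post_div(unsigned vco_freq, unsigned target_freq,
+			      unsigned pd_min, unsigned pd_even)
+{
+	unsigned post_div = vco_freq / target_freq;
+
+	if (post_div < pd_min)
+		post_div = pd_min;			/* respect the minimum divider */
+	if ((vco_freq / post_div) > target_freq)
+		post_div += 1;				/* never end up above the target */
+	if (post_div > pd_even && post_div % 2)
+		post_div += 1;				/* large dividers must be even */
+	return post_div;
+}
+
+int main(void)
+{
+	/* 800 MHz VCO, 533 MHz VCLK target, pd_min = 1, even above 30 */
+	printf("post_div = %u\n", calc_post_div(80000, 53300, 1, 30));	/* prints 2 */
+	return 0;
+}
+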
+/**
+ * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
+ *
+ * @rdev: radeon_device pointer
+ * @vclk: wanted VCLK
+ * @dclk: wanted DCLK
+ * @vco_min: minimum VCO frequency
+ * @vco_max: maximum VCO frequency
+ * @fb_factor: factor to multiply vco freq with
+ * @fb_mask: limit and bitmask for feedback divider
+ * @pd_min: post divider minimum
+ * @pd_max: post divider maximum
+ * @pd_even: post divider must be even above this value
+ * @optimal_fb_div: resulting feedback divider
+ * @optimal_vclk_div: resulting vclk post divider
+ * @optimal_dclk_div: resulting dclk post divider
+ *
+ * Calculate dividers for UVD's UPLL (R6xx-SI, except APUs).
+ * Returns zero on success, -EINVAL on error.
+ */
+int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
+ unsigned vclk, unsigned dclk,
+ unsigned vco_min, unsigned vco_max,
+ unsigned fb_factor, unsigned fb_mask,
+ unsigned pd_min, unsigned pd_max,
+ unsigned pd_even,
+ unsigned *optimal_fb_div,
+ unsigned *optimal_vclk_div,
+ unsigned *optimal_dclk_div)
+{
+ unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;
+
+ /* start off with something large */
+ unsigned optimal_score = ~0;
+
+ /* loop through vco from low to high */
+ vco_min = max(max(vco_min, vclk), dclk);
+ for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {
+
+ uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
+ unsigned vclk_div, dclk_div, score;
+
+ do_div(fb_div, ref_freq);
+
+		/* fb div out of range? */
+ if (fb_div > fb_mask)
+			break; /* it can only get worse */
+
+ fb_div &= fb_mask;
+
+ /* calc vclk divider with current vco freq */
+ vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
+ pd_min, pd_even);
+ if (vclk_div > pd_max)
+ break; /* vco is too big, it has to stop */
+
+ /* calc dclk divider with current vco freq */
+ dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
+ pd_min, pd_even);
+		if (dclk_div > pd_max)
+ break; /* vco is too big, it has to stop */
+
+ /* calc score with current vco freq */
+ score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);
+
+ /* determine if this vco setting is better than current optimal settings */
+ if (score < optimal_score) {
+ *optimal_fb_div = fb_div;
+ *optimal_vclk_div = vclk_div;
+ *optimal_dclk_div = dclk_div;
+ optimal_score = score;
+ if (optimal_score == 0)
+ break; /* it can't get better than this */
+ }
+ }
+
+	/* did we find a valid setup? */
+ if (optimal_score == ~0)
+ return -EINVAL;
+
+ return 0;
+}
+
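+The score used in the loop above is the combined undershoot of both clocks, score = (vclk - vco/vclk_div) + (dclk - vco/dclk_div), so 0 means both targets are hit exactly. As a worked example (ignoring the feedback-divider constraint, and assuming pd_min = 1 and pd_even = 30) with the 53300/40000 VCLK/DCLK pair that radeon_uvd_note_usage requests:
+
+  vco = 108000: vclk_div = 3 -> 36000, dclk_div = 3 -> 36000, score = 17300 + 4000 = 21300
+  vco = 106600: vclk_div = 2 -> 53300, dclk_div = 3 -> 35533, score = 0 + 4467 = 4467
+
+so the lower VCO wins here; raising the VCO does not necessarily improve the score, which is why the loop scans the whole range.
+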
+int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
+ unsigned cg_upll_func_cntl)
+{
+ unsigned i;
+
+ /* make sure UPLL_CTLREQ is deasserted */
+ WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
+
+ mdelay(10);
+
+ /* assert UPLL_CTLREQ */
+ WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
+
+ /* wait for CTLACK and CTLACK2 to get asserted */
+ for (i = 0; i < 100; ++i) {
+ uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
+ if ((RREG32(cg_upll_func_cntl) & mask) == mask)
+ break;
+ mdelay(10);
+ }
+
+ /* deassert UPLL_CTLREQ */
+ WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
+
+ if (i == 100) {
+ DRM_ERROR("Timeout setting UVD clocks!\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
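+
+The handshake above is bounded: 10 ms of settling after the initial deassert, then up to 100 polls spaced 10 ms apart waiting for both CTLACK bits, so a request gets roughly a second (10 ms + 100 x 10 ms = 1010 ms) before the function gives up with -ETIMEDOUT.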
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5a0fc74..46fa1b0 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -52,23 +52,59 @@ static const u32 crtc_offsets[2] =
AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};
+static bool avivo_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+ if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
+ return true;
+ else
+ return false;
+}
+
+static bool avivo_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+ u32 pos1, pos2;
+
+ pos1 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+ pos2 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+ if (pos1 != pos2)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * avivo_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (r5xx-r7xx).
+ */
void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
- int i;
+ unsigned i = 0;
if (crtc >= rdev->num_crtc)
return;
- if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN) {
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK))
+ if (!(RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN))
+ return;
+
+ /* depending on when we hit vblank, we may be close to active; if so,
+ * wait for another frame.
+ */
+ while (avivo_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!avivo_is_counter_moving(rdev, crtc))
break;
- udelay(1);
}
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
+ }
+
+ while (!avivo_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!avivo_is_counter_moving(rdev, crtc))
break;
- udelay(1);
}
}
}
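The rewritten wait above has two phases: first spin while the CRTC is still inside a vblank (so a call made near the end of a vblank does not return almost immediately), then spin until the next vblank begins; every 100 iterations it samples the position register twice and bails out if the counter has stopped moving, which keeps a disabled or hung CRTC from wedging the loop. A self-contained host-side sketch of the same shape follows; the fake scanline counter and timing constants are assumptions standing in for the real position register.

#include <stdbool.h>
#include <stdio.h>

#define V_TOTAL  1125	/* assumed total lines per frame */
#define V_ACTIVE 1080	/* assumed active lines */

static unsigned pos;	/* fake free-running scanline counter */

static unsigned read_position(void) { return pos++ % V_TOTAL; }
static bool in_vblank(void)         { return read_position() >= V_ACTIVE; }
static bool counter_moving(void)
{
	unsigned a = read_position(), b = read_position();
	return a != b;
}

int main(void)
{
	unsigned i = 0;

	while (in_vblank())		/* phase 1: leave the vblank we may be in */
		if (i++ % 100 == 0 && !counter_moving())
			break;
	while (!in_vblank())		/* phase 2: wait for the next vblank to start */
		if (i++ % 100 == 0 && !counter_moving())
			break;
	printf("reached vblank after %u polls\n", i);
	return 0;
}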
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 5706d2a..ab4c86c 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -148,6 +148,8 @@ void rs690_pm_info(struct radeon_device *rdev)
static void rs690_mc_init(struct radeon_device *rdev)
{
u64 base;
+ uint32_t h_addr, l_addr;
+ unsigned long long k8_addr;
rs400_gart_adjust_size(rdev);
rdev->mc.vram_is_ddr = true;
@@ -160,6 +162,27 @@ static void rs690_mc_init(struct radeon_device *rdev)
base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
base = G_000100_MC_FB_START(base) << 16;
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+
+ /* Use K8 direct mapping for fast fb access. */
+ rdev->fastfb_working = false;
+ h_addr = G_00005F_K8_ADDR_EXT(RREG32_MC(R_00005F_MC_MISC_UMA_CNTL));
+ l_addr = RREG32_MC(R_00001E_K8_FB_LOCATION);
+ k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
+#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
+ if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
+#endif
+ {
+ /* FastFB shall be used with UMA memory. Here it is simply disabled when sideport
+ * memory is present.
+ */
+ if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
+ DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
+ (unsigned long long)rdev->mc.aper_base, k8_addr);
+ rdev->mc.aper_base = (resource_size_t)k8_addr;
+ rdev->fastfb_working = true;
+ }
+ }
+
rs690_pm_info(rdev);
radeon_vram_location(rdev, &rdev->mc, base);
rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
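On these UMA IGPs the framebuffer lives in system memory, so the hunk above rebuilds its 40-bit host address from two MC registers: K8_FB_LOCATION supplies bits 31:0 and the K8_ADDR_EXT field of MC_MISC_UMA_CNTL supplies bits 39:32. If no sideport memory is present (and, on 32-bit non-PAE kernels, the mapping plus visible VRAM fits below 4 GB), the PCI aperture base is replaced with that direct mapping. A tiny sketch of the address composition, with made-up register values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* made-up register readings, for illustration only */
	uint32_t l_addr = 0xc0000000;	/* K8_FB_LOCATION: bits 31:0  */
	uint32_t h_addr = 0x01;		/* K8_ADDR_EXT:    bits 39:32 */
	unsigned long long k8_addr = ((unsigned long long)h_addr << 32) | l_addr;

	printf("k8_addr = 0x%llx\n", k8_addr);	/* 0x1c0000000 */
	return 0;
}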
diff --git a/drivers/gpu/drm/radeon/rs690d.h b/drivers/gpu/drm/radeon/rs690d.h
index 36e6398..8af3ccf 100644
--- a/drivers/gpu/drm/radeon/rs690d.h
+++ b/drivers/gpu/drm/radeon/rs690d.h
@@ -29,6 +29,9 @@
#define __RS690D_H__
/* Registers */
+#define R_00001E_K8_FB_LOCATION 0x00001E
+#define R_00005F_MC_MISC_UMA_CNTL 0x00005F
+#define G_00005F_K8_ADDR_EXT(x) (((x) >> 0) & 0xFF)
#define R_000078_MC_INDEX 0x000078
#define S_000078_MC_IND_ADDR(x) (((x) & 0x1FF) << 0)
#define G_000078_MC_IND_ADDR(x) (((x) >> 0) & 0x1FF)
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 435ed35..ffcba73 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -303,8 +303,10 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) {
radeon_wait_for_vblank(rdev, i);
+ WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
/* wait for the next frame */
frame_count = radeon_get_vblank_counter(rdev, i);
@@ -313,6 +315,15 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
break;
udelay(1);
}
+
+ /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+ WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~AVIVO_CRTC_EN;
+ WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ save->crtc_enabled[i] = false;
+ /* ***** */
} else {
save->crtc_enabled[i] = false;
}
@@ -338,6 +349,22 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
}
/* wait for the MC to settle */
udelay(100);
+
+ /* lock double buffered regs */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+ if (!(tmp & AVIVO_D1GRPH_UPDATE_LOCK)) {
+ tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (!(tmp & 1)) {
+ tmp |= 1;
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ }
+ }
}
void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
@@ -348,7 +375,7 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
/* update crtc base addresses */
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->family >= CHIP_RV770) {
- if (i == 1) {
+ if (i == 0) {
WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
upper_32_bits(rdev->mc.vram_start));
WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
@@ -367,6 +394,33 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
}
WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+ /* unlock regs and wait for update */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
+ if ((tmp & 0x3) != 0) {
+ tmp &= ~0x3;
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+ if (tmp & AVIVO_D1GRPH_UPDATE_LOCK) {
+ tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (tmp & 1) {
+ tmp &= ~1;
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+ if ((tmp & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING) == 0)
+ break;
+ udelay(1);
+ }
+ }
+ }
+
if (rdev->family >= CHIP_R600) {
/* unblackout the MC */
if (rdev->family >= CHIP_RV770)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index d63fe1d..83f612a 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -42,6 +42,739 @@
static void rv770_gpu_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
+int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+
+int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+ unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
+ int r;
+
+ /* RV740 uses evergreen uvd clk programming */
+ if (rdev->family == CHIP_RV740)
+ return evergreen_set_uvd_clocks(rdev, vclk, dclk);
+
+ /* bypass vclk and dclk with bclk */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+ ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+ if (!vclk || !dclk) {
+ /* keep the Bypass mode, put PLL to sleep */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+ return 0;
+ }
+
+ r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
+ 43663, 0x03FFFFFE, 1, 30, ~0,
+ &fb_div, &vclk_div, &dclk_div);
+ if (r)
+ return r;
+
+ fb_div |= 1;
+ vclk_div -= 1;
+ dclk_div -= 1;
+
+ /* set UPLL_FB_DIV to 0x50000 */
+ WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(0x50000), ~UPLL_FB_DIV_MASK);
+
+ /* deassert UPLL_RESET and UPLL_SLEEP */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~(UPLL_RESET_MASK | UPLL_SLEEP_MASK));
+
+ /* assert BYPASS EN and FB_DIV[0] <- ??? why? */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
+ WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(1), ~UPLL_FB_DIV(1));
+
+ r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+ if (r)
+ return r;
+
+ /* assert PLL_RESET */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+
+	/* set the required FB_DIV, REF_DIV, post divider values */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REF_DIV(1), ~UPLL_REF_DIV_MASK);
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ UPLL_SW_HILEN(vclk_div >> 1) |
+ UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
+ UPLL_SW_HILEN2(dclk_div >> 1) |
+ UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)),
+ ~UPLL_SW_MASK);
+
+ WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div),
+ ~UPLL_FB_DIV_MASK);
+
+ /* give the PLL some time to settle */
+ mdelay(15);
+
+ /* deassert PLL_RESET */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+ mdelay(15);
+
+ /* deassert BYPASS EN and FB_DIV[0] <- ??? why? */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+ WREG32_P(CG_UPLL_FUNC_CNTL_3, 0, ~UPLL_FB_DIV(1));
+
+ r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+ if (r)
+ return r;
+
+ /* switch VCLK and DCLK selection */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+ ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+ mdelay(100);
+
+ return 0;
+}
+
+static const u32 r7xx_golden_registers[] =
+{
+ 0x8d00, 0xffffffff, 0x0e0e0074,
+ 0x8d04, 0xffffffff, 0x013a2b34,
+ 0x9508, 0xffffffff, 0x00000002,
+ 0x8b20, 0xffffffff, 0,
+ 0x88c4, 0xffffffff, 0x000000c2,
+ 0x28350, 0xffffffff, 0,
+ 0x9058, 0xffffffff, 0x0fffc40f,
+ 0x240c, 0xffffffff, 0x00000380,
+ 0x733c, 0xffffffff, 0x00000002,
+ 0x2650, 0x00040000, 0,
+ 0x20bc, 0x00040000, 0,
+ 0x7300, 0xffffffff, 0x001000f0
+};
+
+static const u32 r7xx_golden_dyn_gpr_registers[] =
+{
+ 0x8db0, 0xffffffff, 0x98989898,
+ 0x8db4, 0xffffffff, 0x98989898,
+ 0x8db8, 0xffffffff, 0x98989898,
+ 0x8dbc, 0xffffffff, 0x98989898,
+ 0x8dc0, 0xffffffff, 0x98989898,
+ 0x8dc4, 0xffffffff, 0x98989898,
+ 0x8dc8, 0xffffffff, 0x98989898,
+ 0x8dcc, 0xffffffff, 0x98989898,
+ 0x88c4, 0xffffffff, 0x00000082
+};
+
+static const u32 rv770_golden_registers[] =
+{
+ 0x562c, 0xffffffff, 0,
+ 0x3f90, 0xffffffff, 0,
+ 0x9148, 0xffffffff, 0,
+ 0x3f94, 0xffffffff, 0,
+ 0x914c, 0xffffffff, 0,
+ 0x9698, 0x18000000, 0x18000000
+};
+
+static const u32 rv770ce_golden_registers[] =
+{
+ 0x562c, 0xffffffff, 0,
+ 0x3f90, 0xffffffff, 0x00cc0000,
+ 0x9148, 0xffffffff, 0x00cc0000,
+ 0x3f94, 0xffffffff, 0x00cc0000,
+ 0x914c, 0xffffffff, 0x00cc0000,
+ 0x9b7c, 0xffffffff, 0x00fa0000,
+ 0x3f8c, 0xffffffff, 0x00fa0000,
+ 0x9698, 0x18000000, 0x18000000
+};
+
+static const u32 rv770_mgcg_init[] =
+{
+ 0x8bcc, 0xffffffff, 0x130300f9,
+ 0x5448, 0xffffffff, 0x100,
+ 0x55e4, 0xffffffff, 0x100,
+ 0x160c, 0xffffffff, 0x100,
+ 0x5644, 0xffffffff, 0x100,
+ 0xc164, 0xffffffff, 0x100,
+ 0x8a18, 0xffffffff, 0x100,
+ 0x897c, 0xffffffff, 0x8000100,
+ 0x8b28, 0xffffffff, 0x3c000100,
+ 0x9144, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10000,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10001,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10002,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10003,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x0,
+ 0x9870, 0xffffffff, 0x100,
+ 0x8d58, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x0,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x1,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x2,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x3,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x4,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x5,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x6,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x7,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x8,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x9,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x8000,
+ 0x9490, 0xffffffff, 0x0,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x1,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x2,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x3,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x4,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x5,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x6,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x7,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x8,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x9,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x8000,
+ 0x9604, 0xffffffff, 0x0,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x1,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x2,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x3,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x4,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x5,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x6,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x7,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x8,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x9,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x80000000,
+ 0x9030, 0xffffffff, 0x100,
+ 0x9034, 0xffffffff, 0x100,
+ 0x9038, 0xffffffff, 0x100,
+ 0x903c, 0xffffffff, 0x100,
+ 0x9040, 0xffffffff, 0x100,
+ 0xa200, 0xffffffff, 0x100,
+ 0xa204, 0xffffffff, 0x100,
+ 0xa208, 0xffffffff, 0x100,
+ 0xa20c, 0xffffffff, 0x100,
+ 0x971c, 0xffffffff, 0x100,
+ 0x915c, 0xffffffff, 0x00020001,
+ 0x9160, 0xffffffff, 0x00040003,
+ 0x916c, 0xffffffff, 0x00060005,
+ 0x9170, 0xffffffff, 0x00080007,
+ 0x9174, 0xffffffff, 0x000a0009,
+ 0x9178, 0xffffffff, 0x000c000b,
+ 0x917c, 0xffffffff, 0x000e000d,
+ 0x9180, 0xffffffff, 0x0010000f,
+ 0x918c, 0xffffffff, 0x00120011,
+ 0x9190, 0xffffffff, 0x00140013,
+ 0x9194, 0xffffffff, 0x00020001,
+ 0x9198, 0xffffffff, 0x00040003,
+ 0x919c, 0xffffffff, 0x00060005,
+ 0x91a8, 0xffffffff, 0x00080007,
+ 0x91ac, 0xffffffff, 0x000a0009,
+ 0x91b0, 0xffffffff, 0x000c000b,
+ 0x91b4, 0xffffffff, 0x000e000d,
+ 0x91b8, 0xffffffff, 0x0010000f,
+ 0x91c4, 0xffffffff, 0x00120011,
+ 0x91c8, 0xffffffff, 0x00140013,
+ 0x91cc, 0xffffffff, 0x00020001,
+ 0x91d0, 0xffffffff, 0x00040003,
+ 0x91d4, 0xffffffff, 0x00060005,
+ 0x91e0, 0xffffffff, 0x00080007,
+ 0x91e4, 0xffffffff, 0x000a0009,
+ 0x91e8, 0xffffffff, 0x000c000b,
+ 0x91ec, 0xffffffff, 0x00020001,
+ 0x91f0, 0xffffffff, 0x00040003,
+ 0x91f4, 0xffffffff, 0x00060005,
+ 0x9200, 0xffffffff, 0x00080007,
+ 0x9204, 0xffffffff, 0x000a0009,
+ 0x9208, 0xffffffff, 0x000c000b,
+ 0x920c, 0xffffffff, 0x000e000d,
+ 0x9210, 0xffffffff, 0x0010000f,
+ 0x921c, 0xffffffff, 0x00120011,
+ 0x9220, 0xffffffff, 0x00140013,
+ 0x9224, 0xffffffff, 0x00020001,
+ 0x9228, 0xffffffff, 0x00040003,
+ 0x922c, 0xffffffff, 0x00060005,
+ 0x9238, 0xffffffff, 0x00080007,
+ 0x923c, 0xffffffff, 0x000a0009,
+ 0x9240, 0xffffffff, 0x000c000b,
+ 0x9244, 0xffffffff, 0x000e000d,
+ 0x9248, 0xffffffff, 0x0010000f,
+ 0x9254, 0xffffffff, 0x00120011,
+ 0x9258, 0xffffffff, 0x00140013,
+ 0x925c, 0xffffffff, 0x00020001,
+ 0x9260, 0xffffffff, 0x00040003,
+ 0x9264, 0xffffffff, 0x00060005,
+ 0x9270, 0xffffffff, 0x00080007,
+ 0x9274, 0xffffffff, 0x000a0009,
+ 0x9278, 0xffffffff, 0x000c000b,
+ 0x927c, 0xffffffff, 0x000e000d,
+ 0x9280, 0xffffffff, 0x0010000f,
+ 0x928c, 0xffffffff, 0x00120011,
+ 0x9290, 0xffffffff, 0x00140013,
+ 0x9294, 0xffffffff, 0x00020001,
+ 0x929c, 0xffffffff, 0x00040003,
+ 0x92a0, 0xffffffff, 0x00060005,
+ 0x92a4, 0xffffffff, 0x00080007
+};
+
+static const u32 rv710_golden_registers[] =
+{
+ 0x3f90, 0x00ff0000, 0x00fc0000,
+ 0x9148, 0x00ff0000, 0x00fc0000,
+ 0x3f94, 0x00ff0000, 0x00fc0000,
+ 0x914c, 0x00ff0000, 0x00fc0000,
+ 0xb4c, 0x00000020, 0x00000020,
+ 0xa180, 0xffffffff, 0x00003f3f
+};
+
+static const u32 rv710_mgcg_init[] =
+{
+ 0x8bcc, 0xffffffff, 0x13030040,
+ 0x5448, 0xffffffff, 0x100,
+ 0x55e4, 0xffffffff, 0x100,
+ 0x160c, 0xffffffff, 0x100,
+ 0x5644, 0xffffffff, 0x100,
+ 0xc164, 0xffffffff, 0x100,
+ 0x8a18, 0xffffffff, 0x100,
+ 0x897c, 0xffffffff, 0x8000100,
+ 0x8b28, 0xffffffff, 0x3c000100,
+ 0x9144, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10000,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x0,
+ 0x9870, 0xffffffff, 0x100,
+ 0x8d58, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x0,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x1,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x8000,
+ 0x9490, 0xffffffff, 0x0,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x1,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x8000,
+ 0x9604, 0xffffffff, 0x0,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x1,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x80000000,
+ 0x9030, 0xffffffff, 0x100,
+ 0x9034, 0xffffffff, 0x100,
+ 0x9038, 0xffffffff, 0x100,
+ 0x903c, 0xffffffff, 0x100,
+ 0x9040, 0xffffffff, 0x100,
+ 0xa200, 0xffffffff, 0x100,
+ 0xa204, 0xffffffff, 0x100,
+ 0xa208, 0xffffffff, 0x100,
+ 0xa20c, 0xffffffff, 0x100,
+ 0x971c, 0xffffffff, 0x100,
+ 0x915c, 0xffffffff, 0x00020001,
+ 0x9174, 0xffffffff, 0x00000003,
+ 0x9178, 0xffffffff, 0x00050001,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x918c, 0xffffffff, 0x00000004,
+ 0x9190, 0xffffffff, 0x00070006,
+ 0x9194, 0xffffffff, 0x00050001,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x91a8, 0xffffffff, 0x00000004,
+ 0x91ac, 0xffffffff, 0x00070006,
+ 0x91e8, 0xffffffff, 0x00000001,
+ 0x9294, 0xffffffff, 0x00000001,
+ 0x929c, 0xffffffff, 0x00000002,
+ 0x92a0, 0xffffffff, 0x00040003,
+ 0x9150, 0xffffffff, 0x4d940000
+};
+
+static const u32 rv730_golden_registers[] =
+{
+ 0x3f90, 0x00ff0000, 0x00f00000,
+ 0x9148, 0x00ff0000, 0x00f00000,
+ 0x3f94, 0x00ff0000, 0x00f00000,
+ 0x914c, 0x00ff0000, 0x00f00000,
+ 0x900c, 0xffffffff, 0x003b033f,
+ 0xb4c, 0x00000020, 0x00000020,
+ 0xa180, 0xffffffff, 0x00003f3f
+};
+
+static const u32 rv730_mgcg_init[] =
+{
+ 0x8bcc, 0xffffffff, 0x130300f9,
+ 0x5448, 0xffffffff, 0x100,
+ 0x55e4, 0xffffffff, 0x100,
+ 0x160c, 0xffffffff, 0x100,
+ 0x5644, 0xffffffff, 0x100,
+ 0xc164, 0xffffffff, 0x100,
+ 0x8a18, 0xffffffff, 0x100,
+ 0x897c, 0xffffffff, 0x8000100,
+ 0x8b28, 0xffffffff, 0x3c000100,
+ 0x9144, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10000,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10001,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x0,
+ 0x9870, 0xffffffff, 0x100,
+ 0x8d58, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x0,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x1,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x2,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x3,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x4,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x5,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x6,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x7,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x8000,
+ 0x9490, 0xffffffff, 0x0,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x1,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x2,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x3,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x4,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x5,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x6,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x7,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x8000,
+ 0x9604, 0xffffffff, 0x0,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x1,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x2,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x3,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x4,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x5,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x6,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x7,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x80000000,
+ 0x9030, 0xffffffff, 0x100,
+ 0x9034, 0xffffffff, 0x100,
+ 0x9038, 0xffffffff, 0x100,
+ 0x903c, 0xffffffff, 0x100,
+ 0x9040, 0xffffffff, 0x100,
+ 0xa200, 0xffffffff, 0x100,
+ 0xa204, 0xffffffff, 0x100,
+ 0xa208, 0xffffffff, 0x100,
+ 0xa20c, 0xffffffff, 0x100,
+ 0x971c, 0xffffffff, 0x100,
+ 0x915c, 0xffffffff, 0x00020001,
+ 0x916c, 0xffffffff, 0x00040003,
+ 0x9170, 0xffffffff, 0x00000005,
+ 0x9178, 0xffffffff, 0x00050001,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x918c, 0xffffffff, 0x00000004,
+ 0x9190, 0xffffffff, 0x00070006,
+ 0x9194, 0xffffffff, 0x00050001,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x91a8, 0xffffffff, 0x00000004,
+ 0x91ac, 0xffffffff, 0x00070006,
+ 0x91b0, 0xffffffff, 0x00050001,
+ 0x91b4, 0xffffffff, 0x00030002,
+ 0x91c4, 0xffffffff, 0x00000004,
+ 0x91c8, 0xffffffff, 0x00070006,
+ 0x91cc, 0xffffffff, 0x00050001,
+ 0x91d0, 0xffffffff, 0x00030002,
+ 0x91e0, 0xffffffff, 0x00000004,
+ 0x91e4, 0xffffffff, 0x00070006,
+ 0x91e8, 0xffffffff, 0x00000001,
+ 0x91ec, 0xffffffff, 0x00050001,
+ 0x91f0, 0xffffffff, 0x00030002,
+ 0x9200, 0xffffffff, 0x00000004,
+ 0x9204, 0xffffffff, 0x00070006,
+ 0x9208, 0xffffffff, 0x00050001,
+ 0x920c, 0xffffffff, 0x00030002,
+ 0x921c, 0xffffffff, 0x00000004,
+ 0x9220, 0xffffffff, 0x00070006,
+ 0x9224, 0xffffffff, 0x00050001,
+ 0x9228, 0xffffffff, 0x00030002,
+ 0x9238, 0xffffffff, 0x00000004,
+ 0x923c, 0xffffffff, 0x00070006,
+ 0x9240, 0xffffffff, 0x00050001,
+ 0x9244, 0xffffffff, 0x00030002,
+ 0x9254, 0xffffffff, 0x00000004,
+ 0x9258, 0xffffffff, 0x00070006,
+ 0x9294, 0xffffffff, 0x00000001,
+ 0x929c, 0xffffffff, 0x00000002,
+ 0x92a0, 0xffffffff, 0x00040003,
+ 0x92a4, 0xffffffff, 0x00000005
+};
+
+static const u32 rv740_golden_registers[] =
+{
+ 0x88c4, 0xffffffff, 0x00000082,
+ 0x28a50, 0xfffffffc, 0x00000004,
+ 0x2650, 0x00040000, 0,
+ 0x20bc, 0x00040000, 0,
+ 0x733c, 0xffffffff, 0x00000002,
+ 0x7300, 0xffffffff, 0x001000f0,
+ 0x3f90, 0x00ff0000, 0,
+ 0x9148, 0x00ff0000, 0,
+ 0x3f94, 0x00ff0000, 0,
+ 0x914c, 0x00ff0000, 0,
+ 0x240c, 0xffffffff, 0x00000380,
+ 0x8a14, 0x00000007, 0x00000007,
+ 0x8b24, 0xffffffff, 0x00ff0fff,
+ 0x28a4c, 0xffffffff, 0x00004000,
+ 0xa180, 0xffffffff, 0x00003f3f,
+ 0x8d00, 0xffffffff, 0x0e0e003a,
+ 0x8d04, 0xffffffff, 0x013a0e2a,
+ 0x8c00, 0xffffffff, 0xe400000f,
+ 0x8db0, 0xffffffff, 0x98989898,
+ 0x8db4, 0xffffffff, 0x98989898,
+ 0x8db8, 0xffffffff, 0x98989898,
+ 0x8dbc, 0xffffffff, 0x98989898,
+ 0x8dc0, 0xffffffff, 0x98989898,
+ 0x8dc4, 0xffffffff, 0x98989898,
+ 0x8dc8, 0xffffffff, 0x98989898,
+ 0x8dcc, 0xffffffff, 0x98989898,
+ 0x9058, 0xffffffff, 0x0fffc40f,
+ 0x900c, 0xffffffff, 0x003b033f,
+ 0x28350, 0xffffffff, 0,
+ 0x8cf0, 0x1fffffff, 0x08e00420,
+ 0x9508, 0xffffffff, 0x00000002,
+ 0x88c4, 0xffffffff, 0x000000c2,
+ 0x9698, 0x18000000, 0x18000000
+};
+
+static const u32 rv740_mgcg_init[] =
+{
+ 0x8bcc, 0xffffffff, 0x13030100,
+ 0x5448, 0xffffffff, 0x100,
+ 0x55e4, 0xffffffff, 0x100,
+ 0x160c, 0xffffffff, 0x100,
+ 0x5644, 0xffffffff, 0x100,
+ 0xc164, 0xffffffff, 0x100,
+ 0x8a18, 0xffffffff, 0x100,
+ 0x897c, 0xffffffff, 0x100,
+ 0x8b28, 0xffffffff, 0x100,
+ 0x9144, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10000,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10001,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10002,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10003,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x0,
+ 0x9870, 0xffffffff, 0x100,
+ 0x8d58, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x0,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x1,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x2,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x3,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x4,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x5,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x6,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x7,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x8000,
+ 0x9490, 0xffffffff, 0x0,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x1,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x2,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x3,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x4,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x5,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x6,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x7,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x8000,
+ 0x9604, 0xffffffff, 0x0,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x1,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x2,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x3,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x4,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x5,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x6,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x7,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x80000000,
+ 0x9030, 0xffffffff, 0x100,
+ 0x9034, 0xffffffff, 0x100,
+ 0x9038, 0xffffffff, 0x100,
+ 0x903c, 0xffffffff, 0x100,
+ 0x9040, 0xffffffff, 0x100,
+ 0xa200, 0xffffffff, 0x100,
+ 0xa204, 0xffffffff, 0x100,
+ 0xa208, 0xffffffff, 0x100,
+ 0xa20c, 0xffffffff, 0x100,
+ 0x971c, 0xffffffff, 0x100,
+ 0x915c, 0xffffffff, 0x00020001,
+ 0x9160, 0xffffffff, 0x00040003,
+ 0x916c, 0xffffffff, 0x00060005,
+ 0x9170, 0xffffffff, 0x00080007,
+ 0x9174, 0xffffffff, 0x000a0009,
+ 0x9178, 0xffffffff, 0x000c000b,
+ 0x917c, 0xffffffff, 0x000e000d,
+ 0x9180, 0xffffffff, 0x0010000f,
+ 0x918c, 0xffffffff, 0x00120011,
+ 0x9190, 0xffffffff, 0x00140013,
+ 0x9194, 0xffffffff, 0x00020001,
+ 0x9198, 0xffffffff, 0x00040003,
+ 0x919c, 0xffffffff, 0x00060005,
+ 0x91a8, 0xffffffff, 0x00080007,
+ 0x91ac, 0xffffffff, 0x000a0009,
+ 0x91b0, 0xffffffff, 0x000c000b,
+ 0x91b4, 0xffffffff, 0x000e000d,
+ 0x91b8, 0xffffffff, 0x0010000f,
+ 0x91c4, 0xffffffff, 0x00120011,
+ 0x91c8, 0xffffffff, 0x00140013,
+ 0x91cc, 0xffffffff, 0x00020001,
+ 0x91d0, 0xffffffff, 0x00040003,
+ 0x91d4, 0xffffffff, 0x00060005,
+ 0x91e0, 0xffffffff, 0x00080007,
+ 0x91e4, 0xffffffff, 0x000a0009,
+ 0x91e8, 0xffffffff, 0x000c000b,
+ 0x91ec, 0xffffffff, 0x00020001,
+ 0x91f0, 0xffffffff, 0x00040003,
+ 0x91f4, 0xffffffff, 0x00060005,
+ 0x9200, 0xffffffff, 0x00080007,
+ 0x9204, 0xffffffff, 0x000a0009,
+ 0x9208, 0xffffffff, 0x000c000b,
+ 0x920c, 0xffffffff, 0x000e000d,
+ 0x9210, 0xffffffff, 0x0010000f,
+ 0x921c, 0xffffffff, 0x00120011,
+ 0x9220, 0xffffffff, 0x00140013,
+ 0x9224, 0xffffffff, 0x00020001,
+ 0x9228, 0xffffffff, 0x00040003,
+ 0x922c, 0xffffffff, 0x00060005,
+ 0x9238, 0xffffffff, 0x00080007,
+ 0x923c, 0xffffffff, 0x000a0009,
+ 0x9240, 0xffffffff, 0x000c000b,
+ 0x9244, 0xffffffff, 0x000e000d,
+ 0x9248, 0xffffffff, 0x0010000f,
+ 0x9254, 0xffffffff, 0x00120011,
+ 0x9258, 0xffffffff, 0x00140013,
+ 0x9294, 0xffffffff, 0x00020001,
+ 0x929c, 0xffffffff, 0x00040003,
+ 0x92a0, 0xffffffff, 0x00060005,
+ 0x92a4, 0xffffffff, 0x00080007
+};
+
+static void rv770_init_golden_registers(struct radeon_device *rdev)
+{
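+ /* apply the per-ASIC "golden" (recommended) register settings */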
+ switch (rdev->family) {
+ case CHIP_RV770:
+ radeon_program_register_sequence(rdev,
+ r7xx_golden_registers,
+ (const u32)ARRAY_SIZE(r7xx_golden_registers));
+ radeon_program_register_sequence(rdev,
+ r7xx_golden_dyn_gpr_registers,
+ (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
+ if (rdev->pdev->device == 0x994e)
+ radeon_program_register_sequence(rdev,
+ rv770ce_golden_registers,
+ (const u32)ARRAY_SIZE(rv770ce_golden_registers));
+ else
+ radeon_program_register_sequence(rdev,
+ rv770_golden_registers,
+ (const u32)ARRAY_SIZE(rv770_golden_registers));
+ radeon_program_register_sequence(rdev,
+ rv770_mgcg_init,
+ (const u32)ARRAY_SIZE(rv770_mgcg_init));
+ break;
+ case CHIP_RV730:
+ radeon_program_register_sequence(rdev,
+ r7xx_golden_registers,
+ (const u32)ARRAY_SIZE(r7xx_golden_registers));
+ radeon_program_register_sequence(rdev,
+ r7xx_golden_dyn_gpr_registers,
+ (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
+ radeon_program_register_sequence(rdev,
+ rv730_golden_registers,
+ (const u32)ARRAY_SIZE(rv730_golden_registers));
+ radeon_program_register_sequence(rdev,
+ rv730_mgcg_init,
+ (const u32)ARRAY_SIZE(rv730_mgcg_init));
+ break;
+ case CHIP_RV710:
+ radeon_program_register_sequence(rdev,
+ r7xx_golden_registers,
+ (const u32)ARRAY_SIZE(r7xx_golden_registers));
+ radeon_program_register_sequence(rdev,
+ r7xx_golden_dyn_gpr_registers,
+ (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
+ radeon_program_register_sequence(rdev,
+ rv710_golden_registers,
+ (const u32)ARRAY_SIZE(rv710_golden_registers));
+ radeon_program_register_sequence(rdev,
+ rv710_mgcg_init,
+ (const u32)ARRAY_SIZE(rv710_mgcg_init));
+ break;
+ case CHIP_RV740:
+ radeon_program_register_sequence(rdev,
+ rv740_golden_registers,
+ (const u32)ARRAY_SIZE(rv740_golden_registers));
+ radeon_program_register_sequence(rdev,
+ rv740_mgcg_init,
+ (const u32)ARRAY_SIZE(rv740_mgcg_init));
+ break;
+ default:
+ break;
+ }
+}
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
@@ -68,6 +801,105 @@ u32 rv770_get_xclk(struct radeon_device *rdev)
return reference_clock;
}
+int rv770_uvd_resume(struct radeon_device *rdev)
+{
+ uint64_t addr;
+ uint32_t chip_id, size;
+ int r;
+
+ r = radeon_uvd_resume(rdev);
+ if (r)
+ return r;
+
+ /* program the VCPU memory controller bits 0-27 (offsets and sizes in 8-byte units) */
+ addr = rdev->uvd.gpu_addr >> 3;
+ size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE0, size);
+
+ addr += size;
+ size = RADEON_UVD_STACK_SIZE >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE1, size);
+
+ addr += size;
+ size = RADEON_UVD_HEAP_SIZE >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE2, size);
+
+ /* bits 28-31 */
+ addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
+ WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
+
+ /* bits 32-39 */
+ addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
+ WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
+
+ /* tell firmware which hardware it is running on */
+ switch (rdev->family) {
+ default:
+ return -EINVAL;
+ case CHIP_RV710:
+ chip_id = 0x01000005;
+ break;
+ case CHIP_RV730:
+ chip_id = 0x01000006;
+ break;
+ case CHIP_RV740:
+ chip_id = 0x01000007;
+ break;
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ chip_id = 0x01000008;
+ break;
+ case CHIP_JUNIPER:
+ chip_id = 0x01000009;
+ break;
+ case CHIP_REDWOOD:
+ chip_id = 0x0100000a;
+ break;
+ case CHIP_CEDAR:
+ chip_id = 0x0100000b;
+ break;
+ case CHIP_SUMO:
+ chip_id = 0x0100000c;
+ break;
+ case CHIP_SUMO2:
+ chip_id = 0x0100000d;
+ break;
+ case CHIP_PALM:
+ chip_id = 0x0100000e;
+ break;
+ case CHIP_CAYMAN:
+ chip_id = 0x0100000f;
+ break;
+ case CHIP_BARTS:
+ chip_id = 0x01000010;
+ break;
+ case CHIP_TURKS:
+ chip_id = 0x01000011;
+ break;
+ case CHIP_CAICOS:
+ chip_id = 0x01000012;
+ break;
+ case CHIP_TAHITI:
+ chip_id = 0x01000014;
+ break;
+ case CHIP_VERDE:
+ chip_id = 0x01000015;
+ break;
+ case CHIP_PITCAIRN:
+ chip_id = 0x01000016;
+ break;
+ case CHIP_ARUBA:
+ chip_id = 0x01000017;
+ break;
+ }
+ WREG32(UVD_VCPU_CHIP_ID, chip_id);
+
+ return 0;
+}
+
u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
@@ -611,6 +1443,11 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff));
+ if (rdev->family == CHIP_RV730) {
+ WREG32(UVD_UDEC_DB_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ WREG32(UVD_UDEC_DBW_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ WREG32(UVD_UDEC_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ }
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
@@ -840,7 +1677,7 @@ void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
}
if (rdev->flags & RADEON_IS_AGP) {
size_bf = mc->gtt_start;
- size_af = 0xFFFFFFFF - mc->gtt_end;
+ size_af = mc->mc_mask - mc->gtt_end;
if (size_bf > size_af) {
if (mc->mc_vram_size > size_bf) {
dev_warn(rdev->dev, "limiting VRAM\n");
@@ -1040,6 +1877,17 @@ static int rv770_startup(struct radeon_device *rdev)
return r;
}
+ r = rv770_uvd_resume(rdev);
+ if (!r) {
+ r = radeon_fence_driver_start_ring(rdev,
+ R600_RING_TYPE_UVD_INDEX);
+ if (r)
+ dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+ }
+
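+ /* on error, zero the UVD ring size so the ring setup below is skipped and startup continues without UVD */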
+ if (r)
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -1074,6 +1922,19 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ if (ring->ring_size) {
+ r = radeon_ring_init(rdev, ring, ring->ring_size,
+ R600_WB_UVD_RPTR_OFFSET,
+ UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
+ if (!r)
+ r = r600_uvd_init(rdev);
+
+ if (r)
+ DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+ }
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -1100,6 +1961,9 @@ int rv770_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ /* init golden registers */
+ rv770_init_golden_registers(rdev);
+
rdev->accel_working = true;
r = rv770_startup(rdev);
if (r) {
@@ -1115,6 +1979,7 @@ int rv770_resume(struct radeon_device *rdev)
int rv770_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
+ radeon_uvd_suspend(rdev);
r700_cp_stop(rdev);
r600_dma_stop(rdev);
r600_irq_suspend(rdev);
@@ -1156,6 +2021,8 @@ int rv770_init(struct radeon_device *rdev)
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
+ /* init golden registers */
+ rv770_init_golden_registers(rdev);
/* Initialize scratch registers */
r600_scratch_init(rdev);
/* Initialize surface registers */
@@ -1190,6 +2057,13 @@ int rv770_init(struct radeon_device *rdev)
rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+ r = radeon_uvd_init(rdev);
+ if (!r) {
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX],
+ 4096);
+ }
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -1224,6 +2098,7 @@ void rv770_fini(struct radeon_device *rdev)
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
+ radeon_uvd_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
@@ -1264,23 +2139,23 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
/* advertise upconfig capability */
- link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
- WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
- link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
LC_RECONFIG_ARC_MISSING_ESCAPE);
link_width_cntl |= lanes | LC_RECONFIG_NOW |
LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT;
- WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
} else {
link_width_cntl |= LC_UPCONFIGURE_DIS;
- WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
(speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
@@ -1293,29 +2168,29 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
WREG16(0x4088, link_cntl2);
WREG32(MM_CFGREGS_CNTL, 0);
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_GEN2_EN_STRAP;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
} else {
- link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
if (1)
link_width_cntl |= LC_UPCONFIGURE_DIS;
else
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
- WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
}
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index c55f950..85b1626 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -38,6 +38,30 @@
#define R7XX_MAX_PIPES 8
#define R7XX_MAX_PIPES_MASK 0xff
+/* discrete uvd clocks */
+#define CG_UPLL_FUNC_CNTL 0x718
+# define UPLL_RESET_MASK 0x00000001
+# define UPLL_SLEEP_MASK 0x00000002
+# define UPLL_BYPASS_EN_MASK 0x00000004
+# define UPLL_CTLREQ_MASK 0x00000008
+# define UPLL_REF_DIV(x) ((x) << 16)
+# define UPLL_REF_DIV_MASK 0x003F0000
+# define UPLL_CTLACK_MASK 0x40000000
+# define UPLL_CTLACK2_MASK 0x80000000
+#define CG_UPLL_FUNC_CNTL_2 0x71c
+# define UPLL_SW_HILEN(x) ((x) << 0)
+# define UPLL_SW_LOLEN(x) ((x) << 4)
+# define UPLL_SW_HILEN2(x) ((x) << 8)
+# define UPLL_SW_LOLEN2(x) ((x) << 12)
+# define UPLL_SW_MASK 0x0000FFFF
+# define VCLK_SRC_SEL(x) ((x) << 20)
+# define VCLK_SRC_SEL_MASK 0x01F00000
+# define DCLK_SRC_SEL(x) ((x) << 25)
+# define DCLK_SRC_SEL_MASK 0x3E000000
+#define CG_UPLL_FUNC_CNTL_3 0x720
+# define UPLL_FB_DIV(x) ((x) << 0)
+# define UPLL_FB_DIV_MASK 0x01FFFFFF
+
/* Registers */
#define CB_COLOR0_BASE 0x28040
#define CB_COLOR1_BASE 0x28044
@@ -112,6 +136,11 @@
#define DMA_TILING_CONFIG 0x3ec8
#define DMA_TILING_CONFIG2 0xd0b8
+/* RV730 only */
+#define UVD_UDEC_TILING_CONFIG 0xef40
+#define UVD_UDEC_DB_TILING_CONFIG 0xef44
+#define UVD_UDEC_DBW_TILING_CONFIG 0xef48
+
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
#define INACTIVE_QD_PIPES_MASK 0x0000FF00
@@ -671,4 +700,18 @@
# define TARGET_LINK_SPEED_MASK (0xf << 0)
# define SELECTABLE_DEEMPHASIS (1 << 6)
+/* UVD */
+#define UVD_LMI_EXT40_ADDR 0xf498
+#define UVD_VCPU_CHIP_ID 0xf4d4
+#define UVD_VCPU_CACHE_OFFSET0 0xf4d8
+#define UVD_VCPU_CACHE_SIZE0 0xf4dc
+#define UVD_VCPU_CACHE_OFFSET1 0xf4e0
+#define UVD_VCPU_CACHE_SIZE1 0xf4e4
+#define UVD_VCPU_CACHE_OFFSET2 0xf4e8
+#define UVD_VCPU_CACHE_SIZE2 0xf4ec
+#define UVD_LMI_ADDR_EXT 0xf594
+
+#define UVD_RBC_RB_RPTR 0xf690
+#define UVD_RBC_RB_WPTR 0xf694
+
#endif
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index bafbe32..f0b6c2f 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -70,6 +70,794 @@ extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+static const u32 tahiti_golden_rlc_registers[] =
+{
+ 0xc424, 0xffffffff, 0x00601005,
+ 0xc47c, 0xffffffff, 0x10104040,
+ 0xc488, 0xffffffff, 0x0100000a,
+ 0xc314, 0xffffffff, 0x00000800,
+ 0xc30c, 0xffffffff, 0x800000f4,
+ 0xf4a8, 0xffffffff, 0x00000000
+};
+
+static const u32 tahiti_golden_registers[] =
+{
+ 0x9a10, 0x00010000, 0x00018208,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9838, 0x0002021c, 0x00020200,
+ 0xc78, 0x00000080, 0x00000000,
+ 0xd030, 0x000300c0, 0x00800040,
+ 0xd830, 0x000300c0, 0x00800040,
+ 0x5bb0, 0x000000f0, 0x00000070,
+ 0x5bc0, 0x00200000, 0x50100000,
+ 0x7030, 0x31000311, 0x00000011,
+ 0x277c, 0x00000003, 0x000007ff,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8b24, 0xffffffff, 0x00ffffff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x4e000000,
+ 0x28350, 0x3f3f3fff, 0x2a00126a,
+ 0x30, 0x000000ff, 0x0040,
+ 0x34, 0x00000040, 0x00004040,
+ 0x9100, 0x07ffffff, 0x03000000,
+ 0x8e88, 0x01ff1f3f, 0x00000000,
+ 0x8e84, 0x01ff1f3f, 0x00000000,
+ 0x9060, 0x0000007f, 0x00000020,
+ 0x9508, 0x00010000, 0x00010000,
+ 0xac14, 0x00000200, 0x000002fb,
+ 0xac10, 0xffffffff, 0x0000543b,
+ 0xac0c, 0xffffffff, 0xa9210876,
+ 0x88d0, 0xffffffff, 0x000fff40,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x1410, 0x20000000, 0x20fffed8,
+ 0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 tahiti_golden_registers2[] =
+{
+ 0xc64, 0x00000001, 0x00000001
+};
+
+static const u32 pitcairn_golden_rlc_registers[] =
+{
+ 0xc424, 0xffffffff, 0x00601004,
+ 0xc47c, 0xffffffff, 0x10102020,
+ 0xc488, 0xffffffff, 0x01000020,
+ 0xc314, 0xffffffff, 0x00000800,
+ 0xc30c, 0xffffffff, 0x800000a4
+};
+
+static const u32 pitcairn_golden_registers[] =
+{
+ 0x9a10, 0x00010000, 0x00018208,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9838, 0x0002021c, 0x00020200,
+ 0xc78, 0x00000080, 0x00000000,
+ 0xd030, 0x000300c0, 0x00800040,
+ 0xd830, 0x000300c0, 0x00800040,
+ 0x5bb0, 0x000000f0, 0x00000070,
+ 0x5bc0, 0x00200000, 0x50100000,
+ 0x7030, 0x31000311, 0x00000011,
+ 0x2ae4, 0x00073ffe, 0x000022a2,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8b24, 0xffffffff, 0x00ffffff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x4e000000,
+ 0x28350, 0x3f3f3fff, 0x2a00126a,
+ 0x30, 0x000000ff, 0x0040,
+ 0x34, 0x00000040, 0x00004040,
+ 0x9100, 0x07ffffff, 0x03000000,
+ 0x9060, 0x0000007f, 0x00000020,
+ 0x9508, 0x00010000, 0x00010000,
+ 0xac14, 0x000003ff, 0x000000f7,
+ 0xac10, 0xffffffff, 0x00000000,
+ 0xac0c, 0xffffffff, 0x32761054,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 verde_golden_rlc_registers[] =
+{
+ 0xc424, 0xffffffff, 0x033f1005,
+ 0xc47c, 0xffffffff, 0x10808020,
+ 0xc488, 0xffffffff, 0x00800008,
+ 0xc314, 0xffffffff, 0x00001000,
+ 0xc30c, 0xffffffff, 0x80010014
+};
+
+static const u32 verde_golden_registers[] =
+{
+ 0x9a10, 0x00010000, 0x00018208,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9838, 0x0002021c, 0x00020200,
+ 0xc78, 0x00000080, 0x00000000,
+ 0xd030, 0x000300c0, 0x00800040,
+ 0xd030, 0x000300c0, 0x00800040,
+ 0xd830, 0x000300c0, 0x00800040,
+ 0xd830, 0x000300c0, 0x00800040,
+ 0x5bb0, 0x000000f0, 0x00000070,
+ 0x5bc0, 0x00200000, 0x50100000,
+ 0x7030, 0x31000311, 0x00000011,
+ 0x2ae4, 0x00073ffe, 0x000022a2,
+ 0x2ae4, 0x00073ffe, 0x000022a2,
+ 0x2ae4, 0x00073ffe, 0x000022a2,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8b24, 0xffffffff, 0x00ffffff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x4e000000,
+ 0x28350, 0x3f3f3fff, 0x0000124a,
+ 0x28350, 0x3f3f3fff, 0x0000124a,
+ 0x28350, 0x3f3f3fff, 0x0000124a,
+ 0x30, 0x000000ff, 0x0040,
+ 0x34, 0x00000040, 0x00004040,
+ 0x9100, 0x07ffffff, 0x03000000,
+ 0x9100, 0x07ffffff, 0x03000000,
+ 0x8e88, 0x01ff1f3f, 0x00000000,
+ 0x8e88, 0x01ff1f3f, 0x00000000,
+ 0x8e88, 0x01ff1f3f, 0x00000000,
+ 0x8e84, 0x01ff1f3f, 0x00000000,
+ 0x8e84, 0x01ff1f3f, 0x00000000,
+ 0x8e84, 0x01ff1f3f, 0x00000000,
+ 0x9060, 0x0000007f, 0x00000020,
+ 0x9508, 0x00010000, 0x00010000,
+ 0xac14, 0x000003ff, 0x00000003,
+ 0xac14, 0x000003ff, 0x00000003,
+ 0xac14, 0x000003ff, 0x00000003,
+ 0xac10, 0xffffffff, 0x00000000,
+ 0xac10, 0xffffffff, 0x00000000,
+ 0xac10, 0xffffffff, 0x00000000,
+ 0xac0c, 0xffffffff, 0x00001032,
+ 0xac0c, 0xffffffff, 0x00001032,
+ 0xac0c, 0xffffffff, 0x00001032,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 oland_golden_rlc_registers[] =
+{
+ 0xc424, 0xffffffff, 0x00601005,
+ 0xc47c, 0xffffffff, 0x10104040,
+ 0xc488, 0xffffffff, 0x0100000a,
+ 0xc314, 0xffffffff, 0x00000800,
+ 0xc30c, 0xffffffff, 0x800000f4
+};
+
+static const u32 oland_golden_registers[] =
+{
+ 0x9a10, 0x00010000, 0x00018208,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9838, 0x0002021c, 0x00020200,
+ 0xc78, 0x00000080, 0x00000000,
+ 0xd030, 0x000300c0, 0x00800040,
+ 0xd830, 0x000300c0, 0x00800040,
+ 0x5bb0, 0x000000f0, 0x00000070,
+ 0x5bc0, 0x00200000, 0x50100000,
+ 0x7030, 0x31000311, 0x00000011,
+ 0x2ae4, 0x00073ffe, 0x000022a2,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8b24, 0xffffffff, 0x00ffffff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x4e000000,
+ 0x28350, 0x3f3f3fff, 0x00000082,
+ 0x30, 0x000000ff, 0x0040,
+ 0x34, 0x00000040, 0x00004040,
+ 0x9100, 0x07ffffff, 0x03000000,
+ 0x9060, 0x0000007f, 0x00000020,
+ 0x9508, 0x00010000, 0x00010000,
+ 0xac14, 0x000003ff, 0x000000f3,
+ 0xac10, 0xffffffff, 0x00000000,
+ 0xac0c, 0xffffffff, 0x00003210,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 tahiti_mgcg_cgcg_init[] =
+{
+ 0xc400, 0xffffffff, 0xfffffffc,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x92a4, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x9774, 0xffffffff, 0x00000100,
+ 0x8984, 0xffffffff, 0x06000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x92a0, 0xffffffff, 0x00000100,
+ 0xc380, 0xffffffff, 0x00000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x8d88, 0xffffffff, 0x00000100,
+ 0x8d8c, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0xad80, 0xffffffff, 0x00000100,
+ 0xac54, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0xaf04, 0xffffffff, 0x00000100,
+ 0xae04, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9160, 0xffffffff, 0x00010000,
+ 0x9164, 0xffffffff, 0x00030002,
+ 0x9168, 0xffffffff, 0x00040007,
+ 0x916c, 0xffffffff, 0x00060005,
+ 0x9170, 0xffffffff, 0x00090008,
+ 0x9174, 0xffffffff, 0x00020001,
+ 0x9178, 0xffffffff, 0x00040003,
+ 0x917c, 0xffffffff, 0x00000007,
+ 0x9180, 0xffffffff, 0x00060005,
+ 0x9184, 0xffffffff, 0x00090008,
+ 0x9188, 0xffffffff, 0x00030002,
+ 0x918c, 0xffffffff, 0x00050004,
+ 0x9190, 0xffffffff, 0x00000008,
+ 0x9194, 0xffffffff, 0x00070006,
+ 0x9198, 0xffffffff, 0x000a0009,
+ 0x919c, 0xffffffff, 0x00040003,
+ 0x91a0, 0xffffffff, 0x00060005,
+ 0x91a4, 0xffffffff, 0x00000009,
+ 0x91a8, 0xffffffff, 0x00080007,
+ 0x91ac, 0xffffffff, 0x000b000a,
+ 0x91b0, 0xffffffff, 0x00050004,
+ 0x91b4, 0xffffffff, 0x00070006,
+ 0x91b8, 0xffffffff, 0x0008000b,
+ 0x91bc, 0xffffffff, 0x000a0009,
+ 0x91c0, 0xffffffff, 0x000d000c,
+ 0x91c4, 0xffffffff, 0x00060005,
+ 0x91c8, 0xffffffff, 0x00080007,
+ 0x91cc, 0xffffffff, 0x0000000b,
+ 0x91d0, 0xffffffff, 0x000a0009,
+ 0x91d4, 0xffffffff, 0x000d000c,
+ 0x91d8, 0xffffffff, 0x00070006,
+ 0x91dc, 0xffffffff, 0x00090008,
+ 0x91e0, 0xffffffff, 0x0000000c,
+ 0x91e4, 0xffffffff, 0x000b000a,
+ 0x91e8, 0xffffffff, 0x000e000d,
+ 0x91ec, 0xffffffff, 0x00080007,
+ 0x91f0, 0xffffffff, 0x000a0009,
+ 0x91f4, 0xffffffff, 0x0000000d,
+ 0x91f8, 0xffffffff, 0x000c000b,
+ 0x91fc, 0xffffffff, 0x000f000e,
+ 0x9200, 0xffffffff, 0x00090008,
+ 0x9204, 0xffffffff, 0x000b000a,
+ 0x9208, 0xffffffff, 0x000c000f,
+ 0x920c, 0xffffffff, 0x000e000d,
+ 0x9210, 0xffffffff, 0x00110010,
+ 0x9214, 0xffffffff, 0x000a0009,
+ 0x9218, 0xffffffff, 0x000c000b,
+ 0x921c, 0xffffffff, 0x0000000f,
+ 0x9220, 0xffffffff, 0x000e000d,
+ 0x9224, 0xffffffff, 0x00110010,
+ 0x9228, 0xffffffff, 0x000b000a,
+ 0x922c, 0xffffffff, 0x000d000c,
+ 0x9230, 0xffffffff, 0x00000010,
+ 0x9234, 0xffffffff, 0x000f000e,
+ 0x9238, 0xffffffff, 0x00120011,
+ 0x923c, 0xffffffff, 0x000c000b,
+ 0x9240, 0xffffffff, 0x000e000d,
+ 0x9244, 0xffffffff, 0x00000011,
+ 0x9248, 0xffffffff, 0x0010000f,
+ 0x924c, 0xffffffff, 0x00130012,
+ 0x9250, 0xffffffff, 0x000d000c,
+ 0x9254, 0xffffffff, 0x000f000e,
+ 0x9258, 0xffffffff, 0x00100013,
+ 0x925c, 0xffffffff, 0x00120011,
+ 0x9260, 0xffffffff, 0x00150014,
+ 0x9264, 0xffffffff, 0x000e000d,
+ 0x9268, 0xffffffff, 0x0010000f,
+ 0x926c, 0xffffffff, 0x00000013,
+ 0x9270, 0xffffffff, 0x00120011,
+ 0x9274, 0xffffffff, 0x00150014,
+ 0x9278, 0xffffffff, 0x000f000e,
+ 0x927c, 0xffffffff, 0x00110010,
+ 0x9280, 0xffffffff, 0x00000014,
+ 0x9284, 0xffffffff, 0x00130012,
+ 0x9288, 0xffffffff, 0x00160015,
+ 0x928c, 0xffffffff, 0x0010000f,
+ 0x9290, 0xffffffff, 0x00120011,
+ 0x9294, 0xffffffff, 0x00000015,
+ 0x9298, 0xffffffff, 0x00140013,
+ 0x929c, 0xffffffff, 0x00170016,
+ 0x9150, 0xffffffff, 0x96940200,
+ 0x8708, 0xffffffff, 0x00900100,
+ 0xc478, 0xffffffff, 0x00000080,
+ 0xc404, 0xffffffff, 0x0020003f,
+ 0x30, 0xffffffff, 0x0000001c,
+ 0x34, 0x000f0000, 0x000f0000,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x1024, 0xffffffff, 0x00000100,
+ 0x102c, 0x00000101, 0x00000000,
+ 0x20a8, 0xffffffff, 0x00000104,
+ 0x264c, 0x000c0000, 0x000c0000,
+ 0x2648, 0x000c0000, 0x000c0000,
+ 0x55e4, 0xff000fff, 0x00000100,
+ 0x55e8, 0x00000001, 0x00000001,
+ 0x2f50, 0x00000001, 0x00000001,
+ 0x30cc, 0xc0000fff, 0x00000104,
+ 0xc1e4, 0x00000001, 0x00000001,
+ 0xd0c0, 0xfffffff0, 0x00000100,
+ 0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 pitcairn_mgcg_cgcg_init[] =
+{
+ 0xc400, 0xffffffff, 0xfffffffc,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x92a4, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x9774, 0xffffffff, 0x00000100,
+ 0x8984, 0xffffffff, 0x06000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x92a0, 0xffffffff, 0x00000100,
+ 0xc380, 0xffffffff, 0x00000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x8d88, 0xffffffff, 0x00000100,
+ 0x8d8c, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0xad80, 0xffffffff, 0x00000100,
+ 0xac54, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0xaf04, 0xffffffff, 0x00000100,
+ 0xae04, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9160, 0xffffffff, 0x00010000,
+ 0x9164, 0xffffffff, 0x00030002,
+ 0x9168, 0xffffffff, 0x00040007,
+ 0x916c, 0xffffffff, 0x00060005,
+ 0x9170, 0xffffffff, 0x00090008,
+ 0x9174, 0xffffffff, 0x00020001,
+ 0x9178, 0xffffffff, 0x00040003,
+ 0x917c, 0xffffffff, 0x00000007,
+ 0x9180, 0xffffffff, 0x00060005,
+ 0x9184, 0xffffffff, 0x00090008,
+ 0x9188, 0xffffffff, 0x00030002,
+ 0x918c, 0xffffffff, 0x00050004,
+ 0x9190, 0xffffffff, 0x00000008,
+ 0x9194, 0xffffffff, 0x00070006,
+ 0x9198, 0xffffffff, 0x000a0009,
+ 0x919c, 0xffffffff, 0x00040003,
+ 0x91a0, 0xffffffff, 0x00060005,
+ 0x91a4, 0xffffffff, 0x00000009,
+ 0x91a8, 0xffffffff, 0x00080007,
+ 0x91ac, 0xffffffff, 0x000b000a,
+ 0x91b0, 0xffffffff, 0x00050004,
+ 0x91b4, 0xffffffff, 0x00070006,
+ 0x91b8, 0xffffffff, 0x0008000b,
+ 0x91bc, 0xffffffff, 0x000a0009,
+ 0x91c0, 0xffffffff, 0x000d000c,
+ 0x9200, 0xffffffff, 0x00090008,
+ 0x9204, 0xffffffff, 0x000b000a,
+ 0x9208, 0xffffffff, 0x000c000f,
+ 0x920c, 0xffffffff, 0x000e000d,
+ 0x9210, 0xffffffff, 0x00110010,
+ 0x9214, 0xffffffff, 0x000a0009,
+ 0x9218, 0xffffffff, 0x000c000b,
+ 0x921c, 0xffffffff, 0x0000000f,
+ 0x9220, 0xffffffff, 0x000e000d,
+ 0x9224, 0xffffffff, 0x00110010,
+ 0x9228, 0xffffffff, 0x000b000a,
+ 0x922c, 0xffffffff, 0x000d000c,
+ 0x9230, 0xffffffff, 0x00000010,
+ 0x9234, 0xffffffff, 0x000f000e,
+ 0x9238, 0xffffffff, 0x00120011,
+ 0x923c, 0xffffffff, 0x000c000b,
+ 0x9240, 0xffffffff, 0x000e000d,
+ 0x9244, 0xffffffff, 0x00000011,
+ 0x9248, 0xffffffff, 0x0010000f,
+ 0x924c, 0xffffffff, 0x00130012,
+ 0x9250, 0xffffffff, 0x000d000c,
+ 0x9254, 0xffffffff, 0x000f000e,
+ 0x9258, 0xffffffff, 0x00100013,
+ 0x925c, 0xffffffff, 0x00120011,
+ 0x9260, 0xffffffff, 0x00150014,
+ 0x9150, 0xffffffff, 0x96940200,
+ 0x8708, 0xffffffff, 0x00900100,
+ 0xc478, 0xffffffff, 0x00000080,
+ 0xc404, 0xffffffff, 0x0020003f,
+ 0x30, 0xffffffff, 0x0000001c,
+ 0x34, 0x000f0000, 0x000f0000,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x1024, 0xffffffff, 0x00000100,
+ 0x102c, 0x00000101, 0x00000000,
+ 0x20a8, 0xffffffff, 0x00000104,
+ 0x55e4, 0xff000fff, 0x00000100,
+ 0x55e8, 0x00000001, 0x00000001,
+ 0x2f50, 0x00000001, 0x00000001,
+ 0x30cc, 0xc0000fff, 0x00000104,
+ 0xc1e4, 0x00000001, 0x00000001,
+ 0xd0c0, 0xfffffff0, 0x00000100,
+ 0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 verde_mgcg_cgcg_init[] =
+{
+ 0xc400, 0xffffffff, 0xfffffffc,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x92a4, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x9774, 0xffffffff, 0x00000100,
+ 0x8984, 0xffffffff, 0x06000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x92a0, 0xffffffff, 0x00000100,
+ 0xc380, 0xffffffff, 0x00000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x8d88, 0xffffffff, 0x00000100,
+ 0x8d8c, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0xad80, 0xffffffff, 0x00000100,
+ 0xac54, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0xaf04, 0xffffffff, 0x00000100,
+ 0xae04, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9160, 0xffffffff, 0x00010000,
+ 0x9164, 0xffffffff, 0x00030002,
+ 0x9168, 0xffffffff, 0x00040007,
+ 0x916c, 0xffffffff, 0x00060005,
+ 0x9170, 0xffffffff, 0x00090008,
+ 0x9174, 0xffffffff, 0x00020001,
+ 0x9178, 0xffffffff, 0x00040003,
+ 0x917c, 0xffffffff, 0x00000007,
+ 0x9180, 0xffffffff, 0x00060005,
+ 0x9184, 0xffffffff, 0x00090008,
+ 0x9188, 0xffffffff, 0x00030002,
+ 0x918c, 0xffffffff, 0x00050004,
+ 0x9190, 0xffffffff, 0x00000008,
+ 0x9194, 0xffffffff, 0x00070006,
+ 0x9198, 0xffffffff, 0x000a0009,
+ 0x919c, 0xffffffff, 0x00040003,
+ 0x91a0, 0xffffffff, 0x00060005,
+ 0x91a4, 0xffffffff, 0x00000009,
+ 0x91a8, 0xffffffff, 0x00080007,
+ 0x91ac, 0xffffffff, 0x000b000a,
+ 0x91b0, 0xffffffff, 0x00050004,
+ 0x91b4, 0xffffffff, 0x00070006,
+ 0x91b8, 0xffffffff, 0x0008000b,
+ 0x91bc, 0xffffffff, 0x000a0009,
+ 0x91c0, 0xffffffff, 0x000d000c,
+ 0x9200, 0xffffffff, 0x00090008,
+ 0x9204, 0xffffffff, 0x000b000a,
+ 0x9208, 0xffffffff, 0x000c000f,
+ 0x920c, 0xffffffff, 0x000e000d,
+ 0x9210, 0xffffffff, 0x00110010,
+ 0x9214, 0xffffffff, 0x000a0009,
+ 0x9218, 0xffffffff, 0x000c000b,
+ 0x921c, 0xffffffff, 0x0000000f,
+ 0x9220, 0xffffffff, 0x000e000d,
+ 0x9224, 0xffffffff, 0x00110010,
+ 0x9228, 0xffffffff, 0x000b000a,
+ 0x922c, 0xffffffff, 0x000d000c,
+ 0x9230, 0xffffffff, 0x00000010,
+ 0x9234, 0xffffffff, 0x000f000e,
+ 0x9238, 0xffffffff, 0x00120011,
+ 0x923c, 0xffffffff, 0x000c000b,
+ 0x9240, 0xffffffff, 0x000e000d,
+ 0x9244, 0xffffffff, 0x00000011,
+ 0x9248, 0xffffffff, 0x0010000f,
+ 0x924c, 0xffffffff, 0x00130012,
+ 0x9250, 0xffffffff, 0x000d000c,
+ 0x9254, 0xffffffff, 0x000f000e,
+ 0x9258, 0xffffffff, 0x00100013,
+ 0x925c, 0xffffffff, 0x00120011,
+ 0x9260, 0xffffffff, 0x00150014,
+ 0x9150, 0xffffffff, 0x96940200,
+ 0x8708, 0xffffffff, 0x00900100,
+ 0xc478, 0xffffffff, 0x00000080,
+ 0xc404, 0xffffffff, 0x0020003f,
+ 0x30, 0xffffffff, 0x0000001c,
+ 0x34, 0x000f0000, 0x000f0000,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x1024, 0xffffffff, 0x00000100,
+ 0x102c, 0x00000101, 0x00000000,
+ 0x20a8, 0xffffffff, 0x00000104,
+ 0x264c, 0x000c0000, 0x000c0000,
+ 0x2648, 0x000c0000, 0x000c0000,
+ 0x55e4, 0xff000fff, 0x00000100,
+ 0x55e8, 0x00000001, 0x00000001,
+ 0x2f50, 0x00000001, 0x00000001,
+ 0x30cc, 0xc0000fff, 0x00000104,
+ 0xc1e4, 0x00000001, 0x00000001,
+ 0xd0c0, 0xfffffff0, 0x00000100,
+ 0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 oland_mgcg_cgcg_init[] =
+{
+ 0xc400, 0xffffffff, 0xfffffffc,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x92a4, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x9774, 0xffffffff, 0x00000100,
+ 0x8984, 0xffffffff, 0x06000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x92a0, 0xffffffff, 0x00000100,
+ 0xc380, 0xffffffff, 0x00000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x8d88, 0xffffffff, 0x00000100,
+ 0x8d8c, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0xad80, 0xffffffff, 0x00000100,
+ 0xac54, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0xaf04, 0xffffffff, 0x00000100,
+ 0xae04, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9160, 0xffffffff, 0x00010000,
+ 0x9164, 0xffffffff, 0x00030002,
+ 0x9168, 0xffffffff, 0x00040007,
+ 0x916c, 0xffffffff, 0x00060005,
+ 0x9170, 0xffffffff, 0x00090008,
+ 0x9174, 0xffffffff, 0x00020001,
+ 0x9178, 0xffffffff, 0x00040003,
+ 0x917c, 0xffffffff, 0x00000007,
+ 0x9180, 0xffffffff, 0x00060005,
+ 0x9184, 0xffffffff, 0x00090008,
+ 0x9188, 0xffffffff, 0x00030002,
+ 0x918c, 0xffffffff, 0x00050004,
+ 0x9190, 0xffffffff, 0x00000008,
+ 0x9194, 0xffffffff, 0x00070006,
+ 0x9198, 0xffffffff, 0x000a0009,
+ 0x919c, 0xffffffff, 0x00040003,
+ 0x91a0, 0xffffffff, 0x00060005,
+ 0x91a4, 0xffffffff, 0x00000009,
+ 0x91a8, 0xffffffff, 0x00080007,
+ 0x91ac, 0xffffffff, 0x000b000a,
+ 0x91b0, 0xffffffff, 0x00050004,
+ 0x91b4, 0xffffffff, 0x00070006,
+ 0x91b8, 0xffffffff, 0x0008000b,
+ 0x91bc, 0xffffffff, 0x000a0009,
+ 0x91c0, 0xffffffff, 0x000d000c,
+ 0x91c4, 0xffffffff, 0x00060005,
+ 0x91c8, 0xffffffff, 0x00080007,
+ 0x91cc, 0xffffffff, 0x0000000b,
+ 0x91d0, 0xffffffff, 0x000a0009,
+ 0x91d4, 0xffffffff, 0x000d000c,
+ 0x9150, 0xffffffff, 0x96940200,
+ 0x8708, 0xffffffff, 0x00900100,
+ 0xc478, 0xffffffff, 0x00000080,
+ 0xc404, 0xffffffff, 0x0020003f,
+ 0x30, 0xffffffff, 0x0000001c,
+ 0x34, 0x000f0000, 0x000f0000,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x1024, 0xffffffff, 0x00000100,
+ 0x102c, 0x00000101, 0x00000000,
+ 0x20a8, 0xffffffff, 0x00000104,
+ 0x264c, 0x000c0000, 0x000c0000,
+ 0x2648, 0x000c0000, 0x000c0000,
+ 0x55e4, 0xff000fff, 0x00000100,
+ 0x55e8, 0x00000001, 0x00000001,
+ 0x2f50, 0x00000001, 0x00000001,
+ 0x30cc, 0xc0000fff, 0x00000104,
+ 0xc1e4, 0x00000001, 0x00000001,
+ 0xd0c0, 0xfffffff0, 0x00000100,
+ 0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 verde_pg_init[] =
+{
+ 0x353c, 0xffffffff, 0x40000,
+ 0x3538, 0xffffffff, 0x200010ff,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x7007,
+ 0x3538, 0xffffffff, 0x300010ff,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x400000,
+ 0x3538, 0xffffffff, 0x100010ff,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x120200,
+ 0x3538, 0xffffffff, 0x500010ff,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x1e1e16,
+ 0x3538, 0xffffffff, 0x600010ff,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x171f1e,
+ 0x3538, 0xffffffff, 0x700010ff,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x3538, 0xffffffff, 0x9ff,
+ 0x3500, 0xffffffff, 0x0,
+ 0x3504, 0xffffffff, 0x10000800,
+ 0x3504, 0xffffffff, 0xf,
+ 0x3504, 0xffffffff, 0xf,
+ 0x3500, 0xffffffff, 0x4,
+ 0x3504, 0xffffffff, 0x1000051e,
+ 0x3504, 0xffffffff, 0xffff,
+ 0x3504, 0xffffffff, 0xffff,
+ 0x3500, 0xffffffff, 0x8,
+ 0x3504, 0xffffffff, 0x80500,
+ 0x3500, 0xffffffff, 0x12,
+ 0x3504, 0xffffffff, 0x9050c,
+ 0x3500, 0xffffffff, 0x1d,
+ 0x3504, 0xffffffff, 0xb052c,
+ 0x3500, 0xffffffff, 0x2a,
+ 0x3504, 0xffffffff, 0x1053e,
+ 0x3500, 0xffffffff, 0x2d,
+ 0x3504, 0xffffffff, 0x10546,
+ 0x3500, 0xffffffff, 0x30,
+ 0x3504, 0xffffffff, 0xa054e,
+ 0x3500, 0xffffffff, 0x3c,
+ 0x3504, 0xffffffff, 0x1055f,
+ 0x3500, 0xffffffff, 0x3f,
+ 0x3504, 0xffffffff, 0x10567,
+ 0x3500, 0xffffffff, 0x42,
+ 0x3504, 0xffffffff, 0x1056f,
+ 0x3500, 0xffffffff, 0x45,
+ 0x3504, 0xffffffff, 0x10572,
+ 0x3500, 0xffffffff, 0x48,
+ 0x3504, 0xffffffff, 0x20575,
+ 0x3500, 0xffffffff, 0x4c,
+ 0x3504, 0xffffffff, 0x190801,
+ 0x3500, 0xffffffff, 0x67,
+ 0x3504, 0xffffffff, 0x1082a,
+ 0x3500, 0xffffffff, 0x6a,
+ 0x3504, 0xffffffff, 0x1b082d,
+ 0x3500, 0xffffffff, 0x87,
+ 0x3504, 0xffffffff, 0x310851,
+ 0x3500, 0xffffffff, 0xba,
+ 0x3504, 0xffffffff, 0x891,
+ 0x3500, 0xffffffff, 0xbc,
+ 0x3504, 0xffffffff, 0x893,
+ 0x3500, 0xffffffff, 0xbe,
+ 0x3504, 0xffffffff, 0x20895,
+ 0x3500, 0xffffffff, 0xc2,
+ 0x3504, 0xffffffff, 0x20899,
+ 0x3500, 0xffffffff, 0xc6,
+ 0x3504, 0xffffffff, 0x2089d,
+ 0x3500, 0xffffffff, 0xca,
+ 0x3504, 0xffffffff, 0x8a1,
+ 0x3500, 0xffffffff, 0xcc,
+ 0x3504, 0xffffffff, 0x8a3,
+ 0x3500, 0xffffffff, 0xce,
+ 0x3504, 0xffffffff, 0x308a5,
+ 0x3500, 0xffffffff, 0xd3,
+ 0x3504, 0xffffffff, 0x6d08cd,
+ 0x3500, 0xffffffff, 0x142,
+ 0x3504, 0xffffffff, 0x2000095a,
+ 0x3504, 0xffffffff, 0x1,
+ 0x3500, 0xffffffff, 0x144,
+ 0x3504, 0xffffffff, 0x301f095b,
+ 0x3500, 0xffffffff, 0x165,
+ 0x3504, 0xffffffff, 0xc094d,
+ 0x3500, 0xffffffff, 0x173,
+ 0x3504, 0xffffffff, 0xf096d,
+ 0x3500, 0xffffffff, 0x184,
+ 0x3504, 0xffffffff, 0x15097f,
+ 0x3500, 0xffffffff, 0x19b,
+ 0x3504, 0xffffffff, 0xc0998,
+ 0x3500, 0xffffffff, 0x1a9,
+ 0x3504, 0xffffffff, 0x409a7,
+ 0x3500, 0xffffffff, 0x1af,
+ 0x3504, 0xffffffff, 0xcdc,
+ 0x3500, 0xffffffff, 0x1b1,
+ 0x3504, 0xffffffff, 0x800,
+ 0x3508, 0xffffffff, 0x6c9b2000,
+ 0x3510, 0xfc00, 0x2000,
+ 0x3544, 0xffffffff, 0xfc0,
+ 0x28d4, 0x00000100, 0x100
+};
+
+static void si_init_golden_registers(struct radeon_device *rdev)
+{
+ switch (rdev->family) {
+ case CHIP_TAHITI:
+ radeon_program_register_sequence(rdev,
+ tahiti_golden_registers,
+ (const u32)ARRAY_SIZE(tahiti_golden_registers));
+ radeon_program_register_sequence(rdev,
+ tahiti_golden_rlc_registers,
+ (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
+ radeon_program_register_sequence(rdev,
+ tahiti_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
+ radeon_program_register_sequence(rdev,
+ tahiti_golden_registers2,
+ (const u32)ARRAY_SIZE(tahiti_golden_registers2));
+ break;
+ case CHIP_PITCAIRN:
+ radeon_program_register_sequence(rdev,
+ pitcairn_golden_registers,
+ (const u32)ARRAY_SIZE(pitcairn_golden_registers));
+ radeon_program_register_sequence(rdev,
+ pitcairn_golden_rlc_registers,
+ (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
+ radeon_program_register_sequence(rdev,
+ pitcairn_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
+ break;
+ case CHIP_VERDE:
+ radeon_program_register_sequence(rdev,
+ verde_golden_registers,
+ (const u32)ARRAY_SIZE(verde_golden_registers));
+ radeon_program_register_sequence(rdev,
+ verde_golden_rlc_registers,
+ (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
+ radeon_program_register_sequence(rdev,
+ verde_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
+ radeon_program_register_sequence(rdev,
+ verde_pg_init,
+ (const u32)ARRAY_SIZE(verde_pg_init));
+ break;
+ case CHIP_OLAND:
+ radeon_program_register_sequence(rdev,
+ oland_golden_registers,
+ (const u32)ARRAY_SIZE(oland_golden_registers));
+ radeon_program_register_sequence(rdev,
+ oland_golden_rlc_registers,
+ (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
+ radeon_program_register_sequence(rdev,
+ oland_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
+ break;
+ default:
+ break;
+ }
+}
+
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
@@ -1211,6 +1999,7 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
+ rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else if ((rdev->family == CHIP_VERDE) ||
@@ -1451,6 +2240,7 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
+ rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else
@@ -1463,7 +2253,7 @@ static void si_select_se_sh(struct radeon_device *rdev,
u32 data = INSTANCE_BROADCAST_WRITES;
if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
- data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+ data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
else if (se_num == 0xffffffff)
data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
else if (sh_num == 0xffffffff)
@@ -1765,9 +2555,13 @@ static void si_gpu_init(struct radeon_device *rdev)
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CALC, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
+ WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
si_tiling_mode_table_init(rdev);
@@ -2538,46 +3332,6 @@ static void si_mc_program(struct radeon_device *rdev)
rv515_vga_render_disable(rdev);
}
-/* SI MC address space is 40 bits */
-static void si_vram_location(struct radeon_device *rdev,
- struct radeon_mc *mc, u64 base)
-{
- mc->vram_start = base;
- if (mc->mc_vram_size > (0xFFFFFFFFFFULL - base + 1)) {
- dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
- mc->real_vram_size = mc->aper_size;
- mc->mc_vram_size = mc->aper_size;
- }
- mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
- dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
- mc->mc_vram_size >> 20, mc->vram_start,
- mc->vram_end, mc->real_vram_size >> 20);
-}
-
-static void si_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
-{
- u64 size_af, size_bf;
-
- size_af = ((0xFFFFFFFFFFULL - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
- size_bf = mc->vram_start & ~mc->gtt_base_align;
- if (size_bf > size_af) {
- if (mc->gtt_size > size_bf) {
- dev_warn(rdev->dev, "limiting GTT\n");
- mc->gtt_size = size_bf;
- }
- mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
- } else {
- if (mc->gtt_size > size_af) {
- dev_warn(rdev->dev, "limiting GTT\n");
- mc->gtt_size = size_af;
- }
- mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
- }
- mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
- dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
- mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
-}
-
static void si_vram_gtt_location(struct radeon_device *rdev,
struct radeon_mc *mc)
{
@@ -2587,9 +3341,9 @@ static void si_vram_gtt_location(struct radeon_device *rdev,
mc->real_vram_size = 0xFFC0000000ULL;
mc->mc_vram_size = 0xFFC0000000ULL;
}
- si_vram_location(rdev, &rdev->mc, 0);
+ radeon_vram_location(rdev, &rdev->mc, 0);
rdev->mc.gtt_base_align = 0;
- si_gtt_location(rdev, mc);
+ radeon_gtt_location(rdev, mc);
}
static int si_mc_init(struct radeon_device *rdev)
@@ -4322,14 +5076,6 @@ static int si_startup(struct radeon_device *rdev)
return r;
si_gpu_init(rdev);
-#if 0
- r = evergreen_blit_init(rdev);
- if (r) {
- r600_blit_fini(rdev);
- rdev->asic->copy = NULL;
- dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
- }
-#endif
/* allocate rlc buffers */
r = si_rlc_init(rdev);
if (r) {
@@ -4372,6 +5118,16 @@ static int si_startup(struct radeon_device *rdev)
return r;
}
+ r = rv770_uvd_resume(rdev);
+ if (!r) {
+ r = radeon_fence_driver_start_ring(rdev,
+ R600_RING_TYPE_UVD_INDEX);
+ if (r)
+ dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+ }
+ if (r)
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+
/* Enable IRQ */
r = si_irq_init(rdev);
if (r) {
@@ -4429,6 +5185,18 @@ static int si_startup(struct radeon_device *rdev)
if (r)
return r;
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ if (ring->ring_size) {
+ r = radeon_ring_init(rdev, ring, ring->ring_size,
+ R600_WB_UVD_RPTR_OFFSET,
+ UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
+ if (!r)
+ r = r600_uvd_init(rdev);
+ if (r)
+ DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+ }
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -4455,6 +5223,9 @@ int si_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ /* init golden registers */
+ si_init_golden_registers(rdev);
+
rdev->accel_working = true;
r = si_startup(rdev);
if (r) {
@@ -4472,6 +5243,8 @@ int si_suspend(struct radeon_device *rdev)
radeon_vm_manager_fini(rdev);
si_cp_enable(rdev, false);
cayman_dma_stop(rdev);
+ r600_uvd_rbc_stop(rdev);
+ radeon_uvd_suspend(rdev);
si_irq_suspend(rdev);
radeon_wb_disable(rdev);
si_pcie_gart_disable(rdev);
@@ -4512,6 +5285,8 @@ int si_init(struct radeon_device *rdev)
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
+ /* init golden registers */
+ si_init_golden_registers(rdev);
/* Initialize scratch registers */
si_scratch_init(rdev);
/* Initialize surface registers */
@@ -4557,6 +5332,13 @@ int si_init(struct radeon_device *rdev)
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 64 * 1024);
+ r = radeon_uvd_init(rdev);
+ if (!r) {
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 4096);
+ }
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -4594,9 +5376,6 @@ int si_init(struct radeon_device *rdev)
void si_fini(struct radeon_device *rdev)
{
-#if 0
- r600_blit_fini(rdev);
-#endif
si_cp_fini(rdev);
cayman_dma_fini(rdev);
si_irq_fini(rdev);
@@ -4605,6 +5384,7 @@ void si_fini(struct radeon_device *rdev)
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
+ radeon_uvd_fini(rdev);
si_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
@@ -4634,3 +5414,94 @@ uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
mutex_unlock(&rdev->gpu_clock_mutex);
return clock;
}
+
+int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+ unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
+ int r;
+
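+ /* UPLL reprogramming sequence: run VCLK/DCLK from the bypass clock, update the dividers, then switch back */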
+ /* bypass vclk and dclk with bclk */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+ ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+ /* put PLL in bypass mode */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
+
+ if (!vclk || !dclk) {
+ /* keep the Bypass mode, put PLL to sleep */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+ return 0;
+ }
+
+ r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
+ 16384, 0x03FFFFFF, 0, 128, 5,
+ &fb_div, &vclk_div, &dclk_div);
+ if (r)
+ return r;
+
+ /* set RESET_ANTI_MUX to 0 */
+ WREG32_P(CG_UPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
+
+ /* set VCO_MODE to 1 */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
+
+ /* toggle UPLL_SLEEP to 1 then back to 0 */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
+
+ /* deassert UPLL_RESET */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+ mdelay(1);
+
+ r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+ if (r)
+ return r;
+
+ /* assert UPLL_RESET again */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+
+ /* disable spread spectrum. */
+ WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
+
+ /* set feedback divider */
+ WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
+
+ /* set ref divider to 0 */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
+
+ if (fb_div < 307200)
+ WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
+ else
+ WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
+
+ /* set PDIV_A and PDIV_B */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
+ ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
+
+ /* give the PLL some time to settle */
+ mdelay(15);
+
+ /* deassert PLL_RESET */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+ mdelay(15);
+
+ /* switch from bypass mode to normal mode */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+
+ r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+ if (r)
+ return r;
+
+ /* switch VCLK and DCLK selection */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+ ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+ mdelay(100);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 23fc08f..222877b 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -29,6 +29,35 @@
#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
+/* discrete uvd clocks */
+#define CG_UPLL_FUNC_CNTL 0x634
+# define UPLL_RESET_MASK 0x00000001
+# define UPLL_SLEEP_MASK 0x00000002
+# define UPLL_BYPASS_EN_MASK 0x00000004
+# define UPLL_CTLREQ_MASK 0x00000008
+# define UPLL_VCO_MODE_MASK 0x00000600
+# define UPLL_REF_DIV_MASK 0x003F0000
+# define UPLL_CTLACK_MASK 0x40000000
+# define UPLL_CTLACK2_MASK 0x80000000
+#define CG_UPLL_FUNC_CNTL_2 0x638
+# define UPLL_PDIV_A(x) ((x) << 0)
+# define UPLL_PDIV_A_MASK 0x0000007F
+# define UPLL_PDIV_B(x) ((x) << 8)
+# define UPLL_PDIV_B_MASK 0x00007F00
+# define VCLK_SRC_SEL(x) ((x) << 20)
+# define VCLK_SRC_SEL_MASK 0x01F00000
+# define DCLK_SRC_SEL(x) ((x) << 25)
+# define DCLK_SRC_SEL_MASK 0x3E000000
+#define CG_UPLL_FUNC_CNTL_3 0x63C
+# define UPLL_FB_DIV(x) ((x) << 0)
+# define UPLL_FB_DIV_MASK 0x01FFFFFF
+#define CG_UPLL_FUNC_CNTL_4 0x644
+# define UPLL_SPARE_ISPARE9 0x00020000
+#define CG_UPLL_FUNC_CNTL_5 0x648
+# define RESET_ANTI_MUX_MASK 0x00000200
+#define CG_UPLL_SPREAD_SPECTRUM 0x650
+# define SSEN_MASK 0x00000001
+
#define CG_MULT_THERMAL_STATUS 0x714
#define ASIC_MAX_TEMP(x) ((x) << 0)
#define ASIC_MAX_TEMP_MASK 0x000001ff
@@ -65,6 +94,8 @@
#define DMIF_ADDR_CONFIG 0xBD4
+#define DMIF_ADDR_CALC 0xC00
+
#define SRBM_STATUS 0xE50
#define GRBM_RQ_PENDING (1 << 5)
#define VMC_BUSY (1 << 8)
@@ -798,6 +829,15 @@
# define THREAD_TRACE_FINISH (55 << 0)
/*
+ * UVD
+ */
+#define UVD_UDEC_ADDR_CONFIG 0xEF4C
+#define UVD_UDEC_DB_ADDR_CONFIG 0xEF50
+#define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54
+#define UVD_RBC_RB_RPTR 0xF690
+#define UVD_RBC_RB_WPTR 0xF694
+
+/*
* PM4
*/
#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index d917a41..7dff49e 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -494,10 +494,10 @@ static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
if (event) {
event->pipe = 0;
+ drm_vblank_get(dev, 0);
spin_lock_irqsave(&dev->event_lock, flags);
scrtc->event = event;
spin_unlock_irqrestore(&dev->event_lock, flags);
- drm_vblank_get(dev, 0);
}
return 0;
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
deleted file mode 100644
index 80f73d1..0000000
--- a/drivers/gpu/drm/tegra/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-ccflags-y := -Iinclude/drm
-ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
-
-tegra-drm-y := drm.o fb.o dc.o host1x.o
-tegra-drm-y += output.o rgb.o hdmi.o
-
-obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
deleted file mode 100644
index 9d452df..0000000
--- a/drivers/gpu/drm/tegra/drm.c
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * Copyright (C) 2012 Avionic Design GmbH
- * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-
-#include <linux/dma-mapping.h>
-#include <asm/dma-iommu.h>
-
-#include "drm.h"
-
-#define DRIVER_NAME "tegra"
-#define DRIVER_DESC "NVIDIA Tegra graphics"
-#define DRIVER_DATE "20120330"
-#define DRIVER_MAJOR 0
-#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 0
-
-static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
-{
- struct device *dev = drm->dev;
- struct host1x *host1x;
- int err;
-
- host1x = dev_get_drvdata(dev);
- drm->dev_private = host1x;
- host1x->drm = drm;
-
- drm_mode_config_init(drm);
-
- err = host1x_drm_init(host1x, drm);
- if (err < 0)
- return err;
-
- err = drm_vblank_init(drm, drm->mode_config.num_crtc);
- if (err < 0)
- return err;
-
- err = tegra_drm_fb_init(drm);
- if (err < 0)
- return err;
-
- drm_kms_helper_poll_init(drm);
-
- return 0;
-}
-
-static int tegra_drm_unload(struct drm_device *drm)
-{
- drm_kms_helper_poll_fini(drm);
- tegra_drm_fb_exit(drm);
-
- drm_mode_config_cleanup(drm);
-
- return 0;
-}
-
-static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
-{
- return 0;
-}
-
-static void tegra_drm_lastclose(struct drm_device *drm)
-{
- struct host1x *host1x = drm->dev_private;
-
- drm_fbdev_cma_restore_mode(host1x->fbdev);
-}
-
-static struct drm_ioctl_desc tegra_drm_ioctls[] = {
-};
-
-static const struct file_operations tegra_drm_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_gem_cma_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .read = drm_read,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = drm_compat_ioctl,
-#endif
- .llseek = noop_llseek,
-};
-
-static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
-{
- struct drm_crtc *crtc;
-
- list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
- struct tegra_dc *dc = to_tegra_dc(crtc);
-
- if (dc->pipe == pipe)
- return crtc;
- }
-
- return NULL;
-}
-
-static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
-{
- /* TODO: implement real hardware counter using syncpoints */
- return drm_vblank_count(dev, crtc);
-}
-
-static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
-{
- struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
- struct tegra_dc *dc = to_tegra_dc(crtc);
-
- if (!crtc)
- return -ENODEV;
-
- tegra_dc_enable_vblank(dc);
-
- return 0;
-}
-
-static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
-{
- struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
- struct tegra_dc *dc = to_tegra_dc(crtc);
-
- if (crtc)
- tegra_dc_disable_vblank(dc);
-}
-
-static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
-{
- struct drm_crtc *crtc;
-
- list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
- tegra_dc_cancel_page_flip(crtc, file);
-}
-
-#ifdef CONFIG_DEBUG_FS
-static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *)s->private;
- struct drm_device *drm = node->minor->dev;
- struct drm_framebuffer *fb;
-
- mutex_lock(&drm->mode_config.fb_lock);
-
- list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
- seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
- fb->base.id, fb->width, fb->height, fb->depth,
- fb->bits_per_pixel,
- atomic_read(&fb->refcount.refcount));
- }
-
- mutex_unlock(&drm->mode_config.fb_lock);
-
- return 0;
-}
-
-static struct drm_info_list tegra_debugfs_list[] = {
- { "framebuffers", tegra_debugfs_framebuffers, 0 },
-};
-
-static int tegra_debugfs_init(struct drm_minor *minor)
-{
- return drm_debugfs_create_files(tegra_debugfs_list,
- ARRAY_SIZE(tegra_debugfs_list),
- minor->debugfs_root, minor);
-}
-
-static void tegra_debugfs_cleanup(struct drm_minor *minor)
-{
- drm_debugfs_remove_files(tegra_debugfs_list,
- ARRAY_SIZE(tegra_debugfs_list), minor);
-}
-#endif
-
-struct drm_driver tegra_drm_driver = {
- .driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM,
- .load = tegra_drm_load,
- .unload = tegra_drm_unload,
- .open = tegra_drm_open,
- .preclose = tegra_drm_preclose,
- .lastclose = tegra_drm_lastclose,
-
- .get_vblank_counter = tegra_drm_get_vblank_counter,
- .enable_vblank = tegra_drm_enable_vblank,
- .disable_vblank = tegra_drm_disable_vblank,
-
-#if defined(CONFIG_DEBUG_FS)
- .debugfs_init = tegra_debugfs_init,
- .debugfs_cleanup = tegra_debugfs_cleanup,
-#endif
-
- .gem_free_object = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .dumb_create = drm_gem_cma_dumb_create,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_cma_dumb_destroy,
-
- .ioctls = tegra_drm_ioctls,
- .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
- .fops = &tegra_drm_fops,
-
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
-};
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
deleted file mode 100644
index 0391495..0000000
--- a/drivers/gpu/drm/tegra/fb.c
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (C) 2012 Avionic Design GmbH
- * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include "drm.h"
-
-static void tegra_drm_fb_output_poll_changed(struct drm_device *drm)
-{
- struct host1x *host1x = drm->dev_private;
-
- drm_fbdev_cma_hotplug_event(host1x->fbdev);
-}
-
-static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
- .fb_create = drm_fb_cma_create,
- .output_poll_changed = tegra_drm_fb_output_poll_changed,
-};
-
-int tegra_drm_fb_init(struct drm_device *drm)
-{
- struct host1x *host1x = drm->dev_private;
- struct drm_fbdev_cma *fbdev;
-
- drm->mode_config.min_width = 0;
- drm->mode_config.min_height = 0;
-
- drm->mode_config.max_width = 4096;
- drm->mode_config.max_height = 4096;
-
- drm->mode_config.funcs = &tegra_drm_mode_funcs;
-
- fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
- drm->mode_config.num_connector);
- if (IS_ERR(fbdev))
- return PTR_ERR(fbdev);
-
- host1x->fbdev = fbdev;
-
- return 0;
-}
-
-void tegra_drm_fb_exit(struct drm_device *drm)
-{
- struct host1x *host1x = drm->dev_private;
-
- drm_fbdev_cma_fini(host1x->fbdev);
-}
diff --git a/drivers/gpu/drm/tegra/host1x.c b/drivers/gpu/drm/tegra/host1x.c
deleted file mode 100644
index 92e25a7..0000000
--- a/drivers/gpu/drm/tegra/host1x.c
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * Copyright (C) 2012 Avionic Design GmbH
- * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-
-#include "drm.h"
-
-struct host1x_drm_client {
- struct host1x_client *client;
- struct device_node *np;
- struct list_head list;
-};
-
-static int host1x_add_drm_client(struct host1x *host1x, struct device_node *np)
-{
- struct host1x_drm_client *client;
-
- client = kzalloc(sizeof(*client), GFP_KERNEL);
- if (!client)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&client->list);
- client->np = of_node_get(np);
-
- list_add_tail(&client->list, &host1x->drm_clients);
-
- return 0;
-}
-
-static int host1x_activate_drm_client(struct host1x *host1x,
- struct host1x_drm_client *drm,
- struct host1x_client *client)
-{
- mutex_lock(&host1x->drm_clients_lock);
- list_del_init(&drm->list);
- list_add_tail(&drm->list, &host1x->drm_active);
- drm->client = client;
- mutex_unlock(&host1x->drm_clients_lock);
-
- return 0;
-}
-
-static int host1x_remove_drm_client(struct host1x *host1x,
- struct host1x_drm_client *client)
-{
- mutex_lock(&host1x->drm_clients_lock);
- list_del_init(&client->list);
- mutex_unlock(&host1x->drm_clients_lock);
-
- of_node_put(client->np);
- kfree(client);
-
- return 0;
-}
-
-static int host1x_parse_dt(struct host1x *host1x)
-{
- static const char * const compat[] = {
- "nvidia,tegra20-dc",
- "nvidia,tegra20-hdmi",
- "nvidia,tegra30-dc",
- "nvidia,tegra30-hdmi",
- };
- unsigned int i;
- int err;
-
- for (i = 0; i < ARRAY_SIZE(compat); i++) {
- struct device_node *np;
-
- for_each_child_of_node(host1x->dev->of_node, np) {
- if (of_device_is_compatible(np, compat[i]) &&
- of_device_is_available(np)) {
- err = host1x_add_drm_client(host1x, np);
- if (err < 0)
- return err;
- }
- }
- }
-
- return 0;
-}
-
-static int tegra_host1x_probe(struct platform_device *pdev)
-{
- struct host1x *host1x;
- struct resource *regs;
- int err;
-
- host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL);
- if (!host1x)
- return -ENOMEM;
-
- mutex_init(&host1x->drm_clients_lock);
- INIT_LIST_HEAD(&host1x->drm_clients);
- INIT_LIST_HEAD(&host1x->drm_active);
- mutex_init(&host1x->clients_lock);
- INIT_LIST_HEAD(&host1x->clients);
- host1x->dev = &pdev->dev;
-
- err = host1x_parse_dt(host1x);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
- return err;
- }
-
- host1x->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(host1x->clk))
- return PTR_ERR(host1x->clk);
-
- err = clk_prepare_enable(host1x->clk);
- if (err < 0)
- return err;
-
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!regs) {
- err = -ENXIO;
- goto err;
- }
-
- err = platform_get_irq(pdev, 0);
- if (err < 0)
- goto err;
-
- host1x->syncpt = err;
-
- err = platform_get_irq(pdev, 1);
- if (err < 0)
- goto err;
-
- host1x->irq = err;
-
- host1x->regs = devm_ioremap_resource(&pdev->dev, regs);
- if (IS_ERR(host1x->regs)) {
- err = PTR_ERR(host1x->regs);
- goto err;
- }
-
- platform_set_drvdata(pdev, host1x);
-
- return 0;
-
-err:
- clk_disable_unprepare(host1x->clk);
- return err;
-}
-
-static int tegra_host1x_remove(struct platform_device *pdev)
-{
- struct host1x *host1x = platform_get_drvdata(pdev);
-
- clk_disable_unprepare(host1x->clk);
-
- return 0;
-}
-
-int host1x_drm_init(struct host1x *host1x, struct drm_device *drm)
-{
- struct host1x_client *client;
-
- mutex_lock(&host1x->clients_lock);
-
- list_for_each_entry(client, &host1x->clients, list) {
- if (client->ops && client->ops->drm_init) {
- int err = client->ops->drm_init(client, drm);
- if (err < 0) {
- dev_err(host1x->dev,
- "DRM setup failed for %s: %d\n",
- dev_name(client->dev), err);
- return err;
- }
- }
- }
-
- mutex_unlock(&host1x->clients_lock);
-
- return 0;
-}
-
-int host1x_drm_exit(struct host1x *host1x)
-{
- struct platform_device *pdev = to_platform_device(host1x->dev);
- struct host1x_client *client;
-
- if (!host1x->drm)
- return 0;
-
- mutex_lock(&host1x->clients_lock);
-
- list_for_each_entry_reverse(client, &host1x->clients, list) {
- if (client->ops && client->ops->drm_exit) {
- int err = client->ops->drm_exit(client);
- if (err < 0) {
- dev_err(host1x->dev,
- "DRM cleanup failed for %s: %d\n",
- dev_name(client->dev), err);
- return err;
- }
- }
- }
-
- mutex_unlock(&host1x->clients_lock);
-
- drm_platform_exit(&tegra_drm_driver, pdev);
- host1x->drm = NULL;
-
- return 0;
-}
-
-int host1x_register_client(struct host1x *host1x, struct host1x_client *client)
-{
- struct host1x_drm_client *drm, *tmp;
- int err;
-
- mutex_lock(&host1x->clients_lock);
- list_add_tail(&client->list, &host1x->clients);
- mutex_unlock(&host1x->clients_lock);
-
- list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list)
- if (drm->np == client->dev->of_node)
- host1x_activate_drm_client(host1x, drm, client);
-
- if (list_empty(&host1x->drm_clients)) {
- struct platform_device *pdev = to_platform_device(host1x->dev);
-
- err = drm_platform_init(&tegra_drm_driver, pdev);
- if (err < 0) {
- dev_err(host1x->dev, "drm_platform_init(): %d\n", err);
- return err;
- }
- }
-
- client->host1x = host1x;
-
- return 0;
-}
-
-int host1x_unregister_client(struct host1x *host1x,
- struct host1x_client *client)
-{
- struct host1x_drm_client *drm, *tmp;
- int err;
-
- list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
- if (drm->client == client) {
- err = host1x_drm_exit(host1x);
- if (err < 0) {
- dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
- err);
- return err;
- }
-
- host1x_remove_drm_client(host1x, drm);
- break;
- }
- }
-
- mutex_lock(&host1x->clients_lock);
- list_del_init(&client->list);
- mutex_unlock(&host1x->clients_lock);
-
- return 0;
-}
-
-static struct of_device_id tegra_host1x_of_match[] = {
- { .compatible = "nvidia,tegra30-host1x", },
- { .compatible = "nvidia,tegra20-host1x", },
- { },
-};
-MODULE_DEVICE_TABLE(of, tegra_host1x_of_match);
-
-struct platform_driver tegra_host1x_driver = {
- .driver = {
- .name = "tegra-host1x",
- .owner = THIS_MODULE,
- .of_match_table = tegra_host1x_of_match,
- },
- .probe = tegra_host1x_probe,
- .remove = tegra_host1x_remove,
-};
-
-static int __init tegra_host1x_init(void)
-{
- int err;
-
- err = platform_driver_register(&tegra_host1x_driver);
- if (err < 0)
- return err;
-
- err = platform_driver_register(&tegra_dc_driver);
- if (err < 0)
- goto unregister_host1x;
-
- err = platform_driver_register(&tegra_hdmi_driver);
- if (err < 0)
- goto unregister_dc;
-
- return 0;
-
-unregister_dc:
- platform_driver_unregister(&tegra_dc_driver);
-unregister_host1x:
- platform_driver_unregister(&tegra_host1x_driver);
- return err;
-}
-module_init(tegra_host1x_init);
-
-static void __exit tegra_host1x_exit(void)
-{
- platform_driver_unregister(&tegra_hdmi_driver);
- platform_driver_unregister(&tegra_dc_driver);
- platform_driver_unregister(&tegra_host1x_driver);
-}
-module_exit(tegra_host1x_exit);
-
-MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
-MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile
index deda656..7d2eefe 100644
--- a/drivers/gpu/drm/tilcdc/Makefile
+++ b/drivers/gpu/drm/tilcdc/Makefile
@@ -1,4 +1,7 @@
-ccflags-y := -Iinclude/drm -Werror
+ccflags-y := -Iinclude/drm
+ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
+ ccflags-y += -Werror
+endif
tilcdc-y := \
tilcdc_crtc.o \
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index c5b592d..2b5461b 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -75,7 +75,7 @@ static int modeset_init(struct drm_device *dev)
mod->funcs->modeset_init(mod, dev);
}
- if ((priv->num_encoders = 0) || (priv->num_connectors == 0)) {
+ if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) {
/* oh nos! */
dev_err(dev->dev, "no encoders/connectors found\n");
return -ENXIO;
@@ -299,11 +299,10 @@ static int tilcdc_irq_postinstall(struct drm_device *dev)
struct tilcdc_drm_private *priv = dev->dev_private;
/* enable FIFO underflow irq: */
- if (priv->rev == 1) {
+ if (priv->rev == 1)
tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_UNDERFLOW_INT_ENA);
- } else {
+ else
tilcdc_set(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_UNDERFLOW_INT_ENA);
- }
return 0;
}
@@ -363,7 +362,7 @@ static const struct {
uint8_t rev;
uint8_t save;
uint32_t reg;
-} registers[] = {
+} registers[] = {
#define REG(rev, save, reg) { #reg, rev, save, reg }
/* exists in revision 1: */
REG(1, false, LCDC_PID_REG),
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 90ee497..0917665 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -305,7 +305,7 @@ static const struct tilcdc_module_ops panel_module_ops = {
*/
/* maybe move this somewhere common if it is needed by other outputs? */
-static struct tilcdc_panel_info * of_get_panel_info(struct device_node *np)
+static struct tilcdc_panel_info *of_get_panel_info(struct device_node *np)
{
struct device_node *info_np;
struct tilcdc_panel_info *info;
@@ -413,7 +413,6 @@ static struct of_device_id panel_of_match[] = {
{ .compatible = "ti,tilcdc,panel", },
{ },
};
-MODULE_DEVICE_TABLE(of, panel_of_match);
struct platform_driver panel_driver = {
.probe = panel_probe,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
index 568dc1c..db1d2fc 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
@@ -353,7 +353,6 @@ static struct of_device_id slave_of_match[] = {
{ .compatible = "ti,tilcdc,slave", },
{ },
};
-MODULE_DEVICE_TABLE(of, slave_of_match);
struct platform_driver slave_driver = {
.probe = slave_probe,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 58d487b..a36788f 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -396,7 +396,6 @@ static struct of_device_id tfp410_of_match[] = {
{ .compatible = "ti,tilcdc,tfp410", },
{ },
};
-MODULE_DEVICE_TABLE(of, tfp410_of_match);
struct platform_driver tfp410_driver = {
.probe = tfp410_probe,
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 8be35c8..af89458 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -86,6 +86,7 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
mutex_lock(&man->io_reserve_mutex);
return 0;
}
+EXPORT_SYMBOL(ttm_mem_io_lock);
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
@@ -94,6 +95,7 @@ void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
mutex_unlock(&man->io_reserve_mutex);
}
+EXPORT_SYMBOL(ttm_mem_io_unlock);
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
@@ -111,8 +113,9 @@ static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
return 0;
}
-static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
+
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
int ret = 0;
@@ -134,9 +137,10 @@ retry:
}
return ret;
}
+EXPORT_SYMBOL(ttm_mem_io_reserve);
-static void ttm_mem_io_free(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
+void ttm_mem_io_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
@@ -149,6 +153,7 @@ static void ttm_mem_io_free(struct ttm_bo_device *bdev,
bdev->driver->io_mem_free(bdev, mem);
}
+EXPORT_SYMBOL(ttm_mem_io_free);
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
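Since ttm_mem_io_lock()/unlock() and ttm_mem_io_reserve()/free() are now exported, drivers built on TTM can reserve a buffer object's io space themselves. A hedged sketch of such a caller (the function name is hypothetical and error handling is abbreviated):

static int driver_access_io_mem(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
	int ret;

	ret = ttm_mem_io_lock(man, true);	/* interruptible */
	if (ret)
		return ret;

	ret = ttm_mem_io_reserve(bdev, &bo->mem);
	if (!ret) {
		/* ... ioremap and access bo->mem.bus.base + bo->mem.bus.offset ... */
		ttm_mem_io_free(bdev, &bo->mem);
	}

	ttm_mem_io_unlock(man);
	return ret;
}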
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 74705f32..3df9f16 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -147,7 +147,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
bo->vm_node->start - vma->vm_pgoff;
- page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
+ page_last = vma_pages(vma) +
bo->vm_node->start - vma->vm_pgoff;
if (unlikely(page_offset >= bo->num_pages)) {
@@ -258,7 +258,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
read_lock(&bdev->vm_lock);
bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
- (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
+ vma_pages(vma));
if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
bo = NULL;
read_unlock(&bdev->vm_lock);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 9f4be3d..dc0c065 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -482,7 +482,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
struct udl_fbdev *ufbdev = (struct udl_fbdev *)helper;
struct drm_device *dev = ufbdev->helper.dev;
struct fb_info *info;
- struct device *device = &dev->usbdev->dev;
+ struct device *device = dev->dev;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd;
struct udl_gem_object *obj;
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 3816270..ef034fa 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -303,6 +303,8 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
if (IS_ERR(attach))
return ERR_CAST(attach);
+ get_dma_buf(dma_buf);
+
sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
@@ -322,5 +324,7 @@ fail_unmap:
dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
return ERR_PTR(ret);
}
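The two hunks above balance the dma-buf reference count across udl's PRIME import: the importer now holds its own reference for as long as the attachment lives, and the error path (and, symmetrically, the GEM free path outside this hunk) drops it again. A hedged sketch of that pairing, with hypothetical helper names:

#include <linux/dma-buf.h>
#include <linux/err.h>

static struct dma_buf_attachment *prime_attach_sketch(struct device *dev,
						      struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;

	attach = dma_buf_attach(buf, dev);
	if (IS_ERR(attach))
		return attach;

	get_dma_buf(buf);	/* importer keeps its own reference ... */
	return attach;
}

static void prime_detach_sketch(struct dma_buf *buf,
				struct dma_buf_attachment *attach)
{
	dma_buf_detach(buf, attach);
	dma_buf_put(buf);	/* ... and drops it when the object goes away */
}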
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
new file mode 100644
index 0000000..ccfd42b
--- /dev/null
+++ b/drivers/gpu/host1x/Kconfig
@@ -0,0 +1,24 @@
+config TEGRA_HOST1X
+ tristate "NVIDIA Tegra host1x driver"
+ depends on ARCH_TEGRA || ARCH_MULTIPLATFORM
+ help
+ Driver for the NVIDIA Tegra host1x hardware.
+
+ The Tegra host1x module is the DMA engine for register access to
+ Tegra's graphics- and multimedia-related modules. The modules served
+ by host1x are referred to as clients. host1x includes some other
+ functionality, such as synchronization.
+
+if TEGRA_HOST1X
+
+config TEGRA_HOST1X_FIREWALL
+ bool "Enable HOST1X security firewall"
+ default y
+ help
+	  Say yes if the kernel should protect command streams from tampering.
+
+ If unsure, choose Y.
+
+source "drivers/gpu/host1x/drm/Kconfig"
+
+endif

diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
new file mode 100644
index 0000000..3b037b6
--- /dev/null
+++ b/drivers/gpu/host1x/Makefile
@@ -0,0 +1,20 @@
+ccflags-y = -Idrivers/gpu/host1x
+
+host1x-y = \
+ syncpt.o \
+ dev.o \
+ intr.o \
+ cdma.o \
+ channel.o \
+ job.o \
+ debug.o \
+ hw/host1x01.o
+
+ccflags-y += -Iinclude/drm
+ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
+
+host1x-$(CONFIG_DRM_TEGRA) += drm/drm.o drm/fb.o drm/dc.o
+host1x-$(CONFIG_DRM_TEGRA) += drm/output.o drm/rgb.o drm/hdmi.o
+host1x-$(CONFIG_DRM_TEGRA) += drm/gem.o
+host1x-$(CONFIG_DRM_TEGRA) += drm/gr2d.o
+obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
new file mode 100644
index 0000000..de72172
--- /dev/null
+++ b/drivers/gpu/host1x/cdma.c
@@ -0,0 +1,491 @@
+/*
+ * Tegra host1x Command DMA
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include <asm/cacheflush.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kfifo.h>
+#include <linux/slab.h>
+#include <trace/events/host1x.h>
+
+#include "cdma.h"
+#include "channel.h"
+#include "dev.h"
+#include "debug.h"
+#include "host1x_bo.h"
+#include "job.h"
+
+/*
+ * push_buffer
+ *
+ * The push buffer is a circular array of words to be fetched by command DMA.
+ * Note that it works slightly differently from the sync queue; fence == pos
+ * means that the push buffer is full, not empty.
+ */
+
+#define HOST1X_PUSHBUFFER_SLOTS 512
+
+/*
+ * Clean up push buffer resources
+ */
+static void host1x_pushbuffer_destroy(struct push_buffer *pb)
+{
+ struct host1x_cdma *cdma = pb_to_cdma(pb);
+ struct host1x *host1x = cdma_to_host1x(cdma);
+
+ if (pb->phys != 0)
+ dma_free_writecombine(host1x->dev, pb->size_bytes + 4,
+ pb->mapped, pb->phys);
+
+ pb->mapped = NULL;
+ pb->phys = 0;
+}
+
+/*
+ * Init push buffer resources
+ */
+static int host1x_pushbuffer_init(struct push_buffer *pb)
+{
+ struct host1x_cdma *cdma = pb_to_cdma(pb);
+ struct host1x *host1x = cdma_to_host1x(cdma);
+
+ pb->mapped = NULL;
+ pb->phys = 0;
+ pb->size_bytes = HOST1X_PUSHBUFFER_SLOTS * 8;
+
+ /* initialize buffer pointers */
+ pb->fence = pb->size_bytes - 8;
+ pb->pos = 0;
+
+ /* allocate and map pushbuffer memory */
+ pb->mapped = dma_alloc_writecombine(host1x->dev, pb->size_bytes + 4,
+ &pb->phys, GFP_KERNEL);
+ if (!pb->mapped)
+ goto fail;
+
+ host1x_hw_pushbuffer_init(host1x, pb);
+
+ return 0;
+
+fail:
+ host1x_pushbuffer_destroy(pb);
+ return -ENOMEM;
+}
+
+/*
+ * Push two words to the push buffer
+ * Caller must ensure push buffer is not full
+ */
+static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
+{
+ u32 pos = pb->pos;
+ u32 *p = (u32 *)((u32)pb->mapped + pos);
+ WARN_ON(pos == pb->fence);
+ *(p++) = op1;
+ *(p++) = op2;
+ pb->pos = (pos + 8) & (pb->size_bytes - 1);
+}
+
+/*
+ * Pop a number of two word slots from the push buffer
+ * Caller must ensure push buffer is not empty
+ */
+static void host1x_pushbuffer_pop(struct push_buffer *pb, unsigned int slots)
+{
+ /* Advance the next write position */
+ pb->fence = (pb->fence + slots * 8) & (pb->size_bytes - 1);
+}
+
+/*
+ * Return the number of two word slots free in the push buffer
+ */
+static u32 host1x_pushbuffer_space(struct push_buffer *pb)
+{
+ return ((pb->fence - pb->pos) & (pb->size_bytes - 1)) / 8;
+}
+
+/*
+ * Sleep (if necessary) until the requested event happens
+ * - CDMA_EVENT_SYNC_QUEUE_EMPTY : sync queue is completely empty.
+ * - Returns 1
+ * - CDMA_EVENT_PUSH_BUFFER_SPACE : there is space in the push buffer
+ * - Return the amount of space (> 0)
+ * Must be called with the cdma lock held.
+ */
+unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
+ enum cdma_event event)
+{
+ for (;;) {
+ unsigned int space;
+
+ if (event == CDMA_EVENT_SYNC_QUEUE_EMPTY)
+ space = list_empty(&cdma->sync_queue) ? 1 : 0;
+ else if (event == CDMA_EVENT_PUSH_BUFFER_SPACE) {
+ struct push_buffer *pb = &cdma->push_buffer;
+ space = host1x_pushbuffer_space(pb);
+ } else {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (space)
+ return space;
+
+ trace_host1x_wait_cdma(dev_name(cdma_to_channel(cdma)->dev),
+ event);
+
+ /* If somebody has managed to already start waiting, yield */
+ if (cdma->event != CDMA_EVENT_NONE) {
+ mutex_unlock(&cdma->lock);
+ schedule();
+ mutex_lock(&cdma->lock);
+ continue;
+ }
+ cdma->event = event;
+
+ mutex_unlock(&cdma->lock);
+ down(&cdma->sem);
+ mutex_lock(&cdma->lock);
+ }
+ return 0;
+}
+
+/*
+ * Start timer that tracks the time spent by the job.
+ * Must be called with the cdma lock held.
+ */
+static void cdma_start_timer_locked(struct host1x_cdma *cdma,
+ struct host1x_job *job)
+{
+ struct host1x *host = cdma_to_host1x(cdma);
+
+ if (cdma->timeout.client) {
+ /* timer already started */
+ return;
+ }
+
+ cdma->timeout.client = job->client;
+ cdma->timeout.syncpt = host1x_syncpt_get(host, job->syncpt_id);
+ cdma->timeout.syncpt_val = job->syncpt_end;
+ cdma->timeout.start_ktime = ktime_get();
+
+ schedule_delayed_work(&cdma->timeout.wq,
+ msecs_to_jiffies(job->timeout));
+}
+
+/*
+ * Stop timer when a buffer submission completes.
+ * Must be called with the cdma lock held.
+ */
+static void stop_cdma_timer_locked(struct host1x_cdma *cdma)
+{
+ cancel_delayed_work(&cdma->timeout.wq);
+ cdma->timeout.client = 0;
+}
+
+/*
+ * For all sync queue entries that have already finished according to the
+ * current sync point registers:
+ * - unpin & unref their mems
+ * - pop their push buffer slots
+ * - remove them from the sync queue
+ * This is normally called from the host code's worker thread, but can be
+ * called manually if necessary.
+ * Must be called with the cdma lock held.
+ */
+static void update_cdma_locked(struct host1x_cdma *cdma)
+{
+ bool signal = false;
+ struct host1x *host1x = cdma_to_host1x(cdma);
+ struct host1x_job *job, *n;
+
+ /* If CDMA is stopped, queue is cleared and we can return */
+ if (!cdma->running)
+ return;
+
+ /*
+ * Walk the sync queue, reading the sync point registers as necessary,
+ * to consume as many sync queue entries as possible without blocking
+ */
+ list_for_each_entry_safe(job, n, &cdma->sync_queue, list) {
+ struct host1x_syncpt *sp =
+ host1x_syncpt_get(host1x, job->syncpt_id);
+
+ /* Check whether this syncpt has completed, and bail if not */
+ if (!host1x_syncpt_is_expired(sp, job->syncpt_end)) {
+ /* Start timer on next pending syncpt */
+ if (job->timeout)
+ cdma_start_timer_locked(cdma, job);
+ break;
+ }
+
+ /* Cancel timeout, when a buffer completes */
+ if (cdma->timeout.client)
+ stop_cdma_timer_locked(cdma);
+
+ /* Unpin the memory */
+ host1x_job_unpin(job);
+
+ /* Pop push buffer slots */
+ if (job->num_slots) {
+ struct push_buffer *pb = &cdma->push_buffer;
+ host1x_pushbuffer_pop(pb, job->num_slots);
+ if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
+ signal = true;
+ }
+
+ list_del(&job->list);
+ host1x_job_put(job);
+ }
+
+ if (cdma->event == CDMA_EVENT_SYNC_QUEUE_EMPTY &&
+ list_empty(&cdma->sync_queue))
+ signal = true;
+
+ if (signal) {
+ cdma->event = CDMA_EVENT_NONE;
+ up(&cdma->sem);
+ }
+}
+
+void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
+ struct device *dev)
+{
+ u32 restart_addr;
+ u32 syncpt_incrs;
+ struct host1x_job *job = NULL;
+ u32 syncpt_val;
+ struct host1x *host1x = cdma_to_host1x(cdma);
+
+ syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);
+
+ dev_dbg(dev, "%s: starting cleanup (thresh %d)\n",
+ __func__, syncpt_val);
+
+ /*
+ * Move the sync_queue read pointer to the first entry that hasn't
+ * completed based on the current HW syncpt value. It's likely there
+ * won't be any (i.e. we're still at the head), but covers the case
+ * where a syncpt incr happens just prior/during the teardown.
+ */
+
+ dev_dbg(dev, "%s: skip completed buffers still in sync_queue\n",
+ __func__);
+
+ list_for_each_entry(job, &cdma->sync_queue, list) {
+ if (syncpt_val < job->syncpt_end)
+ break;
+
+ host1x_job_dump(dev, job);
+ }
+
+ /*
+ * Walk the sync_queue, first incrementing with the CPU syncpts that
+ * are partially executed (the first buffer) or fully skipped while
+ * still in the current context (slots are also NOP-ed).
+ *
+ * At the point contexts are interleaved, syncpt increments must be
+ * done inline with the pushbuffer from a GATHER buffer to maintain
+ * the order (slots are modified to be a GATHER of syncpt incrs).
+ *
+ * Note: save in restart_addr the location where the timed out buffer
+ * started in the PB, so we can start the refetch from there (with the
+ * modified NOP-ed PB slots). This lets things appear to have completed
+ * properly for this buffer and resources are freed.
+ */
+
+ dev_dbg(dev, "%s: perform CPU incr on pending same ctx buffers\n",
+ __func__);
+
+ if (!list_empty(&cdma->sync_queue))
+ restart_addr = job->first_get;
+ else
+ restart_addr = cdma->last_pos;
+
+ /* do CPU increments as long as this context continues */
+ list_for_each_entry_from(job, &cdma->sync_queue, list) {
+ /* different context, gets us out of this loop */
+ if (job->client != cdma->timeout.client)
+ break;
+
+ /* won't need a timeout when replayed */
+ job->timeout = 0;
+
+ syncpt_incrs = job->syncpt_end - syncpt_val;
+ dev_dbg(dev, "%s: CPU incr (%d)\n", __func__, syncpt_incrs);
+
+ host1x_job_dump(dev, job);
+
+ /* safe to use CPU to incr syncpts */
+ host1x_hw_cdma_timeout_cpu_incr(host1x, cdma, job->first_get,
+ syncpt_incrs, job->syncpt_end,
+ job->num_slots);
+
+ syncpt_val += syncpt_incrs;
+ }
+
+	/* The following submits from the same client may be dependent on the
+	 * failed submit and therefore they may fail. Force a small timeout
+	 * to make the queue cleanup faster. */
+
+ list_for_each_entry_from(job, &cdma->sync_queue, list)
+ if (job->client == cdma->timeout.client)
+ job->timeout = min_t(unsigned int, job->timeout, 500);
+
+ dev_dbg(dev, "%s: finished sync_queue modification\n", __func__);
+
+ /* roll back DMAGET and start up channel again */
+ host1x_hw_cdma_resume(host1x, cdma, restart_addr);
+}
+
+/*
+ * Create a cdma
+ */
+int host1x_cdma_init(struct host1x_cdma *cdma)
+{
+ int err;
+
+ mutex_init(&cdma->lock);
+ sema_init(&cdma->sem, 0);
+
+ INIT_LIST_HEAD(&cdma->sync_queue);
+
+ cdma->event = CDMA_EVENT_NONE;
+ cdma->running = false;
+ cdma->torndown = false;
+
+ err = host1x_pushbuffer_init(&cdma->push_buffer);
+ if (err)
+ return err;
+ return 0;
+}
+
+/*
+ * Destroy a cdma
+ */
+int host1x_cdma_deinit(struct host1x_cdma *cdma)
+{
+ struct push_buffer *pb = &cdma->push_buffer;
+ struct host1x *host1x = cdma_to_host1x(cdma);
+
+ if (cdma->running) {
+ pr_warn("%s: CDMA still running\n", __func__);
+ return -EBUSY;
+ }
+
+ host1x_pushbuffer_destroy(pb);
+ host1x_hw_cdma_timeout_destroy(host1x, cdma);
+
+ return 0;
+}
+
+/*
+ * Begin a cdma submit
+ */
+int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
+{
+ struct host1x *host1x = cdma_to_host1x(cdma);
+
+ mutex_lock(&cdma->lock);
+
+ if (job->timeout) {
+ /* init state on first submit with timeout value */
+ if (!cdma->timeout.initialized) {
+ int err;
+ err = host1x_hw_cdma_timeout_init(host1x, cdma,
+ job->syncpt_id);
+ if (err) {
+ mutex_unlock(&cdma->lock);
+ return err;
+ }
+ }
+ }
+ if (!cdma->running)
+ host1x_hw_cdma_start(host1x, cdma);
+
+ cdma->slots_free = 0;
+ cdma->slots_used = 0;
+ cdma->first_get = cdma->push_buffer.pos;
+
+ trace_host1x_cdma_begin(dev_name(job->channel->dev));
+ return 0;
+}
+
+/*
+ * Push two words into a push buffer slot
+ * Blocks as necessary if the push buffer is full.
+ */
+void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2)
+{
+ struct host1x *host1x = cdma_to_host1x(cdma);
+ struct push_buffer *pb = &cdma->push_buffer;
+ u32 slots_free = cdma->slots_free;
+
+ if (host1x_debug_trace_cmdbuf)
+ trace_host1x_cdma_push(dev_name(cdma_to_channel(cdma)->dev),
+ op1, op2);
+
+ if (slots_free == 0) {
+ host1x_hw_cdma_flush(host1x, cdma);
+ slots_free = host1x_cdma_wait_locked(cdma,
+ CDMA_EVENT_PUSH_BUFFER_SPACE);
+ }
+ cdma->slots_free = slots_free - 1;
+ cdma->slots_used++;
+ host1x_pushbuffer_push(pb, op1, op2);
+}
+
+/*
+ * End a cdma submit
+ * Kick off DMA, add the job to the sync queue, and record the number of
+ * slots to be freed from the pushbuffer. The handles for a submit must all
+ * be pinned at the same
+ * time, but they can be unpinned in smaller chunks.
+ */
+void host1x_cdma_end(struct host1x_cdma *cdma,
+ struct host1x_job *job)
+{
+ struct host1x *host1x = cdma_to_host1x(cdma);
+ bool idle = list_empty(&cdma->sync_queue);
+
+ host1x_hw_cdma_flush(host1x, cdma);
+
+ job->first_get = cdma->first_get;
+ job->num_slots = cdma->slots_used;
+ host1x_job_get(job);
+ list_add_tail(&job->list, &cdma->sync_queue);
+
+ /* start timer on idle -> active transitions */
+ if (job->timeout && idle)
+ cdma_start_timer_locked(cdma, job);
+
+ trace_host1x_cdma_end(dev_name(job->channel->dev));
+ mutex_unlock(&cdma->lock);
+}
+
+/*
+ * Update cdma state according to current sync point values
+ */
+void host1x_cdma_update(struct host1x_cdma *cdma)
+{
+ mutex_lock(&cdma->lock);
+ update_cdma_locked(cdma);
+ mutex_unlock(&cdma->lock);
+}
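A worked example of the wrap-around arithmetic in host1x_pushbuffer_space() may help; it assumes the 512-slot (4096-byte) buffer set up by host1x_pushbuffer_init() and the struct push_buffer layout from cdma.h below (illustrative only):

static unsigned int pushbuffer_space_example(void)
{
	struct push_buffer pb = {
		.size_bytes	= 512 * 8,	/* HOST1X_PUSHBUFFER_SLOTS * 8 */
		.pos		= 4088,		/* next write position, near the end */
		.fence		= 8,		/* command DMA has consumed up to here */
	};

	/* ((8 - 4088) & 4095) / 8 == 16 / 8 == 2 free two-word slots */
	return ((pb.fence - pb.pos) & (pb.size_bytes - 1)) / 8;
}

When fence catches up with pos the computed space is zero, which is why the comment at the top of the file says fence == pos means the buffer is full rather than empty.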
diff --git a/drivers/gpu/host1x/cdma.h b/drivers/gpu/host1x/cdma.h
new file mode 100644
index 0000000..313c4b7
--- /dev/null
+++ b/drivers/gpu/host1x/cdma.h
@@ -0,0 +1,100 @@
+/*
+ * Tegra host1x Command DMA
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_CDMA_H
+#define __HOST1X_CDMA_H
+
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#include <linux/list.h>
+
+struct host1x_syncpt;
+struct host1x_userctx_timeout;
+struct host1x_job;
+
+/*
+ * cdma
+ *
+ * This is in charge of a host command DMA channel.
+ * Sends ops to a push buffer, and takes responsibility for unpinning
+ * (& possibly freeing) of memory after those ops have completed.
+ * Producer:
+ * begin
+ * push - send ops to the push buffer
+ * end - start command DMA and enqueue handles to be unpinned
+ * Consumer:
+ * update - call to update sync queue and push buffer, unpin memory
+ */
+
+struct push_buffer {
+ u32 *mapped; /* mapped pushbuffer memory */
+ dma_addr_t phys; /* physical address of pushbuffer */
+ u32 fence; /* index we've written */
+ u32 pos; /* index to write to */
+ u32 size_bytes;
+};
+
+struct buffer_timeout {
+ struct delayed_work wq; /* work queue */
+ bool initialized; /* timer one-time setup flag */
+ struct host1x_syncpt *syncpt; /* buffer completion syncpt */
+ u32 syncpt_val; /* syncpt value when completed */
+ ktime_t start_ktime; /* starting time */
+ /* context timeout information */
+ int client;
+};
+
+enum cdma_event {
+ CDMA_EVENT_NONE, /* not waiting for any event */
+ CDMA_EVENT_SYNC_QUEUE_EMPTY, /* wait for empty sync queue */
+ CDMA_EVENT_PUSH_BUFFER_SPACE /* wait for space in push buffer */
+};
+
+struct host1x_cdma {
+ struct mutex lock; /* controls access to shared state */
+ struct semaphore sem; /* signalled when event occurs */
+ enum cdma_event event; /* event that sem is waiting for */
+ unsigned int slots_used; /* pb slots used in current submit */
+ unsigned int slots_free; /* pb slots free in current submit */
+ unsigned int first_get; /* DMAGET value, where submit begins */
+ unsigned int last_pos; /* last value written to DMAPUT */
+ struct push_buffer push_buffer; /* channel's push buffer */
+ struct list_head sync_queue; /* job queue */
+ struct buffer_timeout timeout; /* channel's timeout state/wq */
+ bool running;
+ bool torndown;
+};
+
+#define cdma_to_channel(cdma) container_of(cdma, struct host1x_channel, cdma)
+#define cdma_to_host1x(cdma) dev_get_drvdata(cdma_to_channel(cdma)->dev->parent)
+#define pb_to_cdma(pb) container_of(pb, struct host1x_cdma, push_buffer)
+
+int host1x_cdma_init(struct host1x_cdma *cdma);
+int host1x_cdma_deinit(struct host1x_cdma *cdma);
+void host1x_cdma_stop(struct host1x_cdma *cdma);
+int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job);
+void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2);
+void host1x_cdma_end(struct host1x_cdma *cdma, struct host1x_job *job);
+void host1x_cdma_update(struct host1x_cdma *cdma);
+void host1x_cdma_peek(struct host1x_cdma *cdma, u32 dmaget, int slot,
+ u32 *out);
+unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
+ enum cdma_event event);
+void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
+ struct device *dev);
+#endif
diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c
new file mode 100644
index 0000000..83ea51b
--- /dev/null
+++ b/drivers/gpu/host1x/channel.c
@@ -0,0 +1,126 @@
+/*
+ * Tegra host1x Channel
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include "channel.h"
+#include "dev.h"
+#include "job.h"
+
+/* Constructor for the host1x device list */
+int host1x_channel_list_init(struct host1x *host)
+{
+ INIT_LIST_HEAD(&host->chlist.list);
+ mutex_init(&host->chlist_mutex);
+
+ if (host->info->nb_channels > BITS_PER_LONG) {
+ WARN(1, "host1x hardware has more channels than supported by the driver\n");
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
+int host1x_job_submit(struct host1x_job *job)
+{
+ struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
+
+ return host1x_hw_channel_submit(host, job);
+}
+
+struct host1x_channel *host1x_channel_get(struct host1x_channel *channel)
+{
+ int err = 0;
+
+ mutex_lock(&channel->reflock);
+
+ if (channel->refcount == 0)
+ err = host1x_cdma_init(&channel->cdma);
+
+ if (!err)
+ channel->refcount++;
+
+ mutex_unlock(&channel->reflock);
+
+ return err ? NULL : channel;
+}
+
+void host1x_channel_put(struct host1x_channel *channel)
+{
+ mutex_lock(&channel->reflock);
+
+ if (channel->refcount == 1) {
+ struct host1x *host = dev_get_drvdata(channel->dev->parent);
+
+ host1x_hw_cdma_stop(host, &channel->cdma);
+ host1x_cdma_deinit(&channel->cdma);
+ }
+
+ channel->refcount--;
+
+ mutex_unlock(&channel->reflock);
+}
+
+struct host1x_channel *host1x_channel_request(struct device *dev)
+{
+ struct host1x *host = dev_get_drvdata(dev->parent);
+ int max_channels = host->info->nb_channels;
+ struct host1x_channel *channel = NULL;
+ int index, err;
+
+ mutex_lock(&host->chlist_mutex);
+
+ index = find_first_zero_bit(&host->allocated_channels, max_channels);
+ if (index >= max_channels)
+ goto fail;
+
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ goto fail;
+
+ err = host1x_hw_channel_init(host, channel, index);
+ if (err < 0)
+ goto fail;
+
+ /* Link device to host1x_channel */
+ channel->dev = dev;
+
+ /* Add to channel list */
+ list_add_tail(&channel->list, &host->chlist.list);
+
+ host->allocated_channels |= BIT(index);
+
+ mutex_unlock(&host->chlist_mutex);
+ return channel;
+
+fail:
+ dev_err(dev, "failed to init channel\n");
+ kfree(channel);
+ mutex_unlock(&host->chlist_mutex);
+ return NULL;
+}
+
+void host1x_channel_free(struct host1x_channel *channel)
+{
+ struct host1x *host = dev_get_drvdata(channel->dev->parent);
+
+ host->allocated_channels &= ~BIT(channel->id);
+ list_del(&channel->list);
+ kfree(channel);
+}
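To see how the pieces above fit together from a client driver's perspective, here is a hedged lifecycle sketch (error handling abbreviated, the helper name is hypothetical, and building the host1x_job itself is out of scope):

/* Assumes the declarations from channel.h and job.h in this series. */
static int channel_lifecycle_sketch(struct device *client_dev,
				    struct host1x_job *job)
{
	struct host1x_channel *channel;
	int err = 0;

	channel = host1x_channel_request(client_dev);	/* allocate and init a HW channel */
	if (!channel)
		return -EBUSY;

	if (!host1x_channel_get(channel)) {		/* take a reference, set up CDMA */
		err = -ENOMEM;
		goto free;
	}

	job->channel = channel;
	err = host1x_job_submit(job);			/* hand the job to the hardware */

	host1x_channel_put(channel);			/* drop the reference, stop CDMA at zero */
free:
	host1x_channel_free(channel);
	return err;
}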
diff --git a/drivers/gpu/host1x/channel.h b/drivers/gpu/host1x/channel.h
new file mode 100644
index 0000000..48723b8
--- /dev/null
+++ b/drivers/gpu/host1x/channel.h
@@ -0,0 +1,52 @@
+/*
+ * Tegra host1x Channel
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_CHANNEL_H
+#define __HOST1X_CHANNEL_H
+
+#include <linux/io.h>
+
+#include "cdma.h"
+
+struct host1x;
+
+struct host1x_channel {
+ struct list_head list;
+
+ unsigned int refcount;
+ unsigned int id;
+ struct mutex reflock;
+ struct mutex submitlock;
+ void __iomem *regs;
+ struct device *dev;
+ struct host1x_cdma cdma;
+};
+
+/* channel list operations */
+int host1x_channel_list_init(struct host1x *host);
+
+struct host1x_channel *host1x_channel_request(struct device *dev);
+void host1x_channel_free(struct host1x_channel *channel);
+struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
+void host1x_channel_put(struct host1x_channel *channel);
+int host1x_job_submit(struct host1x_job *job);
+
+#define host1x_for_each_channel(host, channel) \
+ list_for_each_entry(channel, &host->chlist.list, list)
+
+#endif
diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
new file mode 100644
index 0000000..3ec7d77
--- /dev/null
+++ b/drivers/gpu/host1x/debug.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (C) 2011-2013 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+
+#include <linux/io.h>
+
+#include "dev.h"
+#include "debug.h"
+#include "channel.h"
+
+unsigned int host1x_debug_trace_cmdbuf;
+
+static pid_t host1x_debug_force_timeout_pid;
+static u32 host1x_debug_force_timeout_val;
+static u32 host1x_debug_force_timeout_channel;
+
+void host1x_debug_output(struct output *o, const char *fmt, ...)
+{
+ va_list args;
+ int len;
+
+ va_start(args, fmt);
+ len = vsnprintf(o->buf, sizeof(o->buf), fmt, args);
+ va_end(args);
+ o->fn(o->ctx, o->buf, len);
+}
+
+static int show_channels(struct host1x_channel *ch, void *data, bool show_fifo)
+{
+ struct host1x *m = dev_get_drvdata(ch->dev->parent);
+ struct output *o = data;
+
+ mutex_lock(&ch->reflock);
+ if (ch->refcount) {
+ mutex_lock(&ch->cdma.lock);
+ if (show_fifo)
+ host1x_hw_show_channel_fifo(m, ch, o);
+ host1x_hw_show_channel_cdma(m, ch, o);
+ mutex_unlock(&ch->cdma.lock);
+ }
+ mutex_unlock(&ch->reflock);
+
+ return 0;
+}
+
+static void show_syncpts(struct host1x *m, struct output *o)
+{
+ int i;
+ host1x_debug_output(o, "---- syncpts ----\n");
+ for (i = 0; i < host1x_syncpt_nb_pts(m); i++) {
+ u32 max = host1x_syncpt_read_max(m->syncpt + i);
+ u32 min = host1x_syncpt_load(m->syncpt + i);
+ if (!min && !max)
+ continue;
+ host1x_debug_output(o, "id %d (%s) min %d max %d\n",
+ i, m->syncpt[i].name, min, max);
+ }
+
+ for (i = 0; i < host1x_syncpt_nb_bases(m); i++) {
+ u32 base_val;
+ base_val = host1x_syncpt_load_wait_base(m->syncpt + i);
+ if (base_val)
+ host1x_debug_output(o, "waitbase id %d val %d\n", i,
+ base_val);
+ }
+
+ host1x_debug_output(o, "\n");
+}
+
+static void show_all(struct host1x *m, struct output *o)
+{
+ struct host1x_channel *ch;
+
+ host1x_hw_show_mlocks(m, o);
+ show_syncpts(m, o);
+ host1x_debug_output(o, "---- channels ----\n");
+
+ host1x_for_each_channel(m, ch)
+ show_channels(ch, o, true);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void show_all_no_fifo(struct host1x *host1x, struct output *o)
+{
+ struct host1x_channel *ch;
+
+ host1x_hw_show_mlocks(host1x, o);
+ show_syncpts(host1x, o);
+ host1x_debug_output(o, "---- channels ----\n");
+
+ host1x_for_each_channel(host1x, ch)
+ show_channels(ch, o, false);
+}
+
+static int host1x_debug_show_all(struct seq_file *s, void *unused)
+{
+ struct output o = {
+ .fn = write_to_seqfile,
+ .ctx = s
+ };
+ show_all(s->private, &o);
+ return 0;
+}
+
+static int host1x_debug_show(struct seq_file *s, void *unused)
+{
+ struct output o = {
+ .fn = write_to_seqfile,
+ .ctx = s
+ };
+ show_all_no_fifo(s->private, &o);
+ return 0;
+}
+
+static int host1x_debug_open_all(struct inode *inode, struct file *file)
+{
+ return single_open(file, host1x_debug_show_all, inode->i_private);
+}
+
+static const struct file_operations host1x_debug_all_fops = {
+ .open = host1x_debug_open_all,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int host1x_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, host1x_debug_show, inode->i_private);
+}
+
+static const struct file_operations host1x_debug_fops = {
+ .open = host1x_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void host1x_debug_init(struct host1x *host1x)
+{
+ struct dentry *de = debugfs_create_dir("tegra-host1x", NULL);
+
+ if (!de)
+ return;
+
+ /* Store the created entry */
+ host1x->debugfs = de;
+
+ debugfs_create_file("status", S_IRUGO, de, host1x, &host1x_debug_fops);
+ debugfs_create_file("status_all", S_IRUGO, de, host1x,
+ &host1x_debug_all_fops);
+
+ debugfs_create_u32("trace_cmdbuf", S_IRUGO|S_IWUSR, de,
+ &host1x_debug_trace_cmdbuf);
+
+ host1x_hw_debug_init(host1x, de);
+
+ debugfs_create_u32("force_timeout_pid", S_IRUGO|S_IWUSR, de,
+ &host1x_debug_force_timeout_pid);
+ debugfs_create_u32("force_timeout_val", S_IRUGO|S_IWUSR, de,
+ &host1x_debug_force_timeout_val);
+ debugfs_create_u32("force_timeout_channel", S_IRUGO|S_IWUSR, de,
+ &host1x_debug_force_timeout_channel);
+}
+
+void host1x_debug_deinit(struct host1x *host1x)
+{
+ debugfs_remove_recursive(host1x->debugfs);
+}
+#else
+void host1x_debug_init(struct host1x *host1x)
+{
+}
+void host1x_debug_deinit(struct host1x *host1x)
+{
+}
+#endif
+
+void host1x_debug_dump(struct host1x *host1x)
+{
+ struct output o = {
+ .fn = write_to_printk
+ };
+ show_all(host1x, &o);
+}
+
+void host1x_debug_dump_syncpts(struct host1x *host1x)
+{
+ struct output o = {
+ .fn = write_to_printk
+ };
+ show_syncpts(host1x, &o);
+}
diff --git a/drivers/gpu/host1x/debug.h b/drivers/gpu/host1x/debug.h
new file mode 100644
index 0000000..4595b2e
--- /dev/null
+++ b/drivers/gpu/host1x/debug.h
@@ -0,0 +1,51 @@
+/*
+ * Tegra host1x Debug
+ *
+ * Copyright (c) 2011-2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __HOST1X_DEBUG_H
+#define __HOST1X_DEBUG_H
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+struct host1x;
+
+struct output {
+ void (*fn)(void *ctx, const char *str, size_t len);
+ void *ctx;
+ char buf[256];
+};
+
+static inline void write_to_seqfile(void *ctx, const char *str, size_t len)
+{
+ seq_write((struct seq_file *)ctx, str, len);
+}
+
+static inline void write_to_printk(void *ctx, const char *str, size_t len)
+{
+ pr_info("%s", str);
+}
+
+void __printf(2, 3) host1x_debug_output(struct output *o, const char *fmt, ...);
+
+extern unsigned int host1x_debug_trace_cmdbuf;
+
+void host1x_debug_init(struct host1x *host1x);
+void host1x_debug_deinit(struct host1x *host1x);
+void host1x_debug_dump(struct host1x *host1x);
+void host1x_debug_dump_syncpts(struct host1x *host1x);
+
+#endif
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
new file mode 100644
index 0000000..28e28a2
--- /dev/null
+++ b/drivers/gpu/host1x/dev.c
@@ -0,0 +1,246 @@
+/*
+ * Tegra host1x driver
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/host1x.h>
+
+#include "dev.h"
+#include "intr.h"
+#include "channel.h"
+#include "debug.h"
+#include "hw/host1x01.h"
+#include "host1x_client.h"
+
+void host1x_set_drm_data(struct device *dev, void *data)
+{
+ struct host1x *host1x = dev_get_drvdata(dev);
+ host1x->drm_data = data;
+}
+
+void *host1x_get_drm_data(struct device *dev)
+{
+ struct host1x *host1x = dev_get_drvdata(dev);
+ return host1x->drm_data;
+}
+
+void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
+{
+ void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;
+
+ writel(v, sync_regs + r);
+}
+
+u32 host1x_sync_readl(struct host1x *host1x, u32 r)
+{
+ void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;
+
+ return readl(sync_regs + r);
+}
+
+void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
+{
+ writel(v, ch->regs + r);
+}
+
+u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
+{
+ return readl(ch->regs + r);
+}
+
+static const struct host1x_info host1x01_info = {
+ .nb_channels = 8,
+ .nb_pts = 32,
+ .nb_mlocks = 16,
+ .nb_bases = 8,
+ .init = host1x01_init,
+ .sync_offset = 0x3000,
+};
+
+static struct of_device_id host1x_of_match[] = {
+ { .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
+ { .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, host1x_of_match);
+
+static int host1x_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *id;
+ struct host1x *host;
+ struct resource *regs;
+ int syncpt_irq;
+ int err;
+
+ id = of_match_device(host1x_of_match, &pdev->dev);
+ if (!id)
+ return -EINVAL;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ dev_err(&pdev->dev, "failed to get registers\n");
+ return -ENXIO;
+ }
+
+ syncpt_irq = platform_get_irq(pdev, 0);
+ if (syncpt_irq < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ\n");
+ return -ENXIO;
+ }
+
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->dev = &pdev->dev;
+ host->info = id->data;
+
+ /* set common host1x device data */
+ platform_set_drvdata(pdev, host);
+
+ host->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(host->regs))
+ return PTR_ERR(host->regs);
+
+ if (host->info->init) {
+ err = host->info->init(host);
+ if (err)
+ return err;
+ }
+
+ host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ err = PTR_ERR(host->clk);
+ return err;
+ }
+
+ err = host1x_channel_list_init(host);
+ if (err) {
+ dev_err(&pdev->dev, "failed to initialize channel list\n");
+ return err;
+ }
+
+ err = clk_prepare_enable(host->clk);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable clock\n");
+ return err;
+ }
+
+ err = host1x_syncpt_init(host);
+ if (err) {
+ dev_err(&pdev->dev, "failed to initialize syncpts\n");
+ return err;
+ }
+
+ err = host1x_intr_init(host, syncpt_irq);
+ if (err) {
+ dev_err(&pdev->dev, "failed to initialize interrupts\n");
+ goto fail_deinit_syncpt;
+ }
+
+ host1x_debug_init(host);
+
+ host1x_drm_alloc(pdev);
+
+ return 0;
+
+fail_deinit_syncpt:
+ host1x_syncpt_deinit(host);
+ return err;
+}
+
+static int __exit host1x_remove(struct platform_device *pdev)
+{
+ struct host1x *host = platform_get_drvdata(pdev);
+
+ host1x_intr_deinit(host);
+ host1x_syncpt_deinit(host);
+ clk_disable_unprepare(host->clk);
+
+ return 0;
+}
+
+static struct platform_driver tegra_host1x_driver = {
+ .probe = host1x_probe,
+ .remove = __exit_p(host1x_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tegra-host1x",
+ .of_match_table = host1x_of_match,
+ },
+};
+
+static int __init tegra_host1x_init(void)
+{
+ int err;
+
+ err = platform_driver_register(&tegra_host1x_driver);
+ if (err < 0)
+ return err;
+
+#ifdef CONFIG_DRM_TEGRA
+ err = platform_driver_register(&tegra_dc_driver);
+ if (err < 0)
+ goto unregister_host1x;
+
+ err = platform_driver_register(&tegra_hdmi_driver);
+ if (err < 0)
+ goto unregister_dc;
+
+ err = platform_driver_register(&tegra_gr2d_driver);
+ if (err < 0)
+ goto unregister_hdmi;
+#endif
+
+ return 0;
+
+#ifdef CONFIG_DRM_TEGRA
+unregister_hdmi:
+ platform_driver_unregister(&tegra_hdmi_driver);
+unregister_dc:
+ platform_driver_unregister(&tegra_dc_driver);
+unregister_host1x:
+ platform_driver_unregister(&tegra_host1x_driver);
+ return err;
+#endif
+}
+module_init(tegra_host1x_init);
+
+static void __exit tegra_host1x_exit(void)
+{
+#ifdef CONFIG_DRM_TEGRA
+ platform_driver_unregister(&tegra_gr2d_driver);
+ platform_driver_unregister(&tegra_hdmi_driver);
+ platform_driver_unregister(&tegra_dc_driver);
+#endif
+ platform_driver_unregister(&tegra_host1x_driver);
+}
+module_exit(tegra_host1x_exit);
+
+MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
+MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
+MODULE_DESCRIPTION("Host1x driver for Tegra products");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
new file mode 100644
index 0000000..a1607d6
--- /dev/null
+++ b/drivers/gpu/host1x/dev.h
@@ -0,0 +1,308 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HOST1X_DEV_H
+#define HOST1X_DEV_H
+
+#include <linux/platform_device.h>
+#include <linux/device.h>
+
+#include "channel.h"
+#include "syncpt.h"
+#include "intr.h"
+#include "cdma.h"
+#include "job.h"
+
+struct host1x_syncpt;
+struct host1x_channel;
+struct host1x_cdma;
+struct host1x_job;
+struct push_buffer;
+struct output;
+struct dentry;
+
+struct host1x_channel_ops {
+ int (*init)(struct host1x_channel *channel, struct host1x *host,
+ unsigned int id);
+ int (*submit)(struct host1x_job *job);
+};
+
+struct host1x_cdma_ops {
+ void (*start)(struct host1x_cdma *cdma);
+ void (*stop)(struct host1x_cdma *cdma);
+ void (*flush)(struct host1x_cdma *cdma);
+ int (*timeout_init)(struct host1x_cdma *cdma, u32 syncpt_id);
+ void (*timeout_destroy)(struct host1x_cdma *cdma);
+ void (*freeze)(struct host1x_cdma *cdma);
+ void (*resume)(struct host1x_cdma *cdma, u32 getptr);
+ void (*timeout_cpu_incr)(struct host1x_cdma *cdma, u32 getptr,
+ u32 syncpt_incrs, u32 syncval, u32 nr_slots);
+};
+
+struct host1x_pushbuffer_ops {
+ void (*init)(struct push_buffer *pb);
+};
+
+struct host1x_debug_ops {
+ void (*debug_init)(struct dentry *de);
+ void (*show_channel_cdma)(struct host1x *host,
+ struct host1x_channel *ch,
+ struct output *o);
+ void (*show_channel_fifo)(struct host1x *host,
+ struct host1x_channel *ch,
+ struct output *o);
+ void (*show_mlocks)(struct host1x *host, struct output *output);
+
+};
+
+struct host1x_syncpt_ops {
+ void (*restore)(struct host1x_syncpt *syncpt);
+ void (*restore_wait_base)(struct host1x_syncpt *syncpt);
+ void (*load_wait_base)(struct host1x_syncpt *syncpt);
+ u32 (*load)(struct host1x_syncpt *syncpt);
+ void (*cpu_incr)(struct host1x_syncpt *syncpt);
+ int (*patch_wait)(struct host1x_syncpt *syncpt, void *patch_addr);
+};
+
+struct host1x_intr_ops {
+ int (*init_host_sync)(struct host1x *host, u32 cpm,
+ void (*syncpt_thresh_work)(struct work_struct *work));
+ void (*set_syncpt_threshold)(
+ struct host1x *host, u32 id, u32 thresh);
+ void (*enable_syncpt_intr)(struct host1x *host, u32 id);
+ void (*disable_syncpt_intr)(struct host1x *host, u32 id);
+ void (*disable_all_syncpt_intrs)(struct host1x *host);
+ int (*free_syncpt_irq)(struct host1x *host);
+};
+
+struct host1x_info {
+ int nb_channels; /* host1x: num channels supported */
+ int nb_pts; /* host1x: num syncpoints supported */
+ int nb_bases; /* host1x: num syncpt bases supported */
+ int nb_mlocks; /* host1x: number of mlocks */
+ int (*init)(struct host1x *); /* initialize per SoC ops */
+ int sync_offset;
+};
+
+struct host1x {
+ const struct host1x_info *info;
+
+ void __iomem *regs;
+ struct host1x_syncpt *syncpt;
+ struct device *dev;
+ struct clk *clk;
+
+ struct mutex intr_mutex;
+ struct workqueue_struct *intr_wq;
+ int intr_syncpt_irq;
+
+ const struct host1x_syncpt_ops *syncpt_op;
+ const struct host1x_intr_ops *intr_op;
+ const struct host1x_channel_ops *channel_op;
+ const struct host1x_cdma_ops *cdma_op;
+ const struct host1x_pushbuffer_ops *cdma_pb_op;
+ const struct host1x_debug_ops *debug_op;
+
+ struct host1x_syncpt *nop_sp;
+
+ struct mutex chlist_mutex;
+ struct host1x_channel chlist;
+ unsigned long allocated_channels;
+ unsigned int num_allocated_channels;
+
+ struct dentry *debugfs;
+
+ void *drm_data;
+};
+
+void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r);
+u32 host1x_sync_readl(struct host1x *host1x, u32 r);
+void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r);
+u32 host1x_ch_readl(struct host1x_channel *ch, u32 r);
+
+static inline void host1x_hw_syncpt_restore(struct host1x *host,
+ struct host1x_syncpt *sp)
+{
+ host->syncpt_op->restore(sp);
+}
+
+static inline void host1x_hw_syncpt_restore_wait_base(struct host1x *host,
+ struct host1x_syncpt *sp)
+{
+ host->syncpt_op->restore_wait_base(sp);
+}
+
+static inline void host1x_hw_syncpt_load_wait_base(struct host1x *host,
+ struct host1x_syncpt *sp)
+{
+ host->syncpt_op->load_wait_base(sp);
+}
+
+static inline u32 host1x_hw_syncpt_load(struct host1x *host,
+ struct host1x_syncpt *sp)
+{
+ return host->syncpt_op->load(sp);
+}
+
+static inline void host1x_hw_syncpt_cpu_incr(struct host1x *host,
+ struct host1x_syncpt *sp)
+{
+ host->syncpt_op->cpu_incr(sp);
+}
+
+static inline int host1x_hw_syncpt_patch_wait(struct host1x *host,
+ struct host1x_syncpt *sp,
+ void *patch_addr)
+{
+ return host->syncpt_op->patch_wait(sp, patch_addr);
+}
+
+static inline int host1x_hw_intr_init_host_sync(struct host1x *host, u32 cpm,
+ void (*syncpt_thresh_work)(struct work_struct *))
+{
+ return host->intr_op->init_host_sync(host, cpm, syncpt_thresh_work);
+}
+
+static inline void host1x_hw_intr_set_syncpt_threshold(struct host1x *host,
+ u32 id, u32 thresh)
+{
+ host->intr_op->set_syncpt_threshold(host, id, thresh);
+}
+
+static inline void host1x_hw_intr_enable_syncpt_intr(struct host1x *host,
+ u32 id)
+{
+ host->intr_op->enable_syncpt_intr(host, id);
+}
+
+static inline void host1x_hw_intr_disable_syncpt_intr(struct host1x *host,
+ u32 id)
+{
+ host->intr_op->disable_syncpt_intr(host, id);
+}
+
+static inline void host1x_hw_intr_disable_all_syncpt_intrs(struct host1x *host)
+{
+ host->intr_op->disable_all_syncpt_intrs(host);
+}
+
+static inline int host1x_hw_intr_free_syncpt_irq(struct host1x *host)
+{
+ return host->intr_op->free_syncpt_irq(host);
+}
+
+static inline int host1x_hw_channel_init(struct host1x *host,
+ struct host1x_channel *channel,
+ int chid)
+{
+ return host->channel_op->init(channel, host, chid);
+}
+
+static inline int host1x_hw_channel_submit(struct host1x *host,
+ struct host1x_job *job)
+{
+ return host->channel_op->submit(job);
+}
+
+static inline void host1x_hw_cdma_start(struct host1x *host,
+ struct host1x_cdma *cdma)
+{
+ host->cdma_op->start(cdma);
+}
+
+static inline void host1x_hw_cdma_stop(struct host1x *host,
+ struct host1x_cdma *cdma)
+{
+ host->cdma_op->stop(cdma);
+}
+
+static inline void host1x_hw_cdma_flush(struct host1x *host,
+ struct host1x_cdma *cdma)
+{
+ host->cdma_op->flush(cdma);
+}
+
+static inline int host1x_hw_cdma_timeout_init(struct host1x *host,
+ struct host1x_cdma *cdma,
+ u32 syncpt_id)
+{
+ return host->cdma_op->timeout_init(cdma, syncpt_id);
+}
+
+static inline void host1x_hw_cdma_timeout_destroy(struct host1x *host,
+ struct host1x_cdma *cdma)
+{
+ host->cdma_op->timeout_destroy(cdma);
+}
+
+static inline void host1x_hw_cdma_freeze(struct host1x *host,
+ struct host1x_cdma *cdma)
+{
+ host->cdma_op->freeze(cdma);
+}
+
+static inline void host1x_hw_cdma_resume(struct host1x *host,
+ struct host1x_cdma *cdma, u32 getptr)
+{
+ host->cdma_op->resume(cdma, getptr);
+}
+
+static inline void host1x_hw_cdma_timeout_cpu_incr(struct host1x *host,
+ struct host1x_cdma *cdma,
+ u32 getptr,
+ u32 syncpt_incrs,
+ u32 syncval, u32 nr_slots)
+{
+ host->cdma_op->timeout_cpu_incr(cdma, getptr, syncpt_incrs, syncval,
+ nr_slots);
+}
+
+static inline void host1x_hw_pushbuffer_init(struct host1x *host,
+ struct push_buffer *pb)
+{
+ host->cdma_pb_op->init(pb);
+}
+
+static inline void host1x_hw_debug_init(struct host1x *host, struct dentry *de)
+{
+ if (host->debug_op && host->debug_op->debug_init)
+ host->debug_op->debug_init(de);
+}
+
+static inline void host1x_hw_show_channel_cdma(struct host1x *host,
+ struct host1x_channel *channel,
+ struct output *o)
+{
+ host->debug_op->show_channel_cdma(host, channel, o);
+}
+
+static inline void host1x_hw_show_channel_fifo(struct host1x *host,
+ struct host1x_channel *channel,
+ struct output *o)
+{
+ host->debug_op->show_channel_fifo(host, channel, o);
+}
+
+static inline void host1x_hw_show_mlocks(struct host1x *host, struct output *o)
+{
+ host->debug_op->show_mlocks(host, o);
+}
+
+extern struct platform_driver tegra_hdmi_driver;
+extern struct platform_driver tegra_dc_driver;
+extern struct platform_driver tegra_gr2d_driver;
+
+#endif
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/host1x/drm/Kconfig
index be1daf7..69853a4 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/host1x/drm/Kconfig
@@ -1,12 +1,10 @@
config DRM_TEGRA
- tristate "NVIDIA Tegra DRM"
- depends on DRM && OF && ARCH_TEGRA
+ bool "NVIDIA Tegra DRM"
+ depends on DRM
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
- select DRM_KMS_CMA_HELPER
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
help
Choose this option if you have an NVIDIA Tegra SoC.
@@ -15,6 +13,14 @@ config DRM_TEGRA
if DRM_TEGRA
+config DRM_TEGRA_STAGING
+ bool "Enable HOST1X interface"
+ depends on STAGING
+ help
+ Say yes if HOST1X should be available for userspace DRM users.
+
+ If unsure, choose N.
+
config DRM_TEGRA_DEBUG
bool "NVIDIA Tegra DRM debug support"
help
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/host1x/drm/dc.c
index de94707..1e20603 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/host1x/drm/dc.c
@@ -14,8 +14,10 @@
#include <linux/platform_device.h>
#include <linux/clk/tegra.h>
-#include "drm.h"
+#include "host1x_client.h"
#include "dc.h"
+#include "drm.h"
+#include "gem.h"
struct tegra_plane {
struct drm_plane base;
@@ -51,9 +53,9 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
window.bits_per_pixel = fb->bits_per_pixel;
for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
- struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, i);
+ struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
- window.base[i] = gem->paddr + fb->offsets[i];
+ window.base[i] = bo->paddr + fb->offsets[i];
/*
* Tegra doesn't support different strides for U and V planes
@@ -103,7 +105,9 @@ static const struct drm_plane_funcs tegra_plane_funcs = {
};
static const uint32_t plane_formats[] = {
+ DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB565,
DRM_FORMAT_UYVY,
DRM_FORMAT_YUV420,
DRM_FORMAT_YUV422,
@@ -136,7 +140,7 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
struct drm_framebuffer *fb)
{
- struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, 0);
+ struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
unsigned long value;
tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -144,7 +148,7 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
value = fb->offsets[0] + y * fb->pitches[0] +
x * fb->bits_per_pixel / 8;
- tegra_dc_writel(dc, gem->paddr + value, DC_WINBUF_START_ADDR);
+ tegra_dc_writel(dc, bo->paddr + value, DC_WINBUF_START_ADDR);
tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE);
value = GENERAL_UPDATE | WIN_A_UPDATE;
@@ -186,20 +190,20 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
{
struct drm_device *drm = dc->base.dev;
struct drm_crtc *crtc = &dc->base;
- struct drm_gem_cma_object *gem;
unsigned long flags, base;
+ struct tegra_bo *bo;
if (!dc->event)
return;
- gem = drm_fb_cma_get_gem_obj(crtc->fb, 0);
+ bo = tegra_fb_get_plane(crtc->fb, 0);
/* check if new start address has been latched */
tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
- if (base == gem->paddr + crtc->fb->offsets[0]) {
+ if (base == bo->paddr + crtc->fb->offsets[0]) {
spin_lock_irqsave(&drm->event_lock, flags);
drm_send_vblank_event(drm, dc->pipe, dc->event);
drm_vblank_put(drm, dc->pipe);
@@ -541,6 +545,9 @@ int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
unsigned int tegra_dc_format(uint32_t format)
{
switch (format) {
+ case DRM_FORMAT_XBGR8888:
+ return WIN_COLOR_DEPTH_R8G8B8A8;
+
case DRM_FORMAT_XRGB8888:
return WIN_COLOR_DEPTH_B8G8R8A8;
@@ -569,7 +576,7 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *adjusted,
int x, int y, struct drm_framebuffer *old_fb)
{
- struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(crtc->fb, 0);
+ struct tegra_bo *bo = tegra_fb_get_plane(crtc->fb, 0);
struct tegra_dc *dc = to_tegra_dc(crtc);
struct tegra_dc_window window;
unsigned long div, value;
@@ -616,7 +623,7 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
window.format = tegra_dc_format(crtc->fb->pixel_format);
window.bits_per_pixel = crtc->fb->bits_per_pixel;
window.stride[0] = crtc->fb->pitches[0];
- window.base[0] = gem->paddr;
+ window.base[0] = bo->paddr;
err = tegra_dc_setup_window(dc, 0, &window);
if (err < 0)
@@ -1097,7 +1104,7 @@ static const struct host1x_client_ops dc_client_ops = {
static int tegra_dc_probe(struct platform_device *pdev)
{
- struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+ struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
struct resource *regs;
struct tegra_dc *dc;
int err;
@@ -1160,7 +1167,7 @@ static int tegra_dc_probe(struct platform_device *pdev)
static int tegra_dc_remove(struct platform_device *pdev)
{
- struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+ struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
struct tegra_dc *dc = platform_get_drvdata(pdev);
int err;
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/host1x/drm/dc.h
index 79eaec9..79eaec9 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/host1x/drm/dc.h
diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c
new file mode 100644
index 0000000..2b561c9
--- /dev/null
+++ b/drivers/gpu/host1x/drm/drm.c
@@ -0,0 +1,640 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012-2013 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#include <linux/dma-mapping.h>
+#include <asm/dma-iommu.h>
+
+#include <drm/drm.h>
+#include <drm/drmP.h>
+
+#include "host1x_client.h"
+#include "dev.h"
+#include "drm.h"
+#include "gem.h"
+#include "syncpt.h"
+
+#define DRIVER_NAME "tegra"
+#define DRIVER_DESC "NVIDIA Tegra graphics"
+#define DRIVER_DATE "20120330"
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+struct host1x_drm_client {
+ struct host1x_client *client;
+ struct device_node *np;
+ struct list_head list;
+};
+
+static int host1x_add_drm_client(struct host1x_drm *host1x,
+ struct device_node *np)
+{
+ struct host1x_drm_client *client;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&client->list);
+ client->np = of_node_get(np);
+
+ list_add_tail(&client->list, &host1x->drm_clients);
+
+ return 0;
+}
+
+static int host1x_activate_drm_client(struct host1x_drm *host1x,
+ struct host1x_drm_client *drm,
+ struct host1x_client *client)
+{
+ mutex_lock(&host1x->drm_clients_lock);
+ list_del_init(&drm->list);
+ list_add_tail(&drm->list, &host1x->drm_active);
+ drm->client = client;
+ mutex_unlock(&host1x->drm_clients_lock);
+
+ return 0;
+}
+
+static int host1x_remove_drm_client(struct host1x_drm *host1x,
+ struct host1x_drm_client *client)
+{
+ mutex_lock(&host1x->drm_clients_lock);
+ list_del_init(&client->list);
+ mutex_unlock(&host1x->drm_clients_lock);
+
+ of_node_put(client->np);
+ kfree(client);
+
+ return 0;
+}
+
+static int host1x_parse_dt(struct host1x_drm *host1x)
+{
+ static const char * const compat[] = {
+ "nvidia,tegra20-dc",
+ "nvidia,tegra20-hdmi",
+ "nvidia,tegra20-gr2d",
+ "nvidia,tegra30-dc",
+ "nvidia,tegra30-hdmi",
+ "nvidia,tegra30-gr2d",
+ };
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < ARRAY_SIZE(compat); i++) {
+ struct device_node *np;
+
+ for_each_child_of_node(host1x->dev->of_node, np) {
+ if (of_device_is_compatible(np, compat[i]) &&
+ of_device_is_available(np)) {
+ err = host1x_add_drm_client(host1x, np);
+ if (err < 0)
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int host1x_drm_alloc(struct platform_device *pdev)
+{
+ struct host1x_drm *host1x;
+ int err;
+
+ host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL);
+ if (!host1x)
+ return -ENOMEM;
+
+ mutex_init(&host1x->drm_clients_lock);
+ INIT_LIST_HEAD(&host1x->drm_clients);
+ INIT_LIST_HEAD(&host1x->drm_active);
+ mutex_init(&host1x->clients_lock);
+ INIT_LIST_HEAD(&host1x->clients);
+ host1x->dev = &pdev->dev;
+
+ err = host1x_parse_dt(host1x);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
+ return err;
+ }
+
+ host1x_set_drm_data(&pdev->dev, host1x);
+
+ return 0;
+}
+
+int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm)
+{
+ struct host1x_client *client;
+
+ mutex_lock(&host1x->clients_lock);
+
+ list_for_each_entry(client, &host1x->clients, list) {
+ if (client->ops && client->ops->drm_init) {
+ int err = client->ops->drm_init(client, drm);
+ if (err < 0) {
+ dev_err(host1x->dev,
+ "DRM setup failed for %s: %d\n",
+ dev_name(client->dev), err);
+ return err;
+ }
+ }
+ }
+
+ mutex_unlock(&host1x->clients_lock);
+
+ return 0;
+}
+
+int host1x_drm_exit(struct host1x_drm *host1x)
+{
+ struct platform_device *pdev = to_platform_device(host1x->dev);
+ struct host1x_client *client;
+
+ if (!host1x->drm)
+ return 0;
+
+ mutex_lock(&host1x->clients_lock);
+
+ list_for_each_entry_reverse(client, &host1x->clients, list) {
+ if (client->ops && client->ops->drm_exit) {
+ int err = client->ops->drm_exit(client);
+ if (err < 0) {
+ dev_err(host1x->dev,
+ "DRM cleanup failed for %s: %d\n",
+ dev_name(client->dev), err);
+ return err;
+ }
+ }
+ }
+
+ mutex_unlock(&host1x->clients_lock);
+
+ drm_platform_exit(&tegra_drm_driver, pdev);
+ host1x->drm = NULL;
+
+ return 0;
+}
+
+int host1x_register_client(struct host1x_drm *host1x,
+ struct host1x_client *client)
+{
+ struct host1x_drm_client *drm, *tmp;
+ int err;
+
+ mutex_lock(&host1x->clients_lock);
+ list_add_tail(&client->list, &host1x->clients);
+ mutex_unlock(&host1x->clients_lock);
+
+ list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list)
+ if (drm->np == client->dev->of_node)
+ host1x_activate_drm_client(host1x, drm, client);
+
+ if (list_empty(&host1x->drm_clients)) {
+ struct platform_device *pdev = to_platform_device(host1x->dev);
+
+ err = drm_platform_init(&tegra_drm_driver, pdev);
+ if (err < 0) {
+ dev_err(host1x->dev, "drm_platform_init(): %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int host1x_unregister_client(struct host1x_drm *host1x,
+ struct host1x_client *client)
+{
+ struct host1x_drm_client *drm, *tmp;
+ int err;
+
+ list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
+ if (drm->client == client) {
+ err = host1x_drm_exit(host1x);
+ if (err < 0) {
+ dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
+ err);
+ return err;
+ }
+
+ host1x_remove_drm_client(host1x, drm);
+ break;
+ }
+ }
+
+ mutex_lock(&host1x->clients_lock);
+ list_del_init(&client->list);
+ mutex_unlock(&host1x->clients_lock);
+
+ return 0;
+}
+
+static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
+{
+ struct host1x_drm *host1x;
+ int err;
+
+ host1x = host1x_get_drm_data(drm->dev);
+ drm->dev_private = host1x;
+ host1x->drm = drm;
+
+ drm_mode_config_init(drm);
+
+ err = host1x_drm_init(host1x, drm);
+ if (err < 0)
+ return err;
+
+ err = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (err < 0)
+ return err;
+
+ err = tegra_drm_fb_init(drm);
+ if (err < 0)
+ return err;
+
+ drm_kms_helper_poll_init(drm);
+
+ return 0;
+}
+
+static int tegra_drm_unload(struct drm_device *drm)
+{
+ drm_kms_helper_poll_fini(drm);
+ tegra_drm_fb_exit(drm);
+
+ drm_mode_config_cleanup(drm);
+
+ return 0;
+}
+
+static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
+{
+ struct host1x_drm_file *fpriv;
+
+ fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+ if (!fpriv)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&fpriv->contexts);
+ filp->driver_priv = fpriv;
+
+ return 0;
+}
+
+static void host1x_drm_context_free(struct host1x_drm_context *context)
+{
+ context->client->ops->close_channel(context);
+ kfree(context);
+}
+
+static void tegra_drm_lastclose(struct drm_device *drm)
+{
+ struct host1x_drm *host1x = drm->dev_private;
+
+ tegra_fbdev_restore_mode(host1x->fbdev);
+}
+
+#ifdef CONFIG_DRM_TEGRA_STAGING
+static bool host1x_drm_file_owns_context(struct host1x_drm_file *file,
+ struct host1x_drm_context *context)
+{
+ struct host1x_drm_context *ctx;
+
+ list_for_each_entry(ctx, &file->contexts, list)
+ if (ctx == context)
+ return true;
+
+ return false;
+}
+
+static int tegra_gem_create(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_gem_create *args = data;
+ struct tegra_bo *bo;
+
+ bo = tegra_bo_create_with_handle(file, drm, args->size,
+ &args->handle);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ return 0;
+}
+
+static int tegra_gem_mmap(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_gem_mmap *args = data;
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo;
+
+ gem = drm_gem_object_lookup(drm, file, args->handle);
+ if (!gem)
+ return -EINVAL;
+
+ bo = to_tegra_bo(gem);
+
+ args->offset = tegra_bo_get_mmap_offset(bo);
+
+ drm_gem_object_unreference(gem);
+
+ return 0;
+}
+
+static int tegra_syncpt_read(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_syncpt_read *args = data;
+ struct host1x *host = dev_get_drvdata(drm->dev);
+ struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
+
+ if (!sp)
+ return -EINVAL;
+
+ args->value = host1x_syncpt_read_min(sp);
+ return 0;
+}
+
+static int tegra_syncpt_incr(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_syncpt_incr *args = data;
+ struct host1x *host = dev_get_drvdata(drm->dev);
+ struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
+
+ if (!sp)
+ return -EINVAL;
+
+ host1x_syncpt_incr(sp);
+ return 0;
+}
+
+static int tegra_syncpt_wait(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_syncpt_wait *args = data;
+ struct host1x *host = dev_get_drvdata(drm->dev);
+ struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
+
+ if (!sp)
+ return -EINVAL;
+
+ return host1x_syncpt_wait(sp, args->thresh, args->timeout,
+ &args->value);
+}
+
+static int tegra_open_channel(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_open_channel *args = data;
+ struct host1x_client *client;
+ struct host1x_drm_context *context;
+ struct host1x_drm_file *fpriv = file->driver_priv;
+ struct host1x_drm *host1x = drm->dev_private;
+ int err = -ENODEV;
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ list_for_each_entry(client, &host1x->clients, list)
+ if (client->class == args->client) {
+ err = client->ops->open_channel(client, context);
+ if (err)
+ break;
+
+ context->client = client;
+ list_add(&context->list, &fpriv->contexts);
+ args->context = (uintptr_t)context;
+ return 0;
+ }
+
+ kfree(context);
+ return err;
+}
+
+static int tegra_close_channel(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_close_channel *args = data;
+ struct host1x_drm_file *fpriv = file->driver_priv;
+ struct host1x_drm_context *context =
+ (struct host1x_drm_context *)(uintptr_t)args->context;
+
+ if (!host1x_drm_file_owns_context(fpriv, context))
+ return -EINVAL;
+
+ list_del(&context->list);
+ host1x_drm_context_free(context);
+
+ return 0;
+}
+
+static int tegra_get_syncpt(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_get_syncpt *args = data;
+ struct host1x_drm_file *fpriv = file->driver_priv;
+ struct host1x_drm_context *context =
+ (struct host1x_drm_context *)(uintptr_t)args->context;
+ struct host1x_syncpt *syncpt;
+
+ if (!host1x_drm_file_owns_context(fpriv, context))
+ return -ENODEV;
+
+ if (args->index >= context->client->num_syncpts)
+ return -EINVAL;
+
+ syncpt = context->client->syncpts[args->index];
+ args->id = host1x_syncpt_id(syncpt);
+
+ return 0;
+}
+
+static int tegra_submit(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_submit *args = data;
+ struct host1x_drm_file *fpriv = file->driver_priv;
+ struct host1x_drm_context *context =
+ (struct host1x_drm_context *)(uintptr_t)args->context;
+
+ if (!host1x_drm_file_owns_context(fpriv, context))
+ return -ENODEV;
+
+ return context->client->ops->submit(context, args, drm, file);
+}
+#endif
+
+static struct drm_ioctl_desc tegra_drm_ioctls[] = {
+#ifdef CONFIG_DRM_TEGRA_STAGING
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED),
+#endif
+};
+
+static const struct file_operations tegra_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = tegra_drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
+static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ if (dc->pipe == pipe)
+ return crtc;
+ }
+
+ return NULL;
+}
+
+static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+ /* TODO: implement real hardware counter using syncpoints */
+ return drm_vblank_count(dev, crtc);
+}
+
+static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
+{
+ struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ if (!crtc)
+ return -ENODEV;
+
+ tegra_dc_enable_vblank(dc);
+
+ return 0;
+}
+
+static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
+{
+ struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ if (crtc)
+ tegra_dc_disable_vblank(dc);
+}
+
+static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
+{
+ struct host1x_drm_file *fpriv = file->driver_priv;
+ struct host1x_drm_context *context, *tmp;
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
+ tegra_dc_cancel_page_flip(crtc, file);
+
+ list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
+ host1x_drm_context_free(context);
+
+ kfree(fpriv);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)s->private;
+ struct drm_device *drm = node->minor->dev;
+ struct drm_framebuffer *fb;
+
+ mutex_lock(&drm->mode_config.fb_lock);
+
+ list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
+ seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
+ fb->base.id, fb->width, fb->height, fb->depth,
+ fb->bits_per_pixel,
+ atomic_read(&fb->refcount.refcount));
+ }
+
+ mutex_unlock(&drm->mode_config.fb_lock);
+
+ return 0;
+}
+
+static struct drm_info_list tegra_debugfs_list[] = {
+ { "framebuffers", tegra_debugfs_framebuffers, 0 },
+};
+
+static int tegra_debugfs_init(struct drm_minor *minor)
+{
+ return drm_debugfs_create_files(tegra_debugfs_list,
+ ARRAY_SIZE(tegra_debugfs_list),
+ minor->debugfs_root, minor);
+}
+
+static void tegra_debugfs_cleanup(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(tegra_debugfs_list,
+ ARRAY_SIZE(tegra_debugfs_list), minor);
+}
+#endif
+
+struct drm_driver tegra_drm_driver = {
+ .driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM,
+ .load = tegra_drm_load,
+ .unload = tegra_drm_unload,
+ .open = tegra_drm_open,
+ .preclose = tegra_drm_preclose,
+ .lastclose = tegra_drm_lastclose,
+
+ .get_vblank_counter = tegra_drm_get_vblank_counter,
+ .enable_vblank = tegra_drm_enable_vblank,
+ .disable_vblank = tegra_drm_disable_vblank,
+
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = tegra_debugfs_init,
+ .debugfs_cleanup = tegra_debugfs_cleanup,
+#endif
+
+ .gem_free_object = tegra_bo_free_object,
+ .gem_vm_ops = &tegra_bo_vm_ops,
+ .dumb_create = tegra_bo_dumb_create,
+ .dumb_map_offset = tegra_bo_dumb_map_offset,
+ .dumb_destroy = tegra_bo_dumb_destroy,
+
+ .ioctls = tegra_drm_ioctls,
+ .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
+ .fops = &tegra_drm_fops,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/host1x/drm/drm.h
index 6dd75a2..02ce020 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/host1x/drm/drm.h
@@ -1,24 +1,36 @@
/*
* Copyright (C) 2012 Avionic Design GmbH
- * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ * Copyright (C) 2012-2013 NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#ifndef TEGRA_DRM_H
-#define TEGRA_DRM_H 1
+#ifndef HOST1X_DRM_H
+#define HOST1X_DRM_H 1
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fixed.h>
+#include <uapi/drm/tegra_drm.h>
-struct host1x {
+#include "host1x.h"
+
+struct tegra_fb {
+ struct drm_framebuffer base;
+ struct tegra_bo **planes;
+ unsigned int num_planes;
+};
+
+struct tegra_fbdev {
+ struct drm_fb_helper base;
+ struct tegra_fb *fb;
+};
+
+struct host1x_drm {
struct drm_device *drm;
struct device *dev;
void __iomem *regs;
@@ -33,31 +45,53 @@ struct host1x {
struct mutex clients_lock;
struct list_head clients;
- struct drm_fbdev_cma *fbdev;
+ struct tegra_fbdev *fbdev;
};
struct host1x_client;
+struct host1x_drm_context {
+ struct host1x_client *client;
+ struct host1x_channel *channel;
+ struct list_head list;
+};
+
struct host1x_client_ops {
int (*drm_init)(struct host1x_client *client, struct drm_device *drm);
int (*drm_exit)(struct host1x_client *client);
+ int (*open_channel)(struct host1x_client *client,
+ struct host1x_drm_context *context);
+ void (*close_channel)(struct host1x_drm_context *context);
+ int (*submit)(struct host1x_drm_context *context,
+ struct drm_tegra_submit *args, struct drm_device *drm,
+ struct drm_file *file);
+};
+
+struct host1x_drm_file {
+ struct list_head contexts;
};
struct host1x_client {
- struct host1x *host1x;
+ struct host1x_drm *host1x;
struct device *dev;
const struct host1x_client_ops *ops;
+ enum host1x_class class;
+ struct host1x_channel *channel;
+
+ struct host1x_syncpt **syncpts;
+ unsigned int num_syncpts;
+
struct list_head list;
};
-extern int host1x_drm_init(struct host1x *host1x, struct drm_device *drm);
-extern int host1x_drm_exit(struct host1x *host1x);
+extern int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm);
+extern int host1x_drm_exit(struct host1x_drm *host1x);
-extern int host1x_register_client(struct host1x *host1x,
+extern int host1x_register_client(struct host1x_drm *host1x,
struct host1x_client *client);
-extern int host1x_unregister_client(struct host1x *host1x,
+extern int host1x_unregister_client(struct host1x_drm *host1x,
struct host1x_client *client);
struct tegra_output;
@@ -66,7 +100,7 @@ struct tegra_dc {
struct host1x_client client;
spinlock_t lock;
- struct host1x *host1x;
+ struct host1x_drm *host1x;
struct device *dev;
struct drm_crtc base;
@@ -226,12 +260,12 @@ extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output
extern int tegra_output_exit(struct tegra_output *output);
/* from fb.c */
+struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
+ unsigned int index);
extern int tegra_drm_fb_init(struct drm_device *drm);
extern void tegra_drm_fb_exit(struct drm_device *drm);
+extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
-extern struct platform_driver tegra_host1x_driver;
-extern struct platform_driver tegra_hdmi_driver;
-extern struct platform_driver tegra_dc_driver;
extern struct drm_driver tegra_drm_driver;
-#endif /* TEGRA_DRM_H */
+#endif /* HOST1X_DRM_H */
diff --git a/drivers/gpu/host1x/drm/fb.c b/drivers/gpu/host1x/drm/fb.c
new file mode 100644
index 0000000..979a3e3
--- /dev/null
+++ b/drivers/gpu/host1x/drm/fb.c
@@ -0,0 +1,374 @@
+/*
+ * Copyright (C) 2012-2013 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * Based on the KMS/FB CMA helpers
+ * Copyright (C) 2012 Analog Device Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+
+#include "drm.h"
+#include "gem.h"
+
+static inline struct tegra_fb *to_tegra_fb(struct drm_framebuffer *fb)
+{
+ return container_of(fb, struct tegra_fb, base);
+}
+
+static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper)
+{
+ return container_of(helper, struct tegra_fbdev, base);
+}
+
+struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
+ unsigned int index)
+{
+ struct tegra_fb *fb = to_tegra_fb(framebuffer);
+
+ if (index >= drm_format_num_planes(framebuffer->pixel_format))
+ return NULL;
+
+ return fb->planes[index];
+}
+
+static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
+{
+ struct tegra_fb *fb = to_tegra_fb(framebuffer);
+ unsigned int i;
+
+ for (i = 0; i < fb->num_planes; i++) {
+ struct tegra_bo *bo = fb->planes[i];
+
+ if (bo)
+ drm_gem_object_unreference_unlocked(&bo->gem);
+ }
+
+ drm_framebuffer_cleanup(framebuffer);
+ kfree(fb->planes);
+ kfree(fb);
+}
+
+static int tegra_fb_create_handle(struct drm_framebuffer *framebuffer,
+ struct drm_file *file, unsigned int *handle)
+{
+ struct tegra_fb *fb = to_tegra_fb(framebuffer);
+
+ return drm_gem_handle_create(file, &fb->planes[0]->gem, handle);
+}
+
+static struct drm_framebuffer_funcs tegra_fb_funcs = {
+ .destroy = tegra_fb_destroy,
+ .create_handle = tegra_fb_create_handle,
+};
+
+static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct tegra_bo **planes,
+ unsigned int num_planes)
+{
+ struct tegra_fb *fb;
+ unsigned int i;
+ int err;
+
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
+ return ERR_PTR(-ENOMEM);
+
+ fb->planes = kzalloc(num_planes * sizeof(*planes), GFP_KERNEL);
+ if (!fb->planes)
+ return ERR_PTR(-ENOMEM);
+
+ fb->num_planes = num_planes;
+
+ drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+
+ for (i = 0; i < fb->num_planes; i++)
+ fb->planes[i] = planes[i];
+
+ err = drm_framebuffer_init(drm, &fb->base, &tegra_fb_funcs);
+ if (err < 0) {
+ dev_err(drm->dev, "failed to initialize framebuffer: %d\n",
+ err);
+ kfree(fb->planes);
+ kfree(fb);
+ return ERR_PTR(err);
+ }
+
+ return fb;
+}
+
+static struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
+ struct drm_file *file,
+ struct drm_mode_fb_cmd2 *cmd)
+{
+ unsigned int hsub, vsub, i;
+ struct tegra_bo *planes[4];
+ struct drm_gem_object *gem;
+ struct tegra_fb *fb;
+ int err;
+
+ hsub = drm_format_horz_chroma_subsampling(cmd->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(cmd->pixel_format);
+
+ for (i = 0; i < drm_format_num_planes(cmd->pixel_format); i++) {
+ unsigned int width = cmd->width / (i ? hsub : 1);
+ unsigned int height = cmd->height / (i ? vsub : 1);
+ unsigned int size, bpp;
+
+ gem = drm_gem_object_lookup(drm, file, cmd->handles[i]);
+ if (!gem) {
+ err = -ENXIO;
+ goto unreference;
+ }
+
+ bpp = drm_format_plane_cpp(cmd->pixel_format, i);
+
+ size = (height - 1) * cmd->pitches[i] +
+ width * bpp + cmd->offsets[i];
+
+ if (gem->size < size) {
+ err = -EINVAL;
+ goto unreference;
+ }
+
+ planes[i] = to_tegra_bo(gem);
+ }
+
+ fb = tegra_fb_alloc(drm, cmd, planes, i);
+ if (IS_ERR(fb)) {
+ err = PTR_ERR(fb);
+ goto unreference;
+ }
+
+ return &fb->base;
+
+unreference:
+ while (i--)
+ drm_gem_object_unreference_unlocked(&planes[i]->gem);
+
+ return ERR_PTR(err);
+}
+
+static struct fb_ops tegra_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int tegra_fbdev_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct tegra_fbdev *fbdev = to_tegra_fbdev(helper);
+ struct drm_device *drm = helper->dev;
+ struct drm_mode_fb_cmd2 cmd = { 0 };
+ unsigned int bytes_per_pixel;
+ struct drm_framebuffer *fb;
+ unsigned long offset;
+ struct fb_info *info;
+ struct tegra_bo *bo;
+ size_t size;
+ int err;
+
+ bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+
+ cmd.width = sizes->surface_width;
+ cmd.height = sizes->surface_height;
+ cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
+ cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = cmd.pitches[0] * cmd.height;
+
+ bo = tegra_bo_create(drm, size);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ info = framebuffer_alloc(0, drm->dev);
+ if (!info) {
+ dev_err(drm->dev, "failed to allocate framebuffer info\n");
+ tegra_bo_free_object(&bo->gem);
+ return -ENOMEM;
+ }
+
+ fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1);
+ if (IS_ERR(fbdev->fb)) {
+ dev_err(drm->dev, "failed to allocate DRM framebuffer\n");
+ err = PTR_ERR(fbdev->fb);
+ goto release;
+ }
+
+ fb = &fbdev->fb->base;
+ helper->fb = fb;
+ helper->fbdev = info;
+
+ info->par = helper;
+ info->flags = FBINFO_FLAG_DEFAULT;
+ info->fbops = &tegra_fb_ops;
+
+ err = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (err < 0) {
+ dev_err(drm->dev, "failed to allocate color map: %d\n", err);
+ goto destroy;
+ }
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, helper, fb->width, fb->height);
+
+ offset = info->var.xoffset * bytes_per_pixel +
+ info->var.yoffset * fb->pitches[0];
+
+ drm->mode_config.fb_base = (resource_size_t)bo->paddr;
+ info->screen_base = bo->vaddr + offset;
+ info->screen_size = size;
+ info->fix.smem_start = (unsigned long)(bo->paddr + offset);
+ info->fix.smem_len = size;
+
+ return 0;
+
+destroy:
+ drm_framebuffer_unregister_private(fb);
+ tegra_fb_destroy(fb);
+release:
+ framebuffer_release(info);
+ return err;
+}
+
+static struct drm_fb_helper_funcs tegra_fb_helper_funcs = {
+ .fb_probe = tegra_fbdev_probe,
+};
+
+static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm,
+ unsigned int preferred_bpp,
+ unsigned int num_crtc,
+ unsigned int max_connectors)
+{
+ struct drm_fb_helper *helper;
+ struct tegra_fbdev *fbdev;
+ int err;
+
+ fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev) {
+ dev_err(drm->dev, "failed to allocate DRM fbdev\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ fbdev->base.funcs = &tegra_fb_helper_funcs;
+ helper = &fbdev->base;
+
+ err = drm_fb_helper_init(drm, &fbdev->base, num_crtc, max_connectors);
+ if (err < 0) {
+ dev_err(drm->dev, "failed to initialize DRM FB helper\n");
+ goto free;
+ }
+
+ err = drm_fb_helper_single_add_all_connectors(&fbdev->base);
+ if (err < 0) {
+ dev_err(drm->dev, "failed to add connectors\n");
+ goto fini;
+ }
+
+ drm_helper_disable_unused_functions(drm);
+
+ err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp);
+ if (err < 0) {
+ dev_err(drm->dev, "failed to set initial configuration\n");
+ goto fini;
+ }
+
+ return fbdev;
+
+fini:
+ drm_fb_helper_fini(&fbdev->base);
+free:
+ kfree(fbdev);
+ return ERR_PTR(err);
+}
+
+static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
+{
+ struct fb_info *info = fbdev->base.fbdev;
+
+ if (info) {
+ int err;
+
+ err = unregister_framebuffer(info);
+ if (err < 0)
+ DRM_DEBUG_KMS("failed to unregister framebuffer\n");
+
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+
+ framebuffer_release(info);
+ }
+
+ if (fbdev->fb) {
+ drm_framebuffer_unregister_private(&fbdev->fb->base);
+ tegra_fb_destroy(&fbdev->fb->base);
+ }
+
+ drm_fb_helper_fini(&fbdev->base);
+ kfree(fbdev);
+}
+
+static void tegra_fb_output_poll_changed(struct drm_device *drm)
+{
+ struct host1x_drm *host1x = drm->dev_private;
+
+ if (host1x->fbdev)
+ drm_fb_helper_hotplug_event(&host1x->fbdev->base);
+}
+
+static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
+ .fb_create = tegra_fb_create,
+ .output_poll_changed = tegra_fb_output_poll_changed,
+};
+
+int tegra_drm_fb_init(struct drm_device *drm)
+{
+ struct host1x_drm *host1x = drm->dev_private;
+ struct tegra_fbdev *fbdev;
+
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+
+ drm->mode_config.max_width = 4096;
+ drm->mode_config.max_height = 4096;
+
+ drm->mode_config.funcs = &tegra_drm_mode_funcs;
+
+ fbdev = tegra_fbdev_create(drm, 32, drm->mode_config.num_crtc,
+ drm->mode_config.num_connector);
+ if (IS_ERR(fbdev))
+ return PTR_ERR(fbdev);
+
+ host1x->fbdev = fbdev;
+
+ return 0;
+}
+
+void tegra_drm_fb_exit(struct drm_device *drm)
+{
+ struct host1x_drm *host1x = drm->dev_private;
+
+ tegra_fbdev_free(host1x->fbdev);
+}
+
+void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
+{
+ if (fbdev) {
+ drm_modeset_lock_all(fbdev->base.dev);
+ drm_fb_helper_restore_fbdev_mode(&fbdev->base);
+ drm_modeset_unlock_all(fbdev->base.dev);
+ }
+}
diff --git a/drivers/gpu/host1x/drm/gem.c b/drivers/gpu/host1x/drm/gem.c
new file mode 100644
index 0000000..c5e9a9b
--- /dev/null
+++ b/drivers/gpu/host1x/drm/gem.c
@@ -0,0 +1,270 @@
+/*
+ * NVIDIA Tegra DRM GEM helper functions
+ *
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix
+ * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
+ *
+ * Based on the GEM/CMA helpers
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/export.h>
+#include <linux/dma-mapping.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "gem.h"
+
+static inline struct tegra_bo *host1x_to_drm_bo(struct host1x_bo *bo)
+{
+ return container_of(bo, struct tegra_bo, base);
+}
+
+static void tegra_bo_put(struct host1x_bo *bo)
+{
+ struct tegra_bo *obj = host1x_to_drm_bo(bo);
+ struct drm_device *drm = obj->gem.dev;
+
+ mutex_lock(&drm->struct_mutex);
+ drm_gem_object_unreference(&obj->gem);
+ mutex_unlock(&drm->struct_mutex);
+}
+
+static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
+{
+ struct tegra_bo *obj = host1x_to_drm_bo(bo);
+
+ return obj->paddr;
+}
+
+static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
+{
+}
+
+static void *tegra_bo_mmap(struct host1x_bo *bo)
+{
+ struct tegra_bo *obj = host1x_to_drm_bo(bo);
+
+ return obj->vaddr;
+}
+
+static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
+{
+}
+
+static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
+{
+ struct tegra_bo *obj = host1x_to_drm_bo(bo);
+
+ return obj->vaddr + page * PAGE_SIZE;
+}
+
+static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
+ void *addr)
+{
+}
+
+static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
+{
+ struct tegra_bo *obj = host1x_to_drm_bo(bo);
+ struct drm_device *drm = obj->gem.dev;
+
+ mutex_lock(&drm->struct_mutex);
+ drm_gem_object_reference(&obj->gem);
+ mutex_unlock(&drm->struct_mutex);
+
+ return bo;
+}
+
+const struct host1x_bo_ops tegra_bo_ops = {
+ .get = tegra_bo_get,
+ .put = tegra_bo_put,
+ .pin = tegra_bo_pin,
+ .unpin = tegra_bo_unpin,
+ .mmap = tegra_bo_mmap,
+ .munmap = tegra_bo_munmap,
+ .kmap = tegra_bo_kmap,
+ .kunmap = tegra_bo_kunmap,
+};
+
+static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
+{
+ dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
+}
+
+unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo)
+{
+ return (unsigned int)bo->gem.map_list.hash.key << PAGE_SHIFT;
+}
+
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
+{
+ struct tegra_bo *bo;
+ int err;
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return ERR_PTR(-ENOMEM);
+
+ host1x_bo_init(&bo->base, &tegra_bo_ops);
+ size = round_up(size, PAGE_SIZE);
+
+ bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!bo->vaddr) {
+ dev_err(drm->dev, "failed to allocate buffer with size %u\n",
+ size);
+ err = -ENOMEM;
+ goto err_dma;
+ }
+
+ err = drm_gem_object_init(drm, &bo->gem, size);
+ if (err)
+ goto err_init;
+
+ err = drm_gem_create_mmap_offset(&bo->gem);
+ if (err)
+ goto err_mmap;
+
+ return bo;
+
+err_mmap:
+ drm_gem_object_release(&bo->gem);
+err_init:
+ tegra_bo_destroy(drm, bo);
+err_dma:
+ kfree(bo);
+
+ return ERR_PTR(err);
+
+}
+
+struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
+ struct drm_device *drm,
+ unsigned int size,
+ unsigned int *handle)
+{
+ struct tegra_bo *bo;
+ int ret;
+
+ bo = tegra_bo_create(drm, size);
+ if (IS_ERR(bo))
+ return bo;
+
+ ret = drm_gem_handle_create(file, &bo->gem, handle);
+ if (ret)
+ goto err;
+
+ drm_gem_object_unreference_unlocked(&bo->gem);
+
+ return bo;
+
+err:
+ tegra_bo_free_object(&bo->gem);
+ return ERR_PTR(ret);
+}
+
+void tegra_bo_free_object(struct drm_gem_object *gem)
+{
+ struct tegra_bo *bo = to_tegra_bo(gem);
+
+ if (gem->map_list.map)
+ drm_gem_free_mmap_offset(gem);
+
+ drm_gem_object_release(gem);
+ tegra_bo_destroy(gem->dev, bo);
+
+ kfree(bo);
+}
+
+int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+{
+ int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ struct tegra_bo *bo;
+
+ if (args->pitch < min_pitch)
+ args->pitch = min_pitch;
+
+ if (args->size < args->pitch * args->height)
+ args->size = args->pitch * args->height;
+
+ bo = tegra_bo_create_with_handle(file, drm, args->size,
+ &args->handle);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ return 0;
+}
+
+int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
+ uint32_t handle, uint64_t *offset)
+{
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo;
+
+ mutex_lock(&drm->struct_mutex);
+
+ gem = drm_gem_object_lookup(drm, file, handle);
+ if (!gem) {
+ dev_err(drm->dev, "failed to lookup GEM object\n");
+ mutex_unlock(&drm->struct_mutex);
+ return -EINVAL;
+ }
+
+ bo = to_tegra_bo(gem);
+
+ *offset = tegra_bo_get_mmap_offset(bo);
+
+ drm_gem_object_unreference(gem);
+
+ mutex_unlock(&drm->struct_mutex);
+
+ return 0;
+}
+
+const struct vm_operations_struct tegra_bo_vm_ops = {
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo;
+ int ret;
+
+ ret = drm_gem_mmap(file, vma);
+ if (ret)
+ return ret;
+
+ gem = vma->vm_private_data;
+ bo = to_tegra_bo(gem);
+
+ ret = remap_pfn_range(vma, vma->vm_start, bo->paddr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
+ if (ret)
+ drm_gem_vm_close(vma);
+
+ return ret;
+}
+
+int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
+ unsigned int handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
diff --git a/drivers/gpu/host1x/drm/gem.h b/drivers/gpu/host1x/drm/gem.h
new file mode 100644
index 0000000..34de2b4
--- /dev/null
+++ b/drivers/gpu/host1x/drm/gem.h
@@ -0,0 +1,59 @@
+/*
+ * Tegra host1x GEM implementation
+ *
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_GEM_H
+#define __HOST1X_GEM_H
+
+#include <drm/drm.h>
+#include <drm/drmP.h>
+
+#include "host1x_bo.h"
+
+struct tegra_bo {
+ struct drm_gem_object gem;
+ struct host1x_bo base;
+ dma_addr_t paddr;
+ void *vaddr;
+};
+
+static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem)
+{
+ return container_of(gem, struct tegra_bo, gem);
+}
+
+extern const struct host1x_bo_ops tegra_bo_ops;
+
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size);
+struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
+ struct drm_device *drm,
+ unsigned int size,
+ unsigned int *handle);
+void tegra_bo_free_object(struct drm_gem_object *gem);
+unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo);
+int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
+ struct drm_mode_create_dumb *args);
+int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
+ uint32_t handle, uint64_t *offset);
+int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
+ unsigned int handle);
+
+int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
+
+extern const struct vm_operations_struct tegra_bo_vm_ops;
+
+#endif
diff --git a/drivers/gpu/host1x/drm/gr2d.c b/drivers/gpu/host1x/drm/gr2d.c
new file mode 100644
index 0000000..6a45ae0
--- /dev/null
+++ b/drivers/gpu/host1x/drm/gr2d.c
@@ -0,0 +1,339 @@
+/*
+ * drivers/video/tegra/host/gr2d/gr2d.c
+ *
+ * Tegra Graphics 2D
+ *
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+
+#include "channel.h"
+#include "drm.h"
+#include "gem.h"
+#include "job.h"
+#include "host1x.h"
+#include "host1x_bo.h"
+#include "host1x_client.h"
+#include "syncpt.h"
+
+struct gr2d {
+ struct host1x_client client;
+ struct clk *clk;
+ struct host1x_channel *channel;
+ unsigned long *addr_regs;
+};
+
+static inline struct gr2d *to_gr2d(struct host1x_client *client)
+{
+ return container_of(client, struct gr2d, client);
+}
+
+static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg);
+
+static int gr2d_client_init(struct host1x_client *client,
+ struct drm_device *drm)
+{
+ return 0;
+}
+
+static int gr2d_client_exit(struct host1x_client *client)
+{
+ return 0;
+}
+
+static int gr2d_open_channel(struct host1x_client *client,
+ struct host1x_drm_context *context)
+{
+ struct gr2d *gr2d = to_gr2d(client);
+
+ context->channel = host1x_channel_get(gr2d->channel);
+
+ if (!context->channel)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void gr2d_close_channel(struct host1x_drm_context *context)
+{
+ host1x_channel_put(context->channel);
+}
+
+static struct host1x_bo *host1x_bo_lookup(struct drm_device *drm,
+ struct drm_file *file,
+ u32 handle)
+{
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo;
+
+ gem = drm_gem_object_lookup(drm, file, handle);
+ if (!gem)
+ return NULL;
+
+ mutex_lock(&drm->struct_mutex);
+ drm_gem_object_unreference(gem);
+ mutex_unlock(&drm->struct_mutex);
+
+ bo = to_tegra_bo(gem);
+ return &bo->base;
+}
+
+static int gr2d_submit(struct host1x_drm_context *context,
+ struct drm_tegra_submit *args, struct drm_device *drm,
+ struct drm_file *file)
+{
+ struct host1x_job *job;
+ unsigned int num_cmdbufs = args->num_cmdbufs;
+ unsigned int num_relocs = args->num_relocs;
+ unsigned int num_waitchks = args->num_waitchks;
+ struct drm_tegra_cmdbuf __user *cmdbufs =
+ (void __user *)(uintptr_t)args->cmdbufs;
+ struct drm_tegra_reloc __user *relocs =
+ (void __user *)(uintptr_t)args->relocs;
+ struct drm_tegra_waitchk __user *waitchks =
+ (void __user *)(uintptr_t)args->waitchks;
+ struct drm_tegra_syncpt syncpt;
+ int err;
+
+ /* We currently support only one syncpt_incr struct per submit */
+ if (args->num_syncpts != 1)
+ return -EINVAL;
+
+ job = host1x_job_alloc(context->channel, args->num_cmdbufs,
+ args->num_relocs, args->num_waitchks);
+ if (!job)
+ return -ENOMEM;
+
+ job->num_relocs = args->num_relocs;
+ job->num_waitchk = args->num_waitchks;
+ job->client = (u32)args->context;
+ job->class = context->client->class;
+ job->serialize = true;
+
+ while (num_cmdbufs) {
+ struct drm_tegra_cmdbuf cmdbuf;
+ struct host1x_bo *bo;
+
+ err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
+ if (err)
+ goto fail;
+
+ bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
+ if (!bo)
+ goto fail;
+
+ host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
+ num_cmdbufs--;
+ cmdbufs++;
+ }
+
+ err = copy_from_user(job->relocarray, relocs,
+ sizeof(*relocs) * num_relocs);
+ if (err)
+ goto fail;
+
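+ /*
+ * Replace the GEM handles in each relocation with pointers to the
+ * corresponding host1x buffer objects; both the command buffer and
+ * the relocation target must resolve for the submit to proceed.
+ */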
+ while (num_relocs--) {
+ struct host1x_reloc *reloc = &job->relocarray[num_relocs];
+ struct host1x_bo *cmdbuf, *target;
+
+ cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf);
+ target = host1x_bo_lookup(drm, file, (u32)reloc->target);
+
+ reloc->cmdbuf = cmdbuf;
+ reloc->target = target;
+
+ if (!reloc->target || !reloc->cmdbuf)
+ goto fail;
+ }
+
+ err = copy_from_user(job->waitchk, waitchks,
+ sizeof(*waitchks) * num_waitchks);
+ if (err)
+ goto fail;
+
+ err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts,
+ sizeof(syncpt));
+ if (err)
+ goto fail;
+
+ job->syncpt_id = syncpt.id;
+ job->syncpt_incrs = syncpt.incrs;
+ job->timeout = 10000;
+ job->is_addr_reg = gr2d_is_addr_reg;
+
+ if (args->timeout && args->timeout < 10000)
+ job->timeout = args->timeout;
+
+ err = host1x_job_pin(job, context->client->dev);
+ if (err)
+ goto fail;
+
+ err = host1x_job_submit(job);
+ if (err)
+ goto fail_submit;
+
+ args->fence = job->syncpt_end;
+
+ host1x_job_put(job);
+ return 0;
+
+fail_submit:
+ host1x_job_unpin(job);
+fail:
+ host1x_job_put(job);
+ return err;
+}
+
+static struct host1x_client_ops gr2d_client_ops = {
+ .drm_init = gr2d_client_init,
+ .drm_exit = gr2d_client_exit,
+ .open_channel = gr2d_open_channel,
+ .close_channel = gr2d_close_channel,
+ .submit = gr2d_submit,
+};
+
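+/*
+ * Build a bitmap of the GR2D register offsets that take buffer addresses;
+ * gr2d_is_addr_reg() consults it, and gr2d_submit() installs that callback
+ * as job->is_addr_reg.
+ */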
+static void gr2d_init_addr_reg_map(struct device *dev, struct gr2d *gr2d)
+{
+ const u32 gr2d_addr_regs[] = {0x1a, 0x1b, 0x26, 0x2b, 0x2c, 0x2d, 0x31,
+ 0x32, 0x48, 0x49, 0x4a, 0x4b, 0x4c};
+ unsigned long *bitmap;
+ int i;
+
+ bitmap = devm_kzalloc(dev, DIV_ROUND_UP(256, BITS_PER_BYTE),
+ GFP_KERNEL);
+
+ for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); ++i) {
+ u32 reg = gr2d_addr_regs[i];
+ bitmap[BIT_WORD(reg)] |= BIT_MASK(reg);
+ }
+
+ gr2d->addr_regs = bitmap;
+}
+
+static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg)
+{
+ struct gr2d *gr2d = dev_get_drvdata(dev);
+
+ switch (class) {
+ case HOST1X_CLASS_HOST1X:
+ return reg == 0x2b;
+ case HOST1X_CLASS_GR2D:
+ case HOST1X_CLASS_GR2D_SB:
+ reg &= 0xff;
+ if (gr2d->addr_regs[BIT_WORD(reg)] & BIT_MASK(reg))
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static const struct of_device_id gr2d_match[] = {
+ { .compatible = "nvidia,tegra30-gr2d" },
+ { .compatible = "nvidia,tegra20-gr2d" },
+ { },
+};
+
+static int gr2d_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct host1x_drm *host1x = host1x_get_drm_data(dev->parent);
+ int err;
+ struct gr2d *gr2d = NULL;
+ struct host1x_syncpt **syncpts;
+
+ gr2d = devm_kzalloc(dev, sizeof(*gr2d), GFP_KERNEL);
+ if (!gr2d)
+ return -ENOMEM;
+
+ syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
+ if (!syncpts)
+ return -ENOMEM;
+
+ gr2d->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(gr2d->clk)) {
+ dev_err(dev, "cannot get clock\n");
+ return PTR_ERR(gr2d->clk);
+ }
+
+ err = clk_prepare_enable(gr2d->clk);
+ if (err) {
+ dev_err(dev, "cannot turn on clock\n");
+ return err;
+ }
+
+ gr2d->channel = host1x_channel_request(dev);
+ if (!gr2d->channel)
+ return -ENOMEM;
+
+ *syncpts = host1x_syncpt_request(dev, 0);
+ if (!(*syncpts)) {
+ host1x_channel_free(gr2d->channel);
+ return -ENOMEM;
+ }
+
+ gr2d->client.ops = &gr2d_client_ops;
+ gr2d->client.dev = dev;
+ gr2d->client.class = HOST1X_CLASS_GR2D;
+ gr2d->client.syncpts = syncpts;
+ gr2d->client.num_syncpts = 1;
+
+ err = host1x_register_client(host1x, &gr2d->client);
+ if (err < 0) {
+ dev_err(dev, "failed to register host1x client: %d\n", err);
+ return err;
+ }
+
+ gr2d_init_addr_reg_map(dev, gr2d);
+
+ platform_set_drvdata(pdev, gr2d);
+
+ return 0;
+}
+
+static int __exit gr2d_remove(struct platform_device *pdev)
+{
+ struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
+ struct gr2d *gr2d = platform_get_drvdata(pdev);
+ unsigned int i;
+ int err;
+
+ err = host1x_unregister_client(host1x, &gr2d->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister client: %d\n", err);
+ return err;
+ }
+
+ for (i = 0; i < gr2d->client.num_syncpts; i++)
+ host1x_syncpt_free(gr2d->client.syncpts[i]);
+
+ host1x_channel_free(gr2d->channel);
+ clk_disable_unprepare(gr2d->clk);
+
+ return 0;
+}
+
+struct platform_driver tegra_gr2d_driver = {
+ .probe = gr2d_probe,
+ .remove = __exit_p(gr2d_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "gr2d",
+ .of_match_table = gr2d_match,
+ }
+};
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/host1x/drm/hdmi.c
index bb747f6..01097da 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/host1x/drm/hdmi.c
@@ -22,6 +22,7 @@
#include "hdmi.h"
#include "drm.h"
#include "dc.h"
+#include "host1x_client.h"
struct tegra_hdmi {
struct host1x_client client;
@@ -1189,7 +1190,7 @@ static const struct host1x_client_ops hdmi_client_ops = {
static int tegra_hdmi_probe(struct platform_device *pdev)
{
- struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+ struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
struct tegra_hdmi *hdmi;
struct resource *regs;
int err;
@@ -1278,7 +1279,7 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
static int tegra_hdmi_remove(struct platform_device *pdev)
{
- struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+ struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
int err;
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/host1x/drm/hdmi.h
index 52ac36e..52ac36e 100644
--- a/drivers/gpu/drm/tegra/hdmi.h
+++ b/drivers/gpu/host1x/drm/hdmi.h
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/host1x/drm/output.c
index 8140fc6..8140fc6 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/host1x/drm/output.c
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/host1x/drm/rgb.c
index ed4416f..ed4416f 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/host1x/drm/rgb.c
diff --git a/drivers/gpu/host1x/host1x.h b/drivers/gpu/host1x/host1x.h
new file mode 100644
index 0000000..a2bc1e6
--- /dev/null
+++ b/drivers/gpu/host1x/host1x.h
@@ -0,0 +1,30 @@
+/*
+ * Tegra host1x driver
+ *
+ * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __LINUX_HOST1X_H
+#define __LINUX_HOST1X_H
+
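+/* Client module class IDs, as used in SETCLASS command stream opcodes. */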
+enum host1x_class {
+ HOST1X_CLASS_HOST1X = 0x1,
+ HOST1X_CLASS_GR2D = 0x51,
+ HOST1X_CLASS_GR2D_SB = 0x52
+};
+
+#endif
diff --git a/drivers/gpu/host1x/host1x_bo.h b/drivers/gpu/host1x/host1x_bo.h
new file mode 100644
index 0000000..4c1f10b
--- /dev/null
+++ b/drivers/gpu/host1x/host1x_bo.h
@@ -0,0 +1,87 @@
+/*
+ * Tegra host1x Memory Management Abstraction header
+ *
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _HOST1X_BO_H
+#define _HOST1X_BO_H
+
+struct host1x_bo;
+
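+/*
+ * Operations a buffer object provider implements so that host1x can take and
+ * drop references, pin a buffer for DMA (returning its address and an
+ * optional sg_table), and map it into the kernel as a whole or per page.
+ */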
+struct host1x_bo_ops {
+ struct host1x_bo *(*get)(struct host1x_bo *bo);
+ void (*put)(struct host1x_bo *bo);
+ dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
+ void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
+ void *(*mmap)(struct host1x_bo *bo);
+ void (*munmap)(struct host1x_bo *bo, void *addr);
+ void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
+ void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
+};
+
+struct host1x_bo {
+ const struct host1x_bo_ops *ops;
+};
+
+static inline void host1x_bo_init(struct host1x_bo *bo,
+ const struct host1x_bo_ops *ops)
+{
+ bo->ops = ops;
+}
+
+static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
+{
+ return bo->ops->get(bo);
+}
+
+static inline void host1x_bo_put(struct host1x_bo *bo)
+{
+ bo->ops->put(bo);
+}
+
+static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
+ struct sg_table **sgt)
+{
+ return bo->ops->pin(bo, sgt);
+}
+
+static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
+{
+ bo->ops->unpin(bo, sgt);
+}
+
+static inline void *host1x_bo_mmap(struct host1x_bo *bo)
+{
+ return bo->ops->mmap(bo);
+}
+
+static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
+{
+ bo->ops->munmap(bo, addr);
+}
+
+static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
+{
+ return bo->ops->kmap(bo, pagenum);
+}
+
+static inline void host1x_bo_kunmap(struct host1x_bo *bo,
+ unsigned int pagenum, void *addr)
+{
+ bo->ops->kunmap(bo, pagenum, addr);
+}
+
+#endif
diff --git a/drivers/gpu/host1x/host1x_client.h b/drivers/gpu/host1x/host1x_client.h
new file mode 100644
index 0000000..9b85f10
--- /dev/null
+++ b/drivers/gpu/host1x/host1x_client.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HOST1X_CLIENT_H
+#define HOST1X_CLIENT_H
+
+struct device;
+struct platform_device;
+
+#ifdef CONFIG_DRM_TEGRA
+int host1x_drm_alloc(struct platform_device *pdev);
+#else
+static inline int host1x_drm_alloc(struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
+void host1x_set_drm_data(struct device *dev, void *data);
+void *host1x_get_drm_data(struct device *dev);
+
+#endif
diff --git a/drivers/gpu/host1x/hw/Makefile b/drivers/gpu/host1x/hw/Makefile
new file mode 100644
index 0000000..9b50863
--- /dev/null
+++ b/drivers/gpu/host1x/hw/Makefile
@@ -0,0 +1,6 @@
+ccflags-y = -Idrivers/gpu/host1x
+
+host1x-hw-objs = \
+ host1x01.o
+
+obj-$(CONFIG_TEGRA_HOST1X) += host1x-hw.o
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
new file mode 100644
index 0000000..590b69d
--- /dev/null
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -0,0 +1,326 @@
+/*
+ * Tegra host1x Command DMA
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+
+#include "cdma.h"
+#include "channel.h"
+#include "dev.h"
+#include "debug.h"
+
+/*
+ * Put the restart at the end of pushbuffer memory
+ */
+static void push_buffer_init(struct push_buffer *pb)
+{
+ *(pb->mapped + (pb->size_bytes >> 2)) = host1x_opcode_restart(0);
+}
+
+/*
+ * Increment timedout buffer's syncpt via CPU.
+ */
+static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
+ u32 syncpt_incrs, u32 syncval, u32 nr_slots)
+{
+ struct host1x *host1x = cdma_to_host1x(cdma);
+ struct push_buffer *pb = &cdma->push_buffer;
+ u32 i;
+
+ for (i = 0; i < syncpt_incrs; i++)
+ host1x_syncpt_cpu_incr(cdma->timeout.syncpt);
+
+ /* after CPU incr, ensure shadow is up to date */
+ host1x_syncpt_load(cdma->timeout.syncpt);
+
+ /* NOP all the PB slots */
+ while (nr_slots--) {
+ u32 *p = (u32 *)((u32)pb->mapped + getptr);
+ *(p++) = HOST1X_OPCODE_NOP;
+ *(p++) = HOST1X_OPCODE_NOP;
+ dev_dbg(host1x->dev, "%s: NOP at 0x%x\n", __func__,
+ pb->phys + getptr);
+ getptr = (getptr + 8) & (pb->size_bytes - 1);
+ }
+ wmb();
+}
+
+/*
+ * Start channel DMA
+ */
+static void cdma_start(struct host1x_cdma *cdma)
+{
+ struct host1x_channel *ch = cdma_to_channel(cdma);
+
+ if (cdma->running)
+ return;
+
+ cdma->last_pos = cdma->push_buffer.pos;
+
+ host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
+ HOST1X_CHANNEL_DMACTRL);
+
+ /* set base, put and end pointer */
+ host1x_ch_writel(ch, cdma->push_buffer.phys, HOST1X_CHANNEL_DMASTART);
+ host1x_ch_writel(ch, cdma->push_buffer.pos, HOST1X_CHANNEL_DMAPUT);
+ host1x_ch_writel(ch, cdma->push_buffer.phys +
+ cdma->push_buffer.size_bytes + 4,
+ HOST1X_CHANNEL_DMAEND);
+
+ /* reset GET */
+ host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP |
+ HOST1X_CHANNEL_DMACTRL_DMAGETRST |
+ HOST1X_CHANNEL_DMACTRL_DMAINITGET,
+ HOST1X_CHANNEL_DMACTRL);
+
+ /* start the command DMA */
+ host1x_ch_writel(ch, 0, HOST1X_CHANNEL_DMACTRL);
+
+ cdma->running = true;
+}
+
+/*
+ * Similar to cdma_start(), but rather than starting from an idle
+ * state (where DMA GET is set to DMA PUT), on a timeout we restore
+ * DMA GET from an explicit value (so DMA may again be pending).
+ */
+static void cdma_timeout_restart(struct host1x_cdma *cdma, u32 getptr)
+{
+ struct host1x *host1x = cdma_to_host1x(cdma);
+ struct host1x_channel *ch = cdma_to_channel(cdma);
+
+ if (cdma->running)
+ return;
+
+ cdma->last_pos = cdma->push_buffer.pos;
+
+ host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
+ HOST1X_CHANNEL_DMACTRL);
+
+ /* set base, end pointer (all of memory) */
+ host1x_ch_writel(ch, cdma->push_buffer.phys, HOST1X_CHANNEL_DMASTART);
+ host1x_ch_writel(ch, cdma->push_buffer.phys +
+ cdma->push_buffer.size_bytes,
+ HOST1X_CHANNEL_DMAEND);
+
+ /* set GET, by loading the value in PUT (then reset GET) */
+ host1x_ch_writel(ch, getptr, HOST1X_CHANNEL_DMAPUT);
+ host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP |
+ HOST1X_CHANNEL_DMACTRL_DMAGETRST |
+ HOST1X_CHANNEL_DMACTRL_DMAINITGET,
+ HOST1X_CHANNEL_DMACTRL);
+
+ dev_dbg(host1x->dev,
+ "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n", __func__,
+ host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET),
+ host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT),
+ cdma->last_pos);
+
+ /* deassert GET reset and set PUT */
+ host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
+ HOST1X_CHANNEL_DMACTRL);
+ host1x_ch_writel(ch, cdma->push_buffer.pos, HOST1X_CHANNEL_DMAPUT);
+
+ /* start the command DMA */
+ host1x_ch_writel(ch, 0, HOST1X_CHANNEL_DMACTRL);
+
+ cdma->running = true;
+}
+
+/*
+ * Kick channel DMA into action by writing its PUT offset (if it has changed)
+ */
+static void cdma_flush(struct host1x_cdma *cdma)
+{
+ struct host1x_channel *ch = cdma_to_channel(cdma);
+
+ if (cdma->push_buffer.pos != cdma->last_pos) {
+ host1x_ch_writel(ch, cdma->push_buffer.pos,
+ HOST1X_CHANNEL_DMAPUT);
+ cdma->last_pos = cdma->push_buffer.pos;
+ }
+}
+
+static void cdma_stop(struct host1x_cdma *cdma)
+{
+ struct host1x_channel *ch = cdma_to_channel(cdma);
+
+ mutex_lock(&cdma->lock);
+ if (cdma->running) {
+ host1x_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
+ host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
+ HOST1X_CHANNEL_DMACTRL);
+ cdma->running = false;
+ }
+ mutex_unlock(&cdma->lock);
+}
+
+/*
+ * Stops both channel's command processor and CDMA immediately.
+ * Also, tears down the channel and resets corresponding module.
+ */
+static void cdma_freeze(struct host1x_cdma *cdma)
+{
+ struct host1x *host = cdma_to_host1x(cdma);
+ struct host1x_channel *ch = cdma_to_channel(cdma);
+ u32 cmdproc_stop;
+
+ if (cdma->torndown && !cdma->running) {
+ dev_warn(host->dev, "Already torn down\n");
+ return;
+ }
+
+ dev_dbg(host->dev, "freezing channel (id %d)\n", ch->id);
+
+ cmdproc_stop = host1x_sync_readl(host, HOST1X_SYNC_CMDPROC_STOP);
+ cmdproc_stop |= BIT(ch->id);
+ host1x_sync_writel(host, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
+
+ dev_dbg(host->dev, "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n",
+ __func__, host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET),
+ host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT),
+ cdma->last_pos);
+
+ host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
+ HOST1X_CHANNEL_DMACTRL);
+
+ host1x_sync_writel(host, BIT(ch->id), HOST1X_SYNC_CH_TEARDOWN);
+
+ cdma->running = false;
+ cdma->torndown = true;
+}
+
+static void cdma_resume(struct host1x_cdma *cdma, u32 getptr)
+{
+ struct host1x *host1x = cdma_to_host1x(cdma);
+ struct host1x_channel *ch = cdma_to_channel(cdma);
+ u32 cmdproc_stop;
+
+ dev_dbg(host1x->dev,
+ "resuming channel (id %d, DMAGET restart = 0x%x)\n",
+ ch->id, getptr);
+
+ cmdproc_stop = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP);
+ cmdproc_stop &= ~(BIT(ch->id));
+ host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
+
+ cdma->torndown = false;
+ cdma_timeout_restart(cdma, getptr);
+}
+
+/*
+ * If this timeout fires, the current sync_queue entry has exceeded its TTL:
+ * the user context is timed out and any remaining submits that were already
+ * issued are cleaned up (future submits return an error).
+ */
+static void cdma_timeout_handler(struct work_struct *work)
+{
+ struct host1x_cdma *cdma;
+ struct host1x *host1x;
+ struct host1x_channel *ch;
+
+ u32 syncpt_val;
+
+ u32 prev_cmdproc, cmdproc_stop;
+
+ cdma = container_of(to_delayed_work(work), struct host1x_cdma,
+ timeout.wq);
+ host1x = cdma_to_host1x(cdma);
+ ch = cdma_to_channel(cdma);
+
+ host1x_debug_dump(cdma_to_host1x(cdma));
+
+ mutex_lock(&cdma->lock);
+
+ if (!cdma->timeout.client) {
+ dev_dbg(host1x->dev,
+ "cdma_timeout: expired, but has no clientid\n");
+ mutex_unlock(&cdma->lock);
+ return;
+ }
+
+ /* stop processing to get a clean snapshot */
+ prev_cmdproc = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP);
+ cmdproc_stop = prev_cmdproc | BIT(ch->id);
+ host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
+
+ dev_dbg(host1x->dev, "cdma_timeout: cmdproc was 0x%x is 0x%x\n",
+ prev_cmdproc, cmdproc_stop);
+
+ syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);
+
+ /* has buffer actually completed? */
+ if ((s32)(syncpt_val - cdma->timeout.syncpt_val) >= 0) {
+ dev_dbg(host1x->dev,
+ "cdma_timeout: expired, but buffer had completed\n");
+ /* restore */
+ cmdproc_stop = prev_cmdproc & ~(BIT(ch->id));
+ host1x_sync_writel(host1x, cmdproc_stop,
+ HOST1X_SYNC_CMDPROC_STOP);
+ mutex_unlock(&cdma->lock);
+ return;
+ }
+
+ dev_warn(host1x->dev, "%s: timeout: %d (%s), HW thresh %d, done %d\n",
+ __func__, cdma->timeout.syncpt->id, cdma->timeout.syncpt->name,
+ syncpt_val, cdma->timeout.syncpt_val);
+
+ /* stop HW, resetting channel/module */
+ host1x_hw_cdma_freeze(host1x, cdma);
+
+ host1x_cdma_update_sync_queue(cdma, ch->dev);
+ mutex_unlock(&cdma->lock);
+}
+
+/*
+ * Init timeout resources
+ */
+static int cdma_timeout_init(struct host1x_cdma *cdma, u32 syncpt_id)
+{
+ INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler);
+ cdma->timeout.initialized = true;
+
+ return 0;
+}
+
+/*
+ * Clean up timeout resources
+ */
+static void cdma_timeout_destroy(struct host1x_cdma *cdma)
+{
+ if (cdma->timeout.initialized)
+ cancel_delayed_work(&cdma->timeout.wq);
+ cdma->timeout.initialized = false;
+}
+
+static const struct host1x_cdma_ops host1x_cdma_ops = {
+ .start = cdma_start,
+ .stop = cdma_stop,
+ .flush = cdma_flush,
+
+ .timeout_init = cdma_timeout_init,
+ .timeout_destroy = cdma_timeout_destroy,
+ .freeze = cdma_freeze,
+ .resume = cdma_resume,
+ .timeout_cpu_incr = cdma_timeout_cpu_incr,
+};
+
+static const struct host1x_pushbuffer_ops host1x_pushbuffer_ops = {
+ .init = push_buffer_init,
+};
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
new file mode 100644
index 0000000..ee19962
--- /dev/null
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -0,0 +1,168 @@
+/*
+ * Tegra host1x Channel
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/slab.h>
+#include <trace/events/host1x.h>
+
+#include "host1x.h"
+#include "host1x_bo.h"
+#include "channel.h"
+#include "dev.h"
+#include "intr.h"
+#include "job.h"
+
+#define HOST1X_CHANNEL_SIZE 16384
+#define TRACE_MAX_LENGTH 128U
+
+static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
+ u32 offset, u32 words)
+{
+ void *mem = NULL;
+
+ if (host1x_debug_trace_cmdbuf)
+ mem = host1x_bo_mmap(bo);
+
+ if (mem) {
+ u32 i;
+ /*
+ * Write in batches of 128 words, as there seems to be a limit
+ * on how much can be output to ftrace at once.
+ */
+ for (i = 0; i < words; i += TRACE_MAX_LENGTH) {
+ trace_host1x_cdma_push_gather(
+ dev_name(cdma_to_channel(cdma)->dev),
+ (u32)bo, min(words - i, TRACE_MAX_LENGTH),
+ offset + i * sizeof(u32), mem);
+ }
+ host1x_bo_munmap(bo, mem);
+ }
+}
+
+static void submit_gathers(struct host1x_job *job)
+{
+ struct host1x_cdma *cdma = &job->channel->cdma;
+ unsigned int i;
+
+ for (i = 0; i < job->num_gathers; i++) {
+ struct host1x_job_gather *g = &job->gathers[i];
+ u32 op1 = host1x_opcode_gather(g->words);
+ u32 op2 = g->base + g->offset;
+ trace_write_gather(cdma, g->bo, g->offset, op1 & 0xffff);
+ host1x_cdma_push(cdma, op1, op2);
+ }
+}
+
+static int channel_submit(struct host1x_job *job)
+{
+ struct host1x_channel *ch = job->channel;
+ struct host1x_syncpt *sp;
+ u32 user_syncpt_incrs = job->syncpt_incrs;
+ u32 prev_max = 0;
+ u32 syncval;
+ int err;
+ struct host1x_waitlist *completed_waiter = NULL;
+ struct host1x *host = dev_get_drvdata(ch->dev->parent);
+
+ sp = host->syncpt + job->syncpt_id;
+ trace_host1x_channel_submit(dev_name(ch->dev),
+ job->num_gathers, job->num_relocs,
+ job->num_waitchk, job->syncpt_id,
+ job->syncpt_incrs);
+
+ /* before error checks, return current max */
+ prev_max = job->syncpt_end = host1x_syncpt_read_max(sp);
+
+ /* get submit lock */
+ err = mutex_lock_interruptible(&ch->submitlock);
+ if (err)
+ goto error;
+
+ completed_waiter = kzalloc(sizeof(*completed_waiter), GFP_KERNEL);
+ if (!completed_waiter) {
+ mutex_unlock(&ch->submitlock);
+ err = -ENOMEM;
+ goto error;
+ }
+
+ /* begin a CDMA submit */
+ err = host1x_cdma_begin(&ch->cdma, job);
+ if (err) {
+ mutex_unlock(&ch->submitlock);
+ goto error;
+ }
+
+ if (job->serialize) {
+ /*
+ * Force serialization by inserting a host wait for the
+ * previous job to finish before this one can commence.
+ */
+ host1x_cdma_push(&ch->cdma,
+ host1x_opcode_setclass(HOST1X_CLASS_HOST1X,
+ host1x_uclass_wait_syncpt_r(), 1),
+ host1x_class_host_wait_syncpt(job->syncpt_id,
+ host1x_syncpt_read_max(sp)));
+ }
+
+ syncval = host1x_syncpt_incr_max(sp, user_syncpt_incrs);
+
+ job->syncpt_end = syncval;
+
+ /* add a setclass for modules that require it */
+ if (job->class)
+ host1x_cdma_push(&ch->cdma,
+ host1x_opcode_setclass(job->class, 0, 0),
+ HOST1X_OPCODE_NOP);
+
+ submit_gathers(job);
+
+ /* end CDMA submit & stash pinned hMems into sync queue */
+ host1x_cdma_end(&ch->cdma, job);
+
+ trace_host1x_channel_submitted(dev_name(ch->dev), prev_max, syncval);
+
+ /* schedule a submit complete interrupt */
+ err = host1x_intr_add_action(host, job->syncpt_id, syncval,
+ HOST1X_INTR_ACTION_SUBMIT_COMPLETE, ch,
+ completed_waiter, NULL);
+ completed_waiter = NULL;
+ WARN(err, "Failed to set submit complete interrupt");
+
+ mutex_unlock(&ch->submitlock);
+
+ return 0;
+
+error:
+ kfree(completed_waiter);
+ return err;
+}
+
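+/*
+ * Each channel owns a HOST1X_CHANNEL_SIZE-byte slice of the host1x register
+ * aperture, located by the channel index.
+ */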
+static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev,
+ unsigned int index)
+{
+ ch->id = index;
+ mutex_init(&ch->reflock);
+ mutex_init(&ch->submitlock);
+
+ ch->regs = dev->regs + index * HOST1X_CHANNEL_SIZE;
+ return 0;
+}
+
+static const struct host1x_channel_ops host1x_channel_ops = {
+ .init = host1x_channel_init,
+ .submit = channel_submit,
+};
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
new file mode 100644
index 0000000..334c038
--- /dev/null
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (C) 2011-2013 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+
+#include <linux/io.h>
+
+#include "dev.h"
+#include "debug.h"
+#include "cdma.h"
+#include "channel.h"
+#include "host1x_bo.h"
+
+#define HOST1X_DEBUG_MAX_PAGE_OFFSET 102400
+
+enum {
+ HOST1X_OPCODE_SETCLASS = 0x00,
+ HOST1X_OPCODE_INCR = 0x01,
+ HOST1X_OPCODE_NONINCR = 0x02,
+ HOST1X_OPCODE_MASK = 0x03,
+ HOST1X_OPCODE_IMM = 0x04,
+ HOST1X_OPCODE_RESTART = 0x05,
+ HOST1X_OPCODE_GATHER = 0x06,
+ HOST1X_OPCODE_EXTEND = 0x0e,
+};
+
+enum {
+ HOST1X_OPCODE_EXTEND_ACQUIRE_MLOCK = 0x00,
+ HOST1X_OPCODE_EXTEND_RELEASE_MLOCK = 0x01,
+};
+
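+/*
+ * Decode and print a single command word; the return value is the number of
+ * data words that follow the opcode, so the caller knows how many of the
+ * subsequent words to print as payload.
+ */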
+static unsigned int show_channel_command(struct output *o, u32 val)
+{
+ unsigned mask;
+ unsigned subop;
+
+ switch (val >> 28) {
+ case HOST1X_OPCODE_SETCLASS:
+ mask = val & 0x3f;
+ if (mask) {
+ host1x_debug_output(o, "SETCL(class=%03x, offset=%03x, mask=%02x, [",
+ val >> 6 & 0x3ff,
+ val >> 16 & 0xfff, mask);
+ return hweight8(mask);
+ } else {
+ host1x_debug_output(o, "SETCL(class=%03x)\n",
+ val >> 6 & 0x3ff);
+ return 0;
+ }
+
+ case HOST1X_OPCODE_INCR:
+ host1x_debug_output(o, "INCR(offset=%03x, [",
+ val >> 16 & 0xfff);
+ return val & 0xffff;
+
+ case HOST1X_OPCODE_NONINCR:
+ host1x_debug_output(o, "NONINCR(offset=%03x, [",
+ val >> 16 & 0xfff);
+ return val & 0xffff;
+
+ case HOST1X_OPCODE_MASK:
+ mask = val & 0xffff;
+ host1x_debug_output(o, "MASK(offset=%03x, mask=%03x, [",
+ val >> 16 & 0xfff, mask);
+ return hweight16(mask);
+
+ case HOST1X_OPCODE_IMM:
+ host1x_debug_output(o, "IMM(offset=%03x, data=%03x)\n",
+ val >> 16 & 0xfff, val & 0xffff);
+ return 0;
+
+ case HOST1X_OPCODE_RESTART:
+ host1x_debug_output(o, "RESTART(offset=%08x)\n", val << 4);
+ return 0;
+
+ case HOST1X_OPCODE_GATHER:
+ host1x_debug_output(o, "GATHER(offset=%03x, insert=%d, type=%d, count=%04x, addr=[",
+ val >> 16 & 0xfff, val >> 15 & 0x1,
+ val >> 14 & 0x1, val & 0x3fff);
+ return 1;
+
+ case HOST1X_OPCODE_EXTEND:
+ subop = val >> 24 & 0xf;
+ if (subop == HOST1X_OPCODE_EXTEND_ACQUIRE_MLOCK)
+ host1x_debug_output(o, "ACQUIRE_MLOCK(index=%d)\n",
+ val & 0xff);
+ else if (subop == HOST1X_OPCODE_EXTEND_RELEASE_MLOCK)
+ host1x_debug_output(o, "RELEASE_MLOCK(index=%d)\n",
+ val & 0xff);
+ else
+ host1x_debug_output(o, "EXTEND_UNKNOWN(%08x)\n", val);
+ return 0;
+
+ default:
+ return 0;
+ }
+}
+
+static void show_gather(struct output *o, phys_addr_t phys_addr,
+ unsigned int words, struct host1x_cdma *cdma,
+ phys_addr_t pin_addr, u32 *map_addr)
+{
+ /* Map dmaget cursor to corresponding mem handle */
+ u32 offset = phys_addr - pin_addr;
+ unsigned int data_count = 0, i;
+
+ /*
+ * Sometimes we're given a different hardware address for the same
+ * page - in these cases the computed offset becomes invalid and we
+ * just have to bail out.
+ */
+ if (offset > HOST1X_DEBUG_MAX_PAGE_OFFSET) {
+ host1x_debug_output(o, "[address mismatch]\n");
+ return;
+ }
+
+ for (i = 0; i < words; i++) {
+ u32 addr = phys_addr + i * 4;
+ u32 val = *(map_addr + offset / 4 + i);
+
+ if (!data_count) {
+ host1x_debug_output(o, "%08x: %08x:", addr, val);
+ data_count = show_channel_command(o, val);
+ } else {
+ host1x_debug_output(o, "%08x%s", val,
+ data_count > 0 ? ", " : "])\n");
+ data_count--;
+ }
+ }
+}
+
+static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
+{
+ struct host1x_job *job;
+
+ list_for_each_entry(job, &cdma->sync_queue, list) {
+ int i;
+ host1x_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d, first_get=%08x, timeout=%d num_slots=%d, num_handles=%d\n",
+ job, job->syncpt_id, job->syncpt_end,
+ job->first_get, job->timeout,
+ job->num_slots, job->num_unpins);
+
+ for (i = 0; i < job->num_gathers; i++) {
+ struct host1x_job_gather *g = &job->gathers[i];
+ u32 *mapped;
+
+ if (job->gather_copy_mapped)
+ mapped = (u32 *)job->gather_copy_mapped;
+ else
+ mapped = host1x_bo_mmap(g->bo);
+
+ if (!mapped) {
+ host1x_debug_output(o, "[could not mmap]\n");
+ continue;
+ }
+
+ host1x_debug_output(o, " GATHER at %08x+%04x, %d words\n",
+ g->base, g->offset, g->words);
+
+ show_gather(o, g->base + g->offset, g->words, cdma,
+ g->base, mapped);
+
+ if (!job->gather_copy_mapped)
+ host1x_bo_munmap(g->bo, mapped);
+ }
+ }
+}
+
+static void host1x_debug_show_channel_cdma(struct host1x *host,
+ struct host1x_channel *ch,
+ struct output *o)
+{
+ struct host1x_cdma *cdma = &ch->cdma;
+ u32 dmaput, dmaget, dmactrl;
+ u32 cbstat, cbread;
+ u32 val, base, baseval;
+
+ dmaput = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT);
+ dmaget = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET);
+ dmactrl = host1x_ch_readl(ch, HOST1X_CHANNEL_DMACTRL);
+ cbread = host1x_sync_readl(host, HOST1X_SYNC_CBREAD(ch->id));
+ cbstat = host1x_sync_readl(host, HOST1X_SYNC_CBSTAT(ch->id));
+
+ host1x_debug_output(o, "%d-%s: ", ch->id, dev_name(ch->dev));
+
+ if (HOST1X_CHANNEL_DMACTRL_DMASTOP_V(dmactrl) ||
+ !ch->cdma.push_buffer.mapped) {
+ host1x_debug_output(o, "inactive\n\n");
+ return;
+ }
+
+ if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == HOST1X_CLASS_HOST1X &&
+ HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
+ HOST1X_UCLASS_WAIT_SYNCPT)
+ host1x_debug_output(o, "waiting on syncpt %d val %d\n",
+ cbread >> 24, cbread & 0xffffff);
+ else if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) ==
+ HOST1X_CLASS_HOST1X &&
+ HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
+ HOST1X_UCLASS_WAIT_SYNCPT_BASE) {
+
+ base = (cbread >> 16) & 0xff;
+ baseval =
+ host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(base));
+ val = cbread & 0xffff;
+ host1x_debug_output(o, "waiting on syncpt %d val %d (base %d = %d; offset = %d)\n",
+ cbread >> 24, baseval + val, base,
+ baseval, val);
+ } else
+ host1x_debug_output(o, "active class %02x, offset %04x, val %08x\n",
+ HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat),
+ HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat),
+ cbread);
+
+ host1x_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n",
+ dmaput, dmaget, dmactrl);
+ host1x_debug_output(o, "CBREAD %08x, CBSTAT %08x\n", cbread, cbstat);
+
+ show_channel_gathers(o, cdma);
+ host1x_debug_output(o, "\n");
+}
+
+static void host1x_debug_show_channel_fifo(struct host1x *host,
+ struct host1x_channel *ch,
+ struct output *o)
+{
+ u32 val, rd_ptr, wr_ptr, start, end;
+ unsigned int data_count = 0;
+
+ host1x_debug_output(o, "%d: fifo:\n", ch->id);
+
+ val = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT);
+ host1x_debug_output(o, "FIFOSTAT %08x\n", val);
+ if (HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(val)) {
+ host1x_debug_output(o, "[empty]\n");
+ return;
+ }
+
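+ /*
+ * Peek at the channel's command FIFO: select the channel via
+ * CFPEEK_CTRL, read the FIFO read/write pointers and limits, then
+ * walk the entries one by one through CFPEEK_READ.
+ */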
+ host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
+ host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
+ HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id),
+ HOST1X_SYNC_CFPEEK_CTRL);
+
+ val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_PTRS);
+ rd_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(val);
+ wr_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(val);
+
+ val = host1x_sync_readl(host, HOST1X_SYNC_CF_SETUP(ch->id));
+ start = HOST1X_SYNC_CF_SETUP_BASE_V(val);
+ end = HOST1X_SYNC_CF_SETUP_LIMIT_V(val);
+
+ do {
+ host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
+ host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
+ HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id) |
+ HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(rd_ptr),
+ HOST1X_SYNC_CFPEEK_CTRL);
+ val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_READ);
+
+ if (!data_count) {
+ host1x_debug_output(o, "%08x:", val);
+ data_count = show_channel_command(o, val);
+ } else {
+ host1x_debug_output(o, "%08x%s", val,
+ data_count > 0 ? ", " : "])\n");
+ data_count--;
+ }
+
+ if (rd_ptr == end)
+ rd_ptr = start;
+ else
+ rd_ptr++;
+ } while (rd_ptr != wr_ptr);
+
+ if (data_count)
+ host1x_debug_output(o, ", ...])\n");
+ host1x_debug_output(o, "\n");
+
+ host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
+}
+
+static void host1x_debug_show_mlocks(struct host1x *host, struct output *o)
+{
+ int i;
+
+ host1x_debug_output(o, "---- mlocks ----\n");
+ for (i = 0; i < host1x_syncpt_nb_mlocks(host); i++) {
+ u32 owner =
+ host1x_sync_readl(host, HOST1X_SYNC_MLOCK_OWNER(i));
+ if (HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(owner))
+ host1x_debug_output(o, "%d: locked by channel %d\n",
+ i, HOST1X_SYNC_MLOCK_OWNER_CHID_F(owner));
+ else if (HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(owner))
+ host1x_debug_output(o, "%d: locked by cpu\n", i);
+ else
+ host1x_debug_output(o, "%d: unlocked\n", i);
+ }
+ host1x_debug_output(o, "\n");
+}
+
+static const struct host1x_debug_ops host1x_debug_ops = {
+ .show_channel_cdma = host1x_debug_show_channel_cdma,
+ .show_channel_fifo = host1x_debug_show_channel_fifo,
+ .show_mlocks = host1x_debug_show_mlocks,
+};
diff --git a/drivers/gpu/host1x/hw/host1x01.c b/drivers/gpu/host1x/hw/host1x01.c
new file mode 100644
index 0000000..a14e91c
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x01.c
@@ -0,0 +1,42 @@
+/*
+ * Host1x init for T20 and T30 Architecture Chips
+ *
+ * Copyright (c) 2011-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* include hw specification */
+#include "hw/host1x01.h"
+#include "hw/host1x01_hardware.h"
+
+/* include code */
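+/*
+ * The implementation sources below are compiled into this translation unit
+ * so that each host1x hardware version gets its own static copies, bound to
+ * the register definitions included above.
+ */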
+#include "hw/cdma_hw.c"
+#include "hw/channel_hw.c"
+#include "hw/debug_hw.c"
+#include "hw/intr_hw.c"
+#include "hw/syncpt_hw.c"
+
+#include "dev.h"
+
+int host1x01_init(struct host1x *host)
+{
+ host->channel_op = &host1x_channel_ops;
+ host->cdma_op = &host1x_cdma_ops;
+ host->cdma_pb_op = &host1x_pushbuffer_ops;
+ host->syncpt_op = &host1x_syncpt_ops;
+ host->intr_op = &host1x_intr_ops;
+ host->debug_op = &host1x_debug_ops;
+
+ return 0;
+}
diff --git a/drivers/gpu/host1x/hw/host1x01.h b/drivers/gpu/host1x/hw/host1x01.h
new file mode 100644
index 0000000..2706b67
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x01.h
@@ -0,0 +1,25 @@
+/*
+ * Host1x init for T20 and T30 Architecture Chips
+ *
+ * Copyright (c) 2011-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef HOST1X_HOST1X01_H
+#define HOST1X_HOST1X01_H
+
+struct host1x;
+
+int host1x01_init(struct host1x *host);
+
+#endif /* HOST1X_HOST1X01_H */
diff --git a/drivers/gpu/host1x/hw/host1x01_hardware.h b/drivers/gpu/host1x/hw/host1x01_hardware.h
new file mode 100644
index 0000000..5f0fb86
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x01_hardware.h
@@ -0,0 +1,143 @@
+/*
+ * Tegra host1x Register Offsets for Tegra20 and Tegra30
+ *
+ * Copyright (c) 2010-2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_HOST1X01_HARDWARE_H
+#define __HOST1X_HOST1X01_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include "hw_host1x01_channel.h"
+#include "hw_host1x01_sync.h"
+#include "hw_host1x01_uclass.h"
+
+static inline u32 host1x_class_host_wait_syncpt(
+ unsigned indx, unsigned threshold)
+{
+ return host1x_uclass_wait_syncpt_indx_f(indx)
+ | host1x_uclass_wait_syncpt_thresh_f(threshold);
+}
+
+static inline u32 host1x_class_host_load_syncpt_base(
+ unsigned indx, unsigned threshold)
+{
+ return host1x_uclass_load_syncpt_base_base_indx_f(indx)
+ | host1x_uclass_load_syncpt_base_value_f(threshold);
+}
+
+static inline u32 host1x_class_host_wait_syncpt_base(
+ unsigned indx, unsigned base_indx, unsigned offset)
+{
+ return host1x_uclass_wait_syncpt_base_indx_f(indx)
+ | host1x_uclass_wait_syncpt_base_base_indx_f(base_indx)
+ | host1x_uclass_wait_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt_base(
+ unsigned base_indx, unsigned offset)
+{
+ return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
+ | host1x_uclass_incr_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt(
+ unsigned cond, unsigned indx)
+{
+ return host1x_uclass_incr_syncpt_cond_f(cond)
+ | host1x_uclass_incr_syncpt_indx_f(indx);
+}
+
+static inline u32 host1x_class_host_indoff_reg_write(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = host1x_uclass_indoff_indbe_f(0xf)
+ | host1x_uclass_indoff_indmodid_f(mod_id)
+ | host1x_uclass_indoff_indroffset_f(offset);
+ if (auto_inc)
+ v |= host1x_uclass_indoff_autoinc_f(1);
+ return v;
+}
+
+static inline u32 host1x_class_host_indoff_reg_read(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = host1x_uclass_indoff_indmodid_f(mod_id)
+ | host1x_uclass_indoff_indroffset_f(offset)
+ | host1x_uclass_indoff_rwn_read_v();
+ if (auto_inc)
+ v |= host1x_uclass_indoff_autoinc_f(1);
+ return v;
+}
+
+
+/* cdma opcodes */
+static inline u32 host1x_opcode_setclass(
+ unsigned class_id, unsigned offset, unsigned mask)
+{
+ return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
+}
+
+static inline u32 host1x_opcode_incr(unsigned offset, unsigned count)
+{
+ return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count)
+{
+ return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask)
+{
+ return (3 << 28) | (offset << 16) | mask;
+}
+
+static inline u32 host1x_opcode_imm(unsigned offset, unsigned value)
+{
+ return (4 << 28) | (offset << 16) | value;
+}
+
+static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
+{
+ return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(),
+ host1x_class_host_incr_syncpt(cond, indx));
+}
+
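+/*
+ * The restart address is encoded with 16-byte granularity (address >> 4),
+ * which is why the debug decoder shifts it back left by four bits.
+ */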
+static inline u32 host1x_opcode_restart(unsigned address)
+{
+ return (5 << 28) | (address >> 4);
+}
+
+static inline u32 host1x_opcode_gather(unsigned count)
+{
+ return (6 << 28) | count;
+}
+
+static inline u32 host1x_opcode_gather_nonincr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | count;
+}
+
+static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
+}
+
+#define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0)
+
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x01_channel.h b/drivers/gpu/host1x/hw/hw_host1x01_channel.h
new file mode 100644
index 0000000..b4bc7ca
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x01_channel.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+ * Function naming determines intended use:
+ *
+ * <x>_r(void) : Returns the offset for register <x>.
+ *
+ * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+ *
+ * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+ *
+ * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ * and masked to place it at field <y> of register <x>. This value
+ * can be |'d with others to produce a full register value for
+ * register <x>.
+ *
+ * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
+ * value can be ~'d and then &'d to clear the value of field <y> for
+ * register <x>.
+ *
+ * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ * to place it at field <y> of register <x>. This value can be |'d
+ * with others to produce a full register value for <x>.
+ *
+ * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ * <x> value 'r' after being shifted to place its LSB at bit 0.
+ * This value is suitable for direct comparison with other unshifted
+ * values appropriate for use in field <y> of register <x>.
+ *
+ * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ * field <y> of register <x>. This value is suitable for direct
+ * comparison with unshifted values appropriate for use in field <y>
+ * of register <x>.
+ */
+
+#ifndef __hw_host1x_channel_host1x_h__
+#define __hw_host1x_channel_host1x_h__
+
+static inline u32 host1x_channel_fifostat_r(void)
+{
+ return 0x0;
+}
+#define HOST1X_CHANNEL_FIFOSTAT \
+ host1x_channel_fifostat_r()
+static inline u32 host1x_channel_fifostat_cfempty_v(u32 r)
+{
+ return (r >> 10) & 0x1;
+}
+#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(r) \
+ host1x_channel_fifostat_cfempty_v(r)
+static inline u32 host1x_channel_dmastart_r(void)
+{
+ return 0x14;
+}
+#define HOST1X_CHANNEL_DMASTART \
+ host1x_channel_dmastart_r()
+static inline u32 host1x_channel_dmaput_r(void)
+{
+ return 0x18;
+}
+#define HOST1X_CHANNEL_DMAPUT \
+ host1x_channel_dmaput_r()
+static inline u32 host1x_channel_dmaget_r(void)
+{
+ return 0x1c;
+}
+#define HOST1X_CHANNEL_DMAGET \
+ host1x_channel_dmaget_r()
+static inline u32 host1x_channel_dmaend_r(void)
+{
+ return 0x20;
+}
+#define HOST1X_CHANNEL_DMAEND \
+ host1x_channel_dmaend_r()
+static inline u32 host1x_channel_dmactrl_r(void)
+{
+ return 0x24;
+}
+#define HOST1X_CHANNEL_DMACTRL \
+ host1x_channel_dmactrl_r()
+static inline u32 host1x_channel_dmactrl_dmastop(void)
+{
+ return 1 << 0;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP \
+ host1x_channel_dmactrl_dmastop()
+static inline u32 host1x_channel_dmactrl_dmastop_v(u32 r)
+{
+ return (r >> 0) & 0x1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP_V(r) \
+ host1x_channel_dmactrl_dmastop_v(r)
+static inline u32 host1x_channel_dmactrl_dmagetrst(void)
+{
+ return 1 << 1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAGETRST \
+ host1x_channel_dmactrl_dmagetrst()
+static inline u32 host1x_channel_dmactrl_dmainitget(void)
+{
+ return 1 << 2;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAINITGET \
+ host1x_channel_dmactrl_dmainitget()
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x01_sync.h b/drivers/gpu/host1x/hw/hw_host1x01_sync.h
new file mode 100644
index 0000000..ac704e5
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x01_sync.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+ * Function naming determines intended use:
+ *
+ * <x>_r(void) : Returns the offset for register <x>.
+ *
+ * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+ *
+ * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+ *
+ * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ * and masked to place it at field <y> of register <x>. This value
+ * can be |'d with others to produce a full register value for
+ * register <x>.
+ *
+ * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
+ * value can be ~'d and then &'d to clear the value of field <y> for
+ * register <x>.
+ *
+ * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ * to place it at field <y> of register <x>. This value can be |'d
+ * with others to produce a full register value for <x>.
+ *
+ * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ * <x> value 'r' after being shifted to place its LSB at bit 0.
+ * This value is suitable for direct comparison with other unshifted
+ * values appropriate for use in field <y> of register <x>.
+ *
+ * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ * field <y> of register <x>. This value is suitable for direct
+ * comparison with unshifted values appropriate for use in field <y>
+ * of register <x>.
+ */
+
+#ifndef __hw_host1x01_sync_h__
+#define __hw_host1x01_sync_h__
+
+#define REGISTER_STRIDE 4
+
+static inline u32 host1x_sync_syncpt_r(unsigned int id)
+{
+ return 0x400 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT(id) \
+ host1x_sync_syncpt_r(id)
+static inline u32 host1x_sync_syncpt_thresh_cpu0_int_status_r(unsigned int id)
+{
+ return 0x40 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id) \
+ host1x_sync_syncpt_thresh_cpu0_int_status_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_disable_r(unsigned int id)
+{
+ return 0x60 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id) \
+ host1x_sync_syncpt_thresh_int_disable_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_enable_cpu0_r(unsigned int id)
+{
+ return 0x68 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id) \
+ host1x_sync_syncpt_thresh_int_enable_cpu0_r(id)
+static inline u32 host1x_sync_cf_setup_r(unsigned int channel)
+{
+ return 0x80 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CF_SETUP(channel) \
+ host1x_sync_cf_setup_r(channel)
+static inline u32 host1x_sync_cf_setup_base_v(u32 r)
+{
+ return (r >> 0) & 0x1ff;
+}
+#define HOST1X_SYNC_CF_SETUP_BASE_V(r) \
+ host1x_sync_cf_setup_base_v(r)
+static inline u32 host1x_sync_cf_setup_limit_v(u32 r)
+{
+ return (r >> 16) & 0x1ff;
+}
+#define HOST1X_SYNC_CF_SETUP_LIMIT_V(r) \
+ host1x_sync_cf_setup_limit_v(r)
+static inline u32 host1x_sync_cmdproc_stop_r(void)
+{
+ return 0xac;
+}
+#define HOST1X_SYNC_CMDPROC_STOP \
+ host1x_sync_cmdproc_stop_r()
+static inline u32 host1x_sync_ch_teardown_r(void)
+{
+ return 0xb0;
+}
+#define HOST1X_SYNC_CH_TEARDOWN \
+ host1x_sync_ch_teardown_r()
+static inline u32 host1x_sync_usec_clk_r(void)
+{
+ return 0x1a4;
+}
+#define HOST1X_SYNC_USEC_CLK \
+ host1x_sync_usec_clk_r()
+static inline u32 host1x_sync_ctxsw_timeout_cfg_r(void)
+{
+ return 0x1a8;
+}
+#define HOST1X_SYNC_CTXSW_TIMEOUT_CFG \
+ host1x_sync_ctxsw_timeout_cfg_r()
+static inline u32 host1x_sync_ip_busy_timeout_r(void)
+{
+ return 0x1bc;
+}
+#define HOST1X_SYNC_IP_BUSY_TIMEOUT \
+ host1x_sync_ip_busy_timeout_r()
+static inline u32 host1x_sync_mlock_owner_r(unsigned int id)
+{
+ return 0x340 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_MLOCK_OWNER(id) \
+ host1x_sync_mlock_owner_r(id)
+static inline u32 host1x_sync_mlock_owner_chid_f(u32 v)
+{
+ return (v & 0xf) << 8;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CHID_F(v) \
+ host1x_sync_mlock_owner_chid_f(v)
+static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r)
+{
+ return (r >> 1) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(r) \
+ host1x_sync_mlock_owner_cpu_owns_v(r)
+static inline u32 host1x_sync_mlock_owner_ch_owns_v(u32 r)
+{
+ return (r >> 0) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(r) \
+ host1x_sync_mlock_owner_ch_owns_v(r)
+static inline u32 host1x_sync_syncpt_int_thresh_r(unsigned int id)
+{
+ return 0x500 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_INT_THRESH(id) \
+ host1x_sync_syncpt_int_thresh_r(id)
+static inline u32 host1x_sync_syncpt_base_r(unsigned int id)
+{
+ return 0x600 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_BASE(id) \
+ host1x_sync_syncpt_base_r(id)
+static inline u32 host1x_sync_syncpt_cpu_incr_r(unsigned int id)
+{
+ return 0x700 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_CPU_INCR(id) \
+ host1x_sync_syncpt_cpu_incr_r(id)
+static inline u32 host1x_sync_cbread_r(unsigned int channel)
+{
+ return 0x720 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBREAD(channel) \
+ host1x_sync_cbread_r(channel)
+static inline u32 host1x_sync_cfpeek_ctrl_r(void)
+{
+ return 0x74c;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL \
+ host1x_sync_cfpeek_ctrl_r()
+static inline u32 host1x_sync_cfpeek_ctrl_addr_f(u32 v)
+{
+ return (v & 0x1ff) << 0;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(v) \
+ host1x_sync_cfpeek_ctrl_addr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_channr_f(u32 v)
+{
+ return (v & 0x7) << 16;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(v) \
+ host1x_sync_cfpeek_ctrl_channr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_ena_f(u32 v)
+{
+ return (v & 0x1) << 31;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ENA_F(v) \
+ host1x_sync_cfpeek_ctrl_ena_f(v)
+static inline u32 host1x_sync_cfpeek_read_r(void)
+{
+ return 0x750;
+}
+#define HOST1X_SYNC_CFPEEK_READ \
+ host1x_sync_cfpeek_read_r()
+static inline u32 host1x_sync_cfpeek_ptrs_r(void)
+{
+ return 0x754;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS \
+ host1x_sync_cfpeek_ptrs_r()
+static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(u32 r)
+{
+ return (r >> 0) & 0x1ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(r) \
+ host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(r)
+static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(u32 r)
+{
+ return (r >> 16) & 0x1ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(r) \
+ host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(r)
+static inline u32 host1x_sync_cbstat_r(unsigned int channel)
+{
+ return 0x758 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBSTAT(channel) \
+ host1x_sync_cbstat_r(channel)
+static inline u32 host1x_sync_cbstat_cboffset_v(u32 r)
+{
+ return (r >> 0) & 0xffff;
+}
+#define HOST1X_SYNC_CBSTAT_CBOFFSET_V(r) \
+ host1x_sync_cbstat_cboffset_v(r)
+static inline u32 host1x_sync_cbstat_cbclass_v(u32 r)
+{
+ return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CBSTAT_CBCLASS_V(r) \
+ host1x_sync_cbstat_cbclass_v(r)
+
+#endif /* __hw_host1x01_sync_h__ */
diff --git a/drivers/gpu/host1x/hw/hw_host1x01_uclass.h b/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
new file mode 100644
index 0000000..42f3ce1
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+ * Function naming determines intended use:
+ *
+ * <x>_r(void) : Returns the offset for register <x>.
+ *
+ * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+ *
+ * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+ *
+ * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ * and masked to place it at field <y> of register <x>. This value
+ * can be |'d with others to produce a full register value for
+ * register <x>.
+ *
+ * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
+ * value can be ~'d and then &'d to clear the value of field <y> for
+ * register <x>.
+ *
+ * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ * to place it at field <y> of register <x>. This value can be |'d
+ * with others to produce a full register value for <x>.
+ *
+ * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ * <x> value 'r' after being shifted to place its LSB at bit 0.
+ * This value is suitable for direct comparison with other unshifted
+ * values appropriate for use in field <y> of register <x>.
+ *
+ * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ * field <y> of register <x>. This value is suitable for direct
+ * comparison with unshifted values appropriate for use in field <y>
+ * of register <x>.
+ */
+
+#ifndef __hw_host1x_uclass_host1x_h__
+#define __hw_host1x_uclass_host1x_h__
+
+static inline u32 host1x_uclass_incr_syncpt_r(void)
+{
+ return 0x0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT \
+ host1x_uclass_incr_syncpt_r()
+static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+{
+ return (v & 0xff) << 8;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
+ host1x_uclass_incr_syncpt_cond_f(v)
+static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+{
+ return (v & 0xff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+ host1x_uclass_incr_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_r(void)
+{
+ return 0x8;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT \
+ host1x_uclass_wait_syncpt_r()
+static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
+ host1x_uclass_wait_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
+ host1x_uclass_wait_syncpt_thresh_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_r(void)
+{
+ return 0x9;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
+ host1x_uclass_wait_syncpt_base_r()
+static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
+ host1x_uclass_wait_syncpt_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 16;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_wait_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
+{
+ return (v & 0xffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
+ host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_load_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
+ host1x_uclass_load_syncpt_base_value_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_incr_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
+ host1x_uclass_incr_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_indoff_r(void)
+{
+ return 0x2d;
+}
+#define HOST1X_UCLASS_INDOFF \
+ host1x_uclass_indoff_r()
+static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
+{
+ return (v & 0xf) << 28;
+}
+#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
+ host1x_uclass_indoff_indbe_f(v)
+static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
+{
+ return (v & 0x1) << 27;
+}
+#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
+ host1x_uclass_indoff_autoinc_f(v)
+static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
+{
+ return (v & 0xff) << 18;
+}
+#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
+ host1x_uclass_indoff_indmodid_f(v)
+static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
+{
+ return (v & 0xffff) << 2;
+}
+#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
+ host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_indoff_rwn_read_v(void)
+{
+ return 1;
+}
+#define HOST1X_UCLASS_INDOFF_RWN_READ_V \
+	host1x_uclass_indoff_rwn_read_v()
+#endif
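[Editorial sketch, not part of this patch: the _f() convention documented at the top of this header shifts and masks a value into its field so several helpers can be OR-ed into one method data word. The id and threshold values below are made up for illustration:]

    u32 id = 26, threshold = 100;   /* assumed caller-provided values */
    u32 data = host1x_uclass_wait_syncpt_indx_f(id) |         /* bits 31:24 */
               host1x_uclass_wait_syncpt_thresh_f(threshold); /* bits 23:0  */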
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
new file mode 100644
index 0000000..b592eef
--- /dev/null
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -0,0 +1,143 @@
+/*
+ * Tegra host1x Interrupt Management
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <asm/mach/irq.h>
+
+#include "intr.h"
+#include "dev.h"
+
+/*
+ * Sync point threshold interrupt service function
+ * Handles sync point threshold triggers, in interrupt context
+ */
+static void host1x_intr_syncpt_handle(struct host1x_syncpt *syncpt)
+{
+ unsigned int id = syncpt->id;
+ struct host1x *host = syncpt->host;
+
+ host1x_sync_writel(host, BIT_MASK(id),
+ HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id)));
+ host1x_sync_writel(host, BIT_MASK(id),
+ HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id)));
+
+ queue_work(host->intr_wq, &syncpt->intr.work);
+}
+
+static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
+{
+ struct host1x *host = dev_id;
+ unsigned long reg;
+ int i, id;
+
+ for (i = 0; i <= BIT_WORD(host->info->nb_pts); i++) {
+ reg = host1x_sync_readl(host,
+ HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
+ for_each_set_bit(id, &reg, BITS_PER_LONG) {
+ struct host1x_syncpt *syncpt =
+ host->syncpt + (i * BITS_PER_LONG + id);
+ host1x_intr_syncpt_handle(syncpt);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
+{
+ u32 i;
+
+ for (i = 0; i <= BIT_WORD(host->info->nb_pts); ++i) {
+ host1x_sync_writel(host, 0xffffffffu,
+ HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(i));
+ host1x_sync_writel(host, 0xffffffffu,
+ HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
+ }
+}
+
+static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
+ void (*syncpt_thresh_work)(struct work_struct *))
+{
+ int i, err;
+
+ host1x_hw_intr_disable_all_syncpt_intrs(host);
+
+ for (i = 0; i < host->info->nb_pts; i++)
+ INIT_WORK(&host->syncpt[i].intr.work, syncpt_thresh_work);
+
+ err = devm_request_irq(host->dev, host->intr_syncpt_irq,
+ syncpt_thresh_isr, IRQF_SHARED,
+ "host1x_syncpt", host);
+ if (IS_ERR_VALUE(err)) {
+ WARN_ON(1);
+ return err;
+ }
+
+ /* disable the ip_busy_timeout. this prevents write drops */
+ host1x_sync_writel(host, 0, HOST1X_SYNC_IP_BUSY_TIMEOUT);
+
+ /*
+	 * increase the auto-ack timeout to the maximum value. 2d will hang
+ * otherwise on Tegra2.
+ */
+ host1x_sync_writel(host, 0xff, HOST1X_SYNC_CTXSW_TIMEOUT_CFG);
+
+ /* update host clocks per usec */
+ host1x_sync_writel(host, cpm, HOST1X_SYNC_USEC_CLK);
+
+ return 0;
+}
+
+static void _host1x_intr_set_syncpt_threshold(struct host1x *host,
+ u32 id, u32 thresh)
+{
+ host1x_sync_writel(host, thresh, HOST1X_SYNC_SYNCPT_INT_THRESH(id));
+}
+
+static void _host1x_intr_enable_syncpt_intr(struct host1x *host, u32 id)
+{
+ host1x_sync_writel(host, BIT_MASK(id),
+ HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(BIT_WORD(id)));
+}
+
+static void _host1x_intr_disable_syncpt_intr(struct host1x *host, u32 id)
+{
+ host1x_sync_writel(host, BIT_MASK(id),
+ HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id)));
+ host1x_sync_writel(host, BIT_MASK(id),
+ HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id)));
+}
+
+static int _host1x_free_syncpt_irq(struct host1x *host)
+{
+ devm_free_irq(host->dev, host->intr_syncpt_irq, host);
+ flush_workqueue(host->intr_wq);
+ return 0;
+}
+
+static const struct host1x_intr_ops host1x_intr_ops = {
+ .init_host_sync = _host1x_intr_init_host_sync,
+ .set_syncpt_threshold = _host1x_intr_set_syncpt_threshold,
+ .enable_syncpt_intr = _host1x_intr_enable_syncpt_intr,
+ .disable_syncpt_intr = _host1x_intr_disable_syncpt_intr,
+ .disable_all_syncpt_intrs = _host1x_intr_disable_all_syncpt_intrs,
+ .free_syncpt_irq = _host1x_free_syncpt_irq,
+};
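[Editorial note, not part of this patch: _host1x_intr_init_host_sync() expects cpm to be the number of host1x clocks per microsecond; the caller derives it from the clock rate (see host1x_intr_start() in intr.c later in this patch). A worked example with an assumed 408 MHz host1x clock:]

    u32 hz  = 408000000;                  /* assumed clk_get_rate() result */
    u32 cpm = DIV_ROUND_UP(hz, 1000000);  /* 408 clocks per microsecond */
    host1x_sync_writel(host, cpm, HOST1X_SYNC_USEC_CLK);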
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
new file mode 100644
index 0000000..6117499
--- /dev/null
+++ b/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -0,0 +1,114 @@
+/*
+ * Tegra host1x Syncpoints
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+
+#include "dev.h"
+#include "syncpt.h"
+
+/*
+ * Write the current syncpoint value back to hw.
+ */
+static void syncpt_restore(struct host1x_syncpt *sp)
+{
+ struct host1x *host = sp->host;
+ int min = host1x_syncpt_read_min(sp);
+ host1x_sync_writel(host, min, HOST1X_SYNC_SYNCPT(sp->id));
+}
+
+/*
+ * Write the current waitbase value back to hw.
+ */
+static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
+{
+ struct host1x *host = sp->host;
+ host1x_sync_writel(host, sp->base_val,
+ HOST1X_SYNC_SYNCPT_BASE(sp->id));
+}
+
+/*
+ * Read waitbase value from hw.
+ */
+static void syncpt_read_wait_base(struct host1x_syncpt *sp)
+{
+ struct host1x *host = sp->host;
+ sp->base_val =
+ host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(sp->id));
+}
+
+/*
+ * Updates the last value read from hardware.
+ */
+static u32 syncpt_load(struct host1x_syncpt *sp)
+{
+ struct host1x *host = sp->host;
+ u32 old, live;
+
+ /* Loop in case there's a race writing to min_val */
+ do {
+ old = host1x_syncpt_read_min(sp);
+ live = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT(sp->id));
+ } while ((u32)atomic_cmpxchg(&sp->min_val, old, live) != old);
+
+ if (!host1x_syncpt_check_max(sp, live))
+ dev_err(host->dev, "%s failed: id=%u, min=%d, max=%d\n",
+ __func__, sp->id, host1x_syncpt_read_min(sp),
+ host1x_syncpt_read_max(sp));
+
+ return live;
+}
+
+/*
+ * Write a cpu syncpoint increment to the hardware, without touching
+ * the cache.
+ */
+static void syncpt_cpu_incr(struct host1x_syncpt *sp)
+{
+ struct host1x *host = sp->host;
+ u32 reg_offset = sp->id / 32;
+
+ if (!host1x_syncpt_client_managed(sp) &&
+ host1x_syncpt_idle(sp)) {
+ dev_err(host->dev, "Trying to increment syncpoint id %d beyond max\n",
+ sp->id);
+ host1x_debug_dump(sp->host);
+ return;
+ }
+ host1x_sync_writel(host, BIT_MASK(sp->id),
+ HOST1X_SYNC_SYNCPT_CPU_INCR(reg_offset));
+ wmb();
+}
+
+/* remove a wait pointed to by patch_addr */
+static int syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
+{
+ u32 override = host1x_class_host_wait_syncpt(
+ HOST1X_SYNCPT_RESERVED, 0);
+
+ *((u32 *)patch_addr) = override;
+ return 0;
+}
+
+static const struct host1x_syncpt_ops host1x_syncpt_ops = {
+ .restore = syncpt_restore,
+ .restore_wait_base = syncpt_restore_wait_base,
+ .load_wait_base = syncpt_read_wait_base,
+ .load = syncpt_load,
+ .cpu_incr = syncpt_cpu_incr,
+ .patch_wait = syncpt_patch_wait,
+};
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
new file mode 100644
index 0000000..2491bf8
--- /dev/null
+++ b/drivers/gpu/host1x/intr.c
@@ -0,0 +1,354 @@
+/*
+ * Tegra host1x Interrupt Management
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+
+#include <trace/events/host1x.h>
+#include "channel.h"
+#include "dev.h"
+#include "intr.h"
+
+/* Wait list management */
+
+enum waitlist_state {
+ WLS_PENDING,
+ WLS_REMOVED,
+ WLS_CANCELLED,
+ WLS_HANDLED
+};
+
+static void waiter_release(struct kref *kref)
+{
+ kfree(container_of(kref, struct host1x_waitlist, refcount));
+}
+
+/*
+ * add a waiter to a waiter queue, sorted by threshold
+ * returns true if it was added at the head of the queue
+ */
+static bool add_waiter_to_queue(struct host1x_waitlist *waiter,
+ struct list_head *queue)
+{
+ struct host1x_waitlist *pos;
+ u32 thresh = waiter->thresh;
+
+ list_for_each_entry_reverse(pos, queue, list)
+ if ((s32)(pos->thresh - thresh) <= 0) {
+ list_add(&waiter->list, &pos->list);
+ return false;
+ }
+
+ list_add(&waiter->list, queue);
+ return true;
+}
+
+/*
+ * run through a waiter queue for a single sync point ID
+ * and gather all completed waiters into lists by actions
+ */
+static void remove_completed_waiters(struct list_head *head, u32 sync,
+ struct list_head completed[HOST1X_INTR_ACTION_COUNT])
+{
+ struct list_head *dest;
+ struct host1x_waitlist *waiter, *next, *prev;
+
+ list_for_each_entry_safe(waiter, next, head, list) {
+ if ((s32)(waiter->thresh - sync) > 0)
+ break;
+
+ dest = completed + waiter->action;
+
+ /* consolidate submit cleanups */
+ if (waiter->action == HOST1X_INTR_ACTION_SUBMIT_COMPLETE &&
+ !list_empty(dest)) {
+ prev = list_entry(dest->prev,
+ struct host1x_waitlist, list);
+ if (prev->data == waiter->data) {
+ prev->count++;
+ dest = NULL;
+ }
+ }
+
+ /* PENDING->REMOVED or CANCELLED->HANDLED */
+ if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
+ list_del(&waiter->list);
+ kref_put(&waiter->refcount, waiter_release);
+ } else
+ list_move_tail(&waiter->list, dest);
+ }
+}
+
+static void reset_threshold_interrupt(struct host1x *host,
+ struct list_head *head,
+ unsigned int id)
+{
+ u32 thresh =
+ list_first_entry(head, struct host1x_waitlist, list)->thresh;
+
+ host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
+ host1x_hw_intr_enable_syncpt_intr(host, id);
+}
+
+static void action_submit_complete(struct host1x_waitlist *waiter)
+{
+ struct host1x_channel *channel = waiter->data;
+
+ host1x_cdma_update(&channel->cdma);
+
+ /* Add nr_completed to trace */
+ trace_host1x_channel_submit_complete(dev_name(channel->dev),
+ waiter->count, waiter->thresh);
+
+}
+
+static void action_wakeup(struct host1x_waitlist *waiter)
+{
+ wait_queue_head_t *wq = waiter->data;
+ wake_up(wq);
+}
+
+static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
+{
+ wait_queue_head_t *wq = waiter->data;
+ wake_up_interruptible(wq);
+}
+
+typedef void (*action_handler)(struct host1x_waitlist *waiter);
+
+static action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
+ action_submit_complete,
+ action_wakeup,
+ action_wakeup_interruptible,
+};
+
+static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
+{
+ struct list_head *head = completed;
+ int i;
+
+ for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i, ++head) {
+ action_handler handler = action_handlers[i];
+ struct host1x_waitlist *waiter, *next;
+
+ list_for_each_entry_safe(waiter, next, head, list) {
+ list_del(&waiter->list);
+ handler(waiter);
+ WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
+ WLS_REMOVED);
+ kref_put(&waiter->refcount, waiter_release);
+ }
+ }
+}
+
+/*
+ * Remove & handle all waiters that have completed for the given syncpt
+ */
+static int process_wait_list(struct host1x *host,
+ struct host1x_syncpt *syncpt,
+ u32 threshold)
+{
+ struct list_head completed[HOST1X_INTR_ACTION_COUNT];
+ unsigned int i;
+ int empty;
+
+ for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i)
+ INIT_LIST_HEAD(completed + i);
+
+ spin_lock(&syncpt->intr.lock);
+
+ remove_completed_waiters(&syncpt->intr.wait_head, threshold,
+ completed);
+
+ empty = list_empty(&syncpt->intr.wait_head);
+ if (empty)
+ host1x_hw_intr_disable_syncpt_intr(host, syncpt->id);
+ else
+ reset_threshold_interrupt(host, &syncpt->intr.wait_head,
+ syncpt->id);
+
+ spin_unlock(&syncpt->intr.lock);
+
+ run_handlers(completed);
+
+ return empty;
+}
+
+/*
+ * Sync point threshold interrupt service thread function
+ * Handles sync point threshold triggers, in thread context
+ */
+
+static void syncpt_thresh_work(struct work_struct *work)
+{
+ struct host1x_syncpt_intr *syncpt_intr =
+ container_of(work, struct host1x_syncpt_intr, work);
+ struct host1x_syncpt *syncpt =
+ container_of(syncpt_intr, struct host1x_syncpt, intr);
+ unsigned int id = syncpt->id;
+ struct host1x *host = syncpt->host;
+
+ (void)process_wait_list(host, syncpt,
+ host1x_syncpt_load(host->syncpt + id));
+}
+
+int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
+ enum host1x_intr_action action, void *data,
+ struct host1x_waitlist *waiter, void **ref)
+{
+ struct host1x_syncpt *syncpt;
+ int queue_was_empty;
+
+ if (waiter == NULL) {
+ pr_warn("%s: NULL waiter\n", __func__);
+ return -EINVAL;
+ }
+
+ /* initialize a new waiter */
+ INIT_LIST_HEAD(&waiter->list);
+ kref_init(&waiter->refcount);
+ if (ref)
+ kref_get(&waiter->refcount);
+ waiter->thresh = thresh;
+ waiter->action = action;
+ atomic_set(&waiter->state, WLS_PENDING);
+ waiter->data = data;
+ waiter->count = 1;
+
+ syncpt = host->syncpt + id;
+
+ spin_lock(&syncpt->intr.lock);
+
+ queue_was_empty = list_empty(&syncpt->intr.wait_head);
+
+ if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) {
+ /* added at head of list - new threshold value */
+ host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
+
+ /* added as first waiter - enable interrupt */
+ if (queue_was_empty)
+ host1x_hw_intr_enable_syncpt_intr(host, id);
+ }
+
+ spin_unlock(&syncpt->intr.lock);
+
+ if (ref)
+ *ref = waiter;
+ return 0;
+}
+
+void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref)
+{
+ struct host1x_waitlist *waiter = ref;
+ struct host1x_syncpt *syncpt;
+
+ while (atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED) ==
+ WLS_REMOVED)
+ schedule();
+
+ syncpt = host->syncpt + id;
+ (void)process_wait_list(host, syncpt,
+ host1x_syncpt_load(host->syncpt + id));
+
+ kref_put(&waiter->refcount, waiter_release);
+}
+
+int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
+{
+ unsigned int id;
+ u32 nb_pts = host1x_syncpt_nb_pts(host);
+
+ mutex_init(&host->intr_mutex);
+ host->intr_syncpt_irq = irq_sync;
+ host->intr_wq = create_workqueue("host_syncpt");
+ if (!host->intr_wq)
+ return -ENOMEM;
+
+ for (id = 0; id < nb_pts; ++id) {
+ struct host1x_syncpt *syncpt = host->syncpt + id;
+
+ spin_lock_init(&syncpt->intr.lock);
+ INIT_LIST_HEAD(&syncpt->intr.wait_head);
+ snprintf(syncpt->intr.thresh_irq_name,
+ sizeof(syncpt->intr.thresh_irq_name),
+ "host1x_sp_%02d", id);
+ }
+
+ host1x_intr_start(host);
+
+ return 0;
+}
+
+void host1x_intr_deinit(struct host1x *host)
+{
+ host1x_intr_stop(host);
+ destroy_workqueue(host->intr_wq);
+}
+
+void host1x_intr_start(struct host1x *host)
+{
+ u32 hz = clk_get_rate(host->clk);
+ int err;
+
+ mutex_lock(&host->intr_mutex);
+ err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000),
+ syncpt_thresh_work);
+ if (err) {
+ mutex_unlock(&host->intr_mutex);
+ return;
+ }
+ mutex_unlock(&host->intr_mutex);
+}
+
+void host1x_intr_stop(struct host1x *host)
+{
+ unsigned int id;
+ struct host1x_syncpt *syncpt = host->syncpt;
+ u32 nb_pts = host1x_syncpt_nb_pts(host);
+
+ mutex_lock(&host->intr_mutex);
+
+ host1x_hw_intr_disable_all_syncpt_intrs(host);
+
+ for (id = 0; id < nb_pts; ++id) {
+ struct host1x_waitlist *waiter, *next;
+
+ list_for_each_entry_safe(waiter, next,
+ &syncpt[id].intr.wait_head, list) {
+ if (atomic_cmpxchg(&waiter->state,
+ WLS_CANCELLED, WLS_HANDLED) == WLS_CANCELLED) {
+ list_del(&waiter->list);
+ kref_put(&waiter->refcount, waiter_release);
+ }
+ }
+
+ if (!list_empty(&syncpt[id].intr.wait_head)) {
+ /* output diagnostics */
+ mutex_unlock(&host->intr_mutex);
+ pr_warn("%s cannot stop syncpt intr id=%d\n",
+ __func__, id);
+ return;
+ }
+ }
+
+ host1x_hw_intr_free_syncpt_irq(host);
+
+ mutex_unlock(&host->intr_mutex);
+}
diff --git a/drivers/gpu/host1x/intr.h b/drivers/gpu/host1x/intr.h
new file mode 100644
index 0000000..2b8adf0
--- /dev/null
+++ b/drivers/gpu/host1x/intr.h
@@ -0,0 +1,102 @@
+/*
+ * Tegra host1x Interrupt Management
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_INTR_H
+#define __HOST1X_INTR_H
+
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+
+struct host1x;
+
+enum host1x_intr_action {
+ /*
+ * Perform cleanup after a submit has completed.
+ * 'data' points to a channel
+ */
+ HOST1X_INTR_ACTION_SUBMIT_COMPLETE = 0,
+
+ /*
+ * Wake up a task.
+ * 'data' points to a wait_queue_head_t
+ */
+ HOST1X_INTR_ACTION_WAKEUP,
+
+ /*
+	 * Wake up an interruptible task.
+ * 'data' points to a wait_queue_head_t
+ */
+ HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
+
+ HOST1X_INTR_ACTION_COUNT
+};
+
+struct host1x_syncpt_intr {
+ spinlock_t lock;
+ struct list_head wait_head;
+ char thresh_irq_name[12];
+ struct work_struct work;
+};
+
+struct host1x_waitlist {
+ struct list_head list;
+ struct kref refcount;
+ u32 thresh;
+ enum host1x_intr_action action;
+ atomic_t state;
+ void *data;
+ int count;
+};
+
+/*
+ * Schedule an action to be taken when a sync point reaches the given threshold.
+ *
+ * @id the sync point
+ * @thresh the threshold
+ * @action the action to take
+ * @data a pointer to extra data depending on action, see above
+ * @waiter waiter structure - assumes ownership
+ * @ref must be passed if cancellation is possible, else NULL
+ *
+ * This is a non-blocking api.
+ */
+int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
+ enum host1x_intr_action action, void *data,
+ struct host1x_waitlist *waiter, void **ref);
+
+/*
+ * Unreference an action submitted to host1x_intr_add_action().
+ * You must call this if you passed non-NULL as ref.
+ * @ref the ref returned from host1x_intr_add_action()
+ */
+void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref);
+
+/* Initialize host1x sync point interrupt */
+int host1x_intr_init(struct host1x *host, unsigned int irq_sync);
+
+/* Deinitialize host1x sync point interrupt */
+void host1x_intr_deinit(struct host1x *host);
+
+/* Enable host1x sync point interrupt */
+void host1x_intr_start(struct host1x *host);
+
+/* Disable host1x sync point interrupt */
+void host1x_intr_stop(struct host1x *host);
+
+irqreturn_t host1x_syncpt_thresh_fn(void *dev_id);
+#endif
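[Editorial sketch, not part of this patch, of how the action API above pairs up, modeled on the wait path in syncpt.c later in this patch: the waiter is handed over to host1x_intr_add_action(), and because a non-NULL ref is passed it must be released with host1x_intr_put_ref(). syncpt_reached() is a hypothetical wake-up predicate and error handling is trimmed:]

    DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
    struct host1x_waitlist *waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
    void *ref;
    int err;

    err = host1x_intr_add_action(host, id, thresh,
                                 HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
                                 &wq, waiter, &ref);  /* waiter ownership passes */
    if (!err) {
            wait_event_interruptible(wq, syncpt_reached(host, id, thresh));
            host1x_intr_put_ref(host, id, ref);       /* required for non-NULL ref */
    }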
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
new file mode 100644
index 0000000..f665d67
--- /dev/null
+++ b/drivers/gpu/host1x/job.c
@@ -0,0 +1,603 @@
+/*
+ * Tegra host1x Job
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/kref.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <trace/events/host1x.h>
+
+#include "channel.h"
+#include "dev.h"
+#include "host1x_bo.h"
+#include "job.h"
+#include "syncpt.h"
+
+struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
+ u32 num_cmdbufs, u32 num_relocs,
+ u32 num_waitchks)
+{
+ struct host1x_job *job = NULL;
+ unsigned int num_unpins = num_cmdbufs + num_relocs;
+ u64 total;
+ void *mem;
+
+ /* Check that we're not going to overflow */
+ total = sizeof(struct host1x_job) +
+ num_relocs * sizeof(struct host1x_reloc) +
+ num_unpins * sizeof(struct host1x_job_unpin_data) +
+ num_waitchks * sizeof(struct host1x_waitchk) +
+ num_cmdbufs * sizeof(struct host1x_job_gather) +
+ num_unpins * sizeof(dma_addr_t) +
+ num_unpins * sizeof(u32 *);
+ if (total > ULONG_MAX)
+ return NULL;
+
+ mem = job = kzalloc(total, GFP_KERNEL);
+ if (!job)
+ return NULL;
+
+ kref_init(&job->ref);
+ job->channel = ch;
+
+ /* Redistribute memory to the structs */
+ mem += sizeof(struct host1x_job);
+ job->relocarray = num_relocs ? mem : NULL;
+ mem += num_relocs * sizeof(struct host1x_reloc);
+ job->unpins = num_unpins ? mem : NULL;
+ mem += num_unpins * sizeof(struct host1x_job_unpin_data);
+ job->waitchk = num_waitchks ? mem : NULL;
+ mem += num_waitchks * sizeof(struct host1x_waitchk);
+ job->gathers = num_cmdbufs ? mem : NULL;
+ mem += num_cmdbufs * sizeof(struct host1x_job_gather);
+ job->addr_phys = num_unpins ? mem : NULL;
+
+ job->reloc_addr_phys = job->addr_phys;
+ job->gather_addr_phys = &job->addr_phys[num_relocs];
+
+ return job;
+}
+
+struct host1x_job *host1x_job_get(struct host1x_job *job)
+{
+ kref_get(&job->ref);
+ return job;
+}
+
+static void job_free(struct kref *ref)
+{
+ struct host1x_job *job = container_of(ref, struct host1x_job, ref);
+
+ kfree(job);
+}
+
+void host1x_job_put(struct host1x_job *job)
+{
+ kref_put(&job->ref, job_free);
+}
+
+void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
+ u32 words, u32 offset)
+{
+ struct host1x_job_gather *cur_gather = &job->gathers[job->num_gathers];
+
+ cur_gather->words = words;
+ cur_gather->bo = bo;
+ cur_gather->offset = offset;
+ job->num_gathers++;
+}
+
+/*
+ * NULL an already satisfied WAIT_SYNCPT host method, by patching its
+ * args in the command stream. The method data is changed to reference
+ * a reserved (never given out or incr) HOST1X_SYNCPT_RESERVED syncpt
+ * with a matching threshold value of 0, so is guaranteed to be popped
+ * by the host HW.
+ */
+static void host1x_syncpt_patch_offset(struct host1x_syncpt *sp,
+ struct host1x_bo *h, u32 offset)
+{
+ void *patch_addr = NULL;
+
+ /* patch the wait */
+ patch_addr = host1x_bo_kmap(h, offset >> PAGE_SHIFT);
+ if (patch_addr) {
+ host1x_syncpt_patch_wait(sp,
+ patch_addr + (offset & ~PAGE_MASK));
+ host1x_bo_kunmap(h, offset >> PAGE_SHIFT, patch_addr);
+ } else
+ pr_err("Could not map cmdbuf for wait check\n");
+}
+
+/*
+ * Check driver supplied waitchk structs for syncpt thresholds
+ * that have already been satisfied and NULL the comparison (to
+ * avoid a wrap condition in the HW).
+ */
+static int do_waitchks(struct host1x_job *job, struct host1x *host,
+ struct host1x_bo *patch)
+{
+ int i;
+
+ /* compare syncpt vs wait threshold */
+ for (i = 0; i < job->num_waitchk; i++) {
+ struct host1x_waitchk *wait = &job->waitchk[i];
+ struct host1x_syncpt *sp =
+ host1x_syncpt_get(host, wait->syncpt_id);
+
+ /* validate syncpt id */
+		if (wait->syncpt_id >= host1x_syncpt_nb_pts(host))
+ continue;
+
+ /* skip all other gathers */
+ if (patch != wait->bo)
+ continue;
+
+ trace_host1x_syncpt_wait_check(wait->bo, wait->offset,
+ wait->syncpt_id, wait->thresh,
+ host1x_syncpt_read_min(sp));
+
+ if (host1x_syncpt_is_expired(sp, wait->thresh)) {
+ dev_dbg(host->dev,
+ "drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
+ wait->syncpt_id, sp->name, wait->thresh,
+ host1x_syncpt_read_min(sp));
+
+ host1x_syncpt_patch_offset(sp, patch, wait->offset);
+ }
+
+ wait->bo = NULL;
+ }
+
+ return 0;
+}
+
+static unsigned int pin_job(struct host1x_job *job)
+{
+ unsigned int i;
+
+ job->num_unpins = 0;
+
+ for (i = 0; i < job->num_relocs; i++) {
+ struct host1x_reloc *reloc = &job->relocarray[i];
+ struct sg_table *sgt;
+ dma_addr_t phys_addr;
+
+ reloc->target = host1x_bo_get(reloc->target);
+ if (!reloc->target)
+ goto unpin;
+
+ phys_addr = host1x_bo_pin(reloc->target, &sgt);
+ if (!phys_addr)
+ goto unpin;
+
+ job->addr_phys[job->num_unpins] = phys_addr;
+ job->unpins[job->num_unpins].bo = reloc->target;
+ job->unpins[job->num_unpins].sgt = sgt;
+ job->num_unpins++;
+ }
+
+ for (i = 0; i < job->num_gathers; i++) {
+ struct host1x_job_gather *g = &job->gathers[i];
+ struct sg_table *sgt;
+ dma_addr_t phys_addr;
+
+ g->bo = host1x_bo_get(g->bo);
+ if (!g->bo)
+ goto unpin;
+
+ phys_addr = host1x_bo_pin(g->bo, &sgt);
+ if (!phys_addr)
+ goto unpin;
+
+ job->addr_phys[job->num_unpins] = phys_addr;
+ job->unpins[job->num_unpins].bo = g->bo;
+ job->unpins[job->num_unpins].sgt = sgt;
+ job->num_unpins++;
+ }
+
+ return job->num_unpins;
+
+unpin:
+ host1x_job_unpin(job);
+ return 0;
+}
+
+static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
+{
+ int i = 0;
+ u32 last_page = ~0;
+ void *cmdbuf_page_addr = NULL;
+
+ /* pin & patch the relocs for one gather */
+ while (i < job->num_relocs) {
+ struct host1x_reloc *reloc = &job->relocarray[i];
+ u32 reloc_addr = (job->reloc_addr_phys[i] +
+ reloc->target_offset) >> reloc->shift;
+ u32 *target;
+
+ /* skip all other gathers */
+ if (!(reloc->cmdbuf && cmdbuf == reloc->cmdbuf)) {
+ i++;
+ continue;
+ }
+
+ if (last_page != reloc->cmdbuf_offset >> PAGE_SHIFT) {
+ if (cmdbuf_page_addr)
+ host1x_bo_kunmap(cmdbuf, last_page,
+ cmdbuf_page_addr);
+
+ cmdbuf_page_addr = host1x_bo_kmap(cmdbuf,
+ reloc->cmdbuf_offset >> PAGE_SHIFT);
+ last_page = reloc->cmdbuf_offset >> PAGE_SHIFT;
+
+ if (unlikely(!cmdbuf_page_addr)) {
+ pr_err("Could not map cmdbuf for relocation\n");
+ return -ENOMEM;
+ }
+ }
+
+ target = cmdbuf_page_addr + (reloc->cmdbuf_offset & ~PAGE_MASK);
+ *target = reloc_addr;
+
+ /* mark this gather as handled */
+ reloc->cmdbuf = 0;
+ }
+
+ if (cmdbuf_page_addr)
+ host1x_bo_kunmap(cmdbuf, last_page, cmdbuf_page_addr);
+
+ return 0;
+}
+
+static int check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
+ unsigned int offset)
+{
+ offset *= sizeof(u32);
+
+ if (reloc->cmdbuf != cmdbuf || reloc->cmdbuf_offset != offset)
+ return -EINVAL;
+
+ return 0;
+}
+
+struct host1x_firewall {
+ struct host1x_job *job;
+ struct device *dev;
+
+ unsigned int num_relocs;
+ struct host1x_reloc *reloc;
+
+ struct host1x_bo *cmdbuf_id;
+ unsigned int offset;
+
+ u32 words;
+ u32 class;
+ u32 reg;
+ u32 mask;
+ u32 count;
+};
+
+static int check_mask(struct host1x_firewall *fw)
+{
+ u32 mask = fw->mask;
+ u32 reg = fw->reg;
+
+ while (mask) {
+ if (fw->words == 0)
+ return -EINVAL;
+
+ if (mask & 1) {
+ if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
+ bool bad_reloc = check_reloc(fw->reloc,
+ fw->cmdbuf_id,
+ fw->offset);
+ if (!fw->num_relocs || bad_reloc)
+ return -EINVAL;
+ fw->reloc++;
+ fw->num_relocs--;
+ }
+ fw->words--;
+ fw->offset++;
+ }
+ mask >>= 1;
+ reg++;
+ }
+
+ return 0;
+}
+
+static int check_incr(struct host1x_firewall *fw)
+{
+ u32 count = fw->count;
+ u32 reg = fw->reg;
+
+	while (count) {
+ if (fw->words == 0)
+ return -EINVAL;
+
+ if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
+ bool bad_reloc = check_reloc(fw->reloc, fw->cmdbuf_id,
+ fw->offset);
+ if (!fw->num_relocs || bad_reloc)
+ return -EINVAL;
+ fw->reloc++;
+ fw->num_relocs--;
+ }
+ reg++;
+ fw->words--;
+ fw->offset++;
+ count--;
+ }
+
+ return 0;
+}
+
+static int check_nonincr(struct host1x_firewall *fw)
+{
+ int is_addr_reg = fw->job->is_addr_reg(fw->dev, fw->class, fw->reg);
+ u32 count = fw->count;
+
+ while (count) {
+ if (fw->words == 0)
+ return -EINVAL;
+
+ if (is_addr_reg) {
+ bool bad_reloc = check_reloc(fw->reloc, fw->cmdbuf_id,
+ fw->offset);
+ if (!fw->num_relocs || bad_reloc)
+ return -EINVAL;
+ fw->reloc++;
+ fw->num_relocs--;
+ }
+ fw->words--;
+ fw->offset++;
+ count--;
+ }
+
+ return 0;
+}
+
+static int validate(struct host1x_job *job, struct device *dev,
+ struct host1x_job_gather *g)
+{
+ u32 *cmdbuf_base;
+ int err = 0;
+ struct host1x_firewall fw;
+
+ fw.job = job;
+ fw.dev = dev;
+ fw.reloc = job->relocarray;
+ fw.num_relocs = job->num_relocs;
+ fw.cmdbuf_id = g->bo;
+
+ fw.offset = 0;
+ fw.class = 0;
+
+ if (!job->is_addr_reg)
+ return 0;
+
+ cmdbuf_base = host1x_bo_mmap(g->bo);
+ if (!cmdbuf_base)
+ return -ENOMEM;
+
+ fw.words = g->words;
+ while (fw.words && !err) {
+ u32 word = cmdbuf_base[fw.offset];
+ u32 opcode = (word & 0xf0000000) >> 28;
+
+ fw.mask = 0;
+ fw.reg = 0;
+ fw.count = 0;
+ fw.words--;
+ fw.offset++;
+
+ switch (opcode) {
+ case 0:
+ fw.class = word >> 6 & 0x3ff;
+ fw.mask = word & 0x3f;
+ fw.reg = word >> 16 & 0xfff;
+ err = check_mask(&fw);
+ if (err)
+ goto out;
+ break;
+ case 1:
+ fw.reg = word >> 16 & 0xfff;
+ fw.count = word & 0xffff;
+ err = check_incr(&fw);
+ if (err)
+ goto out;
+ break;
+
+ case 2:
+ fw.reg = word >> 16 & 0xfff;
+ fw.count = word & 0xffff;
+ err = check_nonincr(&fw);
+ if (err)
+ goto out;
+ break;
+
+ case 3:
+ fw.mask = word & 0xffff;
+ fw.reg = word >> 16 & 0xfff;
+ err = check_mask(&fw);
+ if (err)
+ goto out;
+ break;
+ case 4:
+ case 5:
+ case 14:
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ /* No relocs should remain at this point */
+ if (fw.num_relocs)
+ err = -EINVAL;
+
+out:
+ host1x_bo_munmap(g->bo, cmdbuf_base);
+
+ return err;
+}
+
+static inline int copy_gathers(struct host1x_job *job, struct device *dev)
+{
+ size_t size = 0;
+ size_t offset = 0;
+ int i;
+
+ for (i = 0; i < job->num_gathers; i++) {
+ struct host1x_job_gather *g = &job->gathers[i];
+ size += g->words * sizeof(u32);
+ }
+
+ job->gather_copy_mapped = dma_alloc_writecombine(dev, size,
+ &job->gather_copy,
+ GFP_KERNEL);
+ if (!job->gather_copy_mapped) {
+		/* dma_alloc_writecombine() returns NULL on failure, not an ERR_PTR */
+		job->gather_copy_mapped = NULL;
+		return -ENOMEM;
+ }
+
+ job->gather_copy_size = size;
+
+ for (i = 0; i < job->num_gathers; i++) {
+ struct host1x_job_gather *g = &job->gathers[i];
+ void *gather;
+
+ gather = host1x_bo_mmap(g->bo);
+ memcpy(job->gather_copy_mapped + offset, gather + g->offset,
+ g->words * sizeof(u32));
+ host1x_bo_munmap(g->bo, gather);
+
+ g->base = job->gather_copy;
+ g->offset = offset;
+ g->bo = NULL;
+
+ offset += g->words * sizeof(u32);
+ }
+
+ return 0;
+}
+
+int host1x_job_pin(struct host1x_job *job, struct device *dev)
+{
+ int err;
+ unsigned int i, j;
+ struct host1x *host = dev_get_drvdata(dev->parent);
+ DECLARE_BITMAP(waitchk_mask, host1x_syncpt_nb_pts(host));
+
+ bitmap_zero(waitchk_mask, host1x_syncpt_nb_pts(host));
+ for (i = 0; i < job->num_waitchk; i++) {
+ u32 syncpt_id = job->waitchk[i].syncpt_id;
+ if (syncpt_id < host1x_syncpt_nb_pts(host))
+ set_bit(syncpt_id, waitchk_mask);
+ }
+
+ /* get current syncpt values for waitchk */
+ for_each_set_bit(i, waitchk_mask, host1x_syncpt_nb_pts(host))
+ host1x_syncpt_load(host->syncpt + i);
+
+ /* pin memory */
+ err = pin_job(job);
+ if (!err)
+ goto out;
+
+ /* patch gathers */
+ for (i = 0; i < job->num_gathers; i++) {
+ struct host1x_job_gather *g = &job->gathers[i];
+
+ /* process each gather mem only once */
+ if (g->handled)
+ continue;
+
+ g->base = job->gather_addr_phys[i];
+
+ for (j = 0; j < job->num_gathers; j++)
+ if (job->gathers[j].bo == g->bo)
+ job->gathers[j].handled = true;
+
+ err = 0;
+
+ if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+ err = validate(job, dev, g);
+
+ if (err)
+ dev_err(dev, "Job invalid (err=%d)\n", err);
+
+ if (!err)
+ err = do_relocs(job, g->bo);
+
+ if (!err)
+ err = do_waitchks(job, host, g->bo);
+
+ if (err)
+ break;
+ }
+
+ if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && !err) {
+ err = copy_gathers(job, dev);
+ if (err) {
+ host1x_job_unpin(job);
+ return err;
+ }
+ }
+
+out:
+ wmb();
+
+ return err;
+}
+
+void host1x_job_unpin(struct host1x_job *job)
+{
+ unsigned int i;
+
+ for (i = 0; i < job->num_unpins; i++) {
+ struct host1x_job_unpin_data *unpin = &job->unpins[i];
+ host1x_bo_unpin(unpin->bo, unpin->sgt);
+ host1x_bo_put(unpin->bo);
+ }
+ job->num_unpins = 0;
+
+ if (job->gather_copy_size)
+ dma_free_writecombine(job->channel->dev, job->gather_copy_size,
+ job->gather_copy_mapped,
+ job->gather_copy);
+}
+
+/*
+ * Debug routine used to dump job entries
+ */
+void host1x_job_dump(struct device *dev, struct host1x_job *job)
+{
+ dev_dbg(dev, " SYNCPT_ID %d\n", job->syncpt_id);
+ dev_dbg(dev, " SYNCPT_VAL %d\n", job->syncpt_end);
+ dev_dbg(dev, " FIRST_GET 0x%x\n", job->first_get);
+ dev_dbg(dev, " TIMEOUT %d\n", job->timeout);
+ dev_dbg(dev, " NUM_SLOTS %d\n", job->num_slots);
+ dev_dbg(dev, " NUM_HANDLES %d\n", job->num_unpins);
+}
diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h
new file mode 100644
index 0000000..fba45f2
--- /dev/null
+++ b/drivers/gpu/host1x/job.h
@@ -0,0 +1,162 @@
+/*
+ * Tegra host1x Job
+ *
+ * Copyright (c) 2011-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_JOB_H
+#define __HOST1X_JOB_H
+
+struct host1x_job_gather {
+ u32 words;
+ dma_addr_t base;
+ struct host1x_bo *bo;
+ int offset;
+ bool handled;
+};
+
+struct host1x_cmdbuf {
+ u32 handle;
+ u32 offset;
+ u32 words;
+ u32 pad;
+};
+
+struct host1x_reloc {
+ struct host1x_bo *cmdbuf;
+ u32 cmdbuf_offset;
+ struct host1x_bo *target;
+ u32 target_offset;
+ u32 shift;
+ u32 pad;
+};
+
+struct host1x_waitchk {
+ struct host1x_bo *bo;
+ u32 offset;
+ u32 syncpt_id;
+ u32 thresh;
+};
+
+struct host1x_job_unpin_data {
+ struct host1x_bo *bo;
+ struct sg_table *sgt;
+};
+
+/*
+ * Each submit is tracked as a host1x_job.
+ */
+struct host1x_job {
+ /* When refcount goes to zero, job can be freed */
+ struct kref ref;
+
+ /* List entry */
+ struct list_head list;
+
+ /* Channel where job is submitted to */
+ struct host1x_channel *channel;
+
+ u32 client;
+
+ /* Gathers and their memory */
+ struct host1x_job_gather *gathers;
+ unsigned int num_gathers;
+
+ /* Wait checks to be processed at submit time */
+ struct host1x_waitchk *waitchk;
+ unsigned int num_waitchk;
+ u32 waitchk_mask;
+
+ /* Array of handles to be pinned & unpinned */
+ struct host1x_reloc *relocarray;
+ unsigned int num_relocs;
+ struct host1x_job_unpin_data *unpins;
+ unsigned int num_unpins;
+
+ dma_addr_t *addr_phys;
+ dma_addr_t *gather_addr_phys;
+ dma_addr_t *reloc_addr_phys;
+
+ /* Sync point id, number of increments and end related to the submit */
+ u32 syncpt_id;
+ u32 syncpt_incrs;
+ u32 syncpt_end;
+
+ /* Maximum time to wait for this job */
+ unsigned int timeout;
+
+ /* Index and number of slots used in the push buffer */
+ unsigned int first_get;
+ unsigned int num_slots;
+
+ /* Copy of gathers */
+ size_t gather_copy_size;
+ dma_addr_t gather_copy;
+ u8 *gather_copy_mapped;
+
+ /* Check if register is marked as an address reg */
+	int (*is_addr_reg)(struct device *dev, u32 class, u32 reg);
+
+ /* Request a SETCLASS to this class */
+ u32 class;
+
+ /* Add a channel wait for previous ops to complete */
+ bool serialize;
+};
+/*
+ * Allocate memory for a job. Just enough memory will be allocated to
+ * accommodate the submit.
+ */
+struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
+ u32 num_cmdbufs, u32 num_relocs,
+ u32 num_waitchks);
+
+/*
+ * Add a gather to a job.
+ */
+void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
+ u32 words, u32 offset);
+
+/*
+ * Increment reference going to host1x_job.
+ */
+struct host1x_job *host1x_job_get(struct host1x_job *job);
+
+/*
+ * Decrement reference job, free if goes to zero.
+ */
+void host1x_job_put(struct host1x_job *job);
+
+/*
+ * Pin memory related to job. This handles relocation of addresses to the
+ * host1x address space. Handles both the gather memory and any other memory
+ * referred to from the gather buffers.
+ *
+ * Handles also patching out host waits that would wait for an expired sync
+ * point value.
+ */
+int host1x_job_pin(struct host1x_job *job, struct device *dev);
+
+/*
+ * Unpin memory related to job.
+ */
+void host1x_job_unpin(struct host1x_job *job);
+
+/*
+ * Dump contents of job to debug output.
+ */
+void host1x_job_dump(struct device *dev, struct host1x_job *job);
+
+#endif
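[Editorial sketch, not part of this patch, of the job lifecycle this header exposes; ch, bo, num_words and dev are assumed to come from the calling driver, and channel submission itself lives outside this file:]

    struct host1x_job *job;
    int err;

    job = host1x_job_alloc(ch, 1 /* cmdbufs */, 0 /* relocs */, 0 /* waitchks */);
    if (!job)
            return -ENOMEM;

    host1x_job_add_gather(job, bo, num_words, 0 /* offset */);

    err = host1x_job_pin(job, dev);  /* pins BOs, patches relocs and stale waits */
    if (!err) {
            /* ... hand the job to the channel here ... */
            host1x_job_unpin(job);
    }
    host1x_job_put(job);             /* drop the initial reference */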
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
new file mode 100644
index 0000000..4b49345
--- /dev/null
+++ b/drivers/gpu/host1x/syncpt.c
@@ -0,0 +1,387 @@
+/*
+ * Tegra host1x Syncpoints
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <trace/events/host1x.h>
+
+#include "syncpt.h"
+#include "dev.h"
+#include "intr.h"
+#include "debug.h"
+
+#define SYNCPT_CHECK_PERIOD (2 * HZ)
+#define MAX_STUCK_CHECK_COUNT 15
+
+static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
+ struct device *dev,
+ int client_managed)
+{
+ int i;
+ struct host1x_syncpt *sp = host->syncpt;
+ char *name;
+
+ for (i = 0; i < host->info->nb_pts && sp->name; i++, sp++)
+ ;
+ if (sp->dev)
+ return NULL;
+
+ name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id,
+ dev ? dev_name(dev) : NULL);
+ if (!name)
+ return NULL;
+
+ sp->dev = dev;
+ sp->name = name;
+ sp->client_managed = client_managed;
+
+ return sp;
+}
+
+u32 host1x_syncpt_id(struct host1x_syncpt *sp)
+{
+ return sp->id;
+}
+
+/*
+ * Updates the value sent to hardware.
+ */
+u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs)
+{
+ return (u32)atomic_add_return(incrs, &sp->max_val);
+}
+
+ /*
+ * Write cached syncpoint and waitbase values to hardware.
+ */
+void host1x_syncpt_restore(struct host1x *host)
+{
+ struct host1x_syncpt *sp_base = host->syncpt;
+ u32 i;
+
+ for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
+ host1x_hw_syncpt_restore(host, sp_base + i);
+ for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
+ host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
+ wmb();
+}
+
+/*
+ * Update the cached syncpoint and waitbase values by reading them
+ * from the registers.
+ */
+void host1x_syncpt_save(struct host1x *host)
+{
+ struct host1x_syncpt *sp_base = host->syncpt;
+ u32 i;
+
+ for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
+ if (host1x_syncpt_client_managed(sp_base + i))
+ host1x_hw_syncpt_load(host, sp_base + i);
+ else
+ WARN_ON(!host1x_syncpt_idle(sp_base + i));
+ }
+
+ for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
+ host1x_hw_syncpt_load_wait_base(host, sp_base + i);
+}
+
+/*
+ * Updates the cached syncpoint value by reading a new value from the hardware
+ * register
+ */
+u32 host1x_syncpt_load(struct host1x_syncpt *sp)
+{
+ u32 val;
+ val = host1x_hw_syncpt_load(sp->host, sp);
+ trace_host1x_syncpt_load_min(sp->id, val);
+
+ return val;
+}
+
+/*
+ * Get the current syncpoint base
+ */
+u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
+{
+ u32 val;
+ host1x_hw_syncpt_load_wait_base(sp->host, sp);
+ val = sp->base_val;
+ return val;
+}
+
+/*
+ * Write a cpu syncpoint increment to the hardware, without touching
+ * the cache. Caller is responsible for host being powered.
+ */
+void host1x_syncpt_cpu_incr(struct host1x_syncpt *sp)
+{
+ host1x_hw_syncpt_cpu_incr(sp->host, sp);
+}
+
+/*
+ * Increment syncpoint value from cpu, updating cache
+ */
+void host1x_syncpt_incr(struct host1x_syncpt *sp)
+{
+ if (host1x_syncpt_client_managed(sp))
+ host1x_syncpt_incr_max(sp, 1);
+ host1x_syncpt_cpu_incr(sp);
+}
+
+/*
+ * Update the sync point from hardware, and return true if the syncpoint has
+ * expired, false if we may need to wait.
+ */
+static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
+{
+ host1x_hw_syncpt_load(sp->host, sp);
+ return host1x_syncpt_is_expired(sp, thresh);
+}
+
+/*
+ * Main entrypoint for syncpoint value waits.
+ */
+int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
+ u32 *value)
+{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ void *ref;
+ struct host1x_waitlist *waiter;
+ int err = 0, check_count = 0;
+ u32 val;
+
+ if (value)
+ *value = 0;
+
+ /* first check cache */
+ if (host1x_syncpt_is_expired(sp, thresh)) {
+ if (value)
+ *value = host1x_syncpt_load(sp);
+ return 0;
+ }
+
+ /* try to read from register */
+ val = host1x_hw_syncpt_load(sp->host, sp);
+ if (host1x_syncpt_is_expired(sp, thresh)) {
+ if (value)
+ *value = val;
+ goto done;
+ }
+
+ if (!timeout) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ /* allocate a waiter */
+ waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
+ if (!waiter) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ /* schedule a wakeup when the syncpoint value is reached */
+ err = host1x_intr_add_action(sp->host, sp->id, thresh,
+ HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
+ &wq, waiter, &ref);
+ if (err)
+ goto done;
+
+ err = -EAGAIN;
+ /* Caller-specified timeout may be impractically low */
+ if (timeout < 0)
+ timeout = LONG_MAX;
+
+ /* wait for the syncpoint, or timeout, or signal */
+ while (timeout) {
+ long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
+ int remain = wait_event_interruptible_timeout(wq,
+ syncpt_load_min_is_expired(sp, thresh),
+ check);
+ if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) {
+ if (value)
+ *value = host1x_syncpt_load(sp);
+ err = 0;
+ break;
+ }
+ if (remain < 0) {
+ err = remain;
+ break;
+ }
+ timeout -= check;
+ if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
+ dev_warn(sp->host->dev,
+ "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%ld\n",
+ current->comm, sp->id, sp->name,
+ thresh, timeout);
+
+ host1x_debug_dump_syncpts(sp->host);
+ if (check_count == MAX_STUCK_CHECK_COUNT)
+ host1x_debug_dump(sp->host);
+ check_count++;
+ }
+ }
+ host1x_intr_put_ref(sp->host, sp->id, ref);
+
+done:
+ return err;
+}
+EXPORT_SYMBOL(host1x_syncpt_wait);
+
+/*
+ * Returns true if syncpoint is expired, false if we may need to wait
+ */
+bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
+{
+ u32 current_val;
+ u32 future_val;
+ smp_rmb();
+ current_val = (u32)atomic_read(&sp->min_val);
+ future_val = (u32)atomic_read(&sp->max_val);
+
+ /* Note the use of unsigned arithmetic here (mod 1<<32).
+ *
+ * c = current_val = min_val = the current value of the syncpoint.
+ * t = thresh = the value we are checking
+ * f = future_val = max_val = the value c will reach when all
+ * outstanding increments have completed.
+ *
+ * Note that c always chases f until it reaches f.
+ *
+ * Dtf = (f - t)
+ * Dtc = (c - t)
+ *
+ * Consider all cases:
+ *
+ * A) .....c..t..f..... Dtf < Dtc need to wait
+ * B) .....c.....f..t.. Dtf > Dtc expired
+ * C) ..t..c.....f..... Dtf > Dtc expired (Dct very large)
+ *
+ * Any case where f==c: always expired (for any t). Dtf == Dcf
+ * Any case where t==c: always expired (for any f). Dtf >= Dtc (because Dtc==0)
+ * Any case where t==f!=c: always wait. Dtf < Dtc (because Dtf==0,
+ * Dtc!=0)
+ *
+ * Other cases:
+ *
+ * A) .....t..f..c..... Dtf < Dtc need to wait
+ * A) .....f..c..t..... Dtf < Dtc need to wait
+ * A) .....f..t..c..... Dtf > Dtc expired
+ *
+ * So:
+ * Dtf >= Dtc implies EXPIRED (return true)
+ * Dtf < Dtc implies WAIT (return false)
+ *
+ * Note: If t is expired then we *cannot* wait on it. We would wait
+ * forever (hang the system).
+ *
+ * Note: do NOT get clever and remove the -thresh from both sides. It
+ * is NOT the same.
+ *
+ * If the future value is zero, we have a client managed sync point. In that
+ * case we do a direct comparison.
+ */
+ if (!host1x_syncpt_client_managed(sp))
+ return future_val - thresh >= current_val - thresh;
+ else
+ return (s32)(current_val - thresh) >= 0;
+}
+
+/* remove a wait pointed to by patch_addr */
+int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
+{
+ return host1x_hw_syncpt_patch_wait(sp->host, sp, patch_addr);
+}
+
+int host1x_syncpt_init(struct host1x *host)
+{
+ struct host1x_syncpt *syncpt;
+ int i;
+
+ syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts,
+ GFP_KERNEL);
+ if (!syncpt)
+ return -ENOMEM;
+
+ for (i = 0; i < host->info->nb_pts; ++i) {
+ syncpt[i].id = i;
+ syncpt[i].host = host;
+ }
+
+ host->syncpt = syncpt;
+
+ host1x_syncpt_restore(host);
+
+ /* Allocate sync point to use for clearing waits for expired fences */
+ host->nop_sp = _host1x_syncpt_alloc(host, NULL, 0);
+ if (!host->nop_sp)
+ return -ENOMEM;
+
+ return 0;
+}
+
+struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
+ int client_managed)
+{
+ struct host1x *host = dev_get_drvdata(dev->parent);
+ return _host1x_syncpt_alloc(host, dev, client_managed);
+}
+
+void host1x_syncpt_free(struct host1x_syncpt *sp)
+{
+ if (!sp)
+ return;
+
+ kfree(sp->name);
+ sp->dev = NULL;
+ sp->name = NULL;
+ sp->client_managed = 0;
+}
+
+void host1x_syncpt_deinit(struct host1x *host)
+{
+ int i;
+ struct host1x_syncpt *sp = host->syncpt;
+ for (i = 0; i < host->info->nb_pts; i++, sp++)
+ kfree(sp->name);
+}
+
+int host1x_syncpt_nb_pts(struct host1x *host)
+{
+ return host->info->nb_pts;
+}
+
+int host1x_syncpt_nb_bases(struct host1x *host)
+{
+ return host->info->nb_bases;
+}
+
+int host1x_syncpt_nb_mlocks(struct host1x *host)
+{
+ return host->info->nb_mlocks;
+}
+
+struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id)
+{
+	if (id >= host->info->nb_pts)
+ return NULL;
+ return host->syncpt + id;
+}
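[Editorial aside, not part of this patch: the modulo-2^32 comparison in host1x_syncpt_is_expired() is easy to check in isolation. A standalone userspace snippet working through three thresholds around a wrap, with the current and future values chosen for illustration:]

    #include <assert.h>
    #include <stdint.h>

    /* Wrap-safe test from host1x_syncpt_is_expired(), non-client-managed case. */
    static int expired(uint32_t current, uint32_t future, uint32_t thresh)
    {
            return (uint32_t)(future - thresh) >= (uint32_t)(current - thresh);
    }

    int main(void)
    {
            uint32_t c = 0xfffffffeu, f = 0x00000003u; /* 5 increments pending across the wrap */

            assert(expired(c, f, 0xfffffffdu));   /* already reached: 6 >= 1 */
            assert(!expired(c, f, 0x00000001u));  /* still pending: 2 < 0xfffffffd */
            assert(!expired(c, f, 0xffffffffu));  /* still pending: 4 < 0xffffffff */
            return 0;
    }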
diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h
new file mode 100644
index 0000000..c998061
--- /dev/null
+++ b/drivers/gpu/host1x/syncpt.h
@@ -0,0 +1,165 @@
+/*
+ * Tegra host1x Syncpoints
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_SYNCPT_H
+#define __HOST1X_SYNCPT_H
+
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#include "intr.h"
+
+struct host1x;
+
+/* Reserved for replacing an expired wait with a NOP */
+#define HOST1X_SYNCPT_RESERVED 0
+
+struct host1x_syncpt {
+ int id;
+ atomic_t min_val;
+ atomic_t max_val;
+ u32 base_val;
+ const char *name;
+ int client_managed;
+ struct host1x *host;
+ struct device *dev;
+
+ /* interrupt data */
+ struct host1x_syncpt_intr intr;
+};
+
+/* Initialize sync point array */
+int host1x_syncpt_init(struct host1x *host);
+
+/* Free sync point array */
+void host1x_syncpt_deinit(struct host1x *host);
+
+/*
+ * Read max. It indicates how many operations there are in the queue, either
+ * in a channel or in a software thread.
+ */
+static inline u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
+{
+ smp_rmb();
+ return (u32)atomic_read(&sp->max_val);
+}
+
+/*
+ * Read min, which is a shadow of the current sync point value in hardware.
+ */
+static inline u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
+{
+ smp_rmb();
+ return (u32)atomic_read(&sp->min_val);
+}
+
+/* Return number of sync point supported. */
+int host1x_syncpt_nb_pts(struct host1x *host);
+
+/* Return number of wait bases supported. */
+int host1x_syncpt_nb_bases(struct host1x *host);
+
+/* Return number of mlocks supported. */
+int host1x_syncpt_nb_mlocks(struct host1x *host);
+
+/*
+ * Check sync point sanity. If the real value is larger than max, there have
+ * been too many sync point increments.
+ *
+ * Client managed sync points are not tracked.
+ */
+static inline bool host1x_syncpt_check_max(struct host1x_syncpt *sp, u32 real)
+{
+ u32 max;
+ if (sp->client_managed)
+ return true;
+ max = host1x_syncpt_read_max(sp);
+ return (s32)(max - real) >= 0;
+}
+
+/* Return true if sync point is client managed. */
+static inline int host1x_syncpt_client_managed(struct host1x_syncpt *sp)
+{
+ return sp->client_managed;
+}
+
+/*
+ * Returns true if syncpoint min == max, which means that there are no
+ * outstanding operations.
+ */
+static inline bool host1x_syncpt_idle(struct host1x_syncpt *sp)
+{
+ int min, max;
+ smp_rmb();
+ min = atomic_read(&sp->min_val);
+ max = atomic_read(&sp->max_val);
+ return (min == max);
+}
+
+/* Return pointer to struct denoting sync point id. */
+struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
+
+/* Request incrementing a sync point. */
+void host1x_syncpt_cpu_incr(struct host1x_syncpt *sp);
+
+/* Load current value from hardware to the shadow register. */
+u32 host1x_syncpt_load(struct host1x_syncpt *sp);
+
+/* Check if the given syncpoint value has already passed */
+bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh);
+
+/* Save host1x sync point state into shadow registers. */
+void host1x_syncpt_save(struct host1x *host);
+
+/* Reset host1x sync point state from shadow registers. */
+void host1x_syncpt_restore(struct host1x *host);
+
+/* Read current wait base value into shadow register and return it. */
+u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp);
+
+/* Increment sync point and its max. */
+void host1x_syncpt_incr(struct host1x_syncpt *sp);
+
+/* Indicate future operations by incrementing the sync point max. */
+u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
+
+/* Wait until sync point reaches a threshold value, or a timeout. */
+int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh,
+ long timeout, u32 *value);
+
+/* Check if sync point id is valid. */
+static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
+{
+ return sp->id < host1x_syncpt_nb_pts(sp->host);
+}
+
+/* Patch a wait by replacing it with a wait for syncpt 0 value 0 */
+int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr);
+
+/* Return id of the sync point */
+u32 host1x_syncpt_id(struct host1x_syncpt *sp);
+
+/* Allocate a sync point for a device. */
+struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
+ int client_managed);
+
+/* Free a sync point. */
+void host1x_syncpt_free(struct host1x_syncpt *sp);
+
+#endif
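[Editorial sketch, not part of this patch, of the client-side flow these declarations describe: request a host-managed syncpoint, reserve an increment by bumping max, then wait for that fence. dev is the client device and error handling is trimmed:]

    struct host1x_syncpt *sp;
    u32 fence, value;
    int err;

    sp = host1x_syncpt_request(dev, 0 /* host managed */);
    if (!sp)
            return -ENOMEM;

    fence = host1x_syncpt_incr_max(sp, 1);   /* promise one increment */
    /* ... submit work that increments the syncpoint once ... */
    err = host1x_syncpt_wait(sp, fence, msecs_to_jiffies(100), &value);

    host1x_syncpt_free(sp);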
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 9c333d4..0428e8a 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -630,7 +630,7 @@ config SENSORS_LM75
temperature sensor chip, with models including:
- Analog Devices ADT75
- - Dallas Semiconductor DS75 and DS1775
+ - Dallas Semiconductor DS75, DS1775 and DS7505
- Maxim MAX6625 and MAX6626
- Microchip MCP980x
- National Semiconductor LM75, LM75A
diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c
index b4ad87b..eee1134 100644
--- a/drivers/hwmon/abx500.c
+++ b/drivers/hwmon/abx500.c
@@ -260,7 +260,7 @@ static ssize_t show_max_alarm(struct device *dev,
return sprintf(buf, "%d\n", data->max_alarm[attr->index]);
}
-static mode_t abx500_attrs_visible(struct kobject *kobj,
+static umode_t abx500_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 291edff..c03b490 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -38,6 +38,7 @@ enum lm75_type { /* keep sorted in alphabetical order */
adt75,
ds1775,
ds75,
+ ds7505,
lm75,
lm75a,
max6625,
@@ -71,9 +72,12 @@ struct lm75_data {
struct device *hwmon_dev;
struct mutex update_lock;
u8 orig_conf;
+ u8 resolution; /* In bits, between 9 and 12 */
+ u8 resolution_limits;
char valid; /* !=0 if registers are valid */
unsigned long last_updated; /* In jiffies */
- u16 temp[3]; /* Register values,
+ unsigned long sample_time; /* In jiffies */
+ s16 temp[3]; /* Register values,
0 = input
1 = max
2 = hyst */
@@ -93,12 +97,15 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm75_data *data = lm75_update_device(dev);
+ long temp;
if (IS_ERR(data))
return PTR_ERR(data);
- return sprintf(buf, "%d\n",
- LM75_TEMP_FROM_REG(data->temp[attr->index]));
+ temp = ((data->temp[attr->index] >> (16 - data->resolution)) * 1000)
+ >> (data->resolution - 8);
+
+ return sprintf(buf, "%ld\n", temp);
}
static ssize_t set_temp(struct device *dev, struct device_attribute *da,
@@ -110,13 +117,25 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
int nr = attr->index;
long temp;
int error;
+ u8 resolution;
error = kstrtol(buf, 10, &temp);
if (error)
return error;
+ /*
+ * Resolution of limit registers is assumed to be the same as the
+ * temperature input register resolution unless given explicitly.
+ */
+ if (attr->index && data->resolution_limits)
+ resolution = data->resolution_limits;
+ else
+ resolution = data->resolution;
+
mutex_lock(&data->update_lock);
- data->temp[nr] = LM75_TEMP_TO_REG(temp);
+ temp = clamp_val(temp, LM75_TEMP_MIN, LM75_TEMP_MAX);
+ data->temp[nr] = DIV_ROUND_CLOSEST(temp << (resolution - 8),
+ 1000) << (16 - resolution);
lm75_write_value(client, LM75_REG_TEMP[nr], data->temp[nr]);
mutex_unlock(&data->update_lock);
return count;
@@ -151,6 +170,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
int status;
u8 set_mask, clr_mask;
int new;
+ enum lm75_type kind = id->driver_data;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
@@ -167,8 +187,65 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
* Then tweak to be more precise when appropriate.
*/
set_mask = 0;
- clr_mask = (1 << 0) /* continuous conversions */
- | (1 << 6) | (1 << 5); /* 9-bit mode */
+ clr_mask = LM75_SHUTDOWN; /* continuous conversions */
+
+ switch (kind) {
+ case adt75:
+ clr_mask |= 1 << 5; /* not one-shot mode */
+ data->resolution = 12;
+ data->sample_time = HZ / 8;
+ break;
+ case ds1775:
+ case ds75:
+ case stds75:
+ clr_mask |= 3 << 5;
+ set_mask |= 2 << 5; /* 11-bit mode */
+ data->resolution = 11;
+ data->sample_time = HZ;
+ break;
+ case ds7505:
+ set_mask |= 3 << 5; /* 12-bit mode */
+ data->resolution = 12;
+ data->sample_time = HZ / 4;
+ break;
+ case lm75:
+ case lm75a:
+ data->resolution = 9;
+ data->sample_time = HZ / 2;
+ break;
+ case max6625:
+ data->resolution = 9;
+ data->sample_time = HZ / 4;
+ break;
+ case max6626:
+ data->resolution = 12;
+ data->resolution_limits = 9;
+ data->sample_time = HZ / 4;
+ break;
+ case tcn75:
+ data->resolution = 9;
+ data->sample_time = HZ / 8;
+ break;
+ case mcp980x:
+ data->resolution_limits = 9;
+ /* fall through */
+ case tmp100:
+ case tmp101:
+ set_mask |= 3 << 5; /* 12-bit mode */
+ data->resolution = 12;
+ data->sample_time = HZ;
+ clr_mask |= 1 << 7; /* not one-shot mode */
+ break;
+ case tmp105:
+ case tmp175:
+ case tmp275:
+ case tmp75:
+ set_mask |= 3 << 5; /* 12-bit mode */
+ clr_mask |= 1 << 7; /* not one-shot mode */
+ data->resolution = 12;
+ data->sample_time = HZ / 2;
+ break;
+ }
/* configure as specified */
status = lm75_read_value(client, LM75_REG_CONF);
@@ -218,6 +295,7 @@ static const struct i2c_device_id lm75_ids[] = {
{ "adt75", adt75, },
{ "ds1775", ds1775, },
{ "ds75", ds75, },
+ { "ds7505", ds7505, },
{ "lm75", lm75, },
{ "lm75a", lm75a, },
{ "max6625", max6625, },
@@ -407,7 +485,7 @@ static struct lm75_data *lm75_update_device(struct device *dev)
mutex_lock(&data->update_lock);
- if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
+ if (time_after(jiffies, data->last_updated + data->sample_time)
|| !data->valid) {
int i;
dev_dbg(&client->dev, "Starting lm75 update\n");
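As a sanity check of the resolution-aware conversion introduced in show_temp() above, a standalone sketch (plain userspace C, values illustrative) mirroring the same arithmetic: on a 12-bit part a raw register reading of 0x1910 is 401 counts of 62.5 m°C, which the shift-based math truncates to 25062 millidegrees.

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the driver: registers are left-justified in 16 bits. */
static long lm75_reg_to_millidegrees(int16_t reg, unsigned int resolution)
{
	return ((reg >> (16 - resolution)) * 1000L) >> (resolution - 8);
}

int main(void)
{
	printf("%ld\n", lm75_reg_to_millidegrees(0x1910, 12));	/* prints 25062 */
	return 0;
}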
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
index c7c3128..70637d2 100644
--- a/drivers/hwspinlock/Kconfig
+++ b/drivers/hwspinlock/Kconfig
@@ -10,7 +10,7 @@ menu "Hardware Spinlock drivers"
config HWSPINLOCK_OMAP
tristate "OMAP Hardware Spinlock device"
- depends on ARCH_OMAP4
+ depends on ARCH_OMAP4 || SOC_OMAP5
select HWSPINLOCK
help
Say y here to support the OMAP Hardware Spinlock device (firstly
diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
index 378fcb5..07f01ac 100644
--- a/drivers/i2c/busses/i2c-amd756-s4882.c
+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
@@ -169,11 +169,7 @@ static int __init amd756_s4882_init(void)
}
/* Unregister physical bus */
- error = i2c_del_adapter(&amd756_smbus);
- if (error) {
- dev_err(&amd756_smbus.dev, "Physical bus removal failed\n");
- goto ERROR0;
- }
+ i2c_del_adapter(&amd756_smbus);
printk(KERN_INFO "Enabling SMBus multiplexing for Tyan S4882\n");
/* Define the 5 virtual adapters and algorithms structures */
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 75195e3..6bb839b 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -603,15 +603,18 @@ static const struct of_device_id atmel_twi_dt_ids[] = {
}
};
MODULE_DEVICE_TABLE(of, atmel_twi_dt_ids);
-#else
-#define atmel_twi_dt_ids NULL
#endif
-static bool filter(struct dma_chan *chan, void *slave)
+static bool filter(struct dma_chan *chan, void *pdata)
{
- struct at_dma_slave *sl = slave;
+ struct at91_twi_pdata *sl_pdata = pdata;
+ struct at_dma_slave *sl;
+
+ if (!sl_pdata)
+ return false;
- if (sl->dma_dev == chan->device->dev) {
+ sl = &sl_pdata->dma_slave;
+ if (sl && (sl->dma_dev == chan->device->dev)) {
chan->private = sl;
return true;
} else {
@@ -622,11 +625,10 @@ static bool filter(struct dma_chan *chan, void *slave)
static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
{
int ret = 0;
- struct at_dma_slave *sdata;
+ struct at91_twi_pdata *pdata = dev->pdata;
struct dma_slave_config slave_config;
struct at91_twi_dma *dma = &dev->dma;
-
- sdata = &dev->pdata->dma_slave;
+ dma_cap_mask_t mask;
memset(&slave_config, 0, sizeof(slave_config));
slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
@@ -637,25 +639,22 @@ static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
slave_config.dst_maxburst = 1;
slave_config.device_fc = false;
- if (sdata && sdata->dma_dev) {
- dma_cap_mask_t mask;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- dma->chan_tx = dma_request_channel(mask, filter, sdata);
- if (!dma->chan_tx) {
- dev_err(dev->dev, "no DMA channel available for tx\n");
- ret = -EBUSY;
- goto error;
- }
- dma->chan_rx = dma_request_channel(mask, filter, sdata);
- if (!dma->chan_rx) {
- dev_err(dev->dev, "no DMA channel available for rx\n");
- ret = -EBUSY;
- goto error;
- }
- } else {
- ret = -EINVAL;
+ dma->chan_tx = dma_request_slave_channel_compat(mask, filter, pdata,
+ dev->dev, "tx");
+ if (!dma->chan_tx) {
+ dev_err(dev->dev, "can't get a DMA channel for tx\n");
+ ret = -EBUSY;
+ goto error;
+ }
+
+ dma->chan_rx = dma_request_slave_channel_compat(mask, filter, pdata,
+ dev->dev, "rx");
+ if (!dma->chan_rx) {
+ dev_err(dev->dev, "can't get a DMA channel for rx\n");
+ ret = -EBUSY;
goto error;
}
@@ -785,12 +784,11 @@ static int at91_twi_probe(struct platform_device *pdev)
static int at91_twi_remove(struct platform_device *pdev)
{
struct at91_twi_dev *dev = platform_get_drvdata(pdev);
- int rc;
- rc = i2c_del_adapter(&dev->adapter);
+ i2c_del_adapter(&dev->adapter);
clk_disable_unprepare(dev->clk);
- return rc;
+ return 0;
}
#ifdef CONFIG_PM
@@ -828,7 +826,7 @@ static struct platform_driver at91_twi_driver = {
.driver = {
.name = "at91_i2c",
.owner = THIS_MODULE,
- .of_match_table = atmel_twi_dt_ids,
+ .of_match_table = of_match_ptr(atmel_twi_dt_ids),
.pm = at91_twi_pm_ops,
},
};
diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c
index 98386d6..1be13ac 100644
--- a/drivers/i2c/busses/i2c-cbus-gpio.c
+++ b/drivers/i2c/busses/i2c-cbus-gpio.c
@@ -206,7 +206,9 @@ static int cbus_i2c_remove(struct platform_device *pdev)
{
struct i2c_adapter *adapter = platform_get_drvdata(pdev);
- return i2c_del_adapter(adapter);
+ i2c_del_adapter(adapter);
+
+ return 0;
}
static int cbus_i2c_probe(struct platform_device *pdev)
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 7d1e590..cf20e06 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -137,7 +137,7 @@ static inline u16 davinci_i2c_read_reg(struct davinci_i2c_dev *i2c_dev, int reg)
}
/* Generate a pulse on the i2c clock pin. */
-static void generic_i2c_clock_pulse(unsigned int scl_pin)
+static void davinci_i2c_clock_pulse(unsigned int scl_pin)
{
u16 i;
@@ -155,7 +155,7 @@ static void generic_i2c_clock_pulse(unsigned int scl_pin)
/* This routine does i2c bus recovery as specified in the
* i2c protocol Rev. 03 section 3.16 titled "Bus clear"
*/
-static void i2c_recover_bus(struct davinci_i2c_dev *dev)
+static void davinci_i2c_recover_bus(struct davinci_i2c_dev *dev)
{
u32 flag = 0;
struct davinci_i2c_platform_data *pdata = dev->pdata;
@@ -166,7 +166,7 @@ static void i2c_recover_bus(struct davinci_i2c_dev *dev)
flag |= DAVINCI_I2C_MDR_NACK;
/* write the data into mode register */
davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
- generic_i2c_clock_pulse(pdata->scl_pin);
+ davinci_i2c_clock_pulse(pdata->scl_pin);
/* Send STOP */
flag = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
flag |= DAVINCI_I2C_MDR_STP;
@@ -289,7 +289,7 @@ static int i2c_davinci_wait_bus_not_busy(struct davinci_i2c_dev *dev,
return -ETIMEDOUT;
} else {
to_cnt = 0;
- i2c_recover_bus(dev);
+ davinci_i2c_recover_bus(dev);
i2c_davinci_init(dev);
}
}
@@ -379,7 +379,7 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
dev->adapter.timeout);
if (r == 0) {
dev_err(dev->dev, "controller timed out\n");
- i2c_recover_bus(dev);
+ davinci_i2c_recover_bus(dev);
i2c_davinci_init(dev);
dev->buf_len = 0;
return -ETIMEDOUT;
@@ -643,7 +643,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
{
struct davinci_i2c_dev *dev;
struct i2c_adapter *adap;
- struct resource *mem, *irq, *ioarea;
+ struct resource *mem, *irq;
int r;
/* NOTE: driver uses the static register mapping */
@@ -659,24 +659,18 @@ static int davinci_i2c_probe(struct platform_device *pdev)
return -ENODEV;
}
- ioarea = request_mem_region(mem->start, resource_size(mem),
- pdev->name);
- if (!ioarea) {
- dev_err(&pdev->dev, "I2C region already claimed\n");
- return -EBUSY;
- }
-
- dev = kzalloc(sizeof(struct davinci_i2c_dev), GFP_KERNEL);
+ dev = devm_kzalloc(&pdev->dev, sizeof(struct davinci_i2c_dev),
+ GFP_KERNEL);
if (!dev) {
- r = -ENOMEM;
- goto err_release_region;
+ dev_err(&pdev->dev, "Memory allocation failed\n");
+ return -ENOMEM;
}
init_completion(&dev->cmd_complete);
#ifdef CONFIG_CPU_FREQ
init_completion(&dev->xfr_complete);
#endif
- dev->dev = get_device(&pdev->dev);
+ dev->dev = &pdev->dev;
dev->irq = irq->start;
dev->pdata = dev->dev->platform_data;
platform_set_drvdata(pdev, dev);
@@ -686,10 +680,9 @@ static int davinci_i2c_probe(struct platform_device *pdev)
dev->pdata = devm_kzalloc(&pdev->dev,
sizeof(struct davinci_i2c_platform_data), GFP_KERNEL);
- if (!dev->pdata) {
- r = -ENOMEM;
- goto err_free_mem;
- }
+ if (!dev->pdata)
+ return -ENOMEM;
+
memcpy(dev->pdata, &davinci_i2c_platform_data_default,
sizeof(struct davinci_i2c_platform_data));
if (!of_property_read_u32(pdev->dev.of_node, "clock-frequency",
@@ -699,22 +692,21 @@ static int davinci_i2c_probe(struct platform_device *pdev)
dev->pdata = &davinci_i2c_platform_data_default;
}
- dev->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(dev->clk)) {
- r = -ENODEV;
- goto err_free_mem;
- }
+ dev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dev->clk))
+ return -ENODEV;
clk_prepare_enable(dev->clk);
- dev->base = ioremap(mem->start, resource_size(mem));
- if (!dev->base) {
- r = -EBUSY;
- goto err_mem_ioremap;
+ dev->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(dev->base)) {
+ r = PTR_ERR(dev->base);
+ goto err_unuse_clocks;
}
i2c_davinci_init(dev);
- r = request_irq(dev->irq, i2c_davinci_isr, 0, pdev->name, dev);
+ r = devm_request_irq(&pdev->dev, dev->irq, i2c_davinci_isr, 0,
+ pdev->name, dev);
if (r) {
dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq);
goto err_unuse_clocks;
@@ -723,7 +715,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
r = i2c_davinci_cpufreq_register(dev);
if (r) {
dev_err(&pdev->dev, "failed to register cpufreq\n");
- goto err_free_irq;
+ goto err_unuse_clocks;
}
adap = &dev->adapter;
@@ -740,50 +732,31 @@ static int davinci_i2c_probe(struct platform_device *pdev)
r = i2c_add_numbered_adapter(adap);
if (r) {
dev_err(&pdev->dev, "failure adding adapter\n");
- goto err_free_irq;
+ goto err_unuse_clocks;
}
of_i2c_register_devices(adap);
return 0;
-err_free_irq:
- free_irq(dev->irq, dev);
err_unuse_clocks:
- iounmap(dev->base);
-err_mem_ioremap:
clk_disable_unprepare(dev->clk);
- clk_put(dev->clk);
dev->clk = NULL;
-err_free_mem:
- put_device(&pdev->dev);
- kfree(dev);
-err_release_region:
- release_mem_region(mem->start, resource_size(mem));
-
return r;
}
static int davinci_i2c_remove(struct platform_device *pdev)
{
struct davinci_i2c_dev *dev = platform_get_drvdata(pdev);
- struct resource *mem;
i2c_davinci_cpufreq_deregister(dev);
i2c_del_adapter(&dev->adapter);
- put_device(&pdev->dev);
clk_disable_unprepare(dev->clk);
- clk_put(dev->clk);
dev->clk = NULL;
davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, 0);
- free_irq(dev->irq, dev);
- iounmap(dev->base);
- kfree(dev);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(mem->start, resource_size(mem));
return 0;
}
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 94fd818..21fbb34 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -68,6 +68,7 @@
#define DW_IC_TXFLR 0x74
#define DW_IC_RXFLR 0x78
#define DW_IC_TX_ABRT_SOURCE 0x80
+#define DW_IC_ENABLE_STATUS 0x9c
#define DW_IC_COMP_PARAM_1 0xf4
#define DW_IC_COMP_TYPE 0xfc
#define DW_IC_COMP_TYPE_VALUE 0x44570140
@@ -248,6 +249,27 @@ static u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
return ((ic_clk * (tLOW + tf) + 5000) / 10000) - 1 + offset;
}
+static void __i2c_dw_enable(struct dw_i2c_dev *dev, bool enable)
+{
+ int timeout = 100;
+
+ do {
+ dw_writel(dev, enable, DW_IC_ENABLE);
+ if ((dw_readl(dev, DW_IC_ENABLE_STATUS) & 1) == enable)
+ return;
+
+ /*
+ * Wait 10 times the signaling period of the highest I2C
+ * transfer rate supported by the driver (for 400 kHz this
+ * is 25 us), as described in the DesignWare I2C databook.
+ */
+ usleep_range(25, 250);
+ } while (timeout--);
+
+ dev_warn(dev->dev, "timeout in %sabling adapter\n",
+ enable ? "en" : "dis");
+}
+
/**
* i2c_dw_init() - initialize the designware i2c master hardware
* @dev: device private data
@@ -278,7 +300,7 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
}
/* Disable the adapter */
- dw_writel(dev, 0, DW_IC_ENABLE);
+ __i2c_dw_enable(dev, false);
/* set standard and fast speed deviders for high/low periods */
@@ -333,7 +355,7 @@ static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
return -ETIMEDOUT;
}
timeout--;
- mdelay(1);
+ usleep_range(1000, 1100);
}
return 0;
@@ -345,7 +367,7 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
u32 ic_con;
/* Disable the adapter */
- dw_writel(dev, 0, DW_IC_ENABLE);
+ __i2c_dw_enable(dev, false);
/* set the slave (target) address */
dw_writel(dev, msgs[dev->msg_write_idx].addr, DW_IC_TAR);
@@ -359,7 +381,7 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
dw_writel(dev, ic_con, DW_IC_CON);
/* Enable the adapter */
- dw_writel(dev, 1, DW_IC_ENABLE);
+ __i2c_dw_enable(dev, true);
/* Enable interrupts */
dw_writel(dev, DW_IC_INTR_DEFAULT_MASK, DW_IC_INTR_MASK);
@@ -565,7 +587,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
/* no error */
if (likely(!dev->cmd_err)) {
/* Disable the adapter */
- dw_writel(dev, 0, DW_IC_ENABLE);
+ __i2c_dw_enable(dev, false);
ret = num;
goto done;
}
@@ -578,7 +600,8 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
ret = -EIO;
done:
- pm_runtime_put(dev->dev);
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
mutex_unlock(&dev->lock);
return ret;
@@ -700,7 +723,7 @@ EXPORT_SYMBOL_GPL(i2c_dw_isr);
void i2c_dw_enable(struct dw_i2c_dev *dev)
{
/* Enable the adapter */
- dw_writel(dev, 1, DW_IC_ENABLE);
+ __i2c_dw_enable(dev, true);
}
EXPORT_SYMBOL_GPL(i2c_dw_enable);
@@ -713,7 +736,7 @@ EXPORT_SYMBOL_GPL(i2c_dw_is_enabled);
void i2c_dw_disable(struct dw_i2c_dev *dev)
{
/* Disable controller */
- dw_writel(dev, 0, DW_IC_ENABLE);
+ __i2c_dw_enable(dev, false);
/* Disable all interrupts */
dw_writel(dev, 0, DW_IC_INTR_MASK);
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 7c5e383..f6ed06c 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -208,68 +208,45 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
}
static int i2c_dw_pci_probe(struct pci_dev *pdev,
-const struct pci_device_id *id)
+ const struct pci_device_id *id)
{
struct dw_i2c_dev *dev;
struct i2c_adapter *adap;
- unsigned long start, len;
- void __iomem *base;
int r;
struct dw_pci_controller *controller;
if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers)) {
- printk(KERN_ERR "dw_i2c_pci_probe: invalid driver data %ld\n",
+ dev_err(&pdev->dev, "%s: invalid driver data %ld\n", __func__,
id->driver_data);
return -EINVAL;
}
controller = &dw_pci_controllers[id->driver_data];
- r = pci_enable_device(pdev);
+ r = pcim_enable_device(pdev);
if (r) {
dev_err(&pdev->dev, "Failed to enable I2C PCI device (%d)\n",
r);
- goto exit;
+ return r;
}
- /* Determine the address of the I2C area */
- start = pci_resource_start(pdev, 0);
- len = pci_resource_len(pdev, 0);
- if (!start || len == 0) {
- dev_err(&pdev->dev, "base address not set\n");
- r = -ENODEV;
- goto exit;
- }
-
- r = pci_request_region(pdev, 0, DRIVER_NAME);
+ r = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
if (r) {
- dev_err(&pdev->dev, "failed to request I2C region "
- "0x%lx-0x%lx\n", start,
- (unsigned long)pci_resource_end(pdev, 0));
- goto exit;
- }
-
- base = ioremap_nocache(start, len);
- if (!base) {
dev_err(&pdev->dev, "I/O memory remapping failed\n");
- r = -ENOMEM;
- goto err_release_region;
+ return r;
}
-
- dev = kzalloc(sizeof(struct dw_i2c_dev), GFP_KERNEL);
- if (!dev) {
- r = -ENOMEM;
- goto err_release_region;
- }
+ dev = devm_kzalloc(&pdev->dev, sizeof(struct dw_i2c_dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
init_completion(&dev->cmd_complete);
mutex_init(&dev->lock);
dev->clk = NULL;
dev->controller = controller;
dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
- dev->base = base;
- dev->dev = get_device(&pdev->dev);
+ dev->base = pcim_iomap_table(pdev)[0];
+ dev->dev = &pdev->dev;
dev->functionality =
I2C_FUNC_I2C |
I2C_FUNC_SMBUS_BYTE |
@@ -284,7 +261,7 @@ const struct pci_device_id *id)
dev->rx_fifo_depth = controller->rx_fifo_depth;
r = i2c_dw_init(dev);
if (r)
- goto err_iounmap;
+ return r;
adap = &dev->adapter;
i2c_set_adapdata(adap, dev);
@@ -296,10 +273,11 @@ const struct pci_device_id *id)
snprintf(adap->name, sizeof(adap->name), "i2c-designware-pci-%d",
adap->nr);
- r = request_irq(pdev->irq, i2c_dw_isr, IRQF_SHARED, adap->name, dev);
+ r = devm_request_irq(&pdev->dev, pdev->irq, i2c_dw_isr, IRQF_SHARED,
+ adap->name, dev);
if (r) {
dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq);
- goto err_iounmap;
+ return r;
}
i2c_dw_disable_int(dev);
@@ -307,24 +285,14 @@ const struct pci_device_id *id)
r = i2c_add_numbered_adapter(adap);
if (r) {
dev_err(&pdev->dev, "failure adding adapter\n");
- goto err_free_irq;
+ return r;
}
- pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
+ pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_allow(&pdev->dev);
return 0;
-
-err_free_irq:
- free_irq(pdev->irq, dev);
-err_iounmap:
- iounmap(dev->base);
- put_device(&pdev->dev);
- kfree(dev);
-err_release_region:
- pci_release_region(pdev, 0);
-exit:
- return r;
}
static void i2c_dw_pci_remove(struct pci_dev *pdev)
@@ -336,11 +304,6 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev)
pm_runtime_get_noresume(&pdev->dev);
i2c_del_adapter(&dev->adapter);
- put_device(&pdev->dev);
-
- free_irq(dev->irq, dev);
- kfree(dev);
- pci_release_region(pdev, 0);
}
/* work with hotplug and coldplug */
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index e3085c4..8ec9133 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -56,20 +56,11 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
static int dw_i2c_acpi_configure(struct platform_device *pdev)
{
struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
- struct acpi_device *adev;
- int busno, ret;
if (!ACPI_HANDLE(&pdev->dev))
return -ENODEV;
- ret = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev);
- if (ret)
- return -ENODEV;
-
dev->adapter.nr = -1;
- if (adev->pnp.unique_id && !kstrtoint(adev->pnp.unique_id, 0, &busno))
- dev->adapter.nr = busno;
-
dev->tx_fifo_depth = 32;
dev->rx_fifo_depth = 32;
return 0;
@@ -92,7 +83,7 @@ static int dw_i2c_probe(struct platform_device *pdev)
{
struct dw_i2c_dev *dev;
struct i2c_adapter *adap;
- struct resource *mem, *ioarea;
+ struct resource *mem;
int irq, r;
/* NOTE: driver uses the static register mapping */
@@ -108,32 +99,25 @@ static int dw_i2c_probe(struct platform_device *pdev)
return irq; /* -ENXIO */
}
- ioarea = request_mem_region(mem->start, resource_size(mem),
- pdev->name);
- if (!ioarea) {
- dev_err(&pdev->dev, "I2C region already claimed\n");
- return -EBUSY;
- }
+ dev = devm_kzalloc(&pdev->dev, sizeof(struct dw_i2c_dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
- dev = kzalloc(sizeof(struct dw_i2c_dev), GFP_KERNEL);
- if (!dev) {
- r = -ENOMEM;
- goto err_release_region;
- }
+ dev->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(dev->base))
+ return PTR_ERR(dev->base);
init_completion(&dev->cmd_complete);
mutex_init(&dev->lock);
- dev->dev = get_device(&pdev->dev);
+ dev->dev = &pdev->dev;
dev->irq = irq;
platform_set_drvdata(pdev, dev);
- dev->clk = clk_get(&pdev->dev, NULL);
+ dev->clk = devm_clk_get(&pdev->dev, NULL);
dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
- if (IS_ERR(dev->clk)) {
- r = -ENODEV;
- goto err_free_mem;
- }
+ if (IS_ERR(dev->clk))
+ return PTR_ERR(dev->clk);
clk_prepare_enable(dev->clk);
dev->functionality =
@@ -146,13 +130,6 @@ static int dw_i2c_probe(struct platform_device *pdev)
dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
DW_IC_CON_RESTART_EN | DW_IC_CON_SPEED_FAST;
- dev->base = ioremap(mem->start, resource_size(mem));
- if (dev->base == NULL) {
- dev_err(&pdev->dev, "failure mapping io resources\n");
- r = -EBUSY;
- goto err_unuse_clocks;
- }
-
/* Try first if we can configure the device from ACPI */
r = dw_i2c_acpi_configure(pdev);
if (r) {
@@ -164,13 +141,14 @@ static int dw_i2c_probe(struct platform_device *pdev)
}
r = i2c_dw_init(dev);
if (r)
- goto err_iounmap;
+ return r;
i2c_dw_disable_int(dev);
- r = request_irq(dev->irq, i2c_dw_isr, IRQF_SHARED, pdev->name, dev);
+ r = devm_request_irq(&pdev->dev, dev->irq, i2c_dw_isr, IRQF_SHARED,
+ pdev->name, dev);
if (r) {
dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq);
- goto err_iounmap;
+ return r;
}
adap = &dev->adapter;
@@ -186,57 +164,32 @@ static int dw_i2c_probe(struct platform_device *pdev)
r = i2c_add_numbered_adapter(adap);
if (r) {
dev_err(&pdev->dev, "failure adding adapter\n");
- goto err_free_irq;
+ return r;
}
of_i2c_register_devices(adap);
acpi_i2c_register_devices(adap);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
+ pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- pm_runtime_put(&pdev->dev);
return 0;
-
-err_free_irq:
- free_irq(dev->irq, dev);
-err_iounmap:
- iounmap(dev->base);
-err_unuse_clocks:
- clk_disable_unprepare(dev->clk);
- clk_put(dev->clk);
- dev->clk = NULL;
-err_free_mem:
- put_device(&pdev->dev);
- kfree(dev);
-err_release_region:
- release_mem_region(mem->start, resource_size(mem));
-
- return r;
}
static int dw_i2c_remove(struct platform_device *pdev)
{
struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
- struct resource *mem;
pm_runtime_get_sync(&pdev->dev);
i2c_del_adapter(&dev->adapter);
- put_device(&pdev->dev);
-
- clk_disable_unprepare(dev->clk);
- clk_put(dev->clk);
- dev->clk = NULL;
i2c_dw_disable(dev);
- free_irq(dev->irq, dev);
- kfree(dev);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(mem->start, resource_size(mem));
return 0;
}
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index f3fa433..bc6e139 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -85,23 +85,29 @@ static int i2c_gpio_getscl(void *data)
return gpio_get_value(pdata->scl_pin);
}
-static int of_i2c_gpio_probe(struct device_node *np,
- struct i2c_gpio_platform_data *pdata)
+static int of_i2c_gpio_get_pins(struct device_node *np,
+ unsigned int *sda_pin, unsigned int *scl_pin)
{
- u32 reg;
-
if (of_gpio_count(np) < 2)
return -ENODEV;
- pdata->sda_pin = of_get_gpio(np, 0);
- pdata->scl_pin = of_get_gpio(np, 1);
+ *sda_pin = of_get_gpio(np, 0);
+ *scl_pin = of_get_gpio(np, 1);
- if (!gpio_is_valid(pdata->sda_pin) || !gpio_is_valid(pdata->scl_pin)) {
+ if (!gpio_is_valid(*sda_pin) || !gpio_is_valid(*scl_pin)) {
pr_err("%s: invalid GPIO pins, sda=%d/scl=%d\n",
- np->full_name, pdata->sda_pin, pdata->scl_pin);
+ np->full_name, *sda_pin, *scl_pin);
return -ENODEV;
}
+ return 0;
+}
+
+static void of_i2c_gpio_get_props(struct device_node *np,
+ struct i2c_gpio_platform_data *pdata)
+{
+ u32 reg;
+
of_property_read_u32(np, "i2c-gpio,delay-us", &pdata->udelay);
if (!of_property_read_u32(np, "i2c-gpio,timeout-ms", &reg))
@@ -113,8 +119,6 @@ static int of_i2c_gpio_probe(struct device_node *np,
of_property_read_bool(np, "i2c-gpio,scl-open-drain");
pdata->scl_is_output_only =
of_property_read_bool(np, "i2c-gpio,scl-output-only");
-
- return 0;
}
static int i2c_gpio_probe(struct platform_device *pdev)
@@ -123,31 +127,52 @@ static int i2c_gpio_probe(struct platform_device *pdev)
struct i2c_gpio_platform_data *pdata;
struct i2c_algo_bit_data *bit_data;
struct i2c_adapter *adap;
+ unsigned int sda_pin, scl_pin;
int ret;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- adap = &priv->adap;
- bit_data = &priv->bit_data;
- pdata = &priv->pdata;
-
+ /* First get the GPIO pins; if it fails, we'll defer the probe. */
if (pdev->dev.of_node) {
- ret = of_i2c_gpio_probe(pdev->dev.of_node, pdata);
+ ret = of_i2c_gpio_get_pins(pdev->dev.of_node,
+ &sda_pin, &scl_pin);
if (ret)
return ret;
} else {
if (!pdev->dev.platform_data)
return -ENXIO;
- memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata));
+ pdata = pdev->dev.platform_data;
+ sda_pin = pdata->sda_pin;
+ scl_pin = pdata->scl_pin;
}
- ret = gpio_request(pdata->sda_pin, "sda");
- if (ret)
+ ret = gpio_request(sda_pin, "sda");
+ if (ret) {
+ if (ret == -EINVAL)
+ ret = -EPROBE_DEFER; /* Try again later */
goto err_request_sda;
- ret = gpio_request(pdata->scl_pin, "scl");
- if (ret)
+ }
+ ret = gpio_request(scl_pin, "scl");
+ if (ret) {
+ if (ret == -EINVAL)
+ ret = -EPROBE_DEFER; /* Try again later */
goto err_request_scl;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto err_add_bus;
+ }
+ adap = &priv->adap;
+ bit_data = &priv->bit_data;
+ pdata = &priv->pdata;
+
+ if (pdev->dev.of_node) {
+ pdata->sda_pin = sda_pin;
+ pdata->scl_pin = scl_pin;
+ of_i2c_gpio_get_props(pdev->dev.of_node, pdata);
+ } else {
+ memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata));
+ }
if (pdata->sda_is_open_drain) {
gpio_direction_output(pdata->sda_pin, 1);
@@ -211,9 +236,9 @@ static int i2c_gpio_probe(struct platform_device *pdev)
return 0;
err_add_bus:
- gpio_free(pdata->scl_pin);
+ gpio_free(scl_pin);
err_request_scl:
- gpio_free(pdata->sda_pin);
+ gpio_free(sda_pin);
err_request_sda:
return ret;
}
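The -EINVAL to -EPROBE_DEFER mapping above relies on gpio_request() returning -EINVAL when the requested GPIO number is not registered, which at probe time usually just means its controller has not been bound yet; deferring lets the driver core retry the probe later. A hedged sketch of the pattern in isolation (pin number and label illustrative):

	ret = gpio_request(pin, "example");
	if (ret == -EINVAL)
		ret = -EPROBE_DEFER;	/* provider may not be bound yet, retry later */
	if (ret)
		return ret;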
diff --git a/drivers/i2c/busses/i2c-intel-mid.c b/drivers/i2c/busses/i2c-intel-mid.c
index 323fa01..0fb6597 100644
--- a/drivers/i2c/busses/i2c-intel-mid.c
+++ b/drivers/i2c/busses/i2c-intel-mid.c
@@ -1082,8 +1082,7 @@ static void intel_mid_i2c_remove(struct pci_dev *dev)
{
struct intel_mid_i2c_private *mrst = pci_get_drvdata(dev);
intel_mid_i2c_disable(&mrst->adap);
- if (i2c_del_adapter(&mrst->adap))
- dev_err(&dev->dev, "Failed to delete i2c adapter");
+ i2c_del_adapter(&mrst->adap);
free_irq(dev->irq, mrst);
iounmap(mrst->base);
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 130f02c..cd82eb4 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -183,7 +183,7 @@ struct ismt_priv {
/**
* ismt_ids - PCI device IDs supported by this driver
*/
-static const DEFINE_PCI_DEVICE_TABLE(ismt_ids) = {
+static DEFINE_PCI_DEVICE_TABLE(ismt_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) },
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 8b20ef8..3bbd65d 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -701,9 +701,8 @@ static int
mv64xxx_i2c_remove(struct platform_device *dev)
{
struct mv64xxx_i2c_data *drv_data = platform_get_drvdata(dev);
- int rc;
- rc = i2c_del_adapter(&drv_data->adapter);
+ i2c_del_adapter(&drv_data->adapter);
free_irq(drv_data->irq, drv_data);
mv64xxx_i2c_unmap_regs(drv_data);
#if defined(CONFIG_HAVE_CLK)
@@ -715,7 +714,7 @@ mv64xxx_i2c_remove(struct platform_device *dev)
#endif
kfree(drv_data);
- return rc;
+ return 0;
}
static const struct of_device_id mv64xxx_i2c_of_match_table[] = {
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 120f246..2039f23 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -31,7 +31,6 @@
#include <linux/of_i2c.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/fsl/mxs-dma.h>
#define DRIVER_NAME "mxs-i2c"
@@ -56,6 +55,7 @@
#define MXS_I2C_CTRL1_SET (0x44)
#define MXS_I2C_CTRL1_CLR (0x48)
+#define MXS_I2C_CTRL1_CLR_GOT_A_NAK 0x10000000
#define MXS_I2C_CTRL1_BUS_FREE_IRQ 0x80
#define MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ 0x40
#define MXS_I2C_CTRL1_NO_SLAVE_ACK_IRQ 0x20
@@ -65,6 +65,10 @@
#define MXS_I2C_CTRL1_SLAVE_STOP_IRQ 0x02
#define MXS_I2C_CTRL1_SLAVE_IRQ 0x01
+#define MXS_I2C_STAT (0x50)
+#define MXS_I2C_STAT_BUS_BUSY 0x00000800
+#define MXS_I2C_STAT_CLK_GEN_BUSY 0x00000400
+
#define MXS_I2C_DATA (0xa0)
#define MXS_I2C_DEBUG0 (0xb0)
@@ -113,9 +117,7 @@ struct mxs_i2c_dev {
uint32_t timing1;
/* DMA support components */
- int dma_channel;
struct dma_chan *dmach;
- struct mxs_dma_data dma_data;
uint32_t pio_data[2];
uint32_t addr_data;
struct scatterlist sg_io[2];
@@ -297,12 +299,10 @@ static int mxs_i2c_pio_wait_dmareq(struct mxs_i2c_dev *i2c)
cond_resched();
}
- writel(MXS_I2C_DEBUG0_DMAREQ, i2c->regs + MXS_I2C_DEBUG0_CLR);
-
return 0;
}
-static int mxs_i2c_pio_wait_cplt(struct mxs_i2c_dev *i2c)
+static int mxs_i2c_pio_wait_cplt(struct mxs_i2c_dev *i2c, int last)
{
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
@@ -323,9 +323,50 @@ static int mxs_i2c_pio_wait_cplt(struct mxs_i2c_dev *i2c)
writel(MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ,
i2c->regs + MXS_I2C_CTRL1_CLR);
+ /*
+ * When ending a transfer with a stop, we have to wait for the bus to
+ * go idle before we report the transfer as completed. Otherwise the
+ * start of the next transfer may race with the end of the current one.
+ */
+ while (last && (readl(i2c->regs + MXS_I2C_STAT) &
+ (MXS_I2C_STAT_BUS_BUSY | MXS_I2C_STAT_CLK_GEN_BUSY))) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ cond_resched();
+ }
+
return 0;
}
+static int mxs_i2c_pio_check_error_state(struct mxs_i2c_dev *i2c)
+{
+ u32 state;
+
+ state = readl(i2c->regs + MXS_I2C_CTRL1_CLR) & MXS_I2C_IRQ_MASK;
+
+ if (state & MXS_I2C_CTRL1_NO_SLAVE_ACK_IRQ)
+ i2c->cmd_err = -ENXIO;
+ else if (state & (MXS_I2C_CTRL1_EARLY_TERM_IRQ |
+ MXS_I2C_CTRL1_MASTER_LOSS_IRQ |
+ MXS_I2C_CTRL1_SLAVE_STOP_IRQ |
+ MXS_I2C_CTRL1_SLAVE_IRQ))
+ i2c->cmd_err = -EIO;
+
+ return i2c->cmd_err;
+}
+
+static void mxs_i2c_pio_trigger_cmd(struct mxs_i2c_dev *i2c, u32 cmd)
+{
+ u32 reg;
+
+ writel(cmd, i2c->regs + MXS_I2C_CTRL0);
+
+ /* readback makes sure the write is latched into hardware */
+ reg = readl(i2c->regs + MXS_I2C_CTRL0);
+ reg |= MXS_I2C_CTRL0_RUN;
+ writel(reg, i2c->regs + MXS_I2C_CTRL0);
+}
+
static int mxs_i2c_pio_setup_xfer(struct i2c_adapter *adap,
struct i2c_msg *msg, uint32_t flags)
{
@@ -341,23 +382,26 @@ static int mxs_i2c_pio_setup_xfer(struct i2c_adapter *adap,
addr_data |= I2C_SMBUS_READ;
/* SELECT command. */
- writel(MXS_I2C_CTRL0_RUN | MXS_CMD_I2C_SELECT,
- i2c->regs + MXS_I2C_CTRL0);
+ mxs_i2c_pio_trigger_cmd(i2c, MXS_CMD_I2C_SELECT);
ret = mxs_i2c_pio_wait_dmareq(i2c);
if (ret)
return ret;
writel(addr_data, i2c->regs + MXS_I2C_DATA);
+ writel(MXS_I2C_DEBUG0_DMAREQ, i2c->regs + MXS_I2C_DEBUG0_CLR);
- ret = mxs_i2c_pio_wait_cplt(i2c);
+ ret = mxs_i2c_pio_wait_cplt(i2c, 0);
if (ret)
return ret;
+ if (mxs_i2c_pio_check_error_state(i2c))
+ goto cleanup;
+
/* READ command. */
- writel(MXS_I2C_CTRL0_RUN | MXS_CMD_I2C_READ | flags |
- MXS_I2C_CTRL0_XFER_COUNT(msg->len),
- i2c->regs + MXS_I2C_CTRL0);
+ mxs_i2c_pio_trigger_cmd(i2c,
+ MXS_CMD_I2C_READ | flags |
+ MXS_I2C_CTRL0_XFER_COUNT(msg->len));
for (i = 0; i < msg->len; i++) {
if ((i & 3) == 0) {
@@ -365,6 +409,8 @@ static int mxs_i2c_pio_setup_xfer(struct i2c_adapter *adap,
if (ret)
return ret;
data = readl(i2c->regs + MXS_I2C_DATA);
+ writel(MXS_I2C_DEBUG0_DMAREQ,
+ i2c->regs + MXS_I2C_DEBUG0_CLR);
}
msg->buf[i] = data & 0xff;
data >>= 8;
@@ -373,9 +419,9 @@ static int mxs_i2c_pio_setup_xfer(struct i2c_adapter *adap,
addr_data |= I2C_SMBUS_WRITE;
/* WRITE command. */
- writel(MXS_I2C_CTRL0_RUN | MXS_CMD_I2C_WRITE | flags |
- MXS_I2C_CTRL0_XFER_COUNT(msg->len + 1),
- i2c->regs + MXS_I2C_CTRL0);
+ mxs_i2c_pio_trigger_cmd(i2c,
+ MXS_CMD_I2C_WRITE | flags |
+ MXS_I2C_CTRL0_XFER_COUNT(msg->len + 1));
/*
* The LSB of data buffer is the first byte blasted across
@@ -391,6 +437,8 @@ static int mxs_i2c_pio_setup_xfer(struct i2c_adapter *adap,
if (ret)
return ret;
writel(data, i2c->regs + MXS_I2C_DATA);
+ writel(MXS_I2C_DEBUG0_DMAREQ,
+ i2c->regs + MXS_I2C_DEBUG0_CLR);
}
}
@@ -401,13 +449,19 @@ static int mxs_i2c_pio_setup_xfer(struct i2c_adapter *adap,
if (ret)
return ret;
writel(data, i2c->regs + MXS_I2C_DATA);
+ writel(MXS_I2C_DEBUG0_DMAREQ,
+ i2c->regs + MXS_I2C_DEBUG0_CLR);
}
}
- ret = mxs_i2c_pio_wait_cplt(i2c);
+ ret = mxs_i2c_pio_wait_cplt(i2c, flags & MXS_I2C_CTRL0_POST_SEND_STOP);
if (ret)
return ret;
+ /* make sure any error that occurred is captured in cmd_err */
+ mxs_i2c_pio_check_error_state(i2c);
+
+cleanup:
/* Clear any dangling IRQs and re-enable interrupts. */
writel(MXS_I2C_IRQ_MASK, i2c->regs + MXS_I2C_CTRL1_CLR);
writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
@@ -439,12 +493,12 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
* using PIO mode while longer transfers use DMA. The 8 byte border is
* based on this empirical measurement and a lot of previous frobbing.
*/
+ i2c->cmd_err = 0;
if (msg->len < 8) {
ret = mxs_i2c_pio_setup_xfer(adap, msg, flags);
if (ret)
mxs_i2c_reset(i2c);
} else {
- i2c->cmd_err = 0;
INIT_COMPLETION(i2c->cmd_complete);
ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
if (ret)
@@ -454,13 +508,19 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
msecs_to_jiffies(1000));
if (ret == 0)
goto timeout;
+ }
- if (i2c->cmd_err == -ENXIO)
- mxs_i2c_reset(i2c);
-
- ret = i2c->cmd_err;
+ if (i2c->cmd_err == -ENXIO) {
+ /*
+ * If the transfer fails with a NAK from the slave the
+ * controller halts until it gets told to return to idle state.
+ */
+ writel(MXS_I2C_CTRL1_CLR_GOT_A_NAK,
+ i2c->regs + MXS_I2C_CTRL1_SET);
}
+ ret = i2c->cmd_err;
+
dev_dbg(i2c->dev, "Done with err=%d\n", ret);
return ret;
@@ -518,21 +578,6 @@ static const struct i2c_algorithm mxs_i2c_algo = {
.functionality = mxs_i2c_func,
};
-static bool mxs_i2c_dma_filter(struct dma_chan *chan, void *param)
-{
- struct mxs_i2c_dev *i2c = param;
-
- if (!mxs_dma_is_apbx(chan))
- return false;
-
- if (chan->chan_id != i2c->dma_channel)
- return false;
-
- chan->private = &i2c->dma_data;
-
- return true;
-}
-
static void mxs_i2c_derive_timing(struct mxs_i2c_dev *i2c, int speed)
{
/* The I2C block clock run at 24MHz */
@@ -577,17 +622,6 @@ static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c)
struct device_node *node = dev->of_node;
int ret;
- /*
- * TODO: This is a temporary solution and should be changed
- * to use generic DMA binding later when the helpers get in.
- */
- ret = of_property_read_u32(node, "fsl,i2c-dma-channel",
- &i2c->dma_channel);
- if (ret) {
- dev_err(dev, "Failed to get DMA channel!\n");
- return -ENODEV;
- }
-
ret = of_property_read_u32(node, "clock-frequency", &speed);
if (ret) {
dev_warn(dev, "No I2C speed selected, using 100kHz\n");
@@ -607,8 +641,7 @@ static int mxs_i2c_probe(struct platform_device *pdev)
struct pinctrl *pinctrl;
struct resource *res;
resource_size_t res_size;
- int err, irq, dmairq;
- dma_cap_mask_t mask;
+ int err, irq;
pinctrl = devm_pinctrl_get_select_default(dev);
if (IS_ERR(pinctrl))
@@ -620,9 +653,8 @@ static int mxs_i2c_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- dmairq = platform_get_irq(pdev, 1);
- if (!res || irq < 0 || dmairq < 0)
+ if (!res || irq < 0)
return -ENOENT;
res_size = resource_size(res);
@@ -648,10 +680,7 @@ static int mxs_i2c_probe(struct platform_device *pdev)
}
/* Setup the DMA */
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- i2c->dma_data.chan_irq = dmairq;
- i2c->dmach = dma_request_channel(mask, mxs_i2c_dma_filter, i2c);
+ i2c->dmach = dma_request_slave_channel(dev, "rx-tx");
if (!i2c->dmach) {
dev_err(dev, "Failed to request dma\n");
return -ENODEV;
@@ -686,11 +715,8 @@ static int mxs_i2c_probe(struct platform_device *pdev)
static int mxs_i2c_remove(struct platform_device *pdev)
{
struct mxs_i2c_dev *i2c = platform_get_drvdata(pdev);
- int ret;
- ret = i2c_del_adapter(&i2c->adapter);
- if (ret)
- return -EBUSY;
+ i2c_del_adapter(&i2c->adapter);
if (i2c->dmach)
dma_release_channel(i2c->dmach);
diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
index 29015eb..2ca268d 100644
--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
@@ -164,11 +164,7 @@ static int __init nforce2_s4985_init(void)
}
/* Unregister physical bus */
- error = i2c_del_adapter(nforce2_smbus);
- if (error) {
- dev_err(&nforce2_smbus->dev, "Physical bus removal failed\n");
- goto ERROR0;
- }
+ i2c_del_adapter(nforce2_smbus);
printk(KERN_INFO "Enabling SMBus multiplexing for Tyan S4985\n");
/* Define the 5 virtual adapters and algorithms structures */
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index 935585e..956fe32 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -183,7 +183,7 @@ static irqreturn_t octeon_i2c_isr(int irq, void *dev_id)
struct octeon_i2c *i2c = dev_id;
octeon_i2c_int_disable(i2c);
- wake_up_interruptible(&i2c->queue);
+ wake_up(&i2c->queue);
return IRQ_HANDLED;
}
@@ -206,9 +206,9 @@ static int octeon_i2c_wait(struct octeon_i2c *i2c)
octeon_i2c_int_enable(i2c);
- result = wait_event_interruptible_timeout(i2c->queue,
- octeon_i2c_test_iflg(i2c),
- i2c->adap.timeout);
+ result = wait_event_timeout(i2c->queue,
+ octeon_i2c_test_iflg(i2c),
+ i2c->adap.timeout);
octeon_i2c_int_disable(i2c);
@@ -440,7 +440,7 @@ static struct i2c_adapter octeon_i2c_ops = {
.owner = THIS_MODULE,
.name = "OCTEON adapter",
.algo = &octeon_i2c_algo,
- .timeout = 2,
+ .timeout = HZ / 50,
};
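The adapter timeout above is expressed in jiffies, so the old constant silently depended on CONFIG_HZ; a quick worked check of the change (illustrative arithmetic only):

	old: .timeout = 2        ->  20 ms at HZ=100, but only 2 ms at HZ=1000
	new: .timeout = HZ / 50  ->  20 ms regardless of HZ (2 jiffies at HZ=100, 20 at HZ=1000)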
/**
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index da54e67..8dc90da 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -213,14 +213,8 @@ static const struct i2c_algorithm i2c_powermac_algorithm = {
static int i2c_powermac_remove(struct platform_device *dev)
{
struct i2c_adapter *adapter = platform_get_drvdata(dev);
- int rc;
-
- rc = i2c_del_adapter(adapter);
- /* We aren't that prepared to deal with this... */
- if (rc)
- printk(KERN_WARNING
- "i2c-powermac.c: Failed to remove bus %s !\n",
- adapter->name);
+
+ i2c_del_adapter(adapter);
memset(adapter, 0, sizeof(*adapter));
return 0;
diff --git a/drivers/i2c/busses/i2c-puv3.c b/drivers/i2c/busses/i2c-puv3.c
index 8acef65..37a84c8 100644
--- a/drivers/i2c/busses/i2c-puv3.c
+++ b/drivers/i2c/busses/i2c-puv3.c
@@ -234,21 +234,15 @@ static int puv3_i2c_remove(struct platform_device *pdev)
{
struct i2c_adapter *adapter = platform_get_drvdata(pdev);
struct resource *mem;
- int rc;
- rc = i2c_del_adapter(adapter);
- if (rc) {
- dev_err(&pdev->dev, "Adapter '%s' delete fail\n",
- adapter->name);
- return rc;
- }
+ i2c_del_adapter(adapter);
put_device(&pdev->dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(mem->start, resource_size(mem));
- return rc;
+ return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 1e88e8d..ea6d45d 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1053,16 +1053,13 @@ static int i2c_pxa_probe_dt(struct platform_device *pdev, struct pxa_i2c *i2c,
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *of_id =
of_match_device(i2c_pxa_dt_ids, &pdev->dev);
- int ret;
if (!of_id)
return 1;
- ret = of_alias_get_id(np, "i2c");
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
- return ret;
- }
- pdev->id = ret;
+
+ /* For device tree we always use the dynamic or alias-assigned ID */
+ i2c->adap.nr = -1;
+
if (of_get_property(np, "mrvl,i2c-polling", NULL))
i2c->use_pio = 1;
if (of_get_property(np, "mrvl,i2c-fast-mode", NULL))
@@ -1100,6 +1097,9 @@ static int i2c_pxa_probe(struct platform_device *dev)
goto emalloc;
}
+ /* Default adapter num to device id; i2c_pxa_probe_dt can override. */
+ i2c->adap.nr = dev->id;
+
ret = i2c_pxa_probe_dt(dev, i2c, &i2c_type);
if (ret > 0)
ret = i2c_pxa_probe_pdata(dev, i2c, &i2c_type);
@@ -1124,9 +1124,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
spin_lock_init(&i2c->lock);
init_waitqueue_head(&i2c->wait);
- i2c->adap.nr = dev->id;
- snprintf(i2c->adap.name, sizeof(i2c->adap.name), "pxa_i2c-i2c.%u",
- i2c->adap.nr);
+ strlcpy(i2c->adap.name, "pxa_i2c-i2c", sizeof(i2c->adap.name));
i2c->clk = clk_get(&dev->dev, NULL);
if (IS_ERR(i2c->clk)) {
@@ -1169,7 +1167,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
} else {
i2c->adap.algo = &i2c_pxa_algorithm;
ret = request_irq(irq, i2c_pxa_handler, IRQF_SHARED,
- i2c->adap.name, i2c);
+ dev_name(&dev->dev), i2c);
if (ret)
goto ereqirq;
}
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index f6b880b..6e8ee92 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -42,9 +42,46 @@
#include <asm/irq.h>
-#include <plat/regs-iic.h>
#include <linux/platform_data/i2c-s3c2410.h>
+/* see s3c2410x user guide, v1.1, section 9 (p447) for more info */
+
+#define S3C2410_IICCON 0x00
+#define S3C2410_IICSTAT 0x04
+#define S3C2410_IICADD 0x08
+#define S3C2410_IICDS 0x0C
+#define S3C2440_IICLC 0x10
+
+#define S3C2410_IICCON_ACKEN (1 << 7)
+#define S3C2410_IICCON_TXDIV_16 (0 << 6)
+#define S3C2410_IICCON_TXDIV_512 (1 << 6)
+#define S3C2410_IICCON_IRQEN (1 << 5)
+#define S3C2410_IICCON_IRQPEND (1 << 4)
+#define S3C2410_IICCON_SCALE(x) ((x) & 0xf)
+#define S3C2410_IICCON_SCALEMASK (0xf)
+
+#define S3C2410_IICSTAT_MASTER_RX (2 << 6)
+#define S3C2410_IICSTAT_MASTER_TX (3 << 6)
+#define S3C2410_IICSTAT_SLAVE_RX (0 << 6)
+#define S3C2410_IICSTAT_SLAVE_TX (1 << 6)
+#define S3C2410_IICSTAT_MODEMASK (3 << 6)
+
+#define S3C2410_IICSTAT_START (1 << 5)
+#define S3C2410_IICSTAT_BUSBUSY (1 << 5)
+#define S3C2410_IICSTAT_TXRXEN (1 << 4)
+#define S3C2410_IICSTAT_ARBITR (1 << 3)
+#define S3C2410_IICSTAT_ASSLAVE (1 << 2)
+#define S3C2410_IICSTAT_ADDR0 (1 << 1)
+#define S3C2410_IICSTAT_LASTBIT (1 << 0)
+
+#define S3C2410_IICLC_SDA_DELAY0 (0 << 0)
+#define S3C2410_IICLC_SDA_DELAY5 (1 << 0)
+#define S3C2410_IICLC_SDA_DELAY10 (2 << 0)
+#define S3C2410_IICLC_SDA_DELAY15 (3 << 0)
+#define S3C2410_IICLC_SDA_DELAY_MASK (3 << 0)
+
+#define S3C2410_IICLC_FILTER_ON (1 << 2)
+
/* Treat S3C2410 as baseline hardware, anything else is supported via quirks */
#define QUIRK_S3C2440 (1 << 0)
#define QUIRK_HDMIPHY (1 << 1)
@@ -309,6 +346,12 @@ static inline int is_lastmsg(struct s3c24xx_i2c *i2c)
static inline int is_msglast(struct s3c24xx_i2c *i2c)
{
+ /* msg->len is always 1 for the first byte of an SMBus block read.
+ * The actual length is then read from the slave, and the remaining
+ * bytes are read according to that length. */
+ if (i2c->msg->flags & I2C_M_RECV_LEN && i2c->msg->len == 1)
+ return 0;
+
return i2c->msg_ptr == i2c->msg->len-1;
}
@@ -448,6 +491,9 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
byte = readb(i2c->regs + S3C2410_IICDS);
i2c->msg->buf[i2c->msg_ptr++] = byte;
+ /* Add actual length to read for smbus block read */
+ if (i2c->msg->flags & I2C_M_RECV_LEN && i2c->msg->len == 1)
+ i2c->msg->len += byte;
prepare_read:
if (is_msglast(i2c)) {
/* last byte of buffer */
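For context on the two hunks above: with I2C_M_RECV_LEN the core hands the bus driver a read message whose len starts at 1, and the first byte received from the slave is the block length that the driver then adds to msg->len. A hedged sketch of what such a message looks like from the caller's side (slave address and buffer are illustrative):

	u8 buf[I2C_SMBUS_BLOCK_MAX + 2];
	struct i2c_msg msg = {
		.addr	= 0x50,				/* example slave */
		.flags	= I2C_M_RD | I2C_M_RECV_LEN,
		.len	= 1,				/* grows once the count byte arrives */
		.buf	= buf,
	};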
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index b714776..b60ff90 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -25,7 +25,6 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/i2c-tegra.h>
#include <linux/of_i2c.h>
#include <linux/of_device.h>
#include <linux/module.h>
@@ -172,7 +171,7 @@ struct tegra_i2c_dev {
u8 *msg_buf;
size_t msg_buf_remaining;
int msg_read;
- unsigned long bus_clk_rate;
+ u32 bus_clk_rate;
bool is_suspended;
};
@@ -694,7 +693,6 @@ static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
.clk_divisor_std_fast_mode = 0x19,
};
-#if defined(CONFIG_OF)
/* Match table for of_platform binding */
static const struct of_device_id tegra_i2c_of_match[] = {
{ .compatible = "nvidia,tegra114-i2c", .data = &tegra114_i2c_hw, },
@@ -704,16 +702,13 @@ static const struct of_device_id tegra_i2c_of_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
-#endif
static int tegra_i2c_probe(struct platform_device *pdev)
{
struct tegra_i2c_dev *i2c_dev;
- struct tegra_i2c_platform_data *pdata = pdev->dev.platform_data;
struct resource *res;
struct clk *div_clk;
struct clk *fast_clk;
- const unsigned int *prop;
void __iomem *base;
int irq;
int ret = 0;
@@ -754,23 +749,16 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_dev->cont_id = pdev->id;
i2c_dev->dev = &pdev->dev;
- i2c_dev->bus_clk_rate = 100000; /* default clock rate */
- if (pdata) {
- i2c_dev->bus_clk_rate = pdata->bus_clk_rate;
-
- } else if (i2c_dev->dev->of_node) { /* if there is a device tree node ... */
- prop = of_get_property(i2c_dev->dev->of_node,
- "clock-frequency", NULL);
- if (prop)
- i2c_dev->bus_clk_rate = be32_to_cpup(prop);
- }
+ ret = of_property_read_u32(i2c_dev->dev->of_node, "clock-frequency",
+ &i2c_dev->bus_clk_rate);
+ if (ret)
+ i2c_dev->bus_clk_rate = 100000; /* default clock rate */
i2c_dev->hw = &tegra20_i2c_hw;
if (pdev->dev.of_node) {
const struct of_device_id *match;
- match = of_match_device(of_match_ptr(tegra_i2c_of_match),
- &pdev->dev);
+ match = of_match_device(tegra_i2c_of_match, &pdev->dev);
i2c_dev->hw = match->data;
i2c_dev->is_dvc = of_device_is_compatible(pdev->dev.of_node,
"nvidia,tegra20-i2c-dvc");
@@ -876,7 +864,7 @@ static struct platform_driver tegra_i2c_driver = {
.driver = {
.name = "tegra-i2c",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(tegra_i2c_of_match),
+ .of_match_table = tegra_i2c_of_match,
.pm = TEGRA_I2C_PM,
},
};
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
index f45c32c..c68450c 100644
--- a/drivers/i2c/busses/i2c-viperboard.c
+++ b/drivers/i2c/busses/i2c-viperboard.c
@@ -421,11 +421,10 @@ error:
static int vprbrd_i2c_remove(struct platform_device *pdev)
{
struct vprbrd_i2c *vb_i2c = platform_get_drvdata(pdev);
- int ret;
- ret = i2c_del_adapter(&vb_i2c->i2c);
+ i2c_del_adapter(&vb_i2c->i2c);
- return ret;
+ return 0;
}
static struct platform_driver vprbrd_i2c_driver = {
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 332c720..3d0f052 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -312,10 +312,8 @@ static void xiic_fill_tx_fifo(struct xiic_i2c *i2c)
/* last message in transfer -> STOP */
data |= XIIC_TX_DYN_STOP_MASK;
dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__);
-
- xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
- } else
- xiic_setreg8(i2c, XIIC_DTR_REG_OFFSET, data);
+ }
+ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
}
}
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 991d38d..6b63cc7 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -27,7 +27,9 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/delay.h>
#include <linux/errno.h>
+#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/init.h>
@@ -47,7 +49,7 @@
/* core_lock protects i2c_adapter_idr, and guarantees
that device detection, deletion of detected devices, and attach_adapter
- and detach_adapter calls are serialized */
+ calls are serialized */
static DEFINE_MUTEX(core_lock);
static DEFINE_IDR(i2c_adapter_idr);
@@ -91,7 +93,6 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv)
return 0;
}
-#ifdef CONFIG_HOTPLUG
/* uevent helps with hotplug: modprobe -q $(MODALIAS) */
static int i2c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
@@ -105,9 +106,129 @@ static int i2c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-#else
-#define i2c_device_uevent NULL
-#endif /* CONFIG_HOTPLUG */
+/* i2c bus recovery routines */
+static int get_scl_gpio_value(struct i2c_adapter *adap)
+{
+ return gpio_get_value(adap->bus_recovery_info->scl_gpio);
+}
+
+static void set_scl_gpio_value(struct i2c_adapter *adap, int val)
+{
+ gpio_set_value(adap->bus_recovery_info->scl_gpio, val);
+}
+
+static int get_sda_gpio_value(struct i2c_adapter *adap)
+{
+ return gpio_get_value(adap->bus_recovery_info->sda_gpio);
+}
+
+static int i2c_get_gpios_for_recovery(struct i2c_adapter *adap)
+{
+ struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
+ struct device *dev = &adap->dev;
+ int ret = 0;
+
+ ret = gpio_request_one(bri->scl_gpio, GPIOF_OPEN_DRAIN |
+ GPIOF_OUT_INIT_HIGH, "i2c-scl");
+ if (ret) {
+ dev_warn(dev, "Can't get SCL gpio: %d\n", bri->scl_gpio);
+ return ret;
+ }
+
+ if (bri->get_sda) {
+ if (gpio_request_one(bri->sda_gpio, GPIOF_IN, "i2c-sda")) {
+ /* work without SDA polling */
+ dev_warn(dev, "Can't get SDA gpio: %d. Not using SDA polling\n",
+ bri->sda_gpio);
+ bri->get_sda = NULL;
+ }
+ }
+
+ return ret;
+}
+
+static void i2c_put_gpios_for_recovery(struct i2c_adapter *adap)
+{
+ struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
+
+ if (bri->get_sda)
+ gpio_free(bri->sda_gpio);
+
+ gpio_free(bri->scl_gpio);
+}
+
+/*
+ * We are generating clock pulses. ndelay() determines durating of clk pulses.
+ * We will generate clock with rate 100 KHz and so duration of both clock levels
+ * is: delay in ns = (10^6 / 100) / 2
+ */
+#define RECOVERY_NDELAY 5000
+#define RECOVERY_CLK_CNT 9
+
+static int i2c_generic_recovery(struct i2c_adapter *adap)
+{
+ struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
+ int i = 0, val = 1, ret = 0;
+
+ if (bri->prepare_recovery)
+ bri->prepare_recovery(bri);
+
+ /*
+ * By this time SCL is high, as we need to give 9 falling-rising edges
+ */
+ while (i++ < RECOVERY_CLK_CNT * 2) {
+ if (val) {
+ /* Break if SDA is high */
+ if (bri->get_sda && bri->get_sda(adap))
+ break;
+ /* SCL shouldn't be low here */
+ if (!bri->get_scl(adap)) {
+ dev_err(&adap->dev,
+ "SCL is stuck low, exit recovery\n");
+ ret = -EBUSY;
+ break;
+ }
+ }
+
+ val = !val;
+ bri->set_scl(adap, val);
+ ndelay(RECOVERY_NDELAY);
+ }
+
+ if (bri->unprepare_recovery)
+ bri->unprepare_recovery(bri);
+
+ return ret;
+}
+
+int i2c_generic_scl_recovery(struct i2c_adapter *adap)
+{
+ adap->bus_recovery_info->set_scl(adap, 1);
+ return i2c_generic_recovery(adap);
+}
+
+int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
+{
+ int ret;
+
+ ret = i2c_get_gpios_for_recovery(adap);
+ if (ret)
+ return ret;
+
+ ret = i2c_generic_recovery(adap);
+ i2c_put_gpios_for_recovery(adap);
+
+ return ret;
+}
+
+int i2c_recover_bus(struct i2c_adapter *adap)
+{
+ if (!adap->bus_recovery_info)
+ return -EOPNOTSUPP;
+
+ dev_dbg(&adap->dev, "Trying i2c bus recovery\n");
+ return adap->bus_recovery_info->recover_bus(adap);
+}
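A hedged usage sketch of the recovery hooks added above, as an adapter driver might wire them up; the struct and function names are the ones introduced in this patch, while the GPIO numbers and probe context are illustrative assumptions:

	/* Illustrative only -- not part of this patch. */
	static struct i2c_bus_recovery_info example_recovery = {
		.recover_bus	= i2c_generic_gpio_recovery,
		.scl_gpio	= 21,	/* must be a valid GPIO */
		.sda_gpio	= 20,	/* optional, enables SDA polling */
	};

	/* in the adapter driver's probe(), before registering the adapter: */
	adap->bus_recovery_info = &example_recovery;

	/* later, for example when a transfer times out: */
	i2c_recover_bus(adap);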
static int i2c_device_probe(struct device *dev)
{
@@ -902,6 +1023,39 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
"Failed to create compatibility class link\n");
#endif
+ /* bus recovery specific initialization */
+ if (adap->bus_recovery_info) {
+ struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
+
+ if (!bri->recover_bus) {
+ dev_err(&adap->dev, "No recover_bus() found, not using recovery\n");
+ adap->bus_recovery_info = NULL;
+ goto exit_recovery;
+ }
+
+ /* Generic GPIO recovery */
+ if (bri->recover_bus == i2c_generic_gpio_recovery) {
+ if (!gpio_is_valid(bri->scl_gpio)) {
+ dev_err(&adap->dev, "Invalid SCL gpio, not using recovery\n");
+ adap->bus_recovery_info = NULL;
+ goto exit_recovery;
+ }
+
+ if (gpio_is_valid(bri->sda_gpio))
+ bri->get_sda = get_sda_gpio_value;
+ else
+ bri->get_sda = NULL;
+
+ bri->get_scl = get_scl_gpio_value;
+ bri->set_scl = set_scl_gpio_value;
+ } else if (!bri->set_scl || !bri->get_scl) {
+ /* Generic SCL recovery */
+ dev_err(&adap->dev, "No {get|set}_gpio() found, not using recovery\n");
+ adap->bus_recovery_info = NULL;
+ }
+ }
+
+exit_recovery:
/* create pre-declared device nodes */
if (adap->nr < __i2c_first_dynamic_bus_num)
i2c_scan_static_board_info(adap);
@@ -921,13 +1075,35 @@ out_list:
}
/**
+ * __i2c_add_numbered_adapter - i2c_add_numbered_adapter where nr is never -1
+ * @adap: the adapter to register (with adap->nr initialized)
+ * Context: can sleep
+ *
+ * See i2c_add_numbered_adapter() for details.
+ */
+static int __i2c_add_numbered_adapter(struct i2c_adapter *adap)
+{
+ int id;
+
+ mutex_lock(&core_lock);
+ id = idr_alloc(&i2c_adapter_idr, adap, adap->nr, adap->nr + 1,
+ GFP_KERNEL);
+ mutex_unlock(&core_lock);
+ if (id < 0)
+ return id == -ENOSPC ? -EBUSY : id;
+
+ return i2c_register_adapter(adap);
+}
+
+/**
* i2c_add_adapter - declare i2c adapter, use dynamic bus number
* @adapter: the adapter to add
* Context: can sleep
*
* This routine is used to declare an I2C adapter when its bus number
- * doesn't matter. Examples: for I2C adapters dynamically added by
- * USB links or PCI plugin cards.
+ * doesn't matter or when its bus number is specified by a DT alias.
+ * Examples of cases when the bus number doesn't matter: I2C adapters
+ * dynamically added by USB links or PCI plugin cards.
*
* When this returns zero, a new bus number was allocated and stored
* in adap->nr, and the specified adapter became available for clients.
@@ -935,8 +1111,17 @@ out_list:
*/
int i2c_add_adapter(struct i2c_adapter *adapter)
{
+ struct device *dev = &adapter->dev;
int id;
+ if (dev->of_node) {
+ id = of_alias_get_id(dev->of_node, "i2c");
+ if (id >= 0) {
+ adapter->nr = id;
+ return __i2c_add_numbered_adapter(adapter);
+ }
+ }
+
mutex_lock(&core_lock);
id = idr_alloc(&i2c_adapter_idr, adapter,
__i2c_first_dynamic_bus_num, 0, GFP_KERNEL);
@@ -975,26 +1160,17 @@ EXPORT_SYMBOL(i2c_add_adapter);
*/
int i2c_add_numbered_adapter(struct i2c_adapter *adap)
{
- int id;
-
if (adap->nr == -1) /* -1 means dynamically assign bus id */
return i2c_add_adapter(adap);
- mutex_lock(&core_lock);
- id = idr_alloc(&i2c_adapter_idr, adap, adap->nr, adap->nr + 1,
- GFP_KERNEL);
- mutex_unlock(&core_lock);
- if (id < 0)
- return id == -ENOSPC ? -EBUSY : id;
- return i2c_register_adapter(adap);
+ return __i2c_add_numbered_adapter(adap);
}
EXPORT_SYMBOL_GPL(i2c_add_numbered_adapter);
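/*
 * Illustrative sketch, not part of this patch: with the alias lookup
 * above, an OF-aware adapter driver gets a fixed bus number simply by
 * having an "i2c" alias point at its node, e.g.
 *
 *	aliases {
 *		i2c2 = &foo_i2c_bus;
 *	};
 *
 * makes the adapter below come up as bus 2; without the alias it keeps
 * getting a dynamic number, exactly as before.  The foo_* names and
 * foo_i2c_algo are hypothetical.
 */
static int foo_i2c_probe(struct platform_device *pdev)
{
	struct i2c_adapter *adap;

	adap = devm_kzalloc(&pdev->dev, sizeof(*adap), GFP_KERNEL);
	if (!adap)
		return -ENOMEM;

	strlcpy(adap->name, "foo-i2c", sizeof(adap->name));
	adap->owner = THIS_MODULE;
	adap->algo = &foo_i2c_algo;		/* hypothetical algorithm ops */
	adap->dev.parent = &pdev->dev;
	adap->dev.of_node = pdev->dev.of_node;	/* enables the alias lookup */

	return i2c_add_adapter(adap);
}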
-static int i2c_do_del_adapter(struct i2c_driver *driver,
+static void i2c_do_del_adapter(struct i2c_driver *driver,
struct i2c_adapter *adapter)
{
struct i2c_client *client, *_n;
- int res;
/* Remove the devices we created ourselves as the result of hardware
* probing (using a driver's detect method) */
@@ -1006,16 +1182,6 @@ static int i2c_do_del_adapter(struct i2c_driver *driver,
i2c_unregister_device(client);
}
}
-
- if (!driver->detach_adapter)
- return 0;
- dev_warn(&adapter->dev, "%s: detach_adapter method is deprecated\n",
- driver->driver.name);
- res = driver->detach_adapter(adapter);
- if (res)
- dev_err(&adapter->dev, "detach_adapter failed (%d) "
- "for driver [%s]\n", res, driver->driver.name);
- return res;
}
static int __unregister_client(struct device *dev, void *dummy)
@@ -1036,7 +1202,8 @@ static int __unregister_dummy(struct device *dev, void *dummy)
static int __process_removed_adapter(struct device_driver *d, void *data)
{
- return i2c_do_del_adapter(to_i2c_driver(d), data);
+ i2c_do_del_adapter(to_i2c_driver(d), data);
+ return 0;
}
/**
@@ -1047,9 +1214,8 @@ static int __process_removed_adapter(struct device_driver *d, void *data)
* This unregisters an I2C adapter which was previously registered
* by @i2c_add_adapter or @i2c_add_numbered_adapter.
*/
-int i2c_del_adapter(struct i2c_adapter *adap)
+void i2c_del_adapter(struct i2c_adapter *adap)
{
- int res = 0;
struct i2c_adapter *found;
struct i2c_client *client, *next;
@@ -1060,16 +1226,14 @@ int i2c_del_adapter(struct i2c_adapter *adap)
if (found != adap) {
pr_debug("i2c-core: attempting to delete unregistered "
"adapter [%s]\n", adap->name);
- return -EINVAL;
+ return;
}
/* Tell drivers about this removal */
mutex_lock(&core_lock);
- res = bus_for_each_drv(&i2c_bus_type, NULL, adap,
+ bus_for_each_drv(&i2c_bus_type, NULL, adap,
__process_removed_adapter);
mutex_unlock(&core_lock);
- if (res)
- return res;
/* Remove devices instantiated from sysfs */
mutex_lock_nested(&adap->userspace_clients_lock,
@@ -1088,8 +1252,8 @@ int i2c_del_adapter(struct i2c_adapter *adap)
* we can't remove the dummy devices during the first pass: they
* could have been instantiated by real devices wishing to clean
* them up properly, so we give them a chance to do that first. */
- res = device_for_each_child(&adap->dev, NULL, __unregister_client);
- res = device_for_each_child(&adap->dev, NULL, __unregister_dummy);
+ device_for_each_child(&adap->dev, NULL, __unregister_client);
+ device_for_each_child(&adap->dev, NULL, __unregister_dummy);
#ifdef CONFIG_I2C_COMPAT
class_compat_remove_link(i2c_adapter_compat_class, &adap->dev,
@@ -1114,8 +1278,6 @@ int i2c_del_adapter(struct i2c_adapter *adap)
/* Clear the device structure in case this adapter is ever going to be
added again */
memset(&adap->dev, 0, sizeof(adap->dev));
-
- return 0;
}
EXPORT_SYMBOL(i2c_del_adapter);
@@ -1185,9 +1347,9 @@ EXPORT_SYMBOL(i2c_register_driver);
static int __process_removed_driver(struct device *dev, void *data)
{
- if (dev->type != &i2c_adapter_type)
- return 0;
- return i2c_do_del_adapter(data, to_i2c_adapter(dev));
+ if (dev->type == &i2c_adapter_type)
+ i2c_do_del_adapter(data, to_i2c_adapter(dev));
+ return 0;
}
/**
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index d94e0ce..7409ebb 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -191,17 +191,12 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
}
EXPORT_SYMBOL_GPL(i2c_add_mux_adapter);
-int i2c_del_mux_adapter(struct i2c_adapter *adap)
+void i2c_del_mux_adapter(struct i2c_adapter *adap)
{
struct i2c_mux_priv *priv = adap->algo_data;
- int ret;
- ret = i2c_del_adapter(adap);
- if (ret < 0)
- return ret;
+ i2c_del_adapter(adap);
kfree(priv);
-
- return 0;
}
EXPORT_SYMBOL_GPL(i2c_del_mux_adapter);
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 0be5b83..5faf244 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -5,6 +5,18 @@
menu "Multiplexer I2C Chip support"
depends on I2C_MUX
+config I2C_ARB_GPIO_CHALLENGE
+ tristate "GPIO-based I2C arbitration"
+ depends on GENERIC_GPIO && OF
+ help
+ If you say yes to this option, support will be included for an
+ I2C multimaster arbitration scheme using GPIOs and a challenge &
+ response mechanism where masters have to claim the bus by asserting
+ a GPIO.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-arb-gpio-challenge.
+
config I2C_MUX_GPIO
tristate "GPIO-based I2C multiplexer"
depends on GENERIC_GPIO
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index 76da869..465778b 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -1,6 +1,8 @@
#
# Makefile for multiplexer I2C chip drivers.
+obj-$(CONFIG_I2C_ARB_GPIO_CHALLENGE) += i2c-arb-gpio-challenge.o
+
obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o
obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o
obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o
diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
new file mode 100644
index 0000000..210b6f7
--- /dev/null
+++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
@@ -0,0 +1,251 @@
+/*
+ * GPIO-based I2C Arbitration Using a Challenge & Response Mechanism
+ *
+ * Copyright (C) 2012 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_i2c.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+
+/**
+ * struct i2c_arbitrator_data - Driver data for I2C arbitrator
+ *
+ * @parent: Parent adapter
+ * @child: Child bus
+ * @our_gpio: GPIO we'll use to claim.
+ * @our_gpio_release: 0 if active high; 1 if active low; i.e. when the GPIO
+ * reads this value, our claim is considered released.
+ * @their_gpio: GPIO that the other side will use to claim.
+ * @their_gpio_release: 0 if active high; 1 if active low; i.e. when the GPIO
+ * reads this value, the other side's claim is considered released.
+ * @slew_delay_us: microseconds to wait for a GPIO to go high.
+ * @wait_retry_us: we'll attempt another claim after this many microseconds.
+ * @wait_free_us: we'll give up after this many microseconds.
+ */
+
+struct i2c_arbitrator_data {
+ struct i2c_adapter *parent;
+ struct i2c_adapter *child;
+ int our_gpio;
+ int our_gpio_release;
+ int their_gpio;
+ int their_gpio_release;
+ unsigned int slew_delay_us;
+ unsigned int wait_retry_us;
+ unsigned int wait_free_us;
+};
+
+
+/**
+ * i2c_arbitrator_select - claim the I2C bus
+ *
+ * Use the GPIO-based signalling protocol; return -EBUSY if we fail.
+ */
+static int i2c_arbitrator_select(struct i2c_adapter *adap, void *data, u32 chan)
+{
+ const struct i2c_arbitrator_data *arb = data;
+ unsigned long stop_retry, stop_time;
+
+ /* Start a round of trying to claim the bus */
+ stop_time = jiffies + usecs_to_jiffies(arb->wait_free_us) + 1;
+ do {
+ /* Indicate that we want to claim the bus */
+ gpio_set_value(arb->our_gpio, !arb->our_gpio_release);
+ udelay(arb->slew_delay_us);
+
+ /* Wait for the other master to release it */
+ stop_retry = jiffies + usecs_to_jiffies(arb->wait_retry_us) + 1;
+ while (time_before(jiffies, stop_retry)) {
+ int gpio_val = !!gpio_get_value(arb->their_gpio);
+
+ if (gpio_val == arb->their_gpio_release) {
+ /* We got it, so return */
+ return 0;
+ }
+
+ usleep_range(50, 200);
+ }
+
+ /* It didn't release, so give up, wait, and try again */
+ gpio_set_value(arb->our_gpio, arb->our_gpio_release);
+
+ usleep_range(arb->wait_retry_us, arb->wait_retry_us * 2);
+ } while (time_before(jiffies, stop_time));
+
+ /* Give up, release our claim */
+ gpio_set_value(arb->our_gpio, arb->our_gpio_release);
+ udelay(arb->slew_delay_us);
+ dev_err(&adap->dev, "Could not claim bus, timeout\n");
+ return -EBUSY;
+}
+
+/**
+ * i2c_arbitrator_deselect - release the I2C bus
+ *
+ * Release the I2C bus using the GPIO-based signalling protocol.
+ */
+static int i2c_arbitrator_deselect(struct i2c_adapter *adap, void *data,
+ u32 chan)
+{
+ const struct i2c_arbitrator_data *arb = data;
+
+ /* Release the bus and wait for the other master to notice */
+ gpio_set_value(arb->our_gpio, arb->our_gpio_release);
+ udelay(arb->slew_delay_us);
+
+ return 0;
+}
+
+static int i2c_arbitrator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *parent_np;
+ struct i2c_arbitrator_data *arb;
+ enum of_gpio_flags gpio_flags;
+ unsigned long out_init;
+ int ret;
+
+ /* We only support probing from device tree; no platform_data */
+ if (!np) {
+ dev_err(dev, "Cannot find device tree node\n");
+ return -ENODEV;
+ }
+ if (dev->platform_data) {
+ dev_err(dev, "Platform data is not supported\n");
+ return -EINVAL;
+ }
+
+ arb = devm_kzalloc(dev, sizeof(*arb), GFP_KERNEL);
+ if (!arb) {
+ dev_err(dev, "Cannot allocate i2c_arbitrator_data\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, arb);
+
+ /* Request GPIOs */
+ ret = of_get_named_gpio_flags(np, "our-claim-gpio", 0, &gpio_flags);
+ if (!gpio_is_valid(ret)) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Error getting our-claim-gpio\n");
+ return ret;
+ }
+ arb->our_gpio = ret;
+ arb->our_gpio_release = !!(gpio_flags & OF_GPIO_ACTIVE_LOW);
+ out_init = (gpio_flags & OF_GPIO_ACTIVE_LOW) ?
+ GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
+ ret = devm_gpio_request_one(dev, arb->our_gpio, out_init,
+ "our-claim-gpio");
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Error requesting our-claim-gpio\n");
+ return ret;
+ }
+
+ ret = of_get_named_gpio_flags(np, "their-claim-gpios", 0, &gpio_flags);
+ if (!gpio_is_valid(ret)) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Error getting their-claim-gpio\n");
+ return ret;
+ }
+ arb->their_gpio = ret;
+ arb->their_gpio_release = !!(gpio_flags & OF_GPIO_ACTIVE_LOW);
+ ret = devm_gpio_request_one(dev, arb->their_gpio, GPIOF_IN,
+ "their-claim-gpio");
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Error requesting their-claim-gpio\n");
+ return ret;
+ }
+
+ /* At the moment we only support two masters (us + 1 other) */
+ if (gpio_is_valid(of_get_named_gpio(np, "their-claim-gpios", 1))) {
+ dev_err(dev, "Only one other master is supported\n");
+ return -EINVAL;
+ }
+
+ /* Arbitration parameters */
+ if (of_property_read_u32(np, "slew-delay-us", &arb->slew_delay_us))
+ arb->slew_delay_us = 10;
+ if (of_property_read_u32(np, "wait-retry-us", &arb->wait_retry_us))
+ arb->wait_retry_us = 3000;
+ if (of_property_read_u32(np, "wait-free-us", &arb->wait_free_us))
+ arb->wait_free_us = 50000;
+
+ /* Find our parent */
+ parent_np = of_parse_phandle(np, "i2c-parent", 0);
+ if (!parent_np) {
+ dev_err(dev, "Cannot parse i2c-parent\n");
+ return -EINVAL;
+ }
+ arb->parent = of_find_i2c_adapter_by_node(parent_np);
+ if (!arb->parent) {
+ dev_err(dev, "Cannot find parent bus\n");
+ return -EINVAL;
+ }
+
+ /* Actually add the mux adapter */
+ arb->child = i2c_add_mux_adapter(arb->parent, dev, arb, 0, 0, 0,
+ i2c_arbitrator_select,
+ i2c_arbitrator_deselect);
+ if (!arb->child) {
+ dev_err(dev, "Failed to add adapter\n");
+ ret = -ENODEV;
+ i2c_put_adapter(arb->parent);
+ }
+
+ return ret;
+}
+
+static int i2c_arbitrator_remove(struct platform_device *pdev)
+{
+ struct i2c_arbitrator_data *arb = platform_get_drvdata(pdev);
+
+ i2c_del_mux_adapter(arb->child);
+ i2c_put_adapter(arb->parent);
+
+ return 0;
+}
+
+static const struct of_device_id i2c_arbitrator_of_match[] = {
+ { .compatible = "i2c-arb-gpio-challenge", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, i2c_arbitrator_of_match);
+
+static struct platform_driver i2c_arbitrator_driver = {
+ .probe = i2c_arbitrator_probe,
+ .remove = i2c_arbitrator_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "i2c-arb-gpio-challenge",
+ .of_match_table = of_match_ptr(i2c_arbitrator_of_match),
+ },
+};
+
+module_platform_driver(i2c_arbitrator_driver);
+
+MODULE_DESCRIPTION("GPIO-based I2C Arbitration");
+MODULE_AUTHOR("Doug Anderson <dianders@chromium.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:i2c-arb-gpio-challenge");
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index abc2e55a..5a0ce00 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -201,10 +201,21 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
for (i = 0; i < mux->data.n_gpios; i++) {
ret = gpio_request(gpio_base + mux->data.gpios[i], "i2c-mux-gpio");
- if (ret)
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request GPIO %d\n",
+ mux->data.gpios[i]);
goto err_request_gpio;
- gpio_direction_output(gpio_base + mux->data.gpios[i],
- initial_state & (1 << i));
+ }
+
+ ret = gpio_direction_output(gpio_base + mux->data.gpios[i],
+ initial_state & (1 << i));
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to set direction of GPIO %d to output\n",
+ mux->data.gpios[i]);
+ i++; /* gpio_request above succeeded, so must free */
+ goto err_request_gpio;
+ }
}
for (i = 0; i < mux->data.n_values; i++) {
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 8e43872..a531d80 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -262,13 +262,11 @@ static int pca954x_remove(struct i2c_client *client)
{
struct pca954x *data = i2c_get_clientdata(client);
const struct chip_desc *chip = &chips[data->type];
- int i, err;
+ int i;
for (i = 0; i < chip->nchans; ++i)
if (data->virt_adaps[i]) {
- err = i2c_del_mux_adapter(data->virt_adaps[i]);
- if (err)
- return err;
+ i2c_del_mux_adapter(data->virt_adaps[i]);
data->virt_adaps[i] = NULL;
}
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 8126824..2ff6204 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1408,7 +1408,7 @@ static int idecd_capacity_proc_show(struct seq_file *m, void *v)
static int idecd_capacity_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, idecd_capacity_proc_show, PDE(inode)->data);
+ return single_open(file, idecd_capacity_proc_show, PDE_DATA(inode));
}
static const struct file_operations idecd_capacity_proc_fops = {
@@ -1606,7 +1606,7 @@ out:
return rc;
}
-static int idecd_release(struct gendisk *disk, fmode_t mode)
+static void idecd_release(struct gendisk *disk, fmode_t mode)
{
struct cdrom_info *info = ide_drv_g(disk, cdrom_info);
@@ -1615,8 +1615,6 @@ static int idecd_release(struct gendisk *disk, fmode_t mode)
ide_cd_put(info);
mutex_unlock(&ide_cd_mutex);
-
- return 0;
}
static int idecd_set_spindown(struct cdrom_device_info *cdi, unsigned long arg)
diff --git a/drivers/ide/ide-disk_proc.c b/drivers/ide/ide-disk_proc.c
index 8b570a1..0d1fae6 100644
--- a/drivers/ide/ide-disk_proc.c
+++ b/drivers/ide/ide-disk_proc.c
@@ -53,7 +53,7 @@ static int idedisk_cache_proc_show(struct seq_file *m, void *v)
static int idedisk_cache_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, idedisk_cache_proc_show, PDE(inode)->data);
+ return single_open(file, idedisk_cache_proc_show, PDE_DATA(inode));
}
static const struct file_operations idedisk_cache_proc_fops = {
@@ -74,7 +74,7 @@ static int idedisk_capacity_proc_show(struct seq_file *m, void *v)
static int idedisk_capacity_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, idedisk_capacity_proc_show, PDE(inode)->data);
+ return single_open(file, idedisk_capacity_proc_show, PDE_DATA(inode));
}
static const struct file_operations idedisk_capacity_proc_fops = {
@@ -115,7 +115,7 @@ static int idedisk_sv_proc_show(struct seq_file *m, void *v)
static int idedisk_sv_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, idedisk_sv_proc_show, PDE(inode)->data);
+ return single_open(file, idedisk_sv_proc_show, PDE_DATA(inode));
}
static const struct file_operations idedisk_sv_proc_fops = {
@@ -133,7 +133,7 @@ static int idedisk_st_proc_show(struct seq_file *m, void *v)
static int idedisk_st_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, idedisk_st_proc_show, PDE(inode)->data);
+ return single_open(file, idedisk_st_proc_show, PDE_DATA(inode));
}
static const struct file_operations idedisk_st_proc_fops = {
diff --git a/drivers/ide/ide-floppy_proc.c b/drivers/ide/ide-floppy_proc.c
index 1600720..e7a25ea 100644
--- a/drivers/ide/ide-floppy_proc.c
+++ b/drivers/ide/ide-floppy_proc.c
@@ -15,7 +15,7 @@ static int idefloppy_capacity_proc_show(struct seq_file *m, void *v)
static int idefloppy_capacity_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, idefloppy_capacity_proc_show, PDE(inode)->data);
+ return single_open(file, idefloppy_capacity_proc_show, PDE_DATA(inode));
}
static const struct file_operations idefloppy_capacity_proc_fops = {
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index 70ea876..de86631 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -250,7 +250,7 @@ static int ide_gd_unlocked_open(struct block_device *bdev, fmode_t mode)
}
-static int ide_gd_release(struct gendisk *disk, fmode_t mode)
+static void ide_gd_release(struct gendisk *disk, fmode_t mode)
{
struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
ide_drive_t *drive = idkp->drive;
@@ -270,8 +270,6 @@ static int ide_gd_release(struct gendisk *disk, fmode_t mode)
ide_disk_put(idkp);
mutex_unlock(&ide_gd_mutex);
-
- return 0;
}
static int ide_gd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 2abcc47..97c0700 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -58,7 +58,7 @@ static int ide_imodel_proc_show(struct seq_file *m, void *v)
static int ide_imodel_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, ide_imodel_proc_show, PDE(inode)->data);
+ return single_open(file, ide_imodel_proc_show, PDE_DATA(inode));
}
static const struct file_operations ide_imodel_proc_fops = {
@@ -82,7 +82,7 @@ static int ide_mate_proc_show(struct seq_file *m, void *v)
static int ide_mate_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, ide_mate_proc_show, PDE(inode)->data);
+ return single_open(file, ide_mate_proc_show, PDE_DATA(inode));
}
static const struct file_operations ide_mate_proc_fops = {
@@ -103,7 +103,7 @@ static int ide_channel_proc_show(struct seq_file *m, void *v)
static int ide_channel_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, ide_channel_proc_show, PDE(inode)->data);
+ return single_open(file, ide_channel_proc_show, PDE_DATA(inode));
}
static const struct file_operations ide_channel_proc_fops = {
@@ -143,7 +143,7 @@ static int ide_identify_proc_show(struct seq_file *m, void *v)
static int ide_identify_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, ide_identify_proc_show, PDE(inode)->data);
+ return single_open(file, ide_identify_proc_show, PDE_DATA(inode));
}
static const struct file_operations ide_identify_proc_fops = {
@@ -325,7 +325,7 @@ static int ide_settings_proc_show(struct seq_file *m, void *v)
static int ide_settings_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, ide_settings_proc_show, PDE(inode)->data);
+ return single_open(file, ide_settings_proc_show, PDE_DATA(inode));
}
#define MAX_LEN 30
@@ -333,7 +333,7 @@ static int ide_settings_proc_open(struct inode *inode, struct file *file)
static ssize_t ide_settings_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
- ide_drive_t *drive = (ide_drive_t *) PDE(file_inode(file))->data;
+ ide_drive_t *drive = PDE_DATA(file_inode(file));
char name[MAX_LEN + 1];
int for_real = 0, mul_factor, div_factor;
unsigned long n;
@@ -474,7 +474,7 @@ static int ide_geometry_proc_show(struct seq_file *m, void *v)
static int ide_geometry_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, ide_geometry_proc_show, PDE(inode)->data);
+ return single_open(file, ide_geometry_proc_show, PDE_DATA(inode));
}
const struct file_operations ide_geometry_proc_fops = {
@@ -497,7 +497,7 @@ static int ide_dmodel_proc_show(struct seq_file *seq, void *v)
static int ide_dmodel_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, ide_dmodel_proc_show, PDE(inode)->data);
+ return single_open(file, ide_dmodel_proc_show, PDE_DATA(inode));
}
static const struct file_operations ide_dmodel_proc_fops = {
@@ -525,7 +525,7 @@ static int ide_driver_proc_show(struct seq_file *m, void *v)
static int ide_driver_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, ide_driver_proc_show, PDE(inode)->data);
+ return single_open(file, ide_driver_proc_show, PDE_DATA(inode));
}
static int ide_replace_subdriver(ide_drive_t *drive, const char *driver)
@@ -558,7 +558,7 @@ static int ide_replace_subdriver(ide_drive_t *drive, const char *driver)
static ssize_t ide_driver_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
- ide_drive_t *drive = (ide_drive_t *) PDE(file_inode(file))->data;
+ ide_drive_t *drive = PDE_DATA(file_inode(file));
char name[32];
if (!capable(CAP_SYS_ADMIN))
@@ -601,7 +601,7 @@ static int ide_media_proc_show(struct seq_file *m, void *v)
static int ide_media_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, ide_media_proc_show, PDE(inode)->data);
+ return single_open(file, ide_media_proc_show, PDE_DATA(inode));
}
static const struct file_operations ide_media_proc_fops = {
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index ce8237d..c6c574b 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -1847,7 +1847,7 @@ static int idetape_name_proc_show(struct seq_file *m, void *v)
static int idetape_name_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, idetape_name_proc_show, PDE(inode)->data);
+ return single_open(file, idetape_name_proc_show, PDE_DATA(inode));
}
static const struct file_operations idetape_name_proc_fops = {
@@ -1918,15 +1918,13 @@ static int idetape_open(struct block_device *bdev, fmode_t mode)
return 0;
}
-static int idetape_release(struct gendisk *disk, fmode_t mode)
+static void idetape_release(struct gendisk *disk, fmode_t mode)
{
struct ide_tape_obj *tape = ide_drv_g(disk, ide_tape_obj);
mutex_lock(&ide_tape_mutex);
ide_tape_put(tape);
mutex_unlock(&ide_tape_mutex);
-
- return 0;
}
static int idetape_ioctl(struct block_device *bdev, fmode_t mode,
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 0bb99bb..c47c203 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -878,6 +878,8 @@ static void cm_work_handler(struct work_struct *_work)
}
return;
}
+ if (empty)
+ return;
spin_lock_irqsave(&cm_id_priv->lock, flags);
}
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index a8fdd33..22192de 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -348,7 +348,8 @@ static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
struct ib_qp *qp = context;
list_for_each_entry(event->element.qp, &qp->open_list, open_list)
- event->element.qp->event_handler(event, event->element.qp->qp_context);
+ if (event->element.qp->event_handler)
+ event->element.qp->event_handler(event, event->element.qp->qp_context);
}
static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c
index 31f9201..c40088e 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_resource.c
@@ -62,13 +62,13 @@ static int __cxio_init_resource_fifo(struct kfifo *fifo,
kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
if (random) {
j = 0;
- random_bytes = random32();
+ random_bytes = prandom_u32();
for (i = 0; i < RANDOM_SIZE; i++)
rarray[i] = i + skip_low;
for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
if (j >= RANDOM_SIZE) {
j = 0;
- random_bytes = random32();
+ random_bytes = prandom_u32();
}
idx = (random_bytes >> (j * 2)) & 0xF;
kfifo_in(fifo,
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 9c12da0..e87f220 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -559,7 +559,7 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
__be64 *page_list = NULL;
int shift = 0;
u64 total_size;
- int npages;
+ int npages = 0;
int ret;
PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);
diff --git a/drivers/infiniband/hw/cxgb4/id_table.c b/drivers/infiniband/hw/cxgb4/id_table.c
index f95e5df..0161ae6 100644
--- a/drivers/infiniband/hw/cxgb4/id_table.c
+++ b/drivers/infiniband/hw/cxgb4/id_table.c
@@ -54,7 +54,7 @@ u32 c4iw_id_alloc(struct c4iw_id_table *alloc)
if (obj < alloc->max) {
if (alloc->flags & C4IW_ID_TABLE_F_RANDOM)
- alloc->last += random32() % RANDOM_SKIP;
+ alloc->last += prandom_u32() % RANDOM_SKIP;
else
alloc->last = obj + 1;
if (alloc->last >= alloc->max)
@@ -88,7 +88,7 @@ int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
alloc->start = start;
alloc->flags = flags;
if (flags & C4IW_ID_TABLE_F_RANDOM)
- alloc->last = random32() % RANDOM_SKIP;
+ alloc->last = prandom_u32() % RANDOM_SKIP;
else
alloc->last = 0;
alloc->max = num;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 5b059e2..2320404 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -111,6 +111,16 @@ static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
return 0;
}
+static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
+{
+ int ret = -ENOSYS;
+ if (user)
+ ret = alloc_oc_sq(rdev, sq);
+ if (ret)
+ ret = alloc_host_sq(rdev, sq);
+ return ret;
+}
+
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
struct c4iw_dev_ucontext *uctx)
{
@@ -179,15 +189,9 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
goto free_sw_rq;
}
- if (user) {
- if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq))
- goto free_hwaddr;
- } else {
- ret = alloc_host_sq(rdev, &wq->sq);
- if (ret)
- goto free_hwaddr;
- }
-
+ ret = alloc_sq(rdev, &wq->sq, user);
+ if (ret)
+ goto free_hwaddr;
memset(wq->sq.queue, 0, wq->sq.memsize);
dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index aed8afe..6d7f453 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -40,6 +40,7 @@
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/io.h>
+#include <linux/aio.h>
#include <linux/jiffies.h>
#include <linux/cpu.h>
#include <asm/pgtable.h>
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index ea93870..44ea939 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -2187,7 +2187,8 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
if (ret)
goto err_reg;
- if (ipath_verbs_register_sysfs(dev))
+ ret = ipath_verbs_register_sysfs(dev);
+ if (ret)
goto err_class;
enable_timer(dd);
@@ -2327,15 +2328,15 @@ static int ipath_verbs_register_sysfs(struct ib_device *dev)
int i;
int ret;
- for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
- if (device_create_file(&dev->dev,
- ipath_class_attributes[i])) {
- ret = 1;
+ for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i) {
+ ret = device_create_file(&dev->dev,
+ ipath_class_attributes[i]);
+ if (ret)
goto bail;
- }
-
- ret = 0;
-
+ }
+ return 0;
bail:
+ for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
+ device_remove_file(&dev->dev, ipath_class_attributes[i]);
return ret;
}
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 73b3a71..d5e60f4 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -33,6 +33,7 @@
#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
+#include <linux/mlx4/srq.h>
#include <linux/slab.h>
#include "mlx4_ib.h"
@@ -585,6 +586,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
struct mlx4_qp *mqp;
struct mlx4_ib_wq *wq;
struct mlx4_ib_srq *srq;
+ struct mlx4_srq *msrq = NULL;
int is_send;
int is_error;
u32 g_mlpath_rqpn;
@@ -653,6 +655,20 @@ repoll:
wc->qp = &(*cur_qp)->ibqp;
+ if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
+ u32 srq_num;
+ g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
+ srq_num = g_mlpath_rqpn & 0xffffff;
+ /* SRQ is also in the radix tree */
+ msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
+ srq_num);
+ if (unlikely(!msrq)) {
+ pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
+ cq->mcq.cqn, srq_num);
+ return -EINVAL;
+ }
+ }
+
if (is_send) {
wq = &(*cur_qp)->sq;
if (!(*cur_qp)->sq_signal_bits) {
@@ -666,6 +682,11 @@ repoll:
wqe_ctr = be16_to_cpu(cqe->wqe_index);
wc->wr_id = srq->wrid[wqe_ctr];
mlx4_ib_free_srq_wqe(srq, wqe_ctr);
+ } else if (msrq) {
+ srq = to_mibsrq(msrq);
+ wqe_ctr = be16_to_cpu(cqe->wqe_index);
+ wc->wr_id = srq->wrid[wqe_ctr];
+ mlx4_ib_free_srq_wqe(srq, wqe_ctr);
} else {
wq = &(*cur_qp)->rq;
tail = wq->tail & (wq->wqe_cnt - 1);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 934792c..4d599ce 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -93,7 +93,7 @@ static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
__be64 mlx4_ib_gen_node_guid(void)
{
#define NODE_GUID_HI ((u64) (((u64)IB_OPENIB_OUI) << 40))
- return cpu_to_be64(NODE_GUID_HI | random32());
+ return cpu_to_be64(NODE_GUID_HI | prandom_u32());
}
__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 35cced2..4f10af2 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1292,6 +1292,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
context->xrcd = cpu_to_be32((u32) qp->xrcdn);
+ if (ibqp->qp_type == IB_QPT_RAW_PACKET)
+ context->param3 |= cpu_to_be32(1 << 30);
}
if (qp->ibqp.uobject)
@@ -1458,6 +1460,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
}
+ if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
+ context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
+ MLX4_IB_LINK_TYPE_ETH;
+
if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
sqd_event = 1;
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 4f7aa30..b56c942 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -39,7 +39,7 @@
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/io.h>
-#include <linux/uio.h>
+#include <linux/aio.h>
#include <linux/jiffies.h>
#include <asm/pgtable.h>
#include <linux/delay.h>
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 034cc82..3c8e4e3 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -808,10 +808,14 @@ int qib_verbs_register_sysfs(struct qib_devdata *dd)
for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) {
ret = device_create_file(&dev->dev, qib_attributes[i]);
if (ret)
- return ret;
+ goto bail;
}
return 0;
+bail:
+ for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i)
+ device_remove_file(&dev->dev, qib_attributes[i]);
+ return ret;
}
/*
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 7c0ab16..904c384 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -2234,7 +2234,8 @@ int qib_register_ib_device(struct qib_devdata *dd)
if (ret)
goto err_agents;
- if (qib_verbs_register_sysfs(dd))
+ ret = qib_verbs_register_sysfs(dd);
+ if (ret)
goto err_class;
goto bail;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 1ef880d..3eceb61 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -460,7 +460,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
goto err_qp;
}
- psn = random32() & 0xffffff;
+ psn = prandom_u32() & 0xffffff;
ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
if (ret)
goto err_modify;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 554b906..b6e049a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -830,7 +830,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
*/
memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
- return 0;
+ return sizeof *header;
}
static void ipoib_set_mcast_list(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 0ab8c9c..f19b099 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -82,10 +82,10 @@ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
int iser_debug_level = 0;
-MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover "
- "v" DRV_VER " (" DRV_DATE ")");
+MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Alex Nezhinsky, Dan Bar Dov, Or Gerlitz");
+MODULE_VERSION(DRV_VER);
module_param_named(debug_level, iser_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)");
@@ -370,8 +370,8 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
/* binds the iSER connection retrieved from the previously
* connected ep_handle to the iSCSI layer connection. exchanges
* connection pointers */
- iser_err("binding iscsi/iser conn %p %p to ib_conn %p\n",
- conn, conn->dd_data, ib_conn);
+ iser_info("binding iscsi/iser conn %p %p to ib_conn %p\n",
+ conn, conn->dd_data, ib_conn);
iser_conn = conn->dd_data;
ib_conn->iser_conn = iser_conn;
iser_conn->ib_conn = ib_conn;
@@ -475,28 +475,28 @@ iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
case ISCSI_PARAM_HDRDGST_EN:
sscanf(buf, "%d", &value);
if (value) {
- printk(KERN_ERR "DataDigest wasn't negotiated to None");
+ iser_err("DataDigest wasn't negotiated to None");
return -EPROTO;
}
break;
case ISCSI_PARAM_DATADGST_EN:
sscanf(buf, "%d", &value);
if (value) {
- printk(KERN_ERR "DataDigest wasn't negotiated to None");
+ iser_err("DataDigest wasn't negotiated to None");
return -EPROTO;
}
break;
case ISCSI_PARAM_IFMARKER_EN:
sscanf(buf, "%d", &value);
if (value) {
- printk(KERN_ERR "IFMarker wasn't negotiated to No");
+ iser_err("IFMarker wasn't negotiated to No");
return -EPROTO;
}
break;
case ISCSI_PARAM_OFMARKER_EN:
sscanf(buf, "%d", &value);
if (value) {
- printk(KERN_ERR "OFMarker wasn't negotiated to No");
+ iser_err("OFMarker wasn't negotiated to No");
return -EPROTO;
}
break;
@@ -596,7 +596,7 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
ib_conn->state == ISER_CONN_DOWN))
rc = -1;
- iser_err("ib conn %p rc = %d\n", ib_conn, rc);
+ iser_info("ib conn %p rc = %d\n", ib_conn, rc);
if (rc > 0)
return 1; /* success, this is the equivalent of POLLOUT */
@@ -623,7 +623,7 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);
- iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
+ iser_info("ib conn %p state %d\n", ib_conn, ib_conn->state);
iser_conn_terminate(ib_conn);
}
@@ -682,7 +682,7 @@ static umode_t iser_attr_is_visible(int param_type, int param)
static struct scsi_host_template iscsi_iser_sht = {
.module = THIS_MODULE,
- .name = "iSCSI Initiator over iSER, v." DRV_VER,
+ .name = "iSCSI Initiator over iSER",
.queuecommand = iscsi_queuecommand,
.change_queue_depth = iscsi_change_queue_depth,
.sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
@@ -740,7 +740,7 @@ static int __init iser_init(void)
iser_dbg("Starting iSER datamover...\n");
if (iscsi_max_lun < 1) {
- printk(KERN_ERR "Invalid max_lun value of %u\n", iscsi_max_lun);
+ iser_err("Invalid max_lun value of %u\n", iscsi_max_lun);
return -EINVAL;
}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 5babdb3..06f578c 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -42,6 +42,7 @@
#include <linux/types.h>
#include <linux/net.h>
+#include <linux/printk.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
@@ -65,20 +66,26 @@
#define DRV_NAME "iser"
#define PFX DRV_NAME ": "
-#define DRV_VER "0.1"
-#define DRV_DATE "May 7th, 2006"
+#define DRV_VER "1.1"
#define iser_dbg(fmt, arg...) \
do { \
- if (iser_debug_level > 1) \
+ if (iser_debug_level > 2) \
printk(KERN_DEBUG PFX "%s:" fmt,\
__func__ , ## arg); \
} while (0)
#define iser_warn(fmt, arg...) \
do { \
+ if (iser_debug_level > 1) \
+ pr_warn(PFX "%s:" fmt, \
+ __func__ , ## arg); \
+ } while (0)
+
+#define iser_info(fmt, arg...) \
+ do { \
if (iser_debug_level > 0) \
- printk(KERN_DEBUG PFX "%s:" fmt,\
+ pr_info(PFX "%s:" fmt, \
__func__ , ## arg); \
} while (0)
@@ -133,6 +140,15 @@ struct iser_hdr {
__be64 read_va;
} __attribute__((packed));
+
+#define ISER_ZBVA_NOT_SUPPORTED 0x80
+#define ISER_SEND_W_INV_NOT_SUPPORTED 0x40
+
+struct iser_cm_hdr {
+ u8 flags;
+ u8 rsvd[3];
+} __packed;
+
/* Constant PDU lengths calculations */
#define ISER_HEADERS_LEN (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index be1edb0..68ebb7f 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -416,8 +416,9 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
for (i=0 ; i<ib_conn->page_vec->length ; i++)
iser_err("page_vec[%d] = 0x%llx\n", i,
(unsigned long long) ib_conn->page_vec->pages[i]);
- return err;
}
+ if (err)
+ return err;
}
return 0;
}
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 4debadc..5278916 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -74,8 +74,9 @@ static int iser_create_device_ib_res(struct iser_device *device)
struct iser_cq_desc *cq_desc;
device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors);
- iser_err("using %d CQs, device %s supports %d vectors\n", device->cqs_used,
- device->ib_device->name, device->ib_device->num_comp_vectors);
+ iser_info("using %d CQs, device %s supports %d vectors\n",
+ device->cqs_used, device->ib_device->name,
+ device->ib_device->num_comp_vectors);
device->cq_desc = kmalloc(sizeof(struct iser_cq_desc) * device->cqs_used,
GFP_KERNEL);
@@ -262,7 +263,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
min_index = index;
device->cq_active_qps[min_index]++;
mutex_unlock(&ig.connlist_mutex);
- iser_err("cq index %d used for ib_conn %p\n", min_index, ib_conn);
+ iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);
init_attr.event_handler = iser_qp_event_callback;
init_attr.qp_context = (void *)ib_conn;
@@ -280,9 +281,9 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
goto out_err;
ib_conn->qp = ib_conn->cma_id->qp;
- iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
- ib_conn, ib_conn->cma_id,
- ib_conn->fmr_pool, ib_conn->cma_id->qp);
+ iser_info("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
+ ib_conn, ib_conn->cma_id,
+ ib_conn->fmr_pool, ib_conn->cma_id->qp);
return ret;
out_err:
@@ -299,9 +300,9 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
int cq_index;
BUG_ON(ib_conn == NULL);
- iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
- ib_conn, ib_conn->cma_id,
- ib_conn->fmr_pool, ib_conn->qp);
+ iser_info("freeing conn %p cma_id %p fmr pool %p qp %p\n",
+ ib_conn, ib_conn->cma_id,
+ ib_conn->fmr_pool, ib_conn->qp);
/* qp is created only once both addr & route are resolved */
if (ib_conn->fmr_pool != NULL)
@@ -379,7 +380,7 @@ static void iser_device_try_release(struct iser_device *device)
{
mutex_lock(&ig.device_list_mutex);
device->refcount--;
- iser_err("device %p refcount %d\n",device,device->refcount);
+ iser_info("device %p refcount %d\n", device, device->refcount);
if (!device->refcount) {
iser_free_device_ib_res(device);
list_del(&device->ig_list);
@@ -498,6 +499,7 @@ static int iser_route_handler(struct rdma_cm_id *cma_id)
{
struct rdma_conn_param conn_param;
int ret;
+ struct iser_cm_hdr req_hdr;
ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context);
if (ret)
@@ -509,6 +511,12 @@ static int iser_route_handler(struct rdma_cm_id *cma_id)
conn_param.retry_count = 7;
conn_param.rnr_retry_count = 6;
+ memset(&req_hdr, 0, sizeof(req_hdr));
+ req_hdr.flags = (ISER_ZBVA_NOT_SUPPORTED |
+ ISER_SEND_W_INV_NOT_SUPPORTED);
+ conn_param.private_data = (void *)&req_hdr;
+ conn_param.private_data_len = sizeof(struct iser_cm_hdr);
+
ret = rdma_connect(cma_id, &conn_param);
if (ret) {
iser_err("failure connecting: %d\n", ret);
@@ -558,8 +566,8 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
{
int ret = 0;
- iser_err("event %d status %d conn %p id %p\n",
- event->event, event->status, cma_id->context, cma_id);
+ iser_info("event %d status %d conn %p id %p\n",
+ event->event, event->status, cma_id->context, cma_id);
switch (event->event) {
case RDMA_CM_EVENT_ADDR_RESOLVED:
@@ -619,8 +627,8 @@ int iser_connect(struct iser_conn *ib_conn,
/* the device is known only --after-- address resolution */
ib_conn->device = NULL;
- iser_err("connecting to: %pI4, port 0x%x\n",
- &dst_addr->sin_addr, dst_addr->sin_port);
+ iser_info("connecting to: %pI4, port 0x%x\n",
+ &dst_addr->sin_addr, dst_addr->sin_port);
ib_conn->state = ISER_CONN_PENDING;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index c09d41b..b08ca7a 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1374,7 +1374,7 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
break;
default:
- WARN_ON("ERROR: unexpected command state");
+ WARN(1, "Unexpected command state (%d)", state);
break;
}
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index ac05006..6a195d5 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -628,4 +628,16 @@ config KEYBOARD_W90P910
To compile this driver as a module, choose M here: the
module will be called w90p910_keypad.
+config KEYBOARD_CROS_EC
+ tristate "ChromeOS EC keyboard"
+ select INPUT_MATRIXKMAP
+ depends on MFD_CROS_EC
+ help
+ Say Y here to enable the matrix keyboard used by ChromeOS devices
+ and implemented on the ChromeOS EC. You must enable one bus option
+ (MFD_CROS_EC_I2C or MFD_CROS_EC_SPI) to use this.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_ec_keyb.
+
endif
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 49b1645..0c43e8c 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o
obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o
obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
+obj-$(CONFIG_KEYBOARD_CROS_EC) += cros_ec_keyb.o
obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
obj-$(CONFIG_KEYBOARD_GOLDFISH_EVENTS) += goldfish_events.o
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
new file mode 100644
index 0000000..49557f2
--- /dev/null
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -0,0 +1,334 @@
+/*
+ * ChromeOS EC keyboard driver
+ *
+ * Copyright (C) 2012 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This driver uses the Chrome OS EC byte-level message-based protocol for
+ * communicating the keyboard state (which keys are pressed) from a keyboard EC
+ * to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing,
+ * but everything else (including deghosting) is done here. The main
+ * motivation for this is to keep the EC firmware as simple as possible, since
+ * it cannot be easily upgraded and EC flash/IRAM space is relatively
+ * expensive.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+
+/*
+ * @rows: Number of rows in the keypad
+ * @cols: Number of columns in the keypad
+ * @row_shift: log2 of number of rows, rounded up
+ * @keymap_data: Matrix keymap data used to convert to keyscan values
+ * @ghost_filter: true to enable the matrix key-ghosting filter
+ * @dev: Device pointer
+ * @idev: Input device
+ * @ec: Top level ChromeOS device to use to talk to EC
+ * @notifier: interrupt event notifier for transport devices
+ */
+struct cros_ec_keyb {
+ unsigned int rows;
+ unsigned int cols;
+ int row_shift;
+ const struct matrix_keymap_data *keymap_data;
+ bool ghost_filter;
+
+ struct device *dev;
+ struct input_dev *idev;
+ struct cros_ec_device *ec;
+ struct notifier_block notifier;
+};
+
+
+static bool cros_ec_keyb_row_has_ghosting(struct cros_ec_keyb *ckdev,
+ uint8_t *buf, int row)
+{
+ int pressed_in_row = 0;
+ int row_has_teeth = 0;
+ int col, mask;
+
+ mask = 1 << row;
+ for (col = 0; col < ckdev->cols; col++) {
+ if (buf[col] & mask) {
+ pressed_in_row++;
+ row_has_teeth |= buf[col] & ~mask;
+ if (pressed_in_row > 1 && row_has_teeth) {
+ /* ghosting */
+ dev_dbg(ckdev->dev,
+ "ghost found at: r%d c%d, pressed %d, teeth 0x%x\n",
+ row, col, pressed_in_row,
+ row_has_teeth);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+/*
+ * Returns true when there is at least one combination of pressed keys that
+ * results in ghosting.
+ */
+static bool cros_ec_keyb_has_ghosting(struct cros_ec_keyb *ckdev, uint8_t *buf)
+{
+ int row;
+
+ /*
+ * Ghosting happens if for any pressed key X there are other keys
+ * pressed both in the same row and column of X as, for instance,
+ * in the following diagram:
+ *
+ * . . Y . g .
+ * . . . . . .
+ * . . . . . .
+ * . . X . Z .
+ *
+ * In this case only X, Y, and Z are pressed, but g appears to be
+ * pressed too (see Wikipedia).
+ *
+ * We can detect ghosting in a single pass (*) over the keyboard state
+ * by maintaining two arrays. pressed_in_row counts how many pressed
+ * keys we have found in a row. row_has_teeth is true if any of the
+ * pressed keys for this row has other pressed keys in its column. If
+ * at any point of the scan we find that a row has multiple pressed
+ * keys, and at least one of them is at the intersection with a column
+ * with multiple pressed keys, we're sure there is ghosting.
+ * Conversely, if there is ghosting, we will detect such situation for
+ * at least one key during the pass.
+ *
+ * (*) This looks linear in the number of keys, but it's not. We can
+ * cheat because the number of rows is small.
+ */
+ for (row = 0; row < ckdev->rows; row++)
+ if (cros_ec_keyb_row_has_ghosting(ckdev, buf, row))
+ return true;
+
+ return false;
+}
+
+/*
+ * Compares the new keyboard state to the old one and produces key
+ * press/release events accordingly. The keyboard state is 13 bytes (one byte
+ * per column)
+ */
+static void cros_ec_keyb_process(struct cros_ec_keyb *ckdev,
+ uint8_t *kb_state, int len)
+{
+ struct input_dev *idev = ckdev->idev;
+ int col, row;
+ int new_state;
+ int num_cols;
+
+ num_cols = len;
+
+ if (ckdev->ghost_filter && cros_ec_keyb_has_ghosting(ckdev, kb_state)) {
+ /*
+ * Simple-minded solution: ignore this state. The obvious
+ * improvement is to only ignore changes to keys involved in
+ * the ghosting, but process the other changes.
+ */
+ dev_dbg(ckdev->dev, "ghosting found\n");
+ return;
+ }
+
+ for (col = 0; col < ckdev->cols; col++) {
+ for (row = 0; row < ckdev->rows; row++) {
+ int pos = MATRIX_SCAN_CODE(row, col, ckdev->row_shift);
+ const unsigned short *keycodes = idev->keycode;
+ int code;
+
+ code = keycodes[pos];
+ new_state = kb_state[col] & (1 << row);
+ if (!!new_state != test_bit(code, idev->key)) {
+ dev_dbg(ckdev->dev,
+ "changed: [r%d c%d]: byte %02x\n",
+ row, col, new_state);
+
+ input_report_key(idev, code, new_state);
+ }
+ }
+ }
+ input_sync(ckdev->idev);
+}
+
+static int cros_ec_keyb_open(struct input_dev *dev)
+{
+ struct cros_ec_keyb *ckdev = input_get_drvdata(dev);
+
+ return blocking_notifier_chain_register(&ckdev->ec->event_notifier,
+ &ckdev->notifier);
+}
+
+static void cros_ec_keyb_close(struct input_dev *dev)
+{
+ struct cros_ec_keyb *ckdev = input_get_drvdata(dev);
+
+ blocking_notifier_chain_unregister(&ckdev->ec->event_notifier,
+ &ckdev->notifier);
+}
+
+static int cros_ec_keyb_get_state(struct cros_ec_keyb *ckdev, uint8_t *kb_state)
+{
+ return ckdev->ec->command_recv(ckdev->ec, EC_CMD_MKBP_STATE,
+ kb_state, ckdev->cols);
+}
+
+static int cros_ec_keyb_work(struct notifier_block *nb,
+ unsigned long state, void *_notify)
+{
+ int ret;
+ struct cros_ec_keyb *ckdev = container_of(nb, struct cros_ec_keyb,
+ notifier);
+ uint8_t kb_state[ckdev->cols];
+
+ ret = cros_ec_keyb_get_state(ckdev, kb_state);
+ if (ret >= 0)
+ cros_ec_keyb_process(ckdev, kb_state, ret);
+
+ return NOTIFY_DONE;
+}
+
+/* Clear any keys in the buffer */
+static void cros_ec_keyb_clear_keyboard(struct cros_ec_keyb *ckdev)
+{
+ uint8_t old_state[ckdev->cols];
+ uint8_t new_state[ckdev->cols];
+ unsigned long duration;
+ int i, ret;
+
+ /*
+ * Keep reading until we see that the scan state does not change.
+ * That indicates that we are done.
+ *
+ * Assume that the EC keyscan buffer is at most 32 deep.
+ */
+ duration = jiffies;
+ ret = cros_ec_keyb_get_state(ckdev, new_state);
+ for (i = 1; !ret && i < 32; i++) {
+ memcpy(old_state, new_state, sizeof(old_state));
+ ret = cros_ec_keyb_get_state(ckdev, new_state);
+ if (0 == memcmp(old_state, new_state, sizeof(old_state)))
+ break;
+ }
+ duration = jiffies - duration;
+ dev_info(ckdev->dev, "Discarded %d keyscan(s) in %dus\n", i,
+ jiffies_to_usecs(duration));
+}
+
+static int cros_ec_keyb_probe(struct platform_device *pdev)
+{
+ struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = ec->dev;
+ struct cros_ec_keyb *ckdev;
+ struct input_dev *idev;
+ struct device_node *np;
+ int err;
+
+ np = pdev->dev.of_node;
+ if (!np)
+ return -ENODEV;
+
+ ckdev = devm_kzalloc(&pdev->dev, sizeof(*ckdev), GFP_KERNEL);
+ if (!ckdev)
+ return -ENOMEM;
+ err = matrix_keypad_parse_of_params(&pdev->dev, &ckdev->rows,
+ &ckdev->cols);
+ if (err)
+ return err;
+
+ idev = devm_input_allocate_device(&pdev->dev);
+ if (!idev)
+ return -ENOMEM;
+
+ ckdev->ec = ec;
+ ckdev->notifier.notifier_call = cros_ec_keyb_work;
+ ckdev->dev = dev;
+ dev_set_drvdata(&pdev->dev, ckdev);
+
+ idev->name = ec->ec_name;
+ idev->phys = ec->phys_name;
+ __set_bit(EV_REP, idev->evbit);
+
+ idev->id.bustype = BUS_VIRTUAL;
+ idev->id.version = 1;
+ idev->id.product = 0;
+ idev->dev.parent = &pdev->dev;
+ idev->open = cros_ec_keyb_open;
+ idev->close = cros_ec_keyb_close;
+
+ ckdev->ghost_filter = of_property_read_bool(np,
+ "google,needs-ghost-filter");
+
+ err = matrix_keypad_build_keymap(NULL, NULL, ckdev->rows, ckdev->cols,
+ NULL, idev);
+ if (err) {
+ dev_err(dev, "cannot build key matrix\n");
+ return err;
+ }
+
+ ckdev->row_shift = get_count_order(ckdev->cols);
+
+ input_set_capability(idev, EV_MSC, MSC_SCAN);
+ input_set_drvdata(idev, ckdev);
+ ckdev->idev = idev;
+ err = input_register_device(ckdev->idev);
+ if (err) {
+ dev_err(dev, "cannot register input device\n");
+ return err;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cros_ec_keyb_resume(struct device *dev)
+{
+ struct cros_ec_keyb *ckdev = dev_get_drvdata(dev);
+
+ /*
+ * When the EC is not a wake source, then it could not have caused the
+ * resume, so we clear the EC's key scan buffer. If the EC was a
+ * wake source (e.g. the lid is open and the user might press a key to
+ * wake) then the key scan buffer should be preserved.
+ */
+ if (ckdev->ec->was_wake_device)
+ cros_ec_keyb_clear_keyboard(ckdev);
+
+ return 0;
+}
+
+#endif
+
+static SIMPLE_DEV_PM_OPS(cros_ec_keyb_pm_ops, NULL, cros_ec_keyb_resume);
+
+static struct platform_driver cros_ec_keyb_driver = {
+ .probe = cros_ec_keyb_probe,
+ .driver = {
+ .name = "cros-ec-keyb",
+ .pm = &cros_ec_keyb_pm_ops,
+ },
+};
+
+module_platform_driver(cros_ec_keyb_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ChromeOS EC keyboard driver");
+MODULE_ALIAS("platform:cros-ec-keyb");
diff --git a/drivers/input/keyboard/lpc32xx-keys.c b/drivers/input/keyboard/lpc32xx-keys.c
index 1b8add6..4218143 100644
--- a/drivers/input/keyboard/lpc32xx-keys.c
+++ b/drivers/input/keyboard/lpc32xx-keys.c
@@ -144,12 +144,13 @@ static int lpc32xx_parse_dt(struct device *dev,
{
struct device_node *np = dev->of_node;
u32 rows = 0, columns = 0;
+ int err;
- of_property_read_u32(np, "keypad,num-rows", &rows);
- of_property_read_u32(np, "keypad,num-columns", &columns);
- if (!rows || rows != columns) {
- dev_err(dev,
- "rows and columns must be specified and be equal!\n");
+ err = matrix_keypad_parse_of_params(dev, &rows, &columns);
+ if (err)
+ return err;
+ if (rows != columns) {
+ dev_err(dev, "rows and columns must be equal!\n");
return -EINVAL;
}
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index e25b022..1b28909 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -215,18 +215,12 @@ static int omap4_keypad_parse_dt(struct device *dev,
struct omap4_keypad *keypad_data)
{
struct device_node *np = dev->of_node;
+ int err;
- if (!np) {
- dev_err(dev, "missing DT data");
- return -EINVAL;
- }
-
- of_property_read_u32(np, "keypad,num-rows", &keypad_data->rows);
- of_property_read_u32(np, "keypad,num-columns", &keypad_data->cols);
- if (!keypad_data->rows || !keypad_data->cols) {
- dev_err(dev, "number of keypad rows/columns not specified\n");
- return -EINVAL;
- }
+ err = matrix_keypad_parse_of_params(dev, &keypad_data->rows,
+ &keypad_data->cols);
+ if (err)
+ return err;
if (of_get_property(np, "linux,input-no-autorepeat", NULL))
keypad_data->no_autorepeat = true;
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index a34cc67..55c1530 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -288,8 +288,11 @@ static int tca8418_keypad_probe(struct i2c_client *client,
irq_is_gpio = pdata->irq_is_gpio;
} else {
struct device_node *np = dev->of_node;
- of_property_read_u32(np, "keypad,num-rows", &rows);
- of_property_read_u32(np, "keypad,num-columns", &cols);
+ int err;
+
+ err = matrix_keypad_parse_of_params(dev, &rows, &cols);
+ if (err)
+ return err;
rep = of_property_read_bool(np, "keypad,autorepeat");
}
diff --git a/drivers/input/matrix-keymap.c b/drivers/input/matrix-keymap.c
index 3ae496e..08b61f5 100644
--- a/drivers/input/matrix-keymap.c
+++ b/drivers/input/matrix-keymap.c
@@ -50,6 +50,26 @@ static bool matrix_keypad_map_key(struct input_dev *input_dev,
}
#ifdef CONFIG_OF
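+/*
+ * matrix_keypad_parse_of_params - read the common keypad dimensions from DT
+ *
+ * Reads the "keypad,num-rows" and "keypad,num-columns" properties of the
+ * device node and returns -EINVAL when either is missing or zero.
+ */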
+int matrix_keypad_parse_of_params(struct device *dev,
+ unsigned int *rows, unsigned int *cols)
+{
+ struct device_node *np = dev->of_node;
+
+ if (!np) {
+ dev_err(dev, "missing DT data");
+ return -EINVAL;
+ }
+ of_property_read_u32(np, "keypad,num-rows", rows);
+ of_property_read_u32(np, "keypad,num-columns", cols);
+ if (!*rows || !*cols) {
+ dev_err(dev, "number of keypad rows/columns not specified\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(matrix_keypad_parse_of_params);
+
static int matrix_keypad_parse_of_keymap(const char *propname,
unsigned int rows, unsigned int cols,
struct input_dev *input_dev)
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index 2e3334b..86b8228 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -41,6 +41,7 @@
#include <linux/time.h>
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/rtc.h>
#include <linux/mutex.h>
@@ -74,9 +75,6 @@ static unsigned int hp_sdc_rtc_poll(struct file *file, poll_table *wait);
static int hp_sdc_rtc_open(struct inode *inode, struct file *file);
static int hp_sdc_rtc_fasync (int fd, struct file *filp, int on);
-static int hp_sdc_rtc_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data);
-
static void hp_sdc_rtc_isr (int irq, void *dev_id,
uint8_t status, uint8_t data)
{
@@ -427,22 +425,19 @@ static int hp_sdc_rtc_fasync (int fd, struct file *filp, int on)
return fasync_helper (fd, filp, on, &hp_sdc_rtc_async_queue);
}
-static int hp_sdc_rtc_proc_output (char *buf)
+static int hp_sdc_rtc_proc_show(struct seq_file *m, void *v)
{
#define YN(bit) ("no")
#define NY(bit) ("yes")
- char *p;
struct rtc_time tm;
struct timeval tv;
memset(&tm, 0, sizeof(struct rtc_time));
- p = buf;
-
if (hp_sdc_rtc_read_bbrtc(&tm)) {
- p += sprintf(p, "BBRTC\t\t: READ FAILED!\n");
+ seq_puts(m, "BBRTC\t\t: READ FAILED!\n");
} else {
- p += sprintf(p,
+ seq_printf(m,
"rtc_time\t: %02d:%02d:%02d\n"
"rtc_date\t: %04d-%02d-%02d\n"
"rtc_epoch\t: %04lu\n",
@@ -452,41 +447,41 @@ static int hp_sdc_rtc_proc_output (char *buf)
}
if (hp_sdc_rtc_read_rt(&tv)) {
- p += sprintf(p, "i8042 rtc\t: READ FAILED!\n");
+ seq_puts(m, "i8042 rtc\t: READ FAILED!\n");
} else {
- p += sprintf(p, "i8042 rtc\t: %ld.%02d seconds\n",
+ seq_printf(m, "i8042 rtc\t: %ld.%02d seconds\n",
tv.tv_sec, (int)tv.tv_usec/1000);
}
if (hp_sdc_rtc_read_fhs(&tv)) {
- p += sprintf(p, "handshake\t: READ FAILED!\n");
+ seq_puts(m, "handshake\t: READ FAILED!\n");
} else {
- p += sprintf(p, "handshake\t: %ld.%02d seconds\n",
+ seq_printf(m, "handshake\t: %ld.%02d seconds\n",
tv.tv_sec, (int)tv.tv_usec/1000);
}
if (hp_sdc_rtc_read_mt(&tv)) {
- p += sprintf(p, "alarm\t\t: READ FAILED!\n");
+ seq_puts(m, "alarm\t\t: READ FAILED!\n");
} else {
- p += sprintf(p, "alarm\t\t: %ld.%02d seconds\n",
+ seq_printf(m, "alarm\t\t: %ld.%02d seconds\n",
tv.tv_sec, (int)tv.tv_usec/1000);
}
if (hp_sdc_rtc_read_dt(&tv)) {
- p += sprintf(p, "delay\t\t: READ FAILED!\n");
+ seq_puts(m, "delay\t\t: READ FAILED!\n");
} else {
- p += sprintf(p, "delay\t\t: %ld.%02d seconds\n",
+ seq_printf(m, "delay\t\t: %ld.%02d seconds\n",
tv.tv_sec, (int)tv.tv_usec/1000);
}
if (hp_sdc_rtc_read_ct(&tv)) {
- p += sprintf(p, "periodic\t: READ FAILED!\n");
+ seq_puts(m, "periodic\t: READ FAILED!\n");
} else {
- p += sprintf(p, "periodic\t: %ld.%02d seconds\n",
+ seq_printf(m, "periodic\t: %ld.%02d seconds\n",
tv.tv_sec, (int)tv.tv_usec/1000);
}
- p += sprintf(p,
+ seq_printf(m,
"DST_enable\t: %s\n"
"BCD\t\t: %s\n"
"24hr\t\t: %s\n"
@@ -506,23 +501,23 @@ static int hp_sdc_rtc_proc_output (char *buf)
1UL,
1 ? "okay" : "dead");
- return p - buf;
+ return 0;
#undef YN
#undef NY
}
-static int hp_sdc_rtc_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int hp_sdc_rtc_proc_open(struct inode *inode, struct file *file)
{
- int len = hp_sdc_rtc_proc_output (page);
- if (len <= off+count) *eof = 1;
- *start = page + off;
- len -= off;
- if (len>count) len = count;
- if (len<0) len = 0;
- return len;
+ return single_open(file, hp_sdc_rtc_proc_show, NULL);
}
+static const struct file_operations hp_sdc_rtc_proc_fops = {
+ .open = hp_sdc_rtc_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int hp_sdc_rtc_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
@@ -715,8 +710,7 @@ static int __init hp_sdc_rtc_init(void)
if (misc_register(&hp_sdc_rtc_dev) != 0)
printk(KERN_INFO "Could not register misc. dev for i8042 rtc\n");
- create_proc_read_entry ("driver/rtc", 0, NULL,
- hp_sdc_rtc_read_proc, NULL);
+ proc_create("driver/rtc", 0, NULL, &hp_sdc_rtc_proc_fops);
printk(KERN_INFO "HP i8042 SDC + MSM-58321 RTC support loaded "
"(RTC v " RTC_VERSION ")\n");
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 8301837..21d02b0 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -46,6 +46,7 @@
#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"
+#include "pci.h"
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
@@ -263,12 +264,6 @@ static bool check_device(struct device *dev)
return true;
}
-static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
-{
- pci_dev_put(*from);
- *from = to;
-}
-
static struct pci_bus *find_hosted_bus(struct pci_bus *bus)
{
while (!bus->self) {
@@ -701,9 +696,6 @@ retry:
static void iommu_poll_events(struct amd_iommu *iommu)
{
u32 head, tail;
- unsigned long flags;
-
- spin_lock_irqsave(&iommu->lock, flags);
head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
@@ -714,8 +706,6 @@ static void iommu_poll_events(struct amd_iommu *iommu)
}
writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
}
static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
@@ -740,17 +730,11 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
static void iommu_poll_ppr_log(struct amd_iommu *iommu)
{
- unsigned long flags;
u32 head, tail;
if (iommu->ppr_log == NULL)
return;
- /* enable ppr interrupts again */
- writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
-
- spin_lock_irqsave(&iommu->lock, flags);
-
head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
@@ -786,34 +770,50 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
- /*
- * Release iommu->lock because ppr-handling might need to
- * re-acquire it
- */
- spin_unlock_irqrestore(&iommu->lock, flags);
-
/* Handle PPR entry */
iommu_handle_ppr_entry(iommu, entry);
- spin_lock_irqsave(&iommu->lock, flags);
-
/* Refresh ring-buffer information */
head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
}
-
- spin_unlock_irqrestore(&iommu->lock, flags);
}
irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
- struct amd_iommu *iommu;
+ struct amd_iommu *iommu = (struct amd_iommu *) data;
+ u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
- for_each_iommu(iommu) {
- iommu_poll_events(iommu);
- iommu_poll_ppr_log(iommu);
- }
+ while (status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK)) {
+ /* Enable EVT and PPR interrupts again */
+ writel((MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK),
+ iommu->mmio_base + MMIO_STATUS_OFFSET);
+ if (status & MMIO_STATUS_EVT_INT_MASK) {
+ pr_devel("AMD-Vi: Processing IOMMU Event Log\n");
+ iommu_poll_events(iommu);
+ }
+
+ if (status & MMIO_STATUS_PPR_INT_MASK) {
+ pr_devel("AMD-Vi: Processing IOMMU PPR Log\n");
+ iommu_poll_ppr_log(iommu);
+ }
+
+ /*
+ * Hardware bug: ERBT1312
+ * When the interrupt is re-enabled (by writing 1 to clear the
+ * bit), the hardware might also set the interrupt bit in the
+ * event status register again. In that case the bit stays set
+ * and disables subsequent interrupts.
+ *
+ * Workaround: the IOMMU driver reads back the status register
+ * and checks whether the interrupt bits are cleared. If not, it
+ * goes through the interrupt handler again and re-clears the bits.
+ */
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ }
return IRQ_HANDLED;
}
@@ -2839,24 +2839,6 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
}
/*
- * This is a special map_sg function which is used if we should map a
- * device which is not handled by an AMD IOMMU in the system.
- */
-static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
- int nelems, int dir)
-{
- struct scatterlist *s;
- int i;
-
- for_each_sg(sglist, s, nelems, i) {
- s->dma_address = (dma_addr_t)sg_phys(s);
- s->dma_length = s->length;
- }
-
- return nelems;
-}
-
-/*
* The exported map_sg function for dma_ops (handles scatter-gather
* lists).
*/
@@ -2875,9 +2857,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
INC_STATS_COUNTER(cnt_map_sg);
domain = get_domain(dev);
- if (PTR_ERR(domain) == -EINVAL)
- return map_sg_no_iommu(dev, sglist, nelems, dir);
- else if (IS_ERR(domain))
+ if (IS_ERR(domain))
return 0;
dma_mask = *dev->dma_mask;
@@ -3410,7 +3390,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
}
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
- unsigned long iova)
+ dma_addr_t iova)
{
struct protection_domain *domain = dom->priv;
unsigned long offset_mask;
@@ -3947,6 +3927,9 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
if (!table)
goto out;
+ /* Initialize table spin-lock */
+ spin_lock_init(&table->lock);
+
if (ioapic)
/* Keep the first 32 indexes free for IOAPIC interrupts */
table->min_index = 32;
@@ -4007,7 +3990,7 @@ static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
c = 0;
if (c == count) {
- struct irq_2_iommu *irte_info;
+ struct irq_2_irte *irte_info;
for (; c != 0; --c)
table->table[index - c + 1] = IRTE_ALLOCATED;
@@ -4015,9 +3998,9 @@ static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
index -= count - 1;
cfg->remapped = 1;
- irte_info = &cfg->irq_2_iommu;
- irte_info->sub_handle = devid;
- irte_info->irte_index = index;
+ irte_info = &cfg->irq_2_irte;
+ irte_info->devid = devid;
+ irte_info->index = index;
goto out;
}
@@ -4098,7 +4081,7 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
struct io_apic_irq_attr *attr)
{
struct irq_remap_table *table;
- struct irq_2_iommu *irte_info;
+ struct irq_2_irte *irte_info;
struct irq_cfg *cfg;
union irte irte;
int ioapic_id;
@@ -4110,7 +4093,7 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
if (!cfg)
return -EINVAL;
- irte_info = &cfg->irq_2_iommu;
+ irte_info = &cfg->irq_2_irte;
ioapic_id = mpc_ioapic_id(attr->ioapic);
devid = get_ioapic_devid(ioapic_id);
@@ -4125,8 +4108,8 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
/* Setup IRQ remapping info */
cfg->remapped = 1;
- irte_info->sub_handle = devid;
- irte_info->irte_index = index;
+ irte_info->devid = devid;
+ irte_info->index = index;
/* Setup IRTE for IOMMU */
irte.val = 0;
@@ -4160,7 +4143,7 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
static int set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
- struct irq_2_iommu *irte_info;
+ struct irq_2_irte *irte_info;
unsigned int dest, irq;
struct irq_cfg *cfg;
union irte irte;
@@ -4171,12 +4154,12 @@ static int set_affinity(struct irq_data *data, const struct cpumask *mask,
cfg = data->chip_data;
irq = data->irq;
- irte_info = &cfg->irq_2_iommu;
+ irte_info = &cfg->irq_2_irte;
if (!cpumask_intersects(mask, cpu_online_mask))
return -EINVAL;
- if (get_irte(irte_info->sub_handle, irte_info->irte_index, &irte))
+ if (get_irte(irte_info->devid, irte_info->index, &irte))
return -EBUSY;
if (assign_irq_vector(irq, cfg, mask))
@@ -4192,7 +4175,7 @@ static int set_affinity(struct irq_data *data, const struct cpumask *mask,
irte.fields.vector = cfg->vector;
irte.fields.destination = dest;
- modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);
+ modify_irte(irte_info->devid, irte_info->index, irte);
if (cfg->move_in_progress)
send_cleanup_vector(cfg);
@@ -4204,16 +4187,16 @@ static int set_affinity(struct irq_data *data, const struct cpumask *mask,
static int free_irq(int irq)
{
- struct irq_2_iommu *irte_info;
+ struct irq_2_irte *irte_info;
struct irq_cfg *cfg;
cfg = irq_get_chip_data(irq);
if (!cfg)
return -EINVAL;
- irte_info = &cfg->irq_2_iommu;
+ irte_info = &cfg->irq_2_irte;
- free_irte(irte_info->sub_handle, irte_info->irte_index);
+ free_irte(irte_info->devid, irte_info->index);
return 0;
}
@@ -4222,7 +4205,7 @@ static void compose_msi_msg(struct pci_dev *pdev,
unsigned int irq, unsigned int dest,
struct msi_msg *msg, u8 hpet_id)
{
- struct irq_2_iommu *irte_info;
+ struct irq_2_irte *irte_info;
struct irq_cfg *cfg;
union irte irte;
@@ -4230,7 +4213,7 @@ static void compose_msi_msg(struct pci_dev *pdev,
if (!cfg)
return;
- irte_info = &cfg->irq_2_iommu;
+ irte_info = &cfg->irq_2_irte;
irte.val = 0;
irte.fields.vector = cfg->vector;
@@ -4239,11 +4222,11 @@ static void compose_msi_msg(struct pci_dev *pdev,
irte.fields.dm = apic->irq_dest_mode;
irte.fields.valid = 1;
- modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);
+ modify_irte(irte_info->devid, irte_info->index, irte);
msg->address_hi = MSI_ADDR_BASE_HI;
msg->address_lo = MSI_ADDR_BASE_LO;
- msg->data = irte_info->irte_index;
+ msg->data = irte_info->index;
}
static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
@@ -4268,7 +4251,7 @@ static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
int index, int offset)
{
- struct irq_2_iommu *irte_info;
+ struct irq_2_irte *irte_info;
struct irq_cfg *cfg;
u16 devid;
@@ -4283,18 +4266,18 @@ static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
return 0;
devid = get_device_id(&pdev->dev);
- irte_info = &cfg->irq_2_iommu;
+ irte_info = &cfg->irq_2_irte;
cfg->remapped = 1;
- irte_info->sub_handle = devid;
- irte_info->irte_index = index + offset;
+ irte_info->devid = devid;
+ irte_info->index = index + offset;
return 0;
}
static int setup_hpet_msi(unsigned int irq, unsigned int id)
{
- struct irq_2_iommu *irte_info;
+ struct irq_2_irte *irte_info;
struct irq_cfg *cfg;
int index, devid;
@@ -4302,7 +4285,7 @@ static int setup_hpet_msi(unsigned int irq, unsigned int id)
if (!cfg)
return -EINVAL;
- irte_info = &cfg->irq_2_iommu;
+ irte_info = &cfg->irq_2_irte;
devid = get_hpet_devid(id);
if (devid < 0)
return devid;
@@ -4312,8 +4295,8 @@ static int setup_hpet_msi(unsigned int irq, unsigned int id)
return index;
cfg->remapped = 1;
- irte_info->sub_handle = devid;
- irte_info->irte_index = index;
+ irte_info->devid = devid;
+ irte_info->index = index;
return 0;
}
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 2f46881..bf51abb 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -213,6 +213,14 @@ enum iommu_init_state {
IOMMU_INIT_ERROR,
};
+/* Early ioapic and hpet maps from kernel command line */
+#define EARLY_MAP_SIZE 4
+static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
+static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
+static int __initdata early_ioapic_map_size;
+static int __initdata early_hpet_map_size;
+static bool __initdata cmdline_maps;
+
static enum iommu_init_state init_state = IOMMU_START_STATE;
static int amd_iommu_enable_interrupts(void);
@@ -703,31 +711,66 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
set_iommu_for_device(iommu, devid);
}
-static int add_special_device(u8 type, u8 id, u16 devid)
+static int __init add_special_device(u8 type, u8 id, u16 devid, bool cmd_line)
{
struct devid_map *entry;
struct list_head *list;
- if (type != IVHD_SPECIAL_IOAPIC && type != IVHD_SPECIAL_HPET)
+ if (type == IVHD_SPECIAL_IOAPIC)
+ list = &ioapic_map;
+ else if (type == IVHD_SPECIAL_HPET)
+ list = &hpet_map;
+ else
return -EINVAL;
+ list_for_each_entry(entry, list, list) {
+ if (!(entry->id == id && entry->cmd_line))
+ continue;
+
+ pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n",
+ type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
+
+ return 0;
+ }
+
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
- entry->id = id;
- entry->devid = devid;
-
- if (type == IVHD_SPECIAL_IOAPIC)
- list = &ioapic_map;
- else
- list = &hpet_map;
+ entry->id = id;
+ entry->devid = devid;
+ entry->cmd_line = cmd_line;
list_add_tail(&entry->list, list);
return 0;
}
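+/*
+ * Register the IOAPIC and HPET overrides collected from the kernel
+ * command line before the IVRS table entries are parsed.
+ */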
+static int __init add_early_maps(void)
+{
+ int i, ret;
+
+ for (i = 0; i < early_ioapic_map_size; ++i) {
+ ret = add_special_device(IVHD_SPECIAL_IOAPIC,
+ early_ioapic_map[i].id,
+ early_ioapic_map[i].devid,
+ early_ioapic_map[i].cmd_line);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < early_hpet_map_size; ++i) {
+ ret = add_special_device(IVHD_SPECIAL_HPET,
+ early_hpet_map[i].id,
+ early_hpet_map[i].devid,
+ early_hpet_map[i].cmd_line);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* Reads the device exclusion range from ACPI and initializes the IOMMU with
* it
@@ -764,6 +807,12 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
u32 dev_i, ext_flags = 0;
bool alias = false;
struct ivhd_entry *e;
+ int ret;
+
+
+ ret = add_early_maps();
+ if (ret)
+ return ret;
/*
* First save the recommended feature enable bits from ACPI
@@ -929,7 +978,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
PCI_FUNC(devid));
set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
- ret = add_special_device(type, handle, devid);
+ ret = add_special_device(type, handle, devid, false);
if (ret)
return ret;
break;
@@ -1275,7 +1324,7 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
amd_iommu_int_handler,
amd_iommu_int_thread,
0, "AMD-Vi",
- iommu->dev);
+ iommu);
if (r) {
pci_disable_msi(iommu->dev);
@@ -1638,18 +1687,28 @@ static void __init free_on_init_error(void)
static bool __init check_ioapic_information(void)
{
+ const char *fw_bug = FW_BUG;
bool ret, has_sb_ioapic;
int idx;
has_sb_ioapic = false;
ret = false;
+ /*
+ * If we have map overrides on the kernel command line the
+ * messages in this function might not describe firmware bugs
+ * anymore - so be careful
+ */
+ if (cmdline_maps)
+ fw_bug = "";
+
for (idx = 0; idx < nr_ioapics; idx++) {
int devid, id = mpc_ioapic_id(idx);
devid = get_ioapic_devid(id);
if (devid < 0) {
- pr_err(FW_BUG "AMD-Vi: IOAPIC[%d] not in IVRS table\n", id);
+ pr_err("%sAMD-Vi: IOAPIC[%d] not in IVRS table\n",
+ fw_bug, id);
ret = false;
} else if (devid == IOAPIC_SB_DEVID) {
has_sb_ioapic = true;
@@ -1666,11 +1725,11 @@ static bool __init check_ioapic_information(void)
* when the BIOS is buggy and provides us the wrong
* device id for the IOAPIC in the system.
*/
- pr_err(FW_BUG "AMD-Vi: No southbridge IOAPIC found in IVRS table\n");
+ pr_err("%sAMD-Vi: No southbridge IOAPIC found\n", fw_bug);
}
if (!ret)
- pr_err("AMD-Vi: Disabling interrupt remapping due to BIOS Bug(s)\n");
+ pr_err("AMD-Vi: Disabling interrupt remapping\n");
return ret;
}
@@ -1801,6 +1860,7 @@ static int __init early_amd_iommu_init(void)
* Interrupt remapping enabled, create kmem_cache for the
* remapping tables.
*/
+ ret = -ENOMEM;
amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
MAX_IRQS_PER_TABLE * sizeof(u32),
IRQ_TABLE_ALIGNMENT,
@@ -2097,8 +2157,70 @@ static int __init parse_amd_iommu_options(char *str)
return 1;
}
-__setup("amd_iommu_dump", parse_amd_iommu_dump);
-__setup("amd_iommu=", parse_amd_iommu_options);
+static int __init parse_ivrs_ioapic(char *str)
+{
+ unsigned int bus, dev, fn;
+ int ret, id, i;
+ u16 devid;
+
+ ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
+
+ if (ret != 4) {
+ pr_err("AMD-Vi: Invalid command line: ivrs_ioapic%s\n", str);
+ return 1;
+ }
+
+ if (early_ioapic_map_size == EARLY_MAP_SIZE) {
+ pr_err("AMD-Vi: Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
+ str);
+ return 1;
+ }
+
+ devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+
+ cmdline_maps = true;
+ i = early_ioapic_map_size++;
+ early_ioapic_map[i].id = id;
+ early_ioapic_map[i].devid = devid;
+ early_ioapic_map[i].cmd_line = true;
+
+ return 1;
+}
+
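+/*
+ * Parse an ivrs_hpet[<id>]=<bus>:<dev>.<fn> command-line override that
+ * maps an HPET id to the PCI device id the IOMMU should use for it.
+ */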
+static int __init parse_ivrs_hpet(char *str)
+{
+ unsigned int bus, dev, fn;
+ int ret, id, i;
+ u16 devid;
+
+ ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
+
+ if (ret != 4) {
+ pr_err("AMD-Vi: Invalid command line: ivrs_hpet%s\n", str);
+ return 1;
+ }
+
+ if (early_hpet_map_size == EARLY_MAP_SIZE) {
+ pr_err("AMD-Vi: Early HPET map overflow - ignoring ivrs_hpet%s\n",
+ str);
+ return 1;
+ }
+
+ devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+
+ cmdline_maps = true;
+ i = early_hpet_map_size++;
+ early_hpet_map[i].id = id;
+ early_hpet_map[i].devid = devid;
+ early_hpet_map[i].cmd_line = true;
+
+ return 1;
+}
+
+__setup("amd_iommu_dump", parse_amd_iommu_dump);
+__setup("amd_iommu=", parse_amd_iommu_options);
+__setup("ivrs_ioapic", parse_ivrs_ioapic);
+__setup("ivrs_hpet", parse_ivrs_hpet);
IOMMU_INIT_FINISH(amd_iommu_detect,
gart_iommu_hole_init,
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index ec36cf6..0285a21 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -100,6 +100,7 @@
#define PASID_MASK 0x000fffff
/* MMIO status bits */
+#define MMIO_STATUS_EVT_INT_MASK (1 << 1)
#define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2)
#define MMIO_STATUS_PPR_INT_MASK (1 << 6)
@@ -589,6 +590,7 @@ struct devid_map {
struct list_head list;
u8 id;
u16 devid;
+ bool cmd_line;
};
/* Map HPET and IOAPIC ids to the devid used by the IOMMU */
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index b8008f6..a7967ce 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -646,7 +646,7 @@ out:
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
- u32 ver;
+ u32 ver, sts;
static int iommu_allocated = 0;
int agaw = 0;
int msagaw = 0;
@@ -696,6 +696,15 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
(unsigned long long)iommu->cap,
(unsigned long long)iommu->ecap);
+ /* Reflect status in gcmd */
+ sts = readl(iommu->reg + DMAR_GSTS_REG);
+ if (sts & DMA_GSTS_IRES)
+ iommu->gcmd |= DMA_GCMD_IRE;
+ if (sts & DMA_GSTS_TES)
+ iommu->gcmd |= DMA_GCMD_TE;
+ if (sts & DMA_GSTS_QIES)
+ iommu->gcmd |= DMA_GCMD_QIE;
+
raw_spin_lock_init(&iommu->register_lock);
drhd->iommu = iommu;
@@ -1205,7 +1214,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
/* TBD: ignore advanced fault log currently */
if (!(fault_status & DMA_FSTS_PPF))
- goto clear_rest;
+ goto unlock_exit;
fault_index = dma_fsts_fault_record_index(fault_status);
reg = cap_fault_reg_offset(iommu->cap);
@@ -1246,11 +1255,10 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
fault_index = 0;
raw_spin_lock_irqsave(&iommu->register_lock, flag);
}
-clear_rest:
- /* clear all the other faults */
- fault_status = readl(iommu->reg + DMAR_FSTS_REG);
- writel(fault_status, iommu->reg + DMAR_FSTS_REG);
+ writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
+
+unlock_exit:
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
return IRQ_HANDLED;
}
@@ -1298,6 +1306,7 @@ int __init enable_drhd_fault_handling(void)
for_each_drhd_unit(drhd) {
int ret;
struct intel_iommu *iommu = drhd->iommu;
+ u32 fault_status;
ret = dmar_set_interrupt(iommu);
if (ret) {
@@ -1310,6 +1319,8 @@ int __init enable_drhd_fault_handling(void)
* Clear any previous faults.
*/
dmar_fault(iommu->irq, iommu);
+ fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+ writel(fault_status, iommu->reg + DMAR_FSTS_REG);
}
return 0;
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 238a3ca..3f32d64 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1027,7 +1027,7 @@ done:
}
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
- unsigned long iova)
+ dma_addr_t iova)
{
struct exynos_iommu_domain *priv = domain->priv;
unsigned long *entry;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0099667..b4f0e28 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -47,6 +47,7 @@
#include <asm/iommu.h>
#include "irq_remapping.h"
+#include "pci.h"
#define ROOT_SIZE VTD_PAGE_SIZE
#define CONTEXT_SIZE VTD_PAGE_SIZE
@@ -3665,6 +3666,7 @@ static struct notifier_block device_nb = {
int __init intel_iommu_init(void)
{
int ret = 0;
+ struct dmar_drhd_unit *drhd;
/* VT-d is required for a TXT/tboot launch, so enforce that */
force_on = tboot_force_iommu();
@@ -3675,6 +3677,20 @@ int __init intel_iommu_init(void)
return -ENODEV;
}
+ /*
+ * Disable translation if already enabled prior to OS handover.
+ */
+ for_each_drhd_unit(drhd) {
+ struct intel_iommu *iommu;
+
+ if (drhd->ignored)
+ continue;
+
+ iommu = drhd->iommu;
+ if (iommu->gcmd & DMA_GCMD_TE)
+ iommu_disable_translation(iommu);
+ }
+
if (dmar_dev_scope_init() < 0) {
if (force_on)
panic("tboot: Failed to initialize DMAR device scope\n");
@@ -4111,7 +4127,7 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
- unsigned long iova)
+ dma_addr_t iova)
{
struct dmar_domain *dmar_domain = domain->priv;
struct dma_pte *pte;
@@ -4137,12 +4153,6 @@ static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
return 0;
}
-static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
-{
- pci_dev_put(*from);
- *from = to;
-}
-
#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
static int intel_iommu_add_device(struct device *dev)
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index f3b8f23..5b19b2d 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -524,6 +524,16 @@ static int __init intel_irq_remapping_supported(void)
if (disable_irq_remap)
return 0;
+ if (irq_remap_broken) {
+ WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
+ "This system BIOS has enabled interrupt remapping\n"
+ "on a chipset that contains an erratum making that\n"
+ "feature unstable. To maintain system stability\n"
+ "interrupt remapping is being disabled. Please\n"
+ "contact your BIOS vendor for an update\n");
+ disable_irq_remap = 1;
+ return 0;
+ }
if (!dmar_ir_support())
return 0;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index b972d43..d8f98b1 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -204,6 +204,35 @@ again:
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
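+/*
+ * iommu_group_get_by_id - look up an existing group by its numeric id
+ *
+ * Returns the matching group with a reference taken on its devices
+ * kobject, or NULL when no such group exists.
+ */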
+struct iommu_group *iommu_group_get_by_id(int id)
+{
+ struct kobject *group_kobj;
+ struct iommu_group *group;
+ const char *name;
+
+ if (!iommu_group_kset)
+ return NULL;
+
+ name = kasprintf(GFP_KERNEL, "%d", id);
+ if (!name)
+ return NULL;
+
+ group_kobj = kset_find_obj(iommu_group_kset, name);
+ kfree(name);
+
+ if (!group_kobj)
+ return NULL;
+
+ group = container_of(group_kobj, struct iommu_group, kobj);
+ BUG_ON(group->id != id);
+
+ kobject_get(group->devices_kobj);
+ kobject_put(&group->kobj);
+
+ return group;
+}
+EXPORT_SYMBOL_GPL(iommu_group_get_by_id);
+
/**
* iommu_group_get_iommudata - retrieve iommu_data registered for a group
* @group: the group
@@ -706,8 +735,7 @@ void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
}
EXPORT_SYMBOL_GPL(iommu_detach_group);
-phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
- unsigned long iova)
+phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
if (unlikely(domain->ops->iova_to_phys == NULL))
return 0;
@@ -854,12 +882,13 @@ EXPORT_SYMBOL_GPL(iommu_unmap);
int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
- phys_addr_t paddr, u64 size)
+ phys_addr_t paddr, u64 size, int prot)
{
if (unlikely(domain->ops->domain_window_enable == NULL))
return -ENODEV;
- return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size);
+ return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
+ prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 7c11ff3..dcfea4e 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -18,6 +18,7 @@
int irq_remapping_enabled;
int disable_irq_remap;
+int irq_remap_broken;
int disable_sourceid_checking;
int no_x2apic_optout;
@@ -210,6 +211,11 @@ void __init setup_irq_remapping_ops(void)
#endif
}
+void set_irq_remapping_broken(void)
+{
+ irq_remap_broken = 1;
+}
+
int irq_remapping_supported(void)
{
if (disable_irq_remap)
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
index ecb6376..90c4dae 100644
--- a/drivers/iommu/irq_remapping.h
+++ b/drivers/iommu/irq_remapping.h
@@ -32,6 +32,7 @@ struct pci_dev;
struct msi_msg;
extern int disable_irq_remap;
+extern int irq_remap_broken;
extern int disable_sourceid_checking;
extern int no_x2apic_optout;
extern int irq_remapping_enabled;
@@ -89,6 +90,7 @@ extern struct irq_remap_ops amd_iommu_irq_ops;
#define irq_remapping_enabled 0
#define disable_irq_remap 1
+#define irq_remap_broken 0
#endif /* CONFIG_IRQ_REMAP */
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 6a8870a..8ab4f41 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -554,7 +554,7 @@ fail:
}
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
- unsigned long va)
+ dma_addr_t va)
{
struct msm_priv *priv;
struct msm_iommu_drvdata *iommu_drvdata;
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 6ac02fa..e02e5d7 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1219,7 +1219,7 @@ static void omap_iommu_domain_destroy(struct iommu_domain *domain)
}
static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
- unsigned long da)
+ dma_addr_t da)
{
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu *oiommu = omap_domain->iommu_dev;
diff --git a/drivers/iommu/pci.h b/drivers/iommu/pci.h
new file mode 100644
index 0000000..352d80a
--- /dev/null
+++ b/drivers/iommu/pci.h
@@ -0,0 +1,29 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) 2013 Red Hat, Inc.
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ */
+#ifndef __IOMMU_PCI_H
+#define __IOMMU_PCI_H
+
+/* Helper function for swapping pci device reference */
+static inline void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
+{
+ pci_dev_put(*from);
+ *from = to;
+}
+
+#endif /* __IOMMU_PCI_H */
diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c
index b6e8b57..d572863 100644
--- a/drivers/iommu/shmobile-iommu.c
+++ b/drivers/iommu/shmobile-iommu.c
@@ -296,7 +296,7 @@ done:
}
static phys_addr_t shmobile_iommu_iova_to_phys(struct iommu_domain *domain,
- unsigned long iova)
+ dma_addr_t iova)
{
struct shmobile_iommu_domain *sh_domain = domain->priv;
uint32_t l1entry = 0, l2entry = 0;
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 8643757..108c0e9 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -279,7 +279,7 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
}
static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
- unsigned long iova)
+ dma_addr_t iova)
{
struct gart_device *gart = domain->priv;
unsigned long pte;
@@ -295,7 +295,8 @@ static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
pa = (pte & GART_PAGE_MASK);
if (!pfn_valid(__phys_to_pfn(pa))) {
- dev_err(gart->dev, "No entry for %08lx:%08x\n", iova, pa);
+ dev_err(gart->dev, "No entry for %08llx:%08x\n",
+ (unsigned long long)iova, pa);
gart_dump_table(gart);
return -EINVAL;
}
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index b34e5fd..f6f120e 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -757,7 +757,7 @@ static size_t smmu_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
}
static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain,
- unsigned long iova)
+ dma_addr_t iova)
{
struct smmu_as *as = domain->priv;
unsigned long *pte;
@@ -772,7 +772,8 @@ static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain,
pfn = *pte & SMMU_PFN_MASK;
WARN_ON(!pfn_valid(pfn));
dev_dbg(as->smmu->dev,
- "iova:%08lx pfn:%08lx asid:%d\n", iova, pfn, as->asid);
+ "iova:%08llx pfn:%08lx asid:%d\n", (unsigned long long)iova,
+ pfn, as->asid);
spin_unlock_irqrestore(&as->lock, flags);
return PFN_PHYS(pfn);
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index a350969..4a33351 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -25,6 +25,14 @@ config ARM_VIC_NR
The maximum number of VICs available in the system, for
power management.
+config RENESAS_INTC_IRQPIN
+ bool
+ select IRQ_DOMAIN
+
+config RENESAS_IRQC
+ bool
+ select IRQ_DOMAIN
+
config VERSATILE_FPGA_IRQ
bool
select IRQ_DOMAIN
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 98e3b87..cda4cb5 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -2,10 +2,17 @@ obj-$(CONFIG_IRQCHIP) += irqchip.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o
+obj-$(CONFIG_ARCH_MVEBU) += irq-armada-370-xp.o
+obj-$(CONFIG_ARCH_MXS) += irq-mxs.o
+obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o
obj-$(CONFIG_METAG) += irq-metag-ext.o
obj-$(CONFIG_METAG_PERFCOUNTER_IRQS) += irq-metag.o
-obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi.o
+obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o
obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
obj-$(CONFIG_ARM_GIC) += irq-gic.o
obj-$(CONFIG_ARM_VIC) += irq-vic.o
+obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o
+obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o
+obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o
obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
+obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
index 04d86a9..a9d2b2f 100644
--- a/drivers/irqchip/exynos-combiner.c
+++ b/drivers/irqchip/exynos-combiner.c
@@ -12,12 +12,16 @@
#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/slab.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/mach/irq.h>
+#ifdef CONFIG_EXYNOS_ATAGS
#include <plat/cpu.h>
+#endif
#include "irqchip.h"
@@ -25,16 +29,18 @@
#define COMBINER_ENABLE_CLEAR 0x4
#define COMBINER_INT_STATUS 0xC
+#define IRQ_IN_COMBINER 8
+
static DEFINE_SPINLOCK(irq_controller_lock);
struct combiner_chip_data {
- unsigned int irq_offset;
+ unsigned int hwirq_offset;
unsigned int irq_mask;
void __iomem *base;
+ unsigned int parent_irq;
};
static struct irq_domain *combiner_irq_domain;
-static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];
static inline void __iomem *combiner_base(struct irq_data *data)
{
@@ -75,11 +81,11 @@ static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
if (status == 0)
goto out;
- combiner_irq = __ffs(status);
+ combiner_irq = chip_data->hwirq_offset + __ffs(status);
+ cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq);
- cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
- if (unlikely(cascade_irq >= NR_IRQS))
- do_bad_IRQ(cascade_irq, desc);
+ if (unlikely(!cascade_irq))
+ do_bad_IRQ(irq, desc);
else
generic_handle_irq(cascade_irq);
@@ -87,39 +93,49 @@ static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static struct irq_chip combiner_chip = {
- .name = "COMBINER",
- .irq_mask = combiner_mask_irq,
- .irq_unmask = combiner_unmask_irq,
-};
-
-static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
+#ifdef CONFIG_SMP
+static int combiner_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val, bool force)
{
- unsigned int max_nr;
+ struct combiner_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ struct irq_chip *chip = irq_get_chip(chip_data->parent_irq);
+ struct irq_data *data = irq_get_irq_data(chip_data->parent_irq);
- if (soc_is_exynos5250())
- max_nr = EXYNOS5_MAX_COMBINER_NR;
+ if (chip && chip->irq_set_affinity)
+ return chip->irq_set_affinity(data, mask_val, force);
else
- max_nr = EXYNOS4_MAX_COMBINER_NR;
+ return -EINVAL;
+}
+#endif
- if (combiner_nr >= max_nr)
- BUG();
- if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
+static struct irq_chip combiner_chip = {
+ .name = "COMBINER",
+ .irq_mask = combiner_mask_irq,
+ .irq_unmask = combiner_unmask_irq,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = combiner_set_affinity,
+#endif
+};
+
+static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
+ unsigned int irq)
+{
+ if (irq_set_handler_data(irq, combiner_data) != 0)
BUG();
irq_set_chained_handler(irq, combiner_handle_cascade_irq);
}
-static void __init combiner_init_one(unsigned int combiner_nr,
- void __iomem *base)
+static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
+ unsigned int combiner_nr,
+ void __iomem *base, unsigned int irq)
{
- combiner_data[combiner_nr].base = base;
- combiner_data[combiner_nr].irq_offset = irq_find_mapping(
- combiner_irq_domain, combiner_nr * MAX_IRQ_IN_COMBINER);
- combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);
+ combiner_data->base = base;
+ combiner_data->hwirq_offset = (combiner_nr & ~3) * IRQ_IN_COMBINER;
+ combiner_data->irq_mask = 0xff << ((combiner_nr % 4) << 3);
+ combiner_data->parent_irq = irq;
/* Disable all interrupts */
- __raw_writel(combiner_data[combiner_nr].irq_mask,
- base + COMBINER_ENABLE_CLEAR);
+ __raw_writel(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
}
#ifdef CONFIG_OF
@@ -135,7 +151,7 @@ static int combiner_irq_domain_xlate(struct irq_domain *d,
if (intsize < 2)
return -EINVAL;
- *out_hwirq = intspec[0] * MAX_IRQ_IN_COMBINER + intspec[1];
+ *out_hwirq = intspec[0] * IRQ_IN_COMBINER + intspec[1];
*out_type = 0;
return 0;
@@ -154,6 +170,8 @@ static int combiner_irq_domain_xlate(struct irq_domain *d,
static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
+ struct combiner_chip_data *combiner_data = d->host_data;
+
irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
irq_set_chip_data(irq, &combiner_data[hw >> 3]);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
@@ -166,46 +184,61 @@ static struct irq_domain_ops combiner_irq_domain_ops = {
.map = combiner_irq_domain_map,
};
-void __init combiner_init(void __iomem *combiner_base,
- struct device_node *np)
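+/*
+ * Map a combiner group number to its parent SPI interrupt for non-DT
+ * (ATAGS) platforms; returns 0 when no static mapping exists.
+ */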
+static unsigned int combiner_lookup_irq(int group)
{
- int i, irq, irq_base;
- unsigned int max_nr, nr_irq;
-
- if (np) {
- if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
- pr_warning("%s: number of combiners not specified, "
- "setting default as %d.\n",
- __func__, EXYNOS4_MAX_COMBINER_NR);
- max_nr = EXYNOS4_MAX_COMBINER_NR;
- }
- } else {
- max_nr = soc_is_exynos5250() ? EXYNOS5_MAX_COMBINER_NR :
- EXYNOS4_MAX_COMBINER_NR;
+#ifdef CONFIG_EXYNOS_ATAGS
+ if (group < EXYNOS4210_MAX_COMBINER_NR || soc_is_exynos5250())
+ return IRQ_SPI(group);
+
+ switch (group) {
+ case 16:
+ return IRQ_SPI(107);
+ case 17:
+ return IRQ_SPI(108);
+ case 18:
+ return IRQ_SPI(48);
+ case 19:
+ return IRQ_SPI(42);
}
- nr_irq = max_nr * MAX_IRQ_IN_COMBINER;
+#endif
+ return 0;
+}
+
+void __init combiner_init(void __iomem *combiner_base,
+ struct device_node *np,
+ unsigned int max_nr,
+ int irq_base)
+{
+ int i, irq;
+ unsigned int nr_irq;
+ struct combiner_chip_data *combiner_data;
+
+ nr_irq = max_nr * IRQ_IN_COMBINER;
- irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
- if (IS_ERR_VALUE(irq_base)) {
- irq_base = COMBINER_IRQ(0, 0);
- pr_warning("%s: irq desc alloc failed. Continuing with %d as linux irq base\n", __func__, irq_base);
+ combiner_data = kcalloc(max_nr, sizeof (*combiner_data), GFP_KERNEL);
+ if (!combiner_data) {
+ pr_warning("%s: could not allocate combiner data\n", __func__);
+ return;
}
- combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
- &combiner_irq_domain_ops, &combiner_data);
+ combiner_irq_domain = irq_domain_add_simple(np, nr_irq, irq_base,
+ &combiner_irq_domain_ops, combiner_data);
if (WARN_ON(!combiner_irq_domain)) {
pr_warning("%s: irq domain init failed\n", __func__);
return;
}
for (i = 0; i < max_nr; i++) {
- combiner_init_one(i, combiner_base + (i >> 2) * 0x10);
- irq = IRQ_SPI(i);
#ifdef CONFIG_OF
if (np)
irq = irq_of_parse_and_map(np, i);
+ else
#endif
- combiner_cascade_irq(i, irq);
+ irq = combiner_lookup_irq(i);
+
+ combiner_init_one(&combiner_data[i], i,
+ combiner_base + (i >> 2) * 0x10, irq);
+ combiner_cascade_irq(&combiner_data[i], irq);
}
}
@@ -214,6 +247,8 @@ static int __init combiner_of_init(struct device_node *np,
struct device_node *parent)
{
void __iomem *combiner_base;
+ unsigned int max_nr = 20;
+ int irq_base = -1;
combiner_base = of_iomap(np, 0);
if (!combiner_base) {
@@ -221,7 +256,20 @@ static int __init combiner_of_init(struct device_node *np,
return -ENXIO;
}
- combiner_init(combiner_base, np);
+ if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
+ pr_info("%s: number of combiners not specified, "
+ "setting default as %d.\n",
+ __func__, max_nr);
+ }
+
+ /*
+ * FIXME: This is a hardwired COMBINER_IRQ(0,0). Once all devices
+ * get their IRQ from DT, remove this in order to get dynamic
+ * allocation.
+ */
+ irq_base = 160;
+
+ combiner_init(combiner_base, np, max_nr, irq_base);
return 0;
}
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
new file mode 100644
index 0000000..bb328a3
--- /dev/null
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -0,0 +1,288 @@
+/*
+ * Marvell Armada 370 and Armada XP SoC IRQ handling
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ * Ben Dooks <ben.dooks@codethink.co.uk>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+#include <asm/mach/arch.h>
+#include <asm/exception.h>
+#include <asm/smp_plat.h>
+#include <asm/mach/irq.h>
+
+#include "irqchip.h"
+
+/* Interrupt Controller Registers Map */
+#define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48)
+#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS (0x4C)
+
+#define ARMADA_370_XP_INT_CONTROL (0x00)
+#define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30)
+#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
+#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4)
+
+#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
+
+#define ARMADA_370_XP_SW_TRIG_INT_OFFS (0x4)
+#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS (0xc)
+#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS (0x8)
+
+#define ARMADA_370_XP_MAX_PER_CPU_IRQS (28)
+
+#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ (5)
+
+#define IPI_DOORBELL_START (0)
+#define IPI_DOORBELL_END (8)
+#define IPI_DOORBELL_MASK 0xFF
+
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+
+static void __iomem *per_cpu_int_base;
+static void __iomem *main_int_base;
+static struct irq_domain *armada_370_xp_mpic_domain;
+
+/*
+ * In SMP mode:
+ * For shared global interrupts, mask/unmask global enable bit
+ * For CPU interrupts, mask/unmask the calling CPU's bit
+ */
+static void armada_370_xp_irq_mask(struct irq_data *d)
+{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
+ writel(hwirq, main_int_base +
+ ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
+ else
+ writel(hwirq, per_cpu_int_base +
+ ARMADA_370_XP_INT_SET_MASK_OFFS);
+}
+
+static void armada_370_xp_irq_unmask(struct irq_data *d)
+{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
+ writel(hwirq, main_int_base +
+ ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+ else
+ writel(hwirq, per_cpu_int_base +
+ ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+}
+
+#ifdef CONFIG_SMP
+static int armada_xp_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val, bool force)
+{
+ unsigned long reg;
+ unsigned long new_mask = 0;
+ unsigned long online_mask = 0;
+ unsigned long count = 0;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ int cpu;
+
+ for_each_cpu(cpu, mask_val) {
+ new_mask |= 1 << cpu_logical_map(cpu);
+ count++;
+ }
+
+ /*
+ * Forbid multicore interrupt affinity.
+ * This is required since the MPIC HW doesn't prevent several
+ * CPUs from acknowledging the same interrupt.
+ */
+ if (count > 1)
+ return -EINVAL;
+
+ for_each_cpu(cpu, cpu_online_mask)
+ online_mask |= 1 << cpu_logical_map(cpu);
+
+ raw_spin_lock(&irq_controller_lock);
+
+ reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
+ reg = (reg & (~online_mask)) | new_mask;
+ writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
+
+ raw_spin_unlock(&irq_controller_lock);
+
+ return 0;
+}
+#endif
+
+static struct irq_chip armada_370_xp_irq_chip = {
+ .name = "armada_370_xp_irq",
+ .irq_mask = armada_370_xp_irq_mask,
+ .irq_mask_ack = armada_370_xp_irq_mask,
+ .irq_unmask = armada_370_xp_irq_unmask,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = armada_xp_set_affinity,
+#endif
+};
+
+static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
+ unsigned int virq, irq_hw_number_t hw)
+{
+ armada_370_xp_irq_mask(irq_get_irq_data(virq));
+ if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
+ writel(hw, per_cpu_int_base +
+ ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+ else
+ writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+
+ if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
+ irq_set_percpu_devid(virq);
+ irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
+ handle_percpu_devid_irq);
+
+ } else {
+ irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
+ handle_level_irq);
+ }
+ set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
+
+ return 0;
+}
+
+#ifdef CONFIG_SMP
+void armada_mpic_send_doorbell(const struct cpumask *mask, unsigned int irq)
+{
+ int cpu;
+ unsigned long map = 0;
+
+ /* Convert our logical CPU mask into a physical one. */
+ for_each_cpu(cpu, mask)
+ map |= 1 << cpu_logical_map(cpu);
+
+ /*
+ * Ensure that stores to Normal memory are visible to the
+ * other CPUs before issuing the IPI.
+ */
+ dsb();
+
+ /* submit softirq */
+ writel((map << 8) | irq, main_int_base +
+ ARMADA_370_XP_SW_TRIG_INT_OFFS);
+}
+
+void armada_xp_mpic_smp_cpu_init(void)
+{
+ /* Clear pending IPIs */
+ writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+
+ /* Enable first 8 IPIs */
+ writel(IPI_DOORBELL_MASK, per_cpu_int_base +
+ ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+
+ /* Unmask IPI interrupt */
+ writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+}
+#endif /* CONFIG_SMP */
+
+static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
+ .map = armada_370_xp_mpic_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static asmlinkage void __exception_irq_entry
+armada_370_xp_handle_irq(struct pt_regs *regs)
+{
+ u32 irqstat, irqnr;
+
+ do {
+ irqstat = readl_relaxed(per_cpu_int_base +
+ ARMADA_370_XP_CPU_INTACK_OFFS);
+ irqnr = irqstat & 0x3FF;
+
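+ /* 0x3ff (all ones) in the acknowledge register means no interrupt is pending */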
+ if (irqnr > 1022)
+ break;
+
+ if (irqnr > 0) {
+ irqnr = irq_find_mapping(armada_370_xp_mpic_domain,
+ irqnr);
+ handle_IRQ(irqnr, regs);
+ continue;
+ }
+#ifdef CONFIG_SMP
+ /* IPI Handling */
+ if (irqnr == 0) {
+ u32 ipimask, ipinr;
+
+ ipimask = readl_relaxed(per_cpu_int_base +
+ ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
+ & IPI_DOORBELL_MASK;
+
+ writel(~IPI_DOORBELL_MASK, per_cpu_int_base +
+ ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+
+ /* Handle all pending doorbells */
+ for (ipinr = IPI_DOORBELL_START;
+ ipinr < IPI_DOORBELL_END; ipinr++) {
+ if (ipimask & (0x1 << ipinr))
+ handle_IPI(ipinr, regs);
+ }
+ continue;
+ }
+#endif
+
+ } while (1);
+}
+
+static int __init armada_370_xp_mpic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ u32 control;
+
+ main_int_base = of_iomap(node, 0);
+ per_cpu_int_base = of_iomap(node, 1);
+
+ BUG_ON(!main_int_base);
+ BUG_ON(!per_cpu_int_base);
+
+ control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
+
+ armada_370_xp_mpic_domain =
+ irq_domain_add_linear(node, (control >> 2) & 0x3ff,
+ &armada_370_xp_mpic_irq_ops, NULL);
+
+ if (!armada_370_xp_mpic_domain)
+ panic("Unable to add Armada_370_Xp MPIC irq domain (DT)\n");
+
+ irq_set_default_host(armada_370_xp_mpic_domain);
+
+#ifdef CONFIG_SMP
+ armada_xp_mpic_smp_cpu_init();
+
+ /*
+ * Set the default affinity from all CPUs to the boot cpu.
+ * This is required since the MPIC doesn't prevent several CPUs
+ * from acknowledging the same interrupt.
+ */
+ cpumask_clear(irq_default_affinity);
+ cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
+
+#endif
+
+ set_handle_irq(armada_370_xp_handle_irq);
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(armada_370_xp_mpic, "marvell,mpic", armada_370_xp_mpic_of_init);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index fc6aebf..1760ceb 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -28,6 +28,7 @@
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
+#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
@@ -38,12 +39,12 @@
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>
#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
-#include <asm/mach/irq.h>
#include "irqchip.h"
@@ -127,7 +128,7 @@ static inline void gic_set_base_accessor(struct gic_chip_data *data,
#else
#define gic_data_dist_base(d) ((d)->dist_base.common_base)
#define gic_data_cpu_base(d) ((d)->cpu_base.common_base)
-#define gic_set_base_accessor(d,f)
+#define gic_set_base_accessor(d, f)
#endif
static inline void __iomem *gic_dist_base(struct irq_data *d)
@@ -324,7 +325,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
if (unlikely(gic_irq < 32 || gic_irq > 1020))
- do_bad_IRQ(cascade_irq, desc);
+ handle_bad_irq(cascade_irq, desc);
else
generic_handle_irq(cascade_irq);
@@ -700,6 +701,25 @@ static int gic_irq_domain_xlate(struct irq_domain *d,
return 0;
}
+#ifdef CONFIG_SMP
+static int __cpuinit gic_secondary_init(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ if (action == CPU_STARTING)
+ gic_cpu_init(&gic_data[0]);
+ return NOTIFY_OK;
+}
+
+/*
+ * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
+ * priority because the GIC needs to be up before the ARM generic timers.
+ */
+static struct notifier_block __cpuinitdata gic_cpu_notifier = {
+ .notifier_call = gic_secondary_init,
+ .priority = 100,
+};
+#endif
+
const struct irq_domain_ops gic_irq_domain_ops = {
.map = gic_irq_domain_map,
.xlate = gic_irq_domain_xlate,
@@ -790,6 +810,7 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
#ifdef CONFIG_SMP
set_smp_cross_call(gic_raise_softirq);
+ register_cpu_notifier(&gic_cpu_notifier);
#endif
set_handle_irq(gic_handle_irq);
@@ -800,15 +821,8 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
gic_pm_init(gic);
}
-void __cpuinit gic_secondary_init(unsigned int gic_nr)
-{
- BUG_ON(gic_nr >= MAX_GIC_NR);
-
- gic_cpu_init(&gic_data[gic_nr]);
-}
-
#ifdef CONFIG_OF
-static int gic_cnt __initdata = 0;
+static int gic_cnt __initdata;
int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
new file mode 100644
index 0000000..29889bb
--- /dev/null
+++ b/drivers/irqchip/irq-mxs.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/stmp_device.h>
+#include <asm/exception.h>
+
+#include "irqchip.h"
+
+#define HW_ICOLL_VECTOR 0x0000
+#define HW_ICOLL_LEVELACK 0x0010
+#define HW_ICOLL_CTRL 0x0020
+#define HW_ICOLL_STAT_OFFSET 0x0070
+#define HW_ICOLL_INTERRUPTn_SET(n) (0x0124 + (n) * 0x10)
+#define HW_ICOLL_INTERRUPTn_CLR(n) (0x0128 + (n) * 0x10)
+#define BM_ICOLL_INTERRUPTn_ENABLE 0x00000004
+#define BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0 0x1
+
+#define ICOLL_NUM_IRQS 128
+
+static void __iomem *icoll_base;
+static struct irq_domain *icoll_domain;
+
+static void icoll_ack_irq(struct irq_data *d)
+{
+ /*
+ * The Interrupt Collector is able to prioritize irqs.
+ * Currently only level 0 is used. So acking can use
+ * BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0 unconditionally.
+ */
+ __raw_writel(BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0,
+ icoll_base + HW_ICOLL_LEVELACK);
+}
+
+static void icoll_mask_irq(struct irq_data *d)
+{
+ __raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
+ icoll_base + HW_ICOLL_INTERRUPTn_CLR(d->hwirq));
+}
+
+static void icoll_unmask_irq(struct irq_data *d)
+{
+ __raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
+ icoll_base + HW_ICOLL_INTERRUPTn_SET(d->hwirq));
+}
+
+static struct irq_chip mxs_icoll_chip = {
+ .irq_ack = icoll_ack_irq,
+ .irq_mask = icoll_mask_irq,
+ .irq_unmask = icoll_unmask_irq,
+};
+
+asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
+{
+ u32 irqnr;
+
+ do {
+ irqnr = __raw_readl(icoll_base + HW_ICOLL_STAT_OFFSET);
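+ /* 0x7f in the status register means no interrupt is pending */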
+ if (irqnr != 0x7f) {
+ __raw_writel(irqnr, icoll_base + HW_ICOLL_VECTOR);
+ irqnr = irq_find_mapping(icoll_domain, irqnr);
+ handle_IRQ(irqnr, regs);
+ continue;
+ }
+ break;
+ } while (1);
+}
+
+static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(virq, &mxs_icoll_chip, handle_level_irq);
+ set_irq_flags(virq, IRQF_VALID);
+
+ return 0;
+}
+
+static struct irq_domain_ops icoll_irq_domain_ops = {
+ .map = icoll_irq_domain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static void __init icoll_of_init(struct device_node *np,
+ struct device_node *interrupt_parent)
+{
+ icoll_base = of_iomap(np, 0);
+ WARN_ON(!icoll_base);
+
+ /*
+ * Interrupt Collector reset, which initializes the priority
+ * for each irq to level 0.
+ */
+ stmp_reset_block(icoll_base + HW_ICOLL_CTRL);
+
+ icoll_domain = irq_domain_add_linear(np, ICOLL_NUM_IRQS,
+ &icoll_irq_domain_ops, NULL);
+ WARN_ON(!icoll_domain);
+}
+IRQCHIP_DECLARE(mxs, "fsl,icoll", icoll_of_init);
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
new file mode 100644
index 0000000..5a68e5a
--- /dev/null
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -0,0 +1,547 @@
+/*
+ * Renesas INTC External IRQ Pin Driver
+ *
+ * Copyright (C) 2013 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_data/irq-renesas-intc-irqpin.h>
+
+#define INTC_IRQPIN_MAX 8 /* maximum 8 interrupts per driver instance */
+
+#define INTC_IRQPIN_REG_SENSE 0 /* ICRn */
+#define INTC_IRQPIN_REG_PRIO 1 /* INTPRInn */
+#define INTC_IRQPIN_REG_SOURCE 2 /* INTREQnn */
+#define INTC_IRQPIN_REG_MASK 3 /* INTMSKnn */
+#define INTC_IRQPIN_REG_CLEAR 4 /* INTMSKCLRnn */
+#define INTC_IRQPIN_REG_NR 5
+
+/* INTC external IRQ PIN hardware register access:
+ *
+ * SENSE is read-write 32-bit with 2-bits or 4-bits per IRQ (*)
+ * PRIO is read-write 32-bit with 4-bits per IRQ (**)
+ * SOURCE is read-only 32-bit or 8-bit with 1-bit per IRQ (***)
+ * MASK is write-only 32-bit or 8-bit with 1-bit per IRQ (***)
+ * CLEAR is write-only 32-bit or 8-bit with 1-bit per IRQ (***)
+ *
+ * (*) May be accessed by more than one driver instance - lock needed
+ * (**) Read-modify-write access by one driver instance - lock needed
+ * (***) Accessed by one driver instance only - no locking needed
+ */
+
+struct intc_irqpin_iomem {
+ void __iomem *iomem;
+ unsigned long (*read)(void __iomem *iomem);
+ void (*write)(void __iomem *iomem, unsigned long data);
+ int width;
+};
+
+struct intc_irqpin_irq {
+ int hw_irq;
+ int requested_irq;
+ int domain_irq;
+ struct intc_irqpin_priv *p;
+};
+
+struct intc_irqpin_priv {
+ struct intc_irqpin_iomem iomem[INTC_IRQPIN_REG_NR];
+ struct intc_irqpin_irq irq[INTC_IRQPIN_MAX];
+ struct renesas_intc_irqpin_config config;
+ unsigned int number_of_irqs;
+ struct platform_device *pdev;
+ struct irq_chip irq_chip;
+ struct irq_domain *irq_domain;
+ bool shared_irqs;
+ u8 shared_irq_mask;
+};
+
+static unsigned long intc_irqpin_read32(void __iomem *iomem)
+{
+ return ioread32(iomem);
+}
+
+static unsigned long intc_irqpin_read8(void __iomem *iomem)
+{
+ return ioread8(iomem);
+}
+
+static void intc_irqpin_write32(void __iomem *iomem, unsigned long data)
+{
+ iowrite32(data, iomem);
+}
+
+static void intc_irqpin_write8(void __iomem *iomem, unsigned long data)
+{
+ iowrite8(data, iomem);
+}
+
+static inline unsigned long intc_irqpin_read(struct intc_irqpin_priv *p,
+ int reg)
+{
+ struct intc_irqpin_iomem *i = &p->iomem[reg];
+
+ return i->read(i->iomem);
+}
+
+static inline void intc_irqpin_write(struct intc_irqpin_priv *p,
+ int reg, unsigned long data)
+{
+ struct intc_irqpin_iomem *i = &p->iomem[reg];
+
+ i->write(i->iomem, data);
+}
+
+static inline unsigned long intc_irqpin_hwirq_mask(struct intc_irqpin_priv *p,
+ int reg, int hw_irq)
+{
+ return BIT((p->iomem[reg].width - 1) - hw_irq);
+}
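+
+/*
+ * Worked example of the mask computed above: bits are numbered from the
+ * most significant bit down, so in a 32-bit SOURCE/MASK/CLEAR register
+ * hw_irq 0 maps to BIT(31) and hw_irq 7 to BIT(24), while in an 8-bit
+ * register hw_irq 0 maps to BIT(7).
+ */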
+
+static inline void intc_irqpin_irq_write_hwirq(struct intc_irqpin_priv *p,
+ int reg, int hw_irq)
+{
+ intc_irqpin_write(p, reg, intc_irqpin_hwirq_mask(p, reg, hw_irq));
+}
+
+static DEFINE_RAW_SPINLOCK(intc_irqpin_lock); /* only used by slow path */
+
+static void intc_irqpin_read_modify_write(struct intc_irqpin_priv *p,
+ int reg, int shift,
+ int width, int value)
+{
+ unsigned long flags;
+ unsigned long tmp;
+
+ raw_spin_lock_irqsave(&intc_irqpin_lock, flags);
+
+ tmp = intc_irqpin_read(p, reg);
+ tmp &= ~(((1 << width) - 1) << shift);
+ tmp |= value << shift;
+ intc_irqpin_write(p, reg, tmp);
+
+ raw_spin_unlock_irqrestore(&intc_irqpin_lock, flags);
+}
+
+static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
+ int irq, int do_mask)
+{
+ int bitfield_width = 4; /* PRIO assumed to have fixed bitfield width */
+ int shift = (7 - irq) * bitfield_width; /* PRIO assumed to be 32-bit */
+
+ intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO,
+ shift, bitfield_width,
+ do_mask ? 0 : (1 << bitfield_width) - 1);
+}
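+
+/*
+ * Worked example of the PRIO field arithmetic above: hw_irq 2 uses the
+ * 4-bit field at bits 23:20 of INTPRI ((7 - 2) * 4 = 20); masking
+ * writes 0x0 into that field, unmasking writes 0xf.
+ */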
+
+static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value)
+{
+ int bitfield_width = p->config.sense_bitfield_width;
+ int shift = (7 - irq) * bitfield_width; /* SENSE assumed to be 32-bit */
+
+ dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value);
+
+ if (value >= (1 << bitfield_width))
+ return -EINVAL;
+
+ intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_SENSE, shift,
+ bitfield_width, value);
+ return 0;
+}
+
+static void intc_irqpin_dbg(struct intc_irqpin_irq *i, char *str)
+{
+ dev_dbg(&i->p->pdev->dev, "%s (%d:%d:%d)\n",
+ str, i->requested_irq, i->hw_irq, i->domain_irq);
+}
+
+static void intc_irqpin_irq_enable(struct irq_data *d)
+{
+ struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+ int hw_irq = irqd_to_hwirq(d);
+
+ intc_irqpin_dbg(&p->irq[hw_irq], "enable");
+ intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_CLEAR, hw_irq);
+}
+
+static void intc_irqpin_irq_disable(struct irq_data *d)
+{
+ struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+ int hw_irq = irqd_to_hwirq(d);
+
+ intc_irqpin_dbg(&p->irq[hw_irq], "disable");
+ intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_MASK, hw_irq);
+}
+
+static void intc_irqpin_shared_irq_enable(struct irq_data *d)
+{
+ struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+ int hw_irq = irqd_to_hwirq(d);
+
+ intc_irqpin_dbg(&p->irq[hw_irq], "shared enable");
+ intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_CLEAR, hw_irq);
+
+ p->shared_irq_mask &= ~BIT(hw_irq);
+}
+
+static void intc_irqpin_shared_irq_disable(struct irq_data *d)
+{
+ struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+ int hw_irq = irqd_to_hwirq(d);
+
+ intc_irqpin_dbg(&p->irq[hw_irq], "shared disable");
+ intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_MASK, hw_irq);
+
+ p->shared_irq_mask |= BIT(hw_irq);
+}
+
+static void intc_irqpin_irq_enable_force(struct irq_data *d)
+{
+ struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+ int irq = p->irq[irqd_to_hwirq(d)].requested_irq;
+
+ intc_irqpin_irq_enable(d);
+
+ /* Enable the interrupt through the parent interrupt controller.
+ * This assumes a non-shared interrupt with a 1:1 mapping and is
+ * needed for busted IRQs on some SoCs like sh73a0.
+ */
+ irq_get_chip(irq)->irq_unmask(irq_get_irq_data(irq));
+}
+
+static void intc_irqpin_irq_disable_force(struct irq_data *d)
+{
+ struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+ int irq = p->irq[irqd_to_hwirq(d)].requested_irq;
+
+ /* Disable the interrupt through the parent interrupt controller.
+ * This assumes a non-shared interrupt with a 1:1 mapping and is
+ * needed for busted IRQs on some SoCs like sh73a0.
+ */
+ irq_get_chip(irq)->irq_mask(irq_get_irq_data(irq));
+ intc_irqpin_irq_disable(d);
+}
+
+#define INTC_IRQ_SENSE_VALID 0x10
+#define INTC_IRQ_SENSE(x) (x + INTC_IRQ_SENSE_VALID)
+
+static unsigned char intc_irqpin_sense[IRQ_TYPE_SENSE_MASK + 1] = {
+ [IRQ_TYPE_EDGE_FALLING] = INTC_IRQ_SENSE(0x00),
+ [IRQ_TYPE_EDGE_RISING] = INTC_IRQ_SENSE(0x01),
+ [IRQ_TYPE_LEVEL_LOW] = INTC_IRQ_SENSE(0x02),
+ [IRQ_TYPE_LEVEL_HIGH] = INTC_IRQ_SENSE(0x03),
+ [IRQ_TYPE_EDGE_BOTH] = INTC_IRQ_SENSE(0x04),
+};
+
+static int intc_irqpin_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ unsigned char value = intc_irqpin_sense[type & IRQ_TYPE_SENSE_MASK];
+ struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+
+ if (!(value & INTC_IRQ_SENSE_VALID))
+ return -EINVAL;
+
+ return intc_irqpin_set_sense(p, irqd_to_hwirq(d),
+ value ^ INTC_IRQ_SENSE_VALID);
+}
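+
+/*
+ * Worked example of the sense encoding above: IRQ_TYPE_EDGE_RISING
+ * looks up 0x11 in intc_irqpin_sense[], passes the VALID check, and
+ * 0x11 ^ 0x10 = 0x01 is written into the ICR sense bitfield for the
+ * pin.
+ */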
+
+static irqreturn_t intc_irqpin_irq_handler(int irq, void *dev_id)
+{
+ struct intc_irqpin_irq *i = dev_id;
+ struct intc_irqpin_priv *p = i->p;
+ unsigned long bit;
+
+ intc_irqpin_dbg(i, "demux1");
+ bit = intc_irqpin_hwirq_mask(p, INTC_IRQPIN_REG_SOURCE, i->hw_irq);
+
+ if (intc_irqpin_read(p, INTC_IRQPIN_REG_SOURCE) & bit) {
+ intc_irqpin_write(p, INTC_IRQPIN_REG_SOURCE, ~bit);
+ intc_irqpin_dbg(i, "demux2");
+ generic_handle_irq(i->domain_irq);
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+static irqreturn_t intc_irqpin_shared_irq_handler(int irq, void *dev_id)
+{
+ struct intc_irqpin_priv *p = dev_id;
+ unsigned int reg_source = intc_irqpin_read(p, INTC_IRQPIN_REG_SOURCE);
+ irqreturn_t status = IRQ_NONE;
+ int k;
+
+ for (k = 0; k < 8; k++) {
+ if (reg_source & BIT(7 - k)) {
+ if (BIT(k) & p->shared_irq_mask)
+ continue;
+
+ status |= intc_irqpin_irq_handler(irq, &p->irq[k]);
+ }
+ }
+
+ return status;
+}
+
+static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct intc_irqpin_priv *p = h->host_data;
+
+ p->irq[hw].domain_irq = virq;
+ p->irq[hw].hw_irq = hw;
+
+ intc_irqpin_dbg(&p->irq[hw], "map");
+ irq_set_chip_data(virq, h->host_data);
+ irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
+ set_irq_flags(virq, IRQF_VALID); /* kill me now */
+ return 0;
+}
+
+static struct irq_domain_ops intc_irqpin_irq_domain_ops = {
+ .map = intc_irqpin_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static int intc_irqpin_probe(struct platform_device *pdev)
+{
+ struct renesas_intc_irqpin_config *pdata = pdev->dev.platform_data;
+ struct intc_irqpin_priv *p;
+ struct intc_irqpin_iomem *i;
+ struct resource *io[INTC_IRQPIN_REG_NR];
+ struct resource *irq;
+ struct irq_chip *irq_chip;
+ void (*enable_fn)(struct irq_data *d);
+ void (*disable_fn)(struct irq_data *d);
+ const char *name = dev_name(&pdev->dev);
+ int ref_irq;
+ int ret;
+ int k;
+
+ p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
+ if (!p) {
+ dev_err(&pdev->dev, "failed to allocate driver data\n");
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ /* deal with driver instance configuration */
+ if (pdata)
+ memcpy(&p->config, pdata, sizeof(*pdata));
+ if (!p->config.sense_bitfield_width)
+ p->config.sense_bitfield_width = 4; /* default to 4 bits */
+
+ p->pdev = pdev;
+ platform_set_drvdata(pdev, p);
+
+ /* get hold of mandatory IOMEM */
+ for (k = 0; k < INTC_IRQPIN_REG_NR; k++) {
+ io[k] = platform_get_resource(pdev, IORESOURCE_MEM, k);
+ if (!io[k]) {
+ dev_err(&pdev->dev, "not enough IOMEM resources\n");
+ ret = -EINVAL;
+ goto err0;
+ }
+ }
+
+ /* allow any number of IRQs between 1 and INTC_IRQPIN_MAX */
+ for (k = 0; k < INTC_IRQPIN_MAX; k++) {
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, k);
+ if (!irq)
+ break;
+
+ p->irq[k].p = p;
+ p->irq[k].requested_irq = irq->start;
+ }
+
+ p->number_of_irqs = k;
+ if (p->number_of_irqs < 1) {
+ dev_err(&pdev->dev, "not enough IRQ resources\n");
+ ret = -EINVAL;
+ goto err0;
+ }
+
+ /* ioremap IOMEM and setup read/write callbacks */
+ for (k = 0; k < INTC_IRQPIN_REG_NR; k++) {
+ i = &p->iomem[k];
+
+ switch (resource_size(io[k])) {
+ case 1:
+ i->width = 8;
+ i->read = intc_irqpin_read8;
+ i->write = intc_irqpin_write8;
+ break;
+ case 4:
+ i->width = 32;
+ i->read = intc_irqpin_read32;
+ i->write = intc_irqpin_write32;
+ break;
+ default:
+ dev_err(&pdev->dev, "IOMEM size mismatch\n");
+ ret = -EINVAL;
+ goto err0;
+ }
+
+ i->iomem = devm_ioremap_nocache(&pdev->dev, io[k]->start,
+ resource_size(io[k]));
+ if (!i->iomem) {
+ dev_err(&pdev->dev, "failed to remap IOMEM\n");
+ ret = -ENXIO;
+ goto err0;
+ }
+ }
+
+ /* mask all interrupts using priority */
+ for (k = 0; k < p->number_of_irqs; k++)
+ intc_irqpin_mask_unmask_prio(p, k, 1);
+
+ /* clear all pending interrupts */
+ intc_irqpin_write(p, INTC_IRQPIN_REG_SOURCE, 0x0);
+
+ /* scan for shared interrupt lines */
+ ref_irq = p->irq[0].requested_irq;
+ p->shared_irqs = true;
+ for (k = 1; k < p->number_of_irqs; k++) {
+ if (ref_irq != p->irq[k].requested_irq) {
+ p->shared_irqs = false;
+ break;
+ }
+ }
+
+ /* use more severe masking method if requested */
+ if (p->config.control_parent) {
+ enable_fn = intc_irqpin_irq_enable_force;
+ disable_fn = intc_irqpin_irq_disable_force;
+ } else if (!p->shared_irqs) {
+ enable_fn = intc_irqpin_irq_enable;
+ disable_fn = intc_irqpin_irq_disable;
+ } else {
+ enable_fn = intc_irqpin_shared_irq_enable;
+ disable_fn = intc_irqpin_shared_irq_disable;
+ }
+
+ irq_chip = &p->irq_chip;
+ irq_chip->name = name;
+ irq_chip->irq_mask = disable_fn;
+ irq_chip->irq_unmask = enable_fn;
+ irq_chip->irq_enable = enable_fn;
+ irq_chip->irq_disable = disable_fn;
+ irq_chip->irq_set_type = intc_irqpin_irq_set_type;
+ irq_chip->flags = IRQCHIP_SKIP_SET_WAKE;
+
+ p->irq_domain = irq_domain_add_simple(pdev->dev.of_node,
+ p->number_of_irqs,
+ p->config.irq_base,
+ &intc_irqpin_irq_domain_ops, p);
+ if (!p->irq_domain) {
+ ret = -ENXIO;
+ dev_err(&pdev->dev, "cannot initialize irq domain\n");
+ goto err0;
+ }
+
+ if (p->shared_irqs) {
+ /* request one shared interrupt */
+ if (devm_request_irq(&pdev->dev, p->irq[0].requested_irq,
+ intc_irqpin_shared_irq_handler,
+ IRQF_SHARED, name, p)) {
+ dev_err(&pdev->dev, "failed to request low IRQ\n");
+ ret = -ENOENT;
+ goto err1;
+ }
+ } else {
+ /* request interrupts one by one */
+ for (k = 0; k < p->number_of_irqs; k++) {
+ if (devm_request_irq(&pdev->dev,
+ p->irq[k].requested_irq,
+ intc_irqpin_irq_handler,
+ 0, name, &p->irq[k])) {
+ dev_err(&pdev->dev,
+ "failed to request low IRQ\n");
+ ret = -ENOENT;
+ goto err1;
+ }
+ }
+ }
+
+ /* unmask all interrupts on prio level */
+ for (k = 0; k < p->number_of_irqs; k++)
+ intc_irqpin_mask_unmask_prio(p, k, 0);
+
+ dev_info(&pdev->dev, "driving %d irqs\n", p->number_of_irqs);
+
+ /* warn in case of mismatch if irq base is specified */
+ if (p->config.irq_base) {
+ if (p->config.irq_base != p->irq[0].domain_irq)
+ dev_warn(&pdev->dev, "irq base mismatch (%d/%d)\n",
+ p->config.irq_base, p->irq[0].domain_irq);
+ }
+
+ return 0;
+
+err1:
+ irq_domain_remove(p->irq_domain);
+err0:
+ return ret;
+}
+
+static int intc_irqpin_remove(struct platform_device *pdev)
+{
+ struct intc_irqpin_priv *p = platform_get_drvdata(pdev);
+
+ irq_domain_remove(p->irq_domain);
+
+ return 0;
+}
+
+static const struct of_device_id intc_irqpin_dt_ids[] = {
+ { .compatible = "renesas,intc-irqpin", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, intc_irqpin_dt_ids);
+
+static struct platform_driver intc_irqpin_device_driver = {
+ .probe = intc_irqpin_probe,
+ .remove = intc_irqpin_remove,
+ .driver = {
+ .name = "renesas_intc_irqpin",
+ .of_match_table = intc_irqpin_dt_ids,
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init intc_irqpin_init(void)
+{
+ return platform_driver_register(&intc_irqpin_device_driver);
+}
+postcore_initcall(intc_irqpin_init);
+
+static void __exit intc_irqpin_exit(void)
+{
+ platform_driver_unregister(&intc_irqpin_device_driver);
+}
+module_exit(intc_irqpin_exit);
+
+MODULE_AUTHOR("Magnus Damm");
+MODULE_DESCRIPTION("Renesas INTC External IRQ Pin Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
new file mode 100644
index 0000000..927bff3
--- /dev/null
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -0,0 +1,307 @@
+/*
+ * Renesas IRQC Driver
+ *
+ * Copyright (C) 2013 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_data/irq-renesas-irqc.h>
+
+#define IRQC_IRQ_MAX 32 /* maximum 32 interrupts per driver instance */
+
+#define IRQC_REQ_STS 0x00
+#define IRQC_EN_STS 0x04
+#define IRQC_EN_SET 0x08
+#define IRQC_INT_CPU_BASE(n) (0x000 + ((n) * 0x10))
+#define DETECT_STATUS 0x100
+#define IRQC_CONFIG(n) (0x180 + ((n) * 0x04))
+
+struct irqc_irq {
+ int hw_irq;
+ int requested_irq;
+ int domain_irq;
+ struct irqc_priv *p;
+};
+
+struct irqc_priv {
+ void __iomem *iomem;
+ void __iomem *cpu_int_base;
+ struct irqc_irq irq[IRQC_IRQ_MAX];
+ struct renesas_irqc_config config;
+ unsigned int number_of_irqs;
+ struct platform_device *pdev;
+ struct irq_chip irq_chip;
+ struct irq_domain *irq_domain;
+};
+
+static void irqc_dbg(struct irqc_irq *i, char *str)
+{
+ dev_dbg(&i->p->pdev->dev, "%s (%d:%d:%d)\n",
+ str, i->requested_irq, i->hw_irq, i->domain_irq);
+}
+
+static void irqc_irq_enable(struct irq_data *d)
+{
+ struct irqc_priv *p = irq_data_get_irq_chip_data(d);
+ int hw_irq = irqd_to_hwirq(d);
+
+ irqc_dbg(&p->irq[hw_irq], "enable");
+ iowrite32(BIT(hw_irq), p->cpu_int_base + IRQC_EN_SET);
+}
+
+static void irqc_irq_disable(struct irq_data *d)
+{
+ struct irqc_priv *p = irq_data_get_irq_chip_data(d);
+ int hw_irq = irqd_to_hwirq(d);
+
+ irqc_dbg(&p->irq[hw_irq], "disable");
+ iowrite32(BIT(hw_irq), p->cpu_int_base + IRQC_EN_STS);
+}
+
+#define INTC_IRQ_SENSE_VALID 0x10
+#define INTC_IRQ_SENSE(x) (x + INTC_IRQ_SENSE_VALID)
+
+static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = {
+ [IRQ_TYPE_LEVEL_LOW] = INTC_IRQ_SENSE(0x01),
+ [IRQ_TYPE_LEVEL_HIGH] = INTC_IRQ_SENSE(0x02),
+ [IRQ_TYPE_EDGE_FALLING] = INTC_IRQ_SENSE(0x04), /* Synchronous */
+ [IRQ_TYPE_EDGE_RISING] = INTC_IRQ_SENSE(0x08), /* Synchronous */
+ [IRQ_TYPE_EDGE_BOTH] = INTC_IRQ_SENSE(0x0c), /* Synchronous */
+};
+
+static int irqc_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct irqc_priv *p = irq_data_get_irq_chip_data(d);
+ int hw_irq = irqd_to_hwirq(d);
+ unsigned char value = irqc_sense[type & IRQ_TYPE_SENSE_MASK];
+ unsigned long tmp;
+
+ irqc_dbg(&p->irq[hw_irq], "sense");
+
+ if (!(value & INTC_IRQ_SENSE_VALID))
+ return -EINVAL;
+
+ tmp = ioread32(p->iomem + IRQC_CONFIG(hw_irq));
+ tmp &= ~0x3f;
+ tmp |= value ^ INTC_IRQ_SENSE_VALID;
+ iowrite32(tmp, p->iomem + IRQC_CONFIG(hw_irq));
+ return 0;
+}
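+
+/*
+ * Worked example of the CONFIG update above: IRQ_TYPE_EDGE_BOTH
+ * resolves to 0x1c in irqc_sense[], and 0x1c ^ 0x10 = 0x0c replaces
+ * the low six bits of IRQC_CONFIG(hw_irq).
+ */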
+
+static irqreturn_t irqc_irq_handler(int irq, void *dev_id)
+{
+ struct irqc_irq *i = dev_id;
+ struct irqc_priv *p = i->p;
+ unsigned long bit = BIT(i->hw_irq);
+
+ irqc_dbg(i, "demux1");
+
+ if (ioread32(p->iomem + DETECT_STATUS) & bit) {
+ iowrite32(bit, p->iomem + DETECT_STATUS);
+ irqc_dbg(i, "demux2");
+ generic_handle_irq(i->domain_irq);
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+static int irqc_irq_domain_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct irqc_priv *p = h->host_data;
+
+ p->irq[hw].domain_irq = virq;
+ p->irq[hw].hw_irq = hw;
+
+ irqc_dbg(&p->irq[hw], "map");
+ irq_set_chip_data(virq, h->host_data);
+ irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
+ set_irq_flags(virq, IRQF_VALID); /* kill me now */
+ return 0;
+}
+
+static struct irq_domain_ops irqc_irq_domain_ops = {
+ .map = irqc_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static int irqc_probe(struct platform_device *pdev)
+{
+ struct renesas_irqc_config *pdata = pdev->dev.platform_data;
+ struct irqc_priv *p;
+ struct resource *io;
+ struct resource *irq;
+ struct irq_chip *irq_chip;
+ const char *name = dev_name(&pdev->dev);
+ int ret;
+ int k;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p) {
+ dev_err(&pdev->dev, "failed to allocate driver data\n");
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ /* deal with driver instance configuration */
+ if (pdata)
+ memcpy(&p->config, pdata, sizeof(*pdata));
+
+ p->pdev = pdev;
+ platform_set_drvdata(pdev, p);
+
+ /* get hold of mandatory IOMEM */
+ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!io) {
+ dev_err(&pdev->dev, "not enough IOMEM resources\n");
+ ret = -EINVAL;
+ goto err1;
+ }
+
+ /* allow any number of IRQs between 1 and IRQC_IRQ_MAX */
+ for (k = 0; k < IRQC_IRQ_MAX; k++) {
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, k);
+ if (!irq)
+ break;
+
+ p->irq[k].p = p;
+ p->irq[k].requested_irq = irq->start;
+ }
+
+ p->number_of_irqs = k;
+ if (p->number_of_irqs < 1) {
+ dev_err(&pdev->dev, "not enough IRQ resources\n");
+ ret = -EINVAL;
+ goto err1;
+ }
+
+ /* ioremap IOMEM and setup read/write callbacks */
+ p->iomem = ioremap_nocache(io->start, resource_size(io));
+ if (!p->iomem) {
+ dev_err(&pdev->dev, "failed to remap IOMEM\n");
+ ret = -ENXIO;
+ goto err2;
+ }
+
+ p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */
+
+ irq_chip = &p->irq_chip;
+ irq_chip->name = name;
+ irq_chip->irq_mask = irqc_irq_disable;
+ irq_chip->irq_unmask = irqc_irq_enable;
+ irq_chip->irq_enable = irqc_irq_enable;
+ irq_chip->irq_disable = irqc_irq_disable;
+ irq_chip->irq_set_type = irqc_irq_set_type;
+ irq_chip->flags = IRQCHIP_SKIP_SET_WAKE;
+
+ p->irq_domain = irq_domain_add_simple(pdev->dev.of_node,
+ p->number_of_irqs,
+ p->config.irq_base,
+ &irqc_irq_domain_ops, p);
+ if (!p->irq_domain) {
+ ret = -ENXIO;
+ dev_err(&pdev->dev, "cannot initialize irq domain\n");
+ goto err2;
+ }
+
+ /* request interrupts one by one */
+ for (k = 0; k < p->number_of_irqs; k++) {
+ if (request_irq(p->irq[k].requested_irq, irqc_irq_handler,
+ 0, name, &p->irq[k])) {
+ dev_err(&pdev->dev, "failed to request IRQ\n");
+ ret = -ENOENT;
+ goto err3;
+ }
+ }
+
+ dev_info(&pdev->dev, "driving %d irqs\n", p->number_of_irqs);
+
+ /* warn in case of mismatch if irq base is specified */
+ if (p->config.irq_base) {
+ if (p->config.irq_base != p->irq[0].domain_irq)
+ dev_warn(&pdev->dev, "irq base mismatch (%d/%d)\n",
+ p->config.irq_base, p->irq[0].domain_irq);
+ }
+
+ return 0;
+err3:
+ for (; k > 0; k--)
+ free_irq(p->irq[k - 1].requested_irq, &p->irq[k - 1]);
+
+ irq_domain_remove(p->irq_domain);
+err2:
+ iounmap(p->iomem);
+err1:
+ kfree(p);
+err0:
+ return ret;
+}
+
+static int irqc_remove(struct platform_device *pdev)
+{
+ struct irqc_priv *p = platform_get_drvdata(pdev);
+ int k;
+
+ for (k = 0; k < p->number_of_irqs; k++)
+ free_irq(p->irq[k].requested_irq, &p->irq[k]);
+
+ irq_domain_remove(p->irq_domain);
+ iounmap(p->iomem);
+ kfree(p);
+ return 0;
+}
+
+static const struct of_device_id irqc_dt_ids[] = {
+ { .compatible = "renesas,irqc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, irqc_dt_ids);
+
+static struct platform_driver irqc_device_driver = {
+ .probe = irqc_probe,
+ .remove = irqc_remove,
+ .driver = {
+ .name = "renesas_irqc",
+ .of_match_table = irqc_dt_ids,
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init irqc_init(void)
+{
+ return platform_driver_register(&irqc_device_driver);
+}
+postcore_initcall(irqc_init);
+
+static void __exit irqc_exit(void)
+{
+ platform_driver_unregister(&irqc_device_driver);
+}
+module_exit(irqc_exit);
+
+MODULE_AUTHOR("Magnus Damm");
+MODULE_DESCRIPTION("Renesas IRQC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c
new file mode 100644
index 0000000..bbcc944
--- /dev/null
+++ b/drivers/irqchip/irq-s3c24xx.c
@@ -0,0 +1,1356 @@
+/*
+ * S3C24XX IRQ handling
+ *
+ * Copyright (c) 2003-2004 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * Copyright (c) 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#include <mach/regs-irq.h>
+#include <mach/regs-gpio.h>
+
+#include <plat/cpu.h>
+#include <plat/regs-irqtype.h>
+#include <plat/pm.h>
+
+#include "irqchip.h"
+
+#define S3C_IRQTYPE_NONE 0
+#define S3C_IRQTYPE_EINT 1
+#define S3C_IRQTYPE_EDGE 2
+#define S3C_IRQTYPE_LEVEL 3
+
+struct s3c_irq_data {
+ unsigned int type;
+ unsigned long offset;
+ unsigned long parent_irq;
+
+ /* data gets filled during init */
+ struct s3c_irq_intc *intc;
+ unsigned long sub_bits;
+ struct s3c_irq_intc *sub_intc;
+};
+
+/*
+ * Structure holding the controller data
+ * @reg_pending register holding pending irqs
+ * @reg_intpnd special register intpnd in main intc
+ * @reg_mask mask register
+ * @domain irq_domain of the controller
+ * @parent parent controller for ext and sub irqs
+ * @irqs irq-data, always s3c_irq_data[32]
+ */
+struct s3c_irq_intc {
+ void __iomem *reg_pending;
+ void __iomem *reg_intpnd;
+ void __iomem *reg_mask;
+ struct irq_domain *domain;
+ struct s3c_irq_intc *parent;
+ struct s3c_irq_data *irqs;
+};
+
+/*
+ * Array holding pointers to the global controller structs
+ * [0] ... main_intc
+ * [1] ... sub_intc
+ * [2] ... main_intc2 on s3c2416
+ */
+static struct s3c_irq_intc *s3c_intc[3];
+
+static void s3c_irq_mask(struct irq_data *data)
+{
+ struct s3c_irq_data *irq_data = irq_data_get_irq_chip_data(data);
+ struct s3c_irq_intc *intc = irq_data->intc;
+ struct s3c_irq_intc *parent_intc = intc->parent;
+ struct s3c_irq_data *parent_data;
+ unsigned long mask;
+ unsigned int irqno;
+
+ mask = __raw_readl(intc->reg_mask);
+ mask |= (1UL << irq_data->offset);
+ __raw_writel(mask, intc->reg_mask);
+
+ if (parent_intc) {
+ parent_data = &parent_intc->irqs[irq_data->parent_irq];
+
+ /* check to see if we need to mask the parent IRQ.
+ * The parent_irq is always in main_intc, so the hwirq
+ * for find_mapping does not need an offset in any case.
+ */
+ if ((mask & parent_data->sub_bits) == parent_data->sub_bits) {
+ irqno = irq_find_mapping(parent_intc->domain,
+ irq_data->parent_irq);
+ s3c_irq_mask(irq_get_irq_data(irqno));
+ }
+ }
+}
+
+static void s3c_irq_unmask(struct irq_data *data)
+{
+ struct s3c_irq_data *irq_data = irq_data_get_irq_chip_data(data);
+ struct s3c_irq_intc *intc = irq_data->intc;
+ struct s3c_irq_intc *parent_intc = intc->parent;
+ unsigned long mask;
+ unsigned int irqno;
+
+ mask = __raw_readl(intc->reg_mask);
+ mask &= ~(1UL << irq_data->offset);
+ __raw_writel(mask, intc->reg_mask);
+
+ if (parent_intc) {
+ irqno = irq_find_mapping(parent_intc->domain,
+ irq_data->parent_irq);
+ s3c_irq_unmask(irq_get_irq_data(irqno));
+ }
+}
+
+static inline void s3c_irq_ack(struct irq_data *data)
+{
+ struct s3c_irq_data *irq_data = irq_data_get_irq_chip_data(data);
+ struct s3c_irq_intc *intc = irq_data->intc;
+ unsigned long bitval = 1UL << irq_data->offset;
+
+ __raw_writel(bitval, intc->reg_pending);
+ if (intc->reg_intpnd)
+ __raw_writel(bitval, intc->reg_intpnd);
+}
+
+static int s3c_irq_type(struct irq_data *data, unsigned int type)
+{
+ switch (type) {
+ case IRQ_TYPE_NONE:
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_EDGE_BOTH:
+ irq_set_handler(data->irq, handle_edge_irq);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ case IRQ_TYPE_LEVEL_HIGH:
+ irq_set_handler(data->irq, handle_level_irq);
+ break;
+ default:
+ pr_err("No such irq type %d\n", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int s3c_irqext_type_set(void __iomem *gpcon_reg,
+ void __iomem *extint_reg,
+ unsigned long gpcon_offset,
+ unsigned long extint_offset,
+ unsigned int type)
+{
+ unsigned long newvalue = 0, value;
+
+ /* Set the GPIO to external interrupt mode */
+ value = __raw_readl(gpcon_reg);
+ value = (value & ~(3 << gpcon_offset)) | (0x02 << gpcon_offset);
+ __raw_writel(value, gpcon_reg);
+
+ /* Set the external interrupt to pointed trigger type */
+ switch (type)
+ {
+ case IRQ_TYPE_NONE:
+ pr_warn("No edge setting!\n");
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ newvalue = S3C2410_EXTINT_RISEEDGE;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ newvalue = S3C2410_EXTINT_FALLEDGE;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ newvalue = S3C2410_EXTINT_BOTHEDGE;
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ newvalue = S3C2410_EXTINT_LOWLEV;
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ newvalue = S3C2410_EXTINT_HILEV;
+ break;
+
+ default:
+ pr_err("No such irq type %d\n", type);
+ return -EINVAL;
+ }
+
+ value = __raw_readl(extint_reg);
+ value = (value & ~(7 << extint_offset)) | (newvalue << extint_offset);
+ __raw_writel(value, extint_reg);
+
+ return 0;
+}
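+
+/*
+ * Worked example: for EINT4 the callers below pass GPFCON with
+ * gpcon_offset 8 and EXTINT0 with extint_offset 16, so bits 9:8 of
+ * GPFCON are set to 0x2 (EINT function) and the trigger value lands
+ * in bits 18:16 of EXTINT0.
+ */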
+
+static int s3c_irqext_type(struct irq_data *data, unsigned int type)
+{
+ void __iomem *extint_reg;
+ void __iomem *gpcon_reg;
+ unsigned long gpcon_offset, extint_offset;
+
+ if ((data->hwirq >= 4) && (data->hwirq <= 7)) {
+ gpcon_reg = S3C2410_GPFCON;
+ extint_reg = S3C24XX_EXTINT0;
+ gpcon_offset = (data->hwirq) * 2;
+ extint_offset = (data->hwirq) * 4;
+ } else if ((data->hwirq >= 8) && (data->hwirq <= 15)) {
+ gpcon_reg = S3C2410_GPGCON;
+ extint_reg = S3C24XX_EXTINT1;
+ gpcon_offset = (data->hwirq - 8) * 2;
+ extint_offset = (data->hwirq - 8) * 4;
+ } else if ((data->hwirq >= 16) && (data->hwirq <= 23)) {
+ gpcon_reg = S3C2410_GPGCON;
+ extint_reg = S3C24XX_EXTINT2;
+ gpcon_offset = (data->hwirq - 8) * 2;
+ extint_offset = (data->hwirq - 16) * 4;
+ } else {
+ return -EINVAL;
+ }
+
+ return s3c_irqext_type_set(gpcon_reg, extint_reg, gpcon_offset,
+ extint_offset, type);
+}
+
+static int s3c_irqext0_type(struct irq_data *data, unsigned int type)
+{
+ void __iomem *extint_reg;
+ void __iomem *gpcon_reg;
+ unsigned long gpcon_offset, extint_offset;
+
+ if ((data->hwirq >= 0) && (data->hwirq <= 3)) {
+ gpcon_reg = S3C2410_GPFCON;
+ extint_reg = S3C24XX_EXTINT0;
+ gpcon_offset = (data->hwirq) * 2;
+ extint_offset = (data->hwirq) * 4;
+ } else {
+ return -EINVAL;
+ }
+
+ return s3c_irqext_type_set(gpcon_reg, extint_reg, gpcon_offset,
+ extint_offset, type);
+}
+
+static struct irq_chip s3c_irq_chip = {
+ .name = "s3c",
+ .irq_ack = s3c_irq_ack,
+ .irq_mask = s3c_irq_mask,
+ .irq_unmask = s3c_irq_unmask,
+ .irq_set_type = s3c_irq_type,
+ .irq_set_wake = s3c_irq_wake
+};
+
+static struct irq_chip s3c_irq_level_chip = {
+ .name = "s3c-level",
+ .irq_mask = s3c_irq_mask,
+ .irq_unmask = s3c_irq_unmask,
+ .irq_ack = s3c_irq_ack,
+ .irq_set_type = s3c_irq_type,
+};
+
+static struct irq_chip s3c_irqext_chip = {
+ .name = "s3c-ext",
+ .irq_mask = s3c_irq_mask,
+ .irq_unmask = s3c_irq_unmask,
+ .irq_ack = s3c_irq_ack,
+ .irq_set_type = s3c_irqext_type,
+ .irq_set_wake = s3c_irqext_wake
+};
+
+static struct irq_chip s3c_irq_eint0t4 = {
+ .name = "s3c-ext0",
+ .irq_ack = s3c_irq_ack,
+ .irq_mask = s3c_irq_mask,
+ .irq_unmask = s3c_irq_unmask,
+ .irq_set_wake = s3c_irq_wake,
+ .irq_set_type = s3c_irqext0_type,
+};
+
+static void s3c_irq_demux(unsigned int irq, struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct s3c_irq_data *irq_data = irq_desc_get_chip_data(desc);
+ struct s3c_irq_intc *intc = irq_data->intc;
+ struct s3c_irq_intc *sub_intc = irq_data->sub_intc;
+ unsigned long src;
+ unsigned long msk;
+ unsigned int n;
+ unsigned int offset;
+
+ /* we're using individual domains for the non-dt case
+ * and one big domain for the dt case where the subintc
+ * starts at hwirq number 32.
+ */
+ offset = (intc->domain->of_node) ? 32 : 0;
+
+ chained_irq_enter(chip, desc);
+
+ src = __raw_readl(sub_intc->reg_pending);
+ msk = __raw_readl(sub_intc->reg_mask);
+
+ src &= ~msk;
+ src &= irq_data->sub_bits;
+
+ while (src) {
+ n = __ffs(src);
+ src &= ~(1 << n);
+ irq = irq_find_mapping(sub_intc->domain, offset + n);
+ generic_handle_irq(irq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static inline int s3c24xx_handle_intc(struct s3c_irq_intc *intc,
+ struct pt_regs *regs, int intc_offset)
+{
+ int pnd;
+ int offset;
+ int irq;
+
+ pnd = __raw_readl(intc->reg_intpnd);
+ if (!pnd)
+ return false;
+
+ /* non-dt machines use individual domains */
+ if (!intc->domain->of_node)
+ intc_offset = 0;
+
+ /* We have a problem that the INTOFFSET register does not always
+ * show one interrupt. Occasionally we get two interrupts through
+ * the prioritiser, and this causes the INTOFFSET register to show
+ * what looks like the logical-or of the two interrupt numbers.
+ *
+ * Thanks to Klaus, Shannon, et al for helping to debug this problem
+ */
+ offset = __raw_readl(intc->reg_intpnd + 4);
+
+ /* Find the bit manually, when the offset is wrong.
+ * The pending register only ever contains the one bit of the next
+ * interrupt to handle.
+ */
+ if (!(pnd & (1 << offset)))
+ offset = __ffs(pnd);
+
+ irq = irq_find_mapping(intc->domain, intc_offset + offset);
+ handle_IRQ(irq, regs);
+ return true;
+}
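+
+/*
+ * Worked example of the INTOFFSET fallback above: with INTPND = 0x12
+ * (bits 1 and 4 pending) and a stale INTOFFSET of 5, bit 5 is not set
+ * in the pending word, so __ffs(0x12) = 1 is used as the offset
+ * instead.
+ */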
+
+asmlinkage void __exception_irq_entry s3c24xx_handle_irq(struct pt_regs *regs)
+{
+ do {
+ if (likely(s3c_intc[0]))
+ if (s3c24xx_handle_intc(s3c_intc[0], regs, 0))
+ continue;
+
+ if (s3c_intc[2])
+ if (s3c24xx_handle_intc(s3c_intc[2], regs, 64))
+ continue;
+
+ break;
+ } while (1);
+}
+
+#ifdef CONFIG_FIQ
+/**
+ * s3c24xx_set_fiq - set the FIQ routing
+ * @irq: IRQ number to route to FIQ on processor.
+ * @on: Whether to route @irq to the FIQ, or to remove the FIQ routing.
+ *
+ * Change the state of the IRQ to FIQ routing depending on @irq and @on. If
+ * @on is true, the @irq is checked to see if it can be routed and the
+ * interrupt controller updated to route the IRQ. If @on is false, the FIQ
+ * routing is cleared, regardless of which @irq is specified.
+ */
+int s3c24xx_set_fiq(unsigned int irq, bool on)
+{
+ u32 intmod;
+ unsigned offs;
+
+ if (on) {
+ offs = irq - FIQ_START;
+ if (offs > 31)
+ return -EINVAL;
+
+ intmod = 1 << offs;
+ } else {
+ intmod = 0;
+ }
+
+ __raw_writel(intmod, S3C2410_INTMOD);
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(s3c24xx_set_fiq);
+#endif
+
+static int s3c24xx_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct s3c_irq_intc *intc = h->host_data;
+ struct s3c_irq_data *irq_data = &intc->irqs[hw];
+ struct s3c_irq_intc *parent_intc;
+ struct s3c_irq_data *parent_irq_data;
+ unsigned int irqno;
+
+ /* attach controller pointer to irq_data */
+ irq_data->intc = intc;
+ irq_data->offset = hw;
+
+ parent_intc = intc->parent;
+
+ /* set handler and flags */
+ switch (irq_data->type) {
+ case S3C_IRQTYPE_NONE:
+ return 0;
+ case S3C_IRQTYPE_EINT:
+ /* On the S3C2412, the EINT0to3 have a parent irq
+ * but need the s3c_irq_eint0t4 chip
+ */
+ if (parent_intc && (!soc_is_s3c2412() || hw >= 4))
+ irq_set_chip_and_handler(virq, &s3c_irqext_chip,
+ handle_edge_irq);
+ else
+ irq_set_chip_and_handler(virq, &s3c_irq_eint0t4,
+ handle_edge_irq);
+ break;
+ case S3C_IRQTYPE_EDGE:
+ if (parent_intc || intc->reg_pending == S3C2416_SRCPND2)
+ irq_set_chip_and_handler(virq, &s3c_irq_level_chip,
+ handle_edge_irq);
+ else
+ irq_set_chip_and_handler(virq, &s3c_irq_chip,
+ handle_edge_irq);
+ break;
+ case S3C_IRQTYPE_LEVEL:
+ if (parent_intc)
+ irq_set_chip_and_handler(virq, &s3c_irq_level_chip,
+ handle_level_irq);
+ else
+ irq_set_chip_and_handler(virq, &s3c_irq_chip,
+ handle_level_irq);
+ break;
+ default:
+ pr_err("irq-s3c24xx: unsupported irqtype %d\n", irq_data->type);
+ return -EINVAL;
+ }
+
+ irq_set_chip_data(virq, irq_data);
+
+ set_irq_flags(virq, IRQF_VALID);
+
+ if (parent_intc && irq_data->type != S3C_IRQTYPE_NONE) {
+ if (irq_data->parent_irq > 31) {
+ pr_err("irq-s3c24xx: parent irq %lu is out of range\n",
+ irq_data->parent_irq);
+ goto err;
+ }
+
+ parent_irq_data = &parent_intc->irqs[irq_data->parent_irq];
+ parent_irq_data->sub_intc = intc;
+ parent_irq_data->sub_bits |= (1UL << hw);
+
+ /* attach the demuxer to the parent irq */
+ irqno = irq_find_mapping(parent_intc->domain,
+ irq_data->parent_irq);
+ if (!irqno) {
+ pr_err("irq-s3c24xx: could not find mapping for parent irq %lu\n",
+ irq_data->parent_irq);
+ goto err;
+ }
+ irq_set_chained_handler(irqno, s3c_irq_demux);
+ }
+
+ return 0;
+
+err:
+ set_irq_flags(virq, 0);
+
+ /* the only possible error here is bad mapping data */
+ return -EINVAL;
+}
+
+static struct irq_domain_ops s3c24xx_irq_ops = {
+ .map = s3c24xx_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static void s3c24xx_clear_intc(struct s3c_irq_intc *intc)
+{
+ void __iomem *reg_source;
+ unsigned long pend;
+ unsigned long last;
+ int i;
+
+ /* if intpnd is set, read the next pending irq from there */
+ reg_source = intc->reg_intpnd ? intc->reg_intpnd : intc->reg_pending;
+
+ last = 0;
+ for (i = 0; i < 4; i++) {
+ pend = __raw_readl(reg_source);
+
+ if (pend == 0 || pend == last)
+ break;
+
+ __raw_writel(pend, intc->reg_pending);
+ if (intc->reg_intpnd)
+ __raw_writel(pend, intc->reg_intpnd);
+
+ pr_info("irq: clearing pending status %08x\n", (int)pend);
+ last = pend;
+ }
+}
+
+static struct s3c_irq_intc * __init s3c24xx_init_intc(struct device_node *np,
+ struct s3c_irq_data *irq_data,
+ struct s3c_irq_intc *parent,
+ unsigned long address)
+{
+ struct s3c_irq_intc *intc;
+ void __iomem *base = (void *)0xf6000000; /* static mapping */
+ int irq_num;
+ int irq_start;
+ int ret;
+
+ intc = kzalloc(sizeof(struct s3c_irq_intc), GFP_KERNEL);
+ if (!intc)
+ return ERR_PTR(-ENOMEM);
+
+ intc->irqs = irq_data;
+
+ if (parent)
+ intc->parent = parent;
+
+ /* select the correct data for the controller.
+ * Need to hard code the irq num start and offset
+ * to preserve the static mapping for now
+ */
+ switch (address) {
+ case 0x4a000000:
+ pr_debug("irq: found main intc\n");
+ intc->reg_pending = base;
+ intc->reg_mask = base + 0x08;
+ intc->reg_intpnd = base + 0x10;
+ irq_num = 32;
+ irq_start = S3C2410_IRQ(0);
+ break;
+ case 0x4a000018:
+ pr_debug("irq: found subintc\n");
+ intc->reg_pending = base + 0x18;
+ intc->reg_mask = base + 0x1c;
+ irq_num = 29;
+ irq_start = S3C2410_IRQSUB(0);
+ break;
+ case 0x4a000040:
+ pr_debug("irq: found intc2\n");
+ intc->reg_pending = base + 0x40;
+ intc->reg_mask = base + 0x48;
+ intc->reg_intpnd = base + 0x50;
+ irq_num = 8;
+ irq_start = S3C2416_IRQ(0);
+ break;
+ case 0x560000a4:
+ pr_debug("irq: found eintc\n");
+ base = (void *)0xfd000000;
+
+ intc->reg_mask = base + 0xa4;
+ intc->reg_pending = base + 0xa8;
+ irq_num = 24;
+ irq_start = S3C2410_IRQ(32);
+ break;
+ default:
+ pr_err("irq: unsupported controller address\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* now that all the data is complete, init the irq-domain */
+ s3c24xx_clear_intc(intc);
+ intc->domain = irq_domain_add_legacy(np, irq_num, irq_start,
+ 0, &s3c24xx_irq_ops,
+ intc);
+ if (!intc->domain) {
+ pr_err("irq: could not create irq-domain\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ set_handle_irq(s3c24xx_handle_irq);
+
+ return intc;
+
+err:
+ kfree(intc);
+ return ERR_PTR(ret);
+}
+
+static struct s3c_irq_data init_eint[32] = {
+ { .type = S3C_IRQTYPE_NONE, }, /* reserved */
+ { .type = S3C_IRQTYPE_NONE, }, /* reserved */
+ { .type = S3C_IRQTYPE_NONE, }, /* reserved */
+ { .type = S3C_IRQTYPE_NONE, }, /* reserved */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT4 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT5 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT6 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT7 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT8 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT9 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT10 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT11 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT12 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT13 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT14 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT15 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT16 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT17 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT18 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT19 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT20 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT21 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT22 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT23 */
+};
+
+#ifdef CONFIG_CPU_S3C2410
+static struct s3c_irq_data init_s3c2410base[32] = {
+ { .type = S3C_IRQTYPE_EINT, }, /* EINT0 */
+ { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */
+ { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */
+ { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */
+ { .type = S3C_IRQTYPE_NONE, }, /* reserved */
+ { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TICK */
+ { .type = S3C_IRQTYPE_EDGE, }, /* WDT */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* LCD */
+ { .type = S3C_IRQTYPE_EDGE, }, /* DMA0 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* DMA1 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* DMA2 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* DMA3 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* SDI */
+ { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */
+ { .type = S3C_IRQTYPE_NONE, }, /* reserved */
+ { .type = S3C_IRQTYPE_EDGE, }, /* USBD */
+ { .type = S3C_IRQTYPE_EDGE, }, /* USBH */
+ { .type = S3C_IRQTYPE_EDGE, }, /* IIC */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* RTC */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */
+};
+
+static struct s3c_irq_data init_s3c2410subint[32] = {
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */
+ { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */
+ { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */
+};
+
+void __init s3c2410_init_irq(void)
+{
+#ifdef CONFIG_FIQ
+ init_FIQ(FIQ_START);
+#endif
+
+ s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2410base[0], NULL,
+ 0x4a000000);
+ if (IS_ERR(s3c_intc[0])) {
+ pr_err("irq: could not create main interrupt controller\n");
+ return;
+ }
+
+ s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2410subint[0],
+ s3c_intc[0], 0x4a000018);
+ s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4);
+}
+#endif
+
+#ifdef CONFIG_CPU_S3C2412
+static struct s3c_irq_data init_s3c2412base[32] = {
+ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT0 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT1 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT2 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT3 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */
+ { .type = S3C_IRQTYPE_NONE, }, /* reserved */
+ { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TICK */
+ { .type = S3C_IRQTYPE_EDGE, }, /* WDT */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* LCD */
+ { .type = S3C_IRQTYPE_EDGE, }, /* DMA0 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* DMA1 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* DMA2 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* DMA3 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* SDI/CF */
+ { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */
+ { .type = S3C_IRQTYPE_NONE, }, /* reserved */
+ { .type = S3C_IRQTYPE_EDGE, }, /* USBD */
+ { .type = S3C_IRQTYPE_EDGE, }, /* USBH */
+ { .type = S3C_IRQTYPE_EDGE, }, /* IIC */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* RTC */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */
+};
+
+static struct s3c_irq_data init_s3c2412eint[32] = {
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 0 }, /* EINT0 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 1 }, /* EINT1 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 2 }, /* EINT2 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 3 }, /* EINT3 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT4 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT5 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT6 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT7 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT8 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT9 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT10 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT11 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT12 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT13 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT14 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT15 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT16 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT17 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT18 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT19 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT20 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT21 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT22 */
+ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT23 */
+};
+
+static struct s3c_irq_data init_s3c2412subint[32] = {
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */
+ { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */
+ { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */
+ { .type = S3C_IRQTYPE_NONE, },
+ { .type = S3C_IRQTYPE_NONE, },
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 21 }, /* SDI */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 21 }, /* CF */
+};
+
+void __init s3c2412_init_irq(void)
+{
+ pr_info("S3C2412: IRQ Support\n");
+
+#ifdef CONFIG_FIQ
+ init_FIQ(FIQ_START);
+#endif
+
+ s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2412base[0], NULL,
+ 0x4a000000);
+ if (IS_ERR(s3c_intc[0])) {
+ pr_err("irq: could not create main interrupt controller\n");
+ return;
+ }
+
+ s3c24xx_init_intc(NULL, &init_s3c2412eint[0], s3c_intc[0], 0x560000a4);
+ s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2412subint[0],
+ s3c_intc[0], 0x4a000018);
+}
+#endif
+
+#ifdef CONFIG_CPU_S3C2416
+static struct s3c_irq_data init_s3c2416base[32] = {
+ { .type = S3C_IRQTYPE_EINT, }, /* EINT0 */
+ { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */
+ { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */
+ { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */
+ { .type = S3C_IRQTYPE_NONE, }, /* reserved */
+ { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TICK */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* WDT/AC97 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* LCD */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* DMA */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* UART3 */
+ { .type = S3C_IRQTYPE_NONE, }, /* reserved */
+ { .type = S3C_IRQTYPE_EDGE, }, /* SDI1 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* SDI0 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* NAND */
+ { .type = S3C_IRQTYPE_EDGE, }, /* USBD */
+ { .type = S3C_IRQTYPE_EDGE, }, /* USBH */
+ { .type = S3C_IRQTYPE_EDGE, }, /* IIC */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */
+ { .type = S3C_IRQTYPE_NONE, },
+ { .type = S3C_IRQTYPE_EDGE, }, /* RTC */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */
+};
+
+static struct s3c_irq_data init_s3c2416subint[32] = {
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */
+ { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */
+ { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */
+ { .type = S3C_IRQTYPE_NONE }, /* reserved */
+ { .type = S3C_IRQTYPE_NONE }, /* reserved */
+ { .type = S3C_IRQTYPE_NONE }, /* reserved */
+ { .type = S3C_IRQTYPE_NONE }, /* reserved */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD2 */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD3 */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD4 */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA0 */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA1 */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA2 */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA3 */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA4 */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA5 */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-RX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-TX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-ERR */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* WDT */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* AC97 */
+};
+
+static struct s3c_irq_data init_s3c2416_second[32] = {
+ { .type = S3C_IRQTYPE_EDGE }, /* 2D */
+ { .type = S3C_IRQTYPE_NONE }, /* reserved */
+ { .type = S3C_IRQTYPE_NONE }, /* reserved */
+ { .type = S3C_IRQTYPE_NONE }, /* reserved */
+ { .type = S3C_IRQTYPE_EDGE }, /* PCM0 */
+ { .type = S3C_IRQTYPE_NONE }, /* reserved */
+ { .type = S3C_IRQTYPE_EDGE }, /* I2S0 */
+};
+
+void __init s3c2416_init_irq(void)
+{
+ pr_info("S3C2416: IRQ Support\n");
+
+#ifdef CONFIG_FIQ
+ init_FIQ(FIQ_START);
+#endif
+
+ s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2416base[0], NULL,
+ 0x4a000000);
+ if (IS_ERR(s3c_intc[0])) {
+ pr_err("irq: could not create main interrupt controller\n");
+ return;
+ }
+
+ s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4);
+ s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2416subint[0],
+ s3c_intc[0], 0x4a000018);
+
+ s3c_intc[2] = s3c24xx_init_intc(NULL, &init_s3c2416_second[0],
+ NULL, 0x4a000040);
+}
+
+#endif
+
+#ifdef CONFIG_CPU_S3C2440
+static struct s3c_irq_data init_s3c2440base[32] = {
+ { .type = S3C_IRQTYPE_EINT, }, /* EINT0 */
+ { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */
+ { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */
+ { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* CAM */
+ { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TICK */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* WDT/AC97 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* LCD */
+ { .type = S3C_IRQTYPE_EDGE, }, /* DMA0 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* DMA1 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* DMA2 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* DMA3 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* SDI */
+ { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* NFCON */
+ { .type = S3C_IRQTYPE_EDGE, }, /* USBD */
+ { .type = S3C_IRQTYPE_EDGE, }, /* USBH */
+ { .type = S3C_IRQTYPE_EDGE, }, /* IIC */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* RTC */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */
+};
+
+static struct s3c_irq_data init_s3c2440subint[32] = {
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */
+ { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */
+ { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_C */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_P */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* WDT */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* AC97 */
+};
+
+void __init s3c2440_init_irq(void)
+{
+ pr_info("S3C2440: IRQ Support\n");
+
+#ifdef CONFIG_FIQ
+ init_FIQ(FIQ_START);
+#endif
+
+ s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2440base[0], NULL,
+ 0x4a000000);
+ if (IS_ERR(s3c_intc[0])) {
+ pr_err("irq: could not create main interrupt controller\n");
+ return;
+ }
+
+ s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4);
+ s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2440subint[0],
+ s3c_intc[0], 0x4a000018);
+}
+#endif
+
+#ifdef CONFIG_CPU_S3C2442
+static struct s3c_irq_data init_s3c2442base[32] = {
+ { .type = S3C_IRQTYPE_EINT, }, /* EINT0 */
+ { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */
+ { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */
+ { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* CAM */
+ { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TICK */
+ { .type = S3C_IRQTYPE_EDGE, }, /* WDT */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* LCD */
+ { .type = S3C_IRQTYPE_EDGE, }, /* DMA0 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* DMA1 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* DMA2 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* DMA3 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* SDI */
+ { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* NFCON */
+ { .type = S3C_IRQTYPE_EDGE, }, /* USBD */
+ { .type = S3C_IRQTYPE_EDGE, }, /* USBH */
+ { .type = S3C_IRQTYPE_EDGE, }, /* IIC */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* RTC */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */
+};
+
+static struct s3c_irq_data init_s3c2442subint[32] = {
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */
+ { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */
+ { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_C */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_P */
+};
+
+void __init s3c2442_init_irq(void)
+{
+ pr_info("S3C2442: IRQ Support\n");
+
+#ifdef CONFIG_FIQ
+ init_FIQ(FIQ_START);
+#endif
+
+ s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2442base[0], NULL,
+ 0x4a000000);
+ if (IS_ERR(s3c_intc[0])) {
+ pr_err("irq: could not create main interrupt controller\n");
+ return;
+ }
+
+ s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4);
+ s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2442subint[0],
+ s3c_intc[0], 0x4a000018);
+}
+#endif
+
+#ifdef CONFIG_CPU_S3C2443
+static struct s3c_irq_data init_s3c2443base[32] = {
+ { .type = S3C_IRQTYPE_EINT, }, /* EINT0 */
+ { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */
+ { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */
+ { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* CAM */
+ { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TICK */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* WDT/AC97 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* LCD */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* DMA */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* UART3 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* CFON */
+ { .type = S3C_IRQTYPE_EDGE, }, /* SDI1 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* SDI0 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* NAND */
+ { .type = S3C_IRQTYPE_EDGE, }, /* USBD */
+ { .type = S3C_IRQTYPE_EDGE, }, /* USBH */
+ { .type = S3C_IRQTYPE_EDGE, }, /* IIC */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */
+ { .type = S3C_IRQTYPE_EDGE, }, /* RTC */
+ { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */
+};
+
+static struct s3c_irq_data init_s3c2443subint[32] = {
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */
+ { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */
+ { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_C */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_P */
+ { .type = S3C_IRQTYPE_NONE }, /* reserved */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD1 */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD2 */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD3 */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD4 */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA0 */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA1 */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA2 */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA3 */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA4 */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA5 */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-RX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-TX */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-ERR */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* WDT */
+ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* AC97 */
+};
+
+void __init s3c2443_init_irq(void)
+{
+ pr_info("S3C2443: IRQ Support\n");
+
+#ifdef CONFIG_FIQ
+ init_FIQ(FIQ_START);
+#endif
+
+ s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2443base[0], NULL,
+ 0x4a000000);
+ if (IS_ERR(s3c_intc[0])) {
+ pr_err("irq: could not create main interrupt controller\n");
+ return;
+ }
+
+ s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4);
+ s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2443subint[0],
+ s3c_intc[0], 0x4a000018);
+}
+#endif
+
+#ifdef CONFIG_OF
+static int s3c24xx_irq_map_of(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ unsigned int ctrl_num = hw / 32;
+ unsigned int intc_hw = hw % 32;
+ struct s3c_irq_intc *intc = s3c_intc[ctrl_num];
+ struct s3c_irq_intc *parent_intc = intc->parent;
+ struct s3c_irq_data *irq_data = &intc->irqs[intc_hw];
+
+ /* attach controller pointer to irq_data */
+ irq_data->intc = intc;
+ irq_data->offset = intc_hw;
+
+ if (!parent_intc)
+ irq_set_chip_and_handler(virq, &s3c_irq_chip, handle_edge_irq);
+ else
+ irq_set_chip_and_handler(virq, &s3c_irq_level_chip,
+ handle_edge_irq);
+
+ irq_set_chip_data(virq, irq_data);
+
+ set_irq_flags(virq, IRQF_VALID);
+
+ return 0;
+}
+
+/*
+ * Translate our OF (device tree) irq specifier.
+ * Format: <ctrl_num parent_irq ctrl_irq type>
+ */
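+/*
+ * Hypothetical example (not taken from a dts in this series): UART0-RX on
+ * the first sub-controller would be specified as <1 28 0 4>, i.e.
+ * controller 1, parent hwirq 28, controller hwirq 0, IRQ_TYPE_LEVEL_HIGH.
+ */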
+static int s3c24xx_irq_xlate_of(struct irq_domain *d, struct device_node *n,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type)
+{
+ struct s3c_irq_intc *intc;
+ struct s3c_irq_intc *parent_intc;
+ struct s3c_irq_data *irq_data;
+ struct s3c_irq_data *parent_irq_data;
+ int irqno;
+
+ if (WARN_ON(intsize < 4))
+ return -EINVAL;
+
+ if (intspec[0] > 2 || !s3c_intc[intspec[0]]) {
+ pr_err("controller number %d invalid\n", intspec[0]);
+ return -EINVAL;
+ }
+ intc = s3c_intc[intspec[0]];
+
+ *out_hwirq = intspec[0] * 32 + intspec[2];
+ *out_type = intspec[3] & IRQ_TYPE_SENSE_MASK;
+
+ parent_intc = intc->parent;
+ if (parent_intc) {
+ irq_data = &intc->irqs[intspec[2]];
+ irq_data->parent_irq = intspec[1];
+ parent_irq_data = &parent_intc->irqs[irq_data->parent_irq];
+ parent_irq_data->sub_intc = intc;
+ parent_irq_data->sub_bits |= (1UL << intspec[2]);
+
+ /* parent_intc is always s3c_intc[0], so no offset */
+ irqno = irq_create_mapping(parent_intc->domain, intspec[1]);
+ if (irqno < 0) {
+ pr_err("irq: could not map parent interrupt\n");
+ return irqno;
+ }
+
+ irq_set_chained_handler(irqno, s3c_irq_demux);
+ }
+
+ return 0;
+}
+
+static struct irq_domain_ops s3c24xx_irq_ops_of = {
+ .map = s3c24xx_irq_map_of,
+ .xlate = s3c24xx_irq_xlate_of,
+};
+
+struct s3c24xx_irq_of_ctrl {
+ char *name;
+ unsigned long offset;
+ struct s3c_irq_intc **handle;
+ struct s3c_irq_intc **parent;
+ struct irq_domain_ops *ops;
+};
+
+static int __init s3c_init_intc_of(struct device_node *np,
+ struct device_node *interrupt_parent,
+ struct s3c24xx_irq_of_ctrl *s3c_ctrl, int num_ctrl)
+{
+ struct s3c_irq_intc *intc;
+ struct s3c24xx_irq_of_ctrl *ctrl;
+ struct irq_domain *domain;
+ void __iomem *reg_base;
+ int i;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("irq-s3c24xx: could not map irq registers\n");
+ return -EINVAL;
+ }
+
+ domain = irq_domain_add_linear(np, num_ctrl * 32,
+ &s3c24xx_irq_ops_of, NULL);
+ if (!domain) {
+ pr_err("irq: could not create irq-domain\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_ctrl; i++) {
+ ctrl = &s3c_ctrl[i];
+
+ pr_debug("irq: found controller %s\n", ctrl->name);
+
+ intc = kzalloc(sizeof(struct s3c_irq_intc), GFP_KERNEL);
+ if (!intc)
+ return -ENOMEM;
+
+ intc->domain = domain;
+ intc->irqs = kzalloc(sizeof(struct s3c_irq_data) * 32,
+ GFP_KERNEL);
+ if (!intc->irqs) {
+ kfree(intc);
+ return -ENOMEM;
+ }
+
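+		/*
+		 * Sub-controllers only have a pending and a mask register
+		 * (mask at +0x04); top-level controllers additionally have
+		 * an interrupt-pending register at +0x10.
+		 */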
+ if (ctrl->parent) {
+ intc->reg_pending = reg_base + ctrl->offset;
+ intc->reg_mask = reg_base + ctrl->offset + 0x4;
+
+ if (*(ctrl->parent)) {
+ intc->parent = *(ctrl->parent);
+ } else {
+ pr_warn("irq: parent of %s missing\n",
+ ctrl->name);
+ kfree(intc->irqs);
+ kfree(intc);
+ continue;
+ }
+ } else {
+ intc->reg_pending = reg_base + ctrl->offset;
+ intc->reg_mask = reg_base + ctrl->offset + 0x08;
+ intc->reg_intpnd = reg_base + ctrl->offset + 0x10;
+ }
+
+ s3c24xx_clear_intc(intc);
+ s3c_intc[i] = intc;
+ }
+
+ set_handle_irq(s3c24xx_handle_irq);
+
+ return 0;
+}
+
+static struct s3c24xx_irq_of_ctrl s3c2410_ctrl[] = {
+ {
+ .name = "intc",
+ .offset = 0,
+ }, {
+ .name = "subintc",
+ .offset = 0x18,
+ .parent = &s3c_intc[0],
+ }
+};
+
+int __init s3c2410_init_intc_of(struct device_node *np,
+ struct device_node *interrupt_parent,
+ struct s3c24xx_irq_of_ctrl *ctrl, int num_ctrl)
+{
+ return s3c_init_intc_of(np, interrupt_parent,
+ s3c2410_ctrl, ARRAY_SIZE(s3c2410_ctrl));
+}
+IRQCHIP_DECLARE(s3c2410_irq, "samsung,s3c2410-irq", s3c2410_init_intc_of);
+
+static struct s3c24xx_irq_of_ctrl s3c2416_ctrl[] = {
+ {
+ .name = "intc",
+ .offset = 0,
+ }, {
+ .name = "subintc",
+ .offset = 0x18,
+ .parent = &s3c_intc[0],
+ }, {
+ .name = "intc2",
+ .offset = 0x40,
+ }
+};
+
+int __init s3c2416_init_intc_of(struct device_node *np,
+ struct device_node *interrupt_parent,
+ struct s3c24xx_irq_of_ctrl *ctrl, int num_ctrl)
+{
+ return s3c_init_intc_of(np, interrupt_parent,
+ s3c2416_ctrl, ARRAY_SIZE(s3c2416_ctrl));
+}
+IRQCHIP_DECLARE(s3c2416_irq, "samsung,s3c2416-irq", s3c2416_init_intc_of);
+#endif
diff --git a/drivers/irqchip/irq-sirfsoc.c b/drivers/irqchip/irq-sirfsoc.c
new file mode 100644
index 0000000..69ea44e
--- /dev/null
+++ b/drivers/irqchip/irq-sirfsoc.c
@@ -0,0 +1,126 @@
+/*
+ * interrupt controller support for CSR SiRFprimaII
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/irqdomain.h>
+#include <linux/syscore_ops.h>
+#include <asm/mach/irq.h>
+#include <asm/exception.h>
+#include "irqchip.h"
+
+#define SIRFSOC_INT_RISC_MASK0 0x0018
+#define SIRFSOC_INT_RISC_MASK1 0x001C
+#define SIRFSOC_INT_RISC_LEVEL0 0x0020
+#define SIRFSOC_INT_RISC_LEVEL1 0x0024
+#define SIRFSOC_INIT_IRQ_ID 0x0038
+
+#define SIRFSOC_NUM_IRQS 128
+
+static struct irq_domain *sirfsoc_irqdomain;
+
+static __init void
+sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+
+ gc = irq_alloc_generic_chip("SIRFINTC", 1, irq_start, base, handle_level_irq);
+ ct = gc->chip_types;
+
+ ct->chip.irq_mask = irq_gc_mask_clr_bit;
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ ct->regs.mask = SIRFSOC_INT_RISC_MASK0;
+
+ irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST, 0);
+}
+
+static asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs)
+{
+ void __iomem *base = sirfsoc_irqdomain->host_data;
+ u32 irqstat, irqnr;
+
+ irqstat = readl_relaxed(base + SIRFSOC_INIT_IRQ_ID);
+ irqnr = irq_find_mapping(sirfsoc_irqdomain, irqstat & 0xff);
+
+ handle_IRQ(irqnr, regs);
+}
+
+static int __init sirfsoc_irq_init(struct device_node *np, struct device_node *parent)
+{
+ void __iomem *base = of_iomap(np, 0);
+ if (!base)
+ panic("unable to map intc cpu registers\n");
+
+	/*
+	 * Use a legacy domain: the generic irq chip helpers do not yet
+	 * work with linear domains.
+	 */
+ sirfsoc_irqdomain = irq_domain_add_legacy(np, SIRFSOC_NUM_IRQS, 0, 0,
+ &irq_domain_simple_ops, base);
+
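+	/*
+	 * hwirqs 0-31 are masked via RISC_MASK0; passing base + 4 for the
+	 * second chip makes the same MASK0 offset resolve to RISC_MASK1
+	 * for hwirqs 32 and above.
+	 */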
+ sirfsoc_alloc_gc(base, 0, 32);
+ sirfsoc_alloc_gc(base + 4, 32, SIRFSOC_NUM_IRQS - 32);
+
+ writel_relaxed(0, base + SIRFSOC_INT_RISC_LEVEL0);
+ writel_relaxed(0, base + SIRFSOC_INT_RISC_LEVEL1);
+
+ writel_relaxed(0, base + SIRFSOC_INT_RISC_MASK0);
+ writel_relaxed(0, base + SIRFSOC_INT_RISC_MASK1);
+
+ set_handle_irq(sirfsoc_handle_irq);
+
+ return 0;
+}
+IRQCHIP_DECLARE(sirfsoc_intc, "sirf,prima2-intc", sirfsoc_irq_init);
+
+struct sirfsoc_irq_status {
+ u32 mask0;
+ u32 mask1;
+ u32 level0;
+ u32 level1;
+};
+
+static struct sirfsoc_irq_status sirfsoc_irq_st;
+
+static int sirfsoc_irq_suspend(void)
+{
+ void __iomem *base = sirfsoc_irqdomain->host_data;
+
+ sirfsoc_irq_st.mask0 = readl_relaxed(base + SIRFSOC_INT_RISC_MASK0);
+ sirfsoc_irq_st.mask1 = readl_relaxed(base + SIRFSOC_INT_RISC_MASK1);
+ sirfsoc_irq_st.level0 = readl_relaxed(base + SIRFSOC_INT_RISC_LEVEL0);
+ sirfsoc_irq_st.level1 = readl_relaxed(base + SIRFSOC_INT_RISC_LEVEL1);
+
+ return 0;
+}
+
+static void sirfsoc_irq_resume(void)
+{
+ void __iomem *base = sirfsoc_irqdomain->host_data;
+
+ writel_relaxed(sirfsoc_irq_st.mask0, base + SIRFSOC_INT_RISC_MASK0);
+ writel_relaxed(sirfsoc_irq_st.mask1, base + SIRFSOC_INT_RISC_MASK1);
+ writel_relaxed(sirfsoc_irq_st.level0, base + SIRFSOC_INT_RISC_LEVEL0);
+ writel_relaxed(sirfsoc_irq_st.level1, base + SIRFSOC_INT_RISC_LEVEL1);
+}
+
+static struct syscore_ops sirfsoc_irq_syscore_ops = {
+ .suspend = sirfsoc_irq_suspend,
+ .resume = sirfsoc_irq_resume,
+};
+
+static int __init sirfsoc_irq_pm_init(void)
+{
+ if (!sirfsoc_irqdomain)
+ return 0;
+
+ register_syscore_ops(&sirfsoc_irq_syscore_ops);
+ return 0;
+}
+device_initcall(sirfsoc_irq_pm_init);
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
new file mode 100644
index 0000000..b66d4ae
--- /dev/null
+++ b/drivers/irqchip/irq-sun4i.c
@@ -0,0 +1,149 @@
+/*
+ * Allwinner A1X SoCs IRQ chip driver.
+ *
+ * Copyright (C) 2012 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * Based on code from
+ * Allwinner Technology Co., Ltd. <www.allwinnertech.com>
+ * Benn Huang <benn@allwinnertech.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#include "irqchip.h"
+
+#define SUN4I_IRQ_VECTOR_REG 0x00
+#define SUN4I_IRQ_PROTECTION_REG 0x08
+#define SUN4I_IRQ_NMI_CTRL_REG 0x0c
+#define SUN4I_IRQ_PENDING_REG(x) (0x10 + 0x4 * x)
+#define SUN4I_IRQ_FIQ_PENDING_REG(x) (0x20 + 0x4 * x)
+#define SUN4I_IRQ_ENABLE_REG(x) (0x40 + 0x4 * x)
+#define SUN4I_IRQ_MASK_REG(x) (0x50 + 0x4 * x)
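+/*
+ * The pending, enable and mask registers are banked as three 32-bit
+ * words; bank x covers hwirqs 32 * x .. 32 * x + 31.
+ */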
+
+static void __iomem *sun4i_irq_base;
+static struct irq_domain *sun4i_irq_domain;
+
+static asmlinkage void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs);
+
+void sun4i_irq_ack(struct irq_data *irqd)
+{
+ unsigned int irq = irqd_to_hwirq(irqd);
+ unsigned int irq_off = irq % 32;
+ int reg = irq / 32;
+ u32 val;
+
+ val = readl(sun4i_irq_base + SUN4I_IRQ_PENDING_REG(reg));
+ writel(val | (1 << irq_off),
+ sun4i_irq_base + SUN4I_IRQ_PENDING_REG(reg));
+}
+
+static void sun4i_irq_mask(struct irq_data *irqd)
+{
+ unsigned int irq = irqd_to_hwirq(irqd);
+ unsigned int irq_off = irq % 32;
+ int reg = irq / 32;
+ u32 val;
+
+ val = readl(sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg));
+ writel(val & ~(1 << irq_off),
+ sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg));
+}
+
+static void sun4i_irq_unmask(struct irq_data *irqd)
+{
+ unsigned int irq = irqd_to_hwirq(irqd);
+ unsigned int irq_off = irq % 32;
+ int reg = irq / 32;
+ u32 val;
+
+ val = readl(sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg));
+ writel(val | (1 << irq_off),
+ sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg));
+}
+
+static struct irq_chip sun4i_irq_chip = {
+ .name = "sun4i_irq",
+ .irq_ack = sun4i_irq_ack,
+ .irq_mask = sun4i_irq_mask,
+ .irq_unmask = sun4i_irq_unmask,
+};
+
+static int sun4i_irq_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(virq, &sun4i_irq_chip,
+ handle_level_irq);
+ set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
+
+ return 0;
+}
+
+static struct irq_domain_ops sun4i_irq_ops = {
+ .map = sun4i_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int __init sun4i_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ sun4i_irq_base = of_iomap(node, 0);
+ if (!sun4i_irq_base)
+ panic("%s: unable to map IC registers\n",
+ node->full_name);
+
+ /* Disable all interrupts */
+ writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(0));
+ writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(1));
+ writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(2));
+
+ /* Mask all the interrupts */
+ writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(0));
+ writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(1));
+ writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(2));
+
+ /* Clear all the pending interrupts */
+ writel(0xffffffff, sun4i_irq_base + SUN4I_IRQ_PENDING_REG(0));
+ writel(0xffffffff, sun4i_irq_base + SUN4I_IRQ_PENDING_REG(1));
+ writel(0xffffffff, sun4i_irq_base + SUN4I_IRQ_PENDING_REG(2));
+
+ /* Enable protection mode */
+ writel(0x01, sun4i_irq_base + SUN4I_IRQ_PROTECTION_REG);
+
+ /* Configure the external interrupt source type */
+ writel(0x00, sun4i_irq_base + SUN4I_IRQ_NMI_CTRL_REG);
+
+ sun4i_irq_domain = irq_domain_add_linear(node, 3 * 32,
+ &sun4i_irq_ops, NULL);
+ if (!sun4i_irq_domain)
+ panic("%s: unable to create IRQ domain\n", node->full_name);
+
+ set_handle_irq(sun4i_handle_irq);
+
+ return 0;
+}
+IRQCHIP_DECLARE(allwinner_sun4i_ic, "allwinner,sun4i-ic", sun4i_of_init);
+
+static asmlinkage void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs)
+{
+ u32 irq, hwirq;
+
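+	/*
+	 * The vector register reports the active hwirq multiplied by four;
+	 * keep handling interrupts until it reads back as zero.
+	 */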
+ hwirq = readl(sun4i_irq_base + SUN4I_IRQ_VECTOR_REG) >> 2;
+ while (hwirq != 0) {
+ irq = irq_find_mapping(sun4i_irq_domain, hwirq);
+ handle_IRQ(irq, regs);
+ hwirq = readl(sun4i_irq_base + SUN4I_IRQ_VECTOR_REG) >> 2;
+ }
+}
diff --git a/drivers/irqchip/irq-sunxi.c b/drivers/irqchip/irq-sunxi.c
deleted file mode 100644
index 10974fa..0000000
--- a/drivers/irqchip/irq-sunxi.c
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Allwinner A1X SoCs IRQ chip driver.
- *
- * Copyright (C) 2012 Maxime Ripard
- *
- * Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * Based on code from
- * Allwinner Technology Co., Ltd. <www.allwinnertech.com>
- * Benn Huang <benn@allwinnertech.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-
-#include <linux/irqchip/sunxi.h>
-
-#define SUNXI_IRQ_VECTOR_REG 0x00
-#define SUNXI_IRQ_PROTECTION_REG 0x08
-#define SUNXI_IRQ_NMI_CTRL_REG 0x0c
-#define SUNXI_IRQ_PENDING_REG(x) (0x10 + 0x4 * x)
-#define SUNXI_IRQ_FIQ_PENDING_REG(x) (0x20 + 0x4 * x)
-#define SUNXI_IRQ_ENABLE_REG(x) (0x40 + 0x4 * x)
-#define SUNXI_IRQ_MASK_REG(x) (0x50 + 0x4 * x)
-
-static void __iomem *sunxi_irq_base;
-static struct irq_domain *sunxi_irq_domain;
-
-void sunxi_irq_ack(struct irq_data *irqd)
-{
- unsigned int irq = irqd_to_hwirq(irqd);
- unsigned int irq_off = irq % 32;
- int reg = irq / 32;
- u32 val;
-
- val = readl(sunxi_irq_base + SUNXI_IRQ_PENDING_REG(reg));
- writel(val | (1 << irq_off),
- sunxi_irq_base + SUNXI_IRQ_PENDING_REG(reg));
-}
-
-static void sunxi_irq_mask(struct irq_data *irqd)
-{
- unsigned int irq = irqd_to_hwirq(irqd);
- unsigned int irq_off = irq % 32;
- int reg = irq / 32;
- u32 val;
-
- val = readl(sunxi_irq_base + SUNXI_IRQ_ENABLE_REG(reg));
- writel(val & ~(1 << irq_off),
- sunxi_irq_base + SUNXI_IRQ_ENABLE_REG(reg));
-}
-
-static void sunxi_irq_unmask(struct irq_data *irqd)
-{
- unsigned int irq = irqd_to_hwirq(irqd);
- unsigned int irq_off = irq % 32;
- int reg = irq / 32;
- u32 val;
-
- val = readl(sunxi_irq_base + SUNXI_IRQ_ENABLE_REG(reg));
- writel(val | (1 << irq_off),
- sunxi_irq_base + SUNXI_IRQ_ENABLE_REG(reg));
-}
-
-static struct irq_chip sunxi_irq_chip = {
- .name = "sunxi_irq",
- .irq_ack = sunxi_irq_ack,
- .irq_mask = sunxi_irq_mask,
- .irq_unmask = sunxi_irq_unmask,
-};
-
-static int sunxi_irq_map(struct irq_domain *d, unsigned int virq,
- irq_hw_number_t hw)
-{
- irq_set_chip_and_handler(virq, &sunxi_irq_chip,
- handle_level_irq);
- set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
-
- return 0;
-}
-
-static struct irq_domain_ops sunxi_irq_ops = {
- .map = sunxi_irq_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
-static int __init sunxi_of_init(struct device_node *node,
- struct device_node *parent)
-{
- sunxi_irq_base = of_iomap(node, 0);
- if (!sunxi_irq_base)
- panic("%s: unable to map IC registers\n",
- node->full_name);
-
- /* Disable all interrupts */
- writel(0, sunxi_irq_base + SUNXI_IRQ_ENABLE_REG(0));
- writel(0, sunxi_irq_base + SUNXI_IRQ_ENABLE_REG(1));
- writel(0, sunxi_irq_base + SUNXI_IRQ_ENABLE_REG(2));
-
- /* Mask all the interrupts */
- writel(0, sunxi_irq_base + SUNXI_IRQ_MASK_REG(0));
- writel(0, sunxi_irq_base + SUNXI_IRQ_MASK_REG(1));
- writel(0, sunxi_irq_base + SUNXI_IRQ_MASK_REG(2));
-
- /* Clear all the pending interrupts */
- writel(0xffffffff, sunxi_irq_base + SUNXI_IRQ_PENDING_REG(0));
- writel(0xffffffff, sunxi_irq_base + SUNXI_IRQ_PENDING_REG(1));
- writel(0xffffffff, sunxi_irq_base + SUNXI_IRQ_PENDING_REG(2));
-
- /* Enable protection mode */
- writel(0x01, sunxi_irq_base + SUNXI_IRQ_PROTECTION_REG);
-
- /* Configure the external interrupt source type */
- writel(0x00, sunxi_irq_base + SUNXI_IRQ_NMI_CTRL_REG);
-
- sunxi_irq_domain = irq_domain_add_linear(node, 3 * 32,
- &sunxi_irq_ops, NULL);
- if (!sunxi_irq_domain)
- panic("%s: unable to create IRQ domain\n", node->full_name);
-
- return 0;
-}
-
-static struct of_device_id sunxi_irq_dt_ids[] __initconst = {
- { .compatible = "allwinner,sunxi-ic", .data = sunxi_of_init },
- { }
-};
-
-void __init sunxi_init_irq(void)
-{
- of_irq_init(sunxi_irq_dt_ids);
-}
-
-asmlinkage void __exception_irq_entry sunxi_handle_irq(struct pt_regs *regs)
-{
- u32 irq, hwirq;
-
- hwirq = readl(sunxi_irq_base + SUNXI_IRQ_VECTOR_REG) >> 2;
- while (hwirq != 0) {
- irq = irq_find_mapping(sunxi_irq_domain, hwirq);
- handle_IRQ(irq, regs);
- hwirq = readl(sunxi_irq_base + SUNXI_IRQ_VECTOR_REG) >> 2;
- }
-}
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 3cf97aa..884d11c 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -33,7 +34,7 @@
#include <linux/irqchip/arm-vic.h>
#include <asm/exception.h>
-#include <asm/mach/irq.h>
+#include <asm/irq.h>
#include "irqchip.h"
diff --git a/drivers/irqchip/irq-vt8500.c b/drivers/irqchip/irq-vt8500.c
new file mode 100644
index 0000000..d970595
--- /dev/null
+++ b/drivers/irqchip/irq-vt8500.c
@@ -0,0 +1,259 @@
+/*
+ * arch/arm/mach-vt8500/irq.c
+ *
+ * Copyright (C) 2012 Tony Prisk <linux@prisktech.co.nz>
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * This file is copied and modified from the original irq.c provided by
+ * Alexey Charkov. Minor changes have been made for device tree support.
+ */
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#include <asm/irq.h>
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#include "irqchip.h"
+
+#define VT8500_ICPC_IRQ 0x20
+#define VT8500_ICPC_FIQ 0x24
+#define VT8500_ICDC 0x40 /* Destination Control 64*u32 */
+#define VT8500_ICIS 0x80 /* Interrupt status, 16*u32 */
+
+/* ICPC */
+#define ICPC_MASK 0x3F
+#define ICPC_ROTATE BIT(6)
+
+/* IC_DCTR */
+#define ICDC_IRQ 0x00
+#define ICDC_FIQ 0x01
+#define ICDC_DSS0 0x02
+#define ICDC_DSS1 0x03
+#define ICDC_DSS2 0x04
+#define ICDC_DSS3 0x05
+#define ICDC_DSS4 0x06
+#define ICDC_DSS5 0x07
+
+#define VT8500_INT_DISABLE 0
+#define VT8500_INT_ENABLE BIT(3)
+
+#define VT8500_TRIGGER_HIGH 0
+#define VT8500_TRIGGER_RISING BIT(5)
+#define VT8500_TRIGGER_FALLING BIT(6)
+#define VT8500_EDGE ( VT8500_TRIGGER_RISING \
+ | VT8500_TRIGGER_FALLING)
+
+/* vt8500 has 1 intc, wm8505 and wm8650 have 2 */
+#define VT8500_INTC_MAX 2
+
+struct vt8500_irq_data {
+ void __iomem *base; /* IO Memory base address */
+ struct irq_domain *domain; /* Domain for this controller */
+};
+
+/* Global state for accessing the io-mem addresses of each controller */
+static struct vt8500_irq_data intc[VT8500_INTC_MAX];
+static u32 active_cnt;
+
+static void vt8500_irq_mask(struct irq_data *d)
+{
+ struct vt8500_irq_data *priv = d->domain->host_data;
+ void __iomem *base = priv->base;
+ void __iomem *stat_reg = base + VT8500_ICIS + (d->hwirq < 32 ? 0 : 4);
+ u8 edge, dctr;
+ u32 status;
+
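+	/*
+	 * Edge-triggered sources are acknowledged by writing their pending
+	 * bit back to the status register; level-triggered sources are
+	 * masked by clearing the enable bit in their destination-control
+	 * byte.
+	 */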
+ edge = readb(base + VT8500_ICDC + d->hwirq) & VT8500_EDGE;
+ if (edge) {
+ status = readl(stat_reg);
+
+ status |= (1 << (d->hwirq & 0x1f));
+ writel(status, stat_reg);
+ } else {
+ dctr = readb(base + VT8500_ICDC + d->hwirq);
+ dctr &= ~VT8500_INT_ENABLE;
+ writeb(dctr, base + VT8500_ICDC + d->hwirq);
+ }
+}
+
+static void vt8500_irq_unmask(struct irq_data *d)
+{
+ struct vt8500_irq_data *priv = d->domain->host_data;
+ void __iomem *base = priv->base;
+ u8 dctr;
+
+ dctr = readb(base + VT8500_ICDC + d->hwirq);
+ dctr |= VT8500_INT_ENABLE;
+ writeb(dctr, base + VT8500_ICDC + d->hwirq);
+}
+
+static int vt8500_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ struct vt8500_irq_data *priv = d->domain->host_data;
+ void __iomem *base = priv->base;
+ u8 dctr;
+
+ dctr = readb(base + VT8500_ICDC + d->hwirq);
+ dctr &= ~VT8500_EDGE;
+
+ switch (flow_type) {
+ case IRQF_TRIGGER_LOW:
+ return -EINVAL;
+ case IRQF_TRIGGER_HIGH:
+ dctr |= VT8500_TRIGGER_HIGH;
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+ break;
+ case IRQF_TRIGGER_FALLING:
+ dctr |= VT8500_TRIGGER_FALLING;
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
+ break;
+ case IRQF_TRIGGER_RISING:
+ dctr |= VT8500_TRIGGER_RISING;
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
+ break;
+ }
+ writeb(dctr, base + VT8500_ICDC + d->hwirq);
+
+ return 0;
+}
+
+static struct irq_chip vt8500_irq_chip = {
+ .name = "vt8500",
+ .irq_ack = vt8500_irq_mask,
+ .irq_mask = vt8500_irq_mask,
+ .irq_unmask = vt8500_irq_unmask,
+ .irq_set_type = vt8500_irq_set_type,
+};
+
+static void __init vt8500_init_irq_hw(void __iomem *base)
+{
+ u32 i;
+
+ /* Enable rotating priority for IRQ */
+ writel(ICPC_ROTATE, base + VT8500_ICPC_IRQ);
+ writel(0x00, base + VT8500_ICPC_FIQ);
+
+ /* Disable all interrupts and route them to IRQ */
+ for (i = 0; i < 64; i++)
+ writeb(VT8500_INT_DISABLE | ICDC_IRQ, base + VT8500_ICDC + i);
+}
+
+static int vt8500_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(virq, &vt8500_irq_chip, handle_level_irq);
+ set_irq_flags(virq, IRQF_VALID);
+
+ return 0;
+}
+
+static struct irq_domain_ops vt8500_irq_domain_ops = {
+ .map = vt8500_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+asmlinkage void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs)
+{
+ u32 stat, i;
+ int irqnr, virq;
+ void __iomem *base;
+
+ /* Loop through each active controller */
+	for (i = 0; i < active_cnt; i++) {
+ base = intc[i].base;
+ irqnr = readl_relaxed(base) & 0x3F;
+		/*
+		 * The Highest Priority register defaults to 63, so verify
+		 * that this is a real interrupt by reading the status
+		 * register.
+		 */
+ if (irqnr == 63) {
+ stat = readl_relaxed(base + VT8500_ICIS + 4);
+ if (!(stat & BIT(31)))
+ continue;
+ }
+
+ virq = irq_find_mapping(intc[i].domain, irqnr);
+ handle_IRQ(virq, regs);
+ }
+}
+
+int __init vt8500_irq_init(struct device_node *node, struct device_node *parent)
+{
+ int irq, i;
+ struct device_node *np = node;
+
+ if (active_cnt == VT8500_INTC_MAX) {
+ pr_err("%s: Interrupt controllers > VT8500_INTC_MAX\n",
+ __func__);
+ goto out;
+ }
+
+ intc[active_cnt].base = of_iomap(np, 0);
+ intc[active_cnt].domain = irq_domain_add_linear(node, 64,
+ &vt8500_irq_domain_ops, &intc[active_cnt]);
+
+ if (!intc[active_cnt].base) {
+ pr_err("%s: Unable to map IO memory\n", __func__);
+ goto out;
+ }
+
+ if (!intc[active_cnt].domain) {
+ pr_err("%s: Unable to add irq domain!\n", __func__);
+ goto out;
+ }
+
+ set_handle_irq(vt8500_handle_irq);
+
+ vt8500_init_irq_hw(intc[active_cnt].base);
+
+ pr_info("vt8500-irq: Added interrupt controller\n");
+
+ active_cnt++;
+
+ /* check if this is a slaved controller */
+ if (of_irq_count(np) != 0) {
+ /* check that we have the correct number of interrupts */
+ if (of_irq_count(np) != 8) {
+ pr_err("%s: Incorrect IRQ map for slaved controller\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 8; i++) {
+ irq = irq_of_parse_and_map(np, i);
+ enable_irq(irq);
+ }
+
+ pr_info("vt8500-irq: Enabled slave->parent interrupts\n");
+ }
+out:
+ return 0;
+}
+
+IRQCHIP_DECLARE(vt8500_irq, "via,vt8500-intc", vt8500_irq_init);
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 03a0a01..3286903 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -2334,7 +2334,7 @@ static int gigaset_proc_show(struct seq_file *m, void *v)
static int gigaset_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, gigaset_proc_show, PDE(inode)->data);
+ return single_open(file, gigaset_proc_show, PDE_DATA(inode));
}
static const struct file_operations gigaset_proc_fops = {
diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
index 821f7ac..4d9b195 100644
--- a/drivers/isdn/hardware/avm/b1.c
+++ b/drivers/isdn/hardware/avm/b1.c
@@ -702,7 +702,7 @@ static int b1ctl_proc_show(struct seq_file *m, void *v)
static int b1ctl_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, b1ctl_proc_show, PDE(inode)->data);
+ return single_open(file, b1ctl_proc_show, PDE_DATA(inode));
}
const struct file_operations b1ctl_proc_fops = {
diff --git a/drivers/isdn/hardware/avm/b1dma.c b/drivers/isdn/hardware/avm/b1dma.c
index 0896aa8..19b113f 100644
--- a/drivers/isdn/hardware/avm/b1dma.c
+++ b/drivers/isdn/hardware/avm/b1dma.c
@@ -944,7 +944,7 @@ static int b1dmactl_proc_show(struct seq_file *m, void *v)
static int b1dmactl_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, b1dmactl_proc_show, PDE(inode)->data);
+ return single_open(file, b1dmactl_proc_show, PDE_DATA(inode));
}
const struct file_operations b1dmactl_proc_fops = {
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
index 1d7fc44..5d00d72 100644
--- a/drivers/isdn/hardware/avm/c4.c
+++ b/drivers/isdn/hardware/avm/c4.c
@@ -1129,7 +1129,7 @@ static int c4_proc_show(struct seq_file *m, void *v)
static int c4_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, c4_proc_show, PDE(inode)->data);
+ return single_open(file, c4_proc_show, PDE_DATA(inode));
}
static const struct file_operations c4_proc_fops = {
diff --git a/drivers/isdn/hardware/eicon/divasproc.c b/drivers/isdn/hardware/eicon/divasproc.c
index 3a4165c..56ce98a 100644
--- a/drivers/isdn/hardware/eicon/divasproc.c
+++ b/drivers/isdn/hardware/eicon/divasproc.c
@@ -145,7 +145,7 @@ void remove_divas_proc(void)
static ssize_t grp_opt_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
- diva_os_xdi_adapter_t *a = PDE(file_inode(file))->data;
+ diva_os_xdi_adapter_t *a = PDE_DATA(file_inode(file));
PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
if ((count == 1) || (count == 2)) {
@@ -172,7 +172,7 @@ static ssize_t grp_opt_proc_write(struct file *file, const char __user *buffer,
static ssize_t d_l1_down_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
- diva_os_xdi_adapter_t *a = PDE(file_inode(file))->data;
+ diva_os_xdi_adapter_t *a = PDE_DATA(file_inode(file));
PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
if ((count == 1) || (count == 2)) {
@@ -210,7 +210,7 @@ static int d_l1_down_proc_show(struct seq_file *m, void *v)
static int d_l1_down_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, d_l1_down_proc_show, PDE(inode)->data);
+ return single_open(file, d_l1_down_proc_show, PDE_DATA(inode));
}
static const struct file_operations d_l1_down_proc_fops = {
@@ -236,7 +236,7 @@ static int grp_opt_proc_show(struct seq_file *m, void *v)
static int grp_opt_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, grp_opt_proc_show, PDE(inode)->data);
+ return single_open(file, grp_opt_proc_show, PDE_DATA(inode));
}
static const struct file_operations grp_opt_proc_fops = {
@@ -251,7 +251,7 @@ static const struct file_operations grp_opt_proc_fops = {
static ssize_t info_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
- diva_os_xdi_adapter_t *a = PDE(file_inode(file))->data;
+ diva_os_xdi_adapter_t *a = PDE_DATA(file_inode(file));
PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
char c[4];
@@ -335,7 +335,7 @@ static int info_proc_show(struct seq_file *m, void *v)
static int info_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, info_proc_show, PDE(inode)->data);
+ return single_open(file, info_proc_show, PDE_DATA(inode));
}
static const struct file_operations info_proc_fops = {
diff --git a/drivers/isdn/hysdn/hycapi.c b/drivers/isdn/hysdn/hycapi.c
index 931f916..00aad10 100644
--- a/drivers/isdn/hysdn/hycapi.c
+++ b/drivers/isdn/hysdn/hycapi.c
@@ -469,7 +469,7 @@ static int hycapi_proc_show(struct seq_file *m, void *v)
static int hycapi_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, hycapi_proc_show, PDE(inode)->data);
+ return single_open(file, hycapi_proc_show, PDE_DATA(inode));
}
static const struct file_operations hycapi_proc_fops = {
diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c
index 8023d25..7307921 100644
--- a/drivers/isdn/hysdn/hysdn_procconf.c
+++ b/drivers/isdn/hysdn/hysdn_procconf.c
@@ -229,23 +229,12 @@ static int
hysdn_conf_open(struct inode *ino, struct file *filep)
{
hysdn_card *card;
- struct proc_dir_entry *pd;
struct conf_writedata *cnf;
char *cp, *tmp;
/* now search the addressed card */
mutex_lock(&hysdn_conf_mutex);
- card = card_root;
- while (card) {
- pd = card->procconf;
- if (pd == PDE(ino))
- break;
- card = card->next; /* search next entry */
- }
- if (!card) {
- mutex_unlock(&hysdn_conf_mutex);
- return (-ENODEV); /* device is unknown/invalid */
- }
+ card = PDE_DATA(ino);
if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL))
hysdn_addlog(card, "config open for uid=%d gid=%d mode=0x%x",
filep->f_cred->fsuid, filep->f_cred->fsgid,
@@ -317,21 +306,9 @@ hysdn_conf_close(struct inode *ino, struct file *filep)
hysdn_card *card;
struct conf_writedata *cnf;
int retval = 0;
- struct proc_dir_entry *pd;
mutex_lock(&hysdn_conf_mutex);
- /* search the addressed card */
- card = card_root;
- while (card) {
- pd = card->procconf;
- if (pd == PDE(ino))
- break;
- card = card->next; /* search next entry */
- }
- if (!card) {
- mutex_unlock(&hysdn_conf_mutex);
- return (-ENODEV); /* device is unknown/invalid */
- }
+ card = PDE_DATA(ino);
if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL))
hysdn_addlog(card, "config close for uid=%d gid=%d mode=0x%x",
filep->f_cred->fsuid, filep->f_cred->fsgid,
@@ -394,10 +371,11 @@ hysdn_procconf_init(void)
while (card) {
sprintf(conf_name, "%s%d", PROC_CONF_BASENAME, card->myid);
- if ((card->procconf = (void *) proc_create(conf_name,
+ if ((card->procconf = (void *) proc_create_data(conf_name,
S_IFREG | S_IRUGO | S_IWUSR,
hysdn_proc_entry,
- &conf_fops)) != NULL) {
+ &conf_fops,
+ card)) != NULL) {
hysdn_proclog_init(card); /* init the log file entry */
}
card = card->next; /* next entry */
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index 9a3ce93..b61e8d5 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -173,27 +173,14 @@ hysdn_log_read(struct file *file, char __user *buf, size_t count, loff_t *off)
{
struct log_data *inf;
int len;
- struct proc_dir_entry *pde = PDE(file_inode(file));
- struct procdata *pd = NULL;
- hysdn_card *card;
+ hysdn_card *card = PDE_DATA(file_inode(file));
if (!*((struct log_data **) file->private_data)) {
+ struct procdata *pd = card->proclog;
if (file->f_flags & O_NONBLOCK)
return (-EAGAIN);
- /* sorry, but we need to search the card */
- card = card_root;
- while (card) {
- pd = card->proclog;
- if (pd->log == pde)
- break;
- card = card->next; /* search next entry */
- }
- if (card)
- interruptible_sleep_on(&(pd->rd_queue));
- else
- return (-EAGAIN);
-
+ interruptible_sleep_on(&(pd->rd_queue));
}
if (!(inf = *((struct log_data **) file->private_data)))
return (0);
@@ -215,27 +202,15 @@ hysdn_log_read(struct file *file, char __user *buf, size_t count, loff_t *off)
static int
hysdn_log_open(struct inode *ino, struct file *filep)
{
- hysdn_card *card;
- struct procdata *pd = NULL;
- unsigned long flags;
+ hysdn_card *card = PDE_DATA(ino);
mutex_lock(&hysdn_log_mutex);
- card = card_root;
- while (card) {
- pd = card->proclog;
- if (pd->log == PDE(ino))
- break;
- card = card->next; /* search next entry */
- }
- if (!card) {
- mutex_unlock(&hysdn_log_mutex);
- return (-ENODEV); /* device is unknown/invalid */
- }
- filep->private_data = card; /* remember our own card */
-
if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
/* write only access -> write log level only */
+ filep->private_data = card; /* remember our own card */
} else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
+ struct procdata *pd = card->proclog;
+ unsigned long flags;
/* read access -> log/debug read */
spin_lock_irqsave(&card->hysdn_lock, flags);
@@ -275,21 +250,13 @@ hysdn_log_close(struct inode *ino, struct file *filep)
} else {
/* read access -> log/debug read, mark one further file as closed */
- pd = NULL;
inf = *((struct log_data **) filep->private_data); /* get first log entry */
if (inf)
pd = (struct procdata *) inf->proc_ctrl; /* still entries there */
else {
/* no info available -> search card */
- card = card_root;
- while (card) {
- pd = card->proclog;
- if (pd->log == PDE(ino))
- break;
- card = card->next; /* search next entry */
- }
- if (card)
- pd = card->proclog; /* pointer to procfs log */
+ card = PDE_DATA(file_inode(filep));
+ pd = card->proclog; /* pointer to procfs log */
}
if (pd)
pd->if_used--; /* decrement interface usage count by one */
@@ -319,24 +286,12 @@ static unsigned int
hysdn_log_poll(struct file *file, poll_table *wait)
{
unsigned int mask = 0;
- struct proc_dir_entry *pde = PDE(file_inode(file));
- hysdn_card *card;
- struct procdata *pd = NULL;
+ hysdn_card *card = PDE_DATA(file_inode(file));
+ struct procdata *pd = card->proclog;
if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE)
return (mask); /* no polling for write supported */
- /* we need to search the card */
- card = card_root;
- while (card) {
- pd = card->proclog;
- if (pd->log == pde)
- break;
- card = card->next; /* search next entry */
- }
- if (!card)
- return (mask); /* card not found */
-
poll_wait(file, &(pd->rd_queue), wait);
if (*((struct log_data **) file->private_data))
@@ -373,9 +328,9 @@ hysdn_proclog_init(hysdn_card *card)
if ((pd = kzalloc(sizeof(struct procdata), GFP_KERNEL)) != NULL) {
sprintf(pd->log_name, "%s%d", PROC_LOG_BASENAME, card->myid);
- pd->log = proc_create(pd->log_name,
+ pd->log = proc_create_data(pd->log_name,
S_IFREG | S_IRUGO | S_IWUSR, hysdn_proc_entry,
- &log_fops);
+ &log_fops, card);
init_waitqueue_head(&(pd->rd_queue));
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index 1094667..9438d7e 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -64,7 +64,6 @@ mISDN_open(struct inode *ino, struct file *filep)
dev->work = 0;
init_waitqueue_head(&dev->wait);
filep->private_data = dev;
- __module_get(THIS_MODULE);
return nonseekable_open(ino, filep);
}
@@ -72,19 +71,28 @@ static int
mISDN_close(struct inode *ino, struct file *filep)
{
struct mISDNtimerdev *dev = filep->private_data;
+ struct list_head *list = &dev->pending;
struct mISDNtimer *timer, *next;
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s(%p,%p)\n", __func__, ino, filep);
- list_for_each_entry_safe(timer, next, &dev->pending, list) {
- del_timer(&timer->tl);
+
+ spin_lock_irq(&dev->lock);
+ while (!list_empty(list)) {
+ timer = list_first_entry(list, struct mISDNtimer, list);
+ spin_unlock_irq(&dev->lock);
+ del_timer_sync(&timer->tl);
+ spin_lock_irq(&dev->lock);
+ /* it might have been moved to ->expired */
+ list_del(&timer->list);
kfree(timer);
}
+ spin_unlock_irq(&dev->lock);
+
list_for_each_entry_safe(timer, next, &dev->expired, list) {
kfree(timer);
}
kfree(dev);
- module_put(THIS_MODULE);
return 0;
}
@@ -92,36 +100,41 @@ static ssize_t
mISDN_read(struct file *filep, char __user *buf, size_t count, loff_t *off)
{
struct mISDNtimerdev *dev = filep->private_data;
+ struct list_head *list = &dev->expired;
struct mISDNtimer *timer;
- u_long flags;
int ret = 0;
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s(%p, %p, %d, %p)\n", __func__,
filep, buf, (int)count, off);
- if (list_empty(&dev->expired) && (dev->work == 0)) {
+ if (count < sizeof(int))
+ return -ENOSPC;
+
+ spin_lock_irq(&dev->lock);
+ while (list_empty(list) && (dev->work == 0)) {
+ spin_unlock_irq(&dev->lock);
if (filep->f_flags & O_NONBLOCK)
return -EAGAIN;
wait_event_interruptible(dev->wait, (dev->work ||
- !list_empty(&dev->expired)));
+ !list_empty(list)));
if (signal_pending(current))
return -ERESTARTSYS;
+ spin_lock_irq(&dev->lock);
}
- if (count < sizeof(int))
- return -ENOSPC;
if (dev->work)
dev->work = 0;
- if (!list_empty(&dev->expired)) {
- spin_lock_irqsave(&dev->lock, flags);
- timer = (struct mISDNtimer *)dev->expired.next;
+ if (!list_empty(list)) {
+ timer = list_first_entry(list, struct mISDNtimer, list);
list_del(&timer->list);
- spin_unlock_irqrestore(&dev->lock, flags);
+ spin_unlock_irq(&dev->lock);
if (put_user(timer->id, (int __user *)buf))
ret = -EFAULT;
else
ret = sizeof(int);
kfree(timer);
+ } else {
+ spin_unlock_irq(&dev->lock);
}
return ret;
}
@@ -153,7 +166,8 @@ dev_expire_timer(unsigned long data)
u_long flags;
spin_lock_irqsave(&timer->dev->lock, flags);
- list_move_tail(&timer->list, &timer->dev->expired);
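+	/* misdn_del_timer() sets id to -1 while it tears the timer down */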
+ if (timer->id >= 0)
+ list_move_tail(&timer->list, &timer->dev->expired);
spin_unlock_irqrestore(&timer->dev->lock, flags);
wake_up_interruptible(&timer->dev->wait);
}
@@ -162,7 +176,6 @@ static int
misdn_add_timer(struct mISDNtimerdev *dev, int timeout)
{
int id;
- u_long flags;
struct mISDNtimer *timer;
if (!timeout) {
@@ -173,19 +186,16 @@ misdn_add_timer(struct mISDNtimerdev *dev, int timeout)
timer = kzalloc(sizeof(struct mISDNtimer), GFP_KERNEL);
if (!timer)
return -ENOMEM;
- spin_lock_irqsave(&dev->lock, flags);
- timer->id = dev->next_id++;
+ timer->dev = dev;
+ setup_timer(&timer->tl, dev_expire_timer, (long)timer);
+ spin_lock_irq(&dev->lock);
+ id = timer->id = dev->next_id++;
if (dev->next_id < 0)
dev->next_id = 1;
list_add_tail(&timer->list, &dev->pending);
- spin_unlock_irqrestore(&dev->lock, flags);
- timer->dev = dev;
- timer->tl.data = (long)timer;
- timer->tl.function = dev_expire_timer;
- init_timer(&timer->tl);
timer->tl.expires = jiffies + ((HZ * (u_long)timeout) / 1000);
add_timer(&timer->tl);
- id = timer->id;
+ spin_unlock_irq(&dev->lock);
}
return id;
}
@@ -193,26 +203,21 @@ misdn_add_timer(struct mISDNtimerdev *dev, int timeout)
static int
misdn_del_timer(struct mISDNtimerdev *dev, int id)
{
- u_long flags;
struct mISDNtimer *timer;
- int ret = 0;
- spin_lock_irqsave(&dev->lock, flags);
+ spin_lock_irq(&dev->lock);
list_for_each_entry(timer, &dev->pending, list) {
if (timer->id == id) {
list_del_init(&timer->list);
- /* RED-PEN AK: race -- timer can be still running on
- * other CPU. Needs reference count I think
- */
- del_timer(&timer->tl);
- ret = timer->id;
+ timer->id = -1;
+ spin_unlock_irq(&dev->lock);
+ del_timer_sync(&timer->tl);
kfree(timer);
- goto unlock;
+ return id;
}
}
-unlock:
- spin_unlock_irqrestore(&dev->lock, flags);
- return ret;
+ spin_unlock_irq(&dev->lock);
+ return 0;
}
static long
@@ -262,6 +267,7 @@ mISDN_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
}
static const struct file_operations mISDN_fops = {
+ .owner = THIS_MODULE,
.read = mISDN_read,
.poll = mISDN_poll,
.unlocked_ioctl = mISDN_ioctl,
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index ec50824..d44806d 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -194,8 +194,8 @@ config LEDS_LP3944
module will be called leds-lp3944.
config LEDS_LP55XX_COMMON
- tristate "Common Driver for TI/National LP5521 and LP5523/55231"
- depends on LEDS_LP5521 || LEDS_LP5523
+ tristate "Common Driver for TI/National LP5521, LP5523/55231 and LP5562"
+ depends on LEDS_LP5521 || LEDS_LP5523 || LEDS_LP5562
select FW_LOADER
help
This option supports common operations for LP5521 and LP5523/55231
@@ -222,6 +222,16 @@ config LEDS_LP5523
Driver provides direct control via LED class and interface for
programming the engines.
+config LEDS_LP5562
+ tristate "LED Support for TI LP5562 LED driver chip"
+ depends on LEDS_CLASS && I2C
+ select LEDS_LP55XX_COMMON
+ help
+	  If you say yes here you get support for the TI LP5562 LED driver.
+	  It is a 4-channel chip with programmable engines.
+	  The driver provides direct control via the LED class and an
+	  interface for programming the engines.
+
config LEDS_LP8788
tristate "LED support for the TI LP8788 PMIC"
depends on LEDS_CLASS
@@ -469,106 +479,7 @@ config LEDS_BLINKM
This option enables support for the BlinkM RGB LED connected
through I2C. Say Y to enable support for the BlinkM LED.
-config LEDS_TRIGGERS
- bool "LED Trigger support"
- depends on LEDS_CLASS
- help
- This option enables trigger support for the leds class.
- These triggers allow kernel events to drive the LEDs and can
- be configured via sysfs. If unsure, say Y.
-
comment "LED Triggers"
-
-config LEDS_TRIGGER_TIMER
- tristate "LED Timer Trigger"
- depends on LEDS_TRIGGERS
- help
- This allows LEDs to be controlled by a programmable timer
- via sysfs. Some LED hardware can be programmed to start
- blinking the LED without any further software interaction.
- For more details read Documentation/leds/leds-class.txt.
-
- If unsure, say Y.
-
-config LEDS_TRIGGER_ONESHOT
- tristate "LED One-shot Trigger"
- depends on LEDS_TRIGGERS
- help
- This allows LEDs to blink in one-shot pulses with parameters
- controlled via sysfs. It's useful to notify the user on
- sporadic events, when there are no clear begin and end trap points,
- or on dense events, where this blinks the LED at constant rate if
- rearmed continuously.
-
- It also shows how to use the led_blink_set_oneshot() function.
-
- If unsure, say Y.
-
-config LEDS_TRIGGER_IDE_DISK
- bool "LED IDE Disk Trigger"
- depends on IDE_GD_ATA
- depends on LEDS_TRIGGERS
- help
- This allows LEDs to be controlled by IDE disk activity.
- If unsure, say Y.
-
-config LEDS_TRIGGER_HEARTBEAT
- tristate "LED Heartbeat Trigger"
- depends on LEDS_TRIGGERS
- help
- This allows LEDs to be controlled by a CPU load average.
- The flash frequency is a hyperbolic function of the 1-minute
- load average.
- If unsure, say Y.
-
-config LEDS_TRIGGER_BACKLIGHT
- tristate "LED backlight Trigger"
- depends on LEDS_TRIGGERS
- help
- This allows LEDs to be controlled as a backlight device: they
- turn off and on when the display is blanked and unblanked.
-
- If unsure, say N.
-
-config LEDS_TRIGGER_CPU
- bool "LED CPU Trigger"
- depends on LEDS_TRIGGERS
- help
- This allows LEDs to be controlled by active CPUs. This shows
- the active CPUs across an array of LEDs so you can see which
- CPUs are active on the system at any given moment.
-
- If unsure, say N.
-
-config LEDS_TRIGGER_GPIO
- tristate "LED GPIO Trigger"
- depends on LEDS_TRIGGERS
- depends on GPIOLIB
- help
- This allows LEDs to be controlled by gpio events. It's good
- when using gpios as switches and triggering the needed LEDs
- from there. One use case is n810's keypad LEDs that could
- be triggered by this trigger when user slides up to show
- keypad.
-
- If unsure, say N.
-
-config LEDS_TRIGGER_DEFAULT_ON
- tristate "LED Default ON Trigger"
- depends on LEDS_TRIGGERS
- help
- This allows LEDs to be initialised in the ON state.
- If unsure, say Y.
-
-comment "iptables trigger is under Netfilter config (LED target)"
- depends on LEDS_TRIGGERS
-
-config LEDS_TRIGGER_TRANSIENT
- tristate "LED Transient Trigger"
- depends on LEDS_TRIGGERS
- help
- This allows one time activation of a transient state on
- GPIO/PWM based hardware.
- If unsure, say Y.
+source "drivers/leds/trigger/Kconfig"
endif # NEW_LEDS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 215e7e3..ac28977 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o
obj-$(CONFIG_LEDS_LP55XX_COMMON) += leds-lp55xx-common.o
obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o
obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o
+obj-$(CONFIG_LEDS_LP5562) += leds-lp5562.o
obj-$(CONFIG_LEDS_LP8788) += leds-lp8788.o
obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o
obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
@@ -57,12 +58,4 @@ obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
# LED Triggers
-obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o
-obj-$(CONFIG_LEDS_TRIGGER_ONESHOT) += ledtrig-oneshot.o
-obj-$(CONFIG_LEDS_TRIGGER_IDE_DISK) += ledtrig-ide-disk.o
-obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o
-obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT) += ledtrig-backlight.o
-obj-$(CONFIG_LEDS_TRIGGER_GPIO) += ledtrig-gpio.o
-obj-$(CONFIG_LEDS_TRIGGER_CPU) += ledtrig-cpu.o
-obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
-obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT) += ledtrig-transient.o
+obj-$(CONFIG_LEDS_TRIGGERS) += trigger/
diff --git a/drivers/leds/leds-asic3.c b/drivers/leds/leds-asic3.c
index b474745..cf9efe4 100644
--- a/drivers/leds/leds-asic3.c
+++ b/drivers/leds/leds-asic3.c
@@ -134,6 +134,7 @@ static int asic3_led_remove(struct platform_device *pdev)
return mfd_cell_disable(pdev);
}
+#ifdef CONFIG_PM_SLEEP
static int asic3_led_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -159,11 +160,9 @@ static int asic3_led_resume(struct device *dev)
return ret;
}
+#endif
-static const struct dev_pm_ops asic3_led_pm_ops = {
- .suspend = asic3_led_suspend,
- .resume = asic3_led_resume,
-};
+static SIMPLE_DEV_PM_OPS(asic3_led_pm_ops, asic3_led_suspend, asic3_led_resume);
static struct platform_driver asic3_led_driver = {
.probe = asic3_led_probe,
diff --git a/drivers/leds/leds-atmel-pwm.c b/drivers/leds/leds-atmel-pwm.c
index 3867735..8a39c5b 100644
--- a/drivers/leds/leds-atmel-pwm.c
+++ b/drivers/leds/leds-atmel-pwm.c
@@ -113,7 +113,7 @@ err:
return status;
}
-static int __exit pwmled_remove(struct platform_device *pdev)
+static int pwmled_remove(struct platform_device *pdev)
{
const struct gpio_led_platform_data *pdata;
struct pwmled *leds;
@@ -140,7 +140,7 @@ static struct platform_driver pwmled_driver = {
},
/* REVISIT add suspend() and resume() methods */
.probe = pwmled_probe,
- .remove = __exit_p(pwmled_remove),
+ .remove = pwmled_remove,
};
module_platform_driver(pwmled_driver);
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 8515170..2db0423 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -732,7 +732,7 @@ failed_unregister_dev_file:
return ret;
}
-static int __exit bd2802_remove(struct i2c_client *client)
+static int bd2802_remove(struct i2c_client *client)
{
struct bd2802_led *led = i2c_get_clientdata(client);
int i;
@@ -747,8 +747,7 @@ static int __exit bd2802_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM
-
+#ifdef CONFIG_PM_SLEEP
static void bd2802_restore_state(struct bd2802_led *led)
{
int i;
@@ -785,12 +784,9 @@ static int bd2802_resume(struct device *dev)
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(bd2802_pm, bd2802_suspend, bd2802_resume);
-#define BD2802_PM (&bd2802_pm)
-#else /* CONFIG_PM */
-#define BD2802_PM NULL
-#endif
static const struct i2c_device_id bd2802_id[] = {
{ "BD2802", 0 },
@@ -801,10 +797,10 @@ MODULE_DEVICE_TABLE(i2c, bd2802_id);
static struct i2c_driver bd2802_i2c_driver = {
.driver = {
.name = "BD2802",
- .pm = BD2802_PM,
+ .pm = &bd2802_pm,
},
.probe = bd2802_probe,
- .remove = __exit_p(bd2802_remove),
+ .remove = bd2802_remove,
.id_table = bd2802_id,
};
diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c
index 4117235..d81a8e7 100644
--- a/drivers/leds/leds-lm355x.c
+++ b/drivers/leds/leds-lm355x.c
@@ -477,6 +477,7 @@ static int lm355x_probe(struct i2c_client *client,
chip->cdev_flash.name = "flash";
chip->cdev_flash.max_brightness = 16;
chip->cdev_flash.brightness_set = lm355x_strobe_brightness_set;
+ chip->cdev_flash.default_trigger = "flash";
err = led_classdev_register((struct device *)
&client->dev, &chip->cdev_flash);
if (err < 0)
@@ -486,6 +487,7 @@ static int lm355x_probe(struct i2c_client *client,
chip->cdev_torch.name = "torch";
chip->cdev_torch.max_brightness = 8;
chip->cdev_torch.brightness_set = lm355x_torch_brightness_set;
+ chip->cdev_torch.default_trigger = "torch";
err = led_classdev_register((struct device *)
&client->dev, &chip->cdev_torch);
if (err < 0)
diff --git a/drivers/leds/leds-lm3642.c b/drivers/leds/leds-lm3642.c
index 9f428d9..f361bbe 100644
--- a/drivers/leds/leds-lm3642.c
+++ b/drivers/leds/leds-lm3642.c
@@ -363,6 +363,7 @@ static int lm3642_probe(struct i2c_client *client,
chip->cdev_flash.name = "flash";
chip->cdev_flash.max_brightness = 16;
chip->cdev_flash.brightness_set = lm3642_strobe_brightness_set;
+ chip->cdev_flash.default_trigger = "flash";
err = led_classdev_register((struct device *)
&client->dev, &chip->cdev_flash);
if (err < 0) {
@@ -380,6 +381,7 @@ static int lm3642_probe(struct i2c_client *client,
chip->cdev_torch.name = "torch";
chip->cdev_torch.max_brightness = 8;
chip->cdev_torch.brightness_set = lm3642_torch_brightness_set;
+ chip->cdev_torch.default_trigger = "torch";
err = led_classdev_register((struct device *)
&client->dev, &chip->cdev_torch);
if (err < 0) {
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 1001347..19752c9 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -68,6 +68,18 @@
#define LP5521_ENABLE_RUN_PROGRAM \
(LP5521_ENABLE_DEFAULT | LP5521_EXEC_RUN)
+/* CONFIG register */
+#define LP5521_PWM_HF 0x40 /* PWM: 0 = 256Hz, 1 = 558Hz */
+#define LP5521_PWRSAVE_EN 0x20 /* 1 = Power save mode */
+#define LP5521_CP_MODE_OFF 0 /* Charge pump (CP) off */
+#define LP5521_CP_MODE_BYPASS 8 /* CP forced to bypass mode */
+#define LP5521_CP_MODE_1X5 0x10 /* CP forced to 1.5x mode */
+#define LP5521_CP_MODE_AUTO 0x18 /* Automatic mode selection */
+#define LP5521_R_TO_BATT 0x04 /* R out: 0 = CP, 1 = Vbat */
+#define LP5521_CLK_INT 0x01 /* Internal clock */
+#define LP5521_DEFAULT_CFG \
+ (LP5521_PWM_HF | LP5521_PWRSAVE_EN | LP5521_CP_MODE_AUTO)
+
/* Status */
#define LP5521_EXT_CLK_USED 0x08
@@ -296,8 +308,11 @@ static int lp5521_post_init_device(struct lp55xx_chip *chip)
/* Set all PWMs to direct control mode */
ret = lp55xx_write(chip, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
- val = chip->pdata->update_config ?
- : (LP5521_PWRSAVE_EN | LP5521_CP_MODE_AUTO | LP5521_R_TO_BATT);
+ /* Update configuration for the clock setting */
+ val = LP5521_DEFAULT_CFG;
+ if (!lp55xx_is_extclk_used(chip))
+ val |= LP5521_CLK_INT;
+
ret = lp55xx_write(chip, LP5521_REG_CONFIG, val);
if (ret)
return ret;
@@ -360,7 +375,8 @@ static ssize_t lp5521_selftest(struct device *dev,
mutex_lock(&chip->lock);
ret = lp5521_run_selftest(chip, buf);
mutex_unlock(&chip->lock);
- return sprintf(buf, "%s\n", ret ? "FAIL" : "OK");
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", ret ? "FAIL" : "OK");
}
/* device attributes */
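A sysfs show() callback is handed a single PAGE_SIZE buffer, and scnprintf() both bounds the write and returns the number of bytes actually stored, so it is the safer idiom for these attributes. A minimal sketch with a hypothetical attribute:

	static ssize_t foo_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	{
		/* never write past the one page sysfs provides */
		return scnprintf(buf, PAGE_SIZE, "%d\n", 42);
	}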
diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
new file mode 100644
index 0000000..513f239
--- /dev/null
+++ b/drivers/leds/leds-lp5562.c
@@ -0,0 +1,599 @@
+/*
+ * LP5562 LED driver
+ *
+ * Copyright (C) 2013 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_data/leds-lp55xx.h>
+#include <linux/slab.h>
+
+#include "leds-lp55xx-common.h"
+
+#define LP5562_PROGRAM_LENGTH 32
+#define LP5562_MAX_LEDS 4
+
+/* ENABLE Register 00h */
+#define LP5562_REG_ENABLE 0x00
+#define LP5562_EXEC_ENG1_M 0x30
+#define LP5562_EXEC_ENG2_M 0x0C
+#define LP5562_EXEC_ENG3_M 0x03
+#define LP5562_EXEC_M 0x3F
+#define LP5562_MASTER_ENABLE 0x40 /* Chip master enable */
+#define LP5562_LOGARITHMIC_PWM 0x80 /* Logarithmic PWM adjustment */
+#define LP5562_EXEC_RUN 0x2A
+#define LP5562_ENABLE_DEFAULT \
+ (LP5562_MASTER_ENABLE | LP5562_LOGARITHMIC_PWM)
+#define LP5562_ENABLE_RUN_PROGRAM \
+ (LP5562_ENABLE_DEFAULT | LP5562_EXEC_RUN)
+
+/* OPMODE Register 01h */
+#define LP5562_REG_OP_MODE 0x01
+#define LP5562_MODE_ENG1_M 0x30
+#define LP5562_MODE_ENG2_M 0x0C
+#define LP5562_MODE_ENG3_M 0x03
+#define LP5562_LOAD_ENG1 0x10
+#define LP5562_LOAD_ENG2 0x04
+#define LP5562_LOAD_ENG3 0x01
+#define LP5562_RUN_ENG1 0x20
+#define LP5562_RUN_ENG2 0x08
+#define LP5562_RUN_ENG3 0x02
+#define LP5562_ENG1_IS_LOADING(mode) \
+ ((mode & LP5562_MODE_ENG1_M) == LP5562_LOAD_ENG1)
+#define LP5562_ENG2_IS_LOADING(mode) \
+ ((mode & LP5562_MODE_ENG2_M) == LP5562_LOAD_ENG2)
+#define LP5562_ENG3_IS_LOADING(mode) \
+ ((mode & LP5562_MODE_ENG3_M) == LP5562_LOAD_ENG3)
+
+/* BRIGHTNESS Registers */
+#define LP5562_REG_R_PWM 0x04
+#define LP5562_REG_G_PWM 0x03
+#define LP5562_REG_B_PWM 0x02
+#define LP5562_REG_W_PWM 0x0E
+
+/* CURRENT Registers */
+#define LP5562_REG_R_CURRENT 0x07
+#define LP5562_REG_G_CURRENT 0x06
+#define LP5562_REG_B_CURRENT 0x05
+#define LP5562_REG_W_CURRENT 0x0F
+
+/* CONFIG Register 08h */
+#define LP5562_REG_CONFIG 0x08
+#define LP5562_PWM_HF 0x40
+#define LP5562_PWRSAVE_EN 0x20
+#define LP5562_CLK_INT 0x01 /* Internal clock */
+#define LP5562_DEFAULT_CFG (LP5562_PWM_HF | LP5562_PWRSAVE_EN)
+
+/* RESET Register 0Dh */
+#define LP5562_REG_RESET 0x0D
+#define LP5562_RESET 0xFF
+
+/* PROGRAM ENGINE Registers */
+#define LP5562_REG_PROG_MEM_ENG1 0x10
+#define LP5562_REG_PROG_MEM_ENG2 0x30
+#define LP5562_REG_PROG_MEM_ENG3 0x50
+
+/* LEDMAP Register 70h */
+#define LP5562_REG_ENG_SEL 0x70
+#define LP5562_ENG_SEL_PWM 0
+#define LP5562_ENG_FOR_RGB_M 0x3F
+#define LP5562_ENG_SEL_RGB 0x1B /* R:ENG1, G:ENG2, B:ENG3 */
+#define LP5562_ENG_FOR_W_M 0xC0
+#define LP5562_ENG1_FOR_W 0x40 /* W:ENG1 */
+#define LP5562_ENG2_FOR_W 0x80 /* W:ENG2 */
+#define LP5562_ENG3_FOR_W 0xC0 /* W:ENG3 */
+
+/* Program Commands */
+#define LP5562_CMD_DISABLE 0x00
+#define LP5562_CMD_LOAD 0x15
+#define LP5562_CMD_RUN 0x2A
+#define LP5562_CMD_DIRECT 0x3F
+#define LP5562_PATTERN_OFF 0
+
+static inline void lp5562_wait_opmode_done(void)
+{
+ /* an operation mode change needs longer than 153 us to take effect */
+ usleep_range(200, 300);
+}
+
+static inline void lp5562_wait_enable_done(void)
+{
+ /* it takes more than 488 us to update the ENABLE register */
+ usleep_range(500, 600);
+}
+
+static void lp5562_set_led_current(struct lp55xx_led *led, u8 led_current)
+{
+ u8 addr[] = {
+ LP5562_REG_R_CURRENT,
+ LP5562_REG_G_CURRENT,
+ LP5562_REG_B_CURRENT,
+ LP5562_REG_W_CURRENT,
+ };
+
+ led->led_current = led_current;
+ lp55xx_write(led->chip, addr[led->chan_nr], led_current);
+}
+
+static void lp5562_load_engine(struct lp55xx_chip *chip)
+{
+ enum lp55xx_engine_index idx = chip->engine_idx;
+ u8 mask[] = {
+ [LP55XX_ENGINE_1] = LP5562_MODE_ENG1_M,
+ [LP55XX_ENGINE_2] = LP5562_MODE_ENG2_M,
+ [LP55XX_ENGINE_3] = LP5562_MODE_ENG3_M,
+ };
+
+ u8 val[] = {
+ [LP55XX_ENGINE_1] = LP5562_LOAD_ENG1,
+ [LP55XX_ENGINE_2] = LP5562_LOAD_ENG2,
+ [LP55XX_ENGINE_3] = LP5562_LOAD_ENG3,
+ };
+
+ lp55xx_update_bits(chip, LP5562_REG_OP_MODE, mask[idx], val[idx]);
+
+ lp5562_wait_opmode_done();
+}
+
+static void lp5562_stop_engine(struct lp55xx_chip *chip)
+{
+ lp55xx_write(chip, LP5562_REG_OP_MODE, LP5562_CMD_DISABLE);
+ lp5562_wait_opmode_done();
+}
+
+static void lp5562_run_engine(struct lp55xx_chip *chip, bool start)
+{
+ int ret;
+ u8 mode;
+ u8 exec;
+
+ /* stop engine */
+ if (!start) {
+ lp55xx_write(chip, LP5562_REG_ENABLE, LP5562_ENABLE_DEFAULT);
+ lp5562_wait_enable_done();
+ lp5562_stop_engine(chip);
+ lp55xx_write(chip, LP5562_REG_ENG_SEL, LP5562_ENG_SEL_PWM);
+ lp55xx_write(chip, LP5562_REG_OP_MODE, LP5562_CMD_DIRECT);
+ lp5562_wait_opmode_done();
+ return;
+ }
+
+ /*
+ * To run the engine,
+ * the operation mode and enable registers should be updated at the same time
+ */
+
+ ret = lp55xx_read(chip, LP5562_REG_OP_MODE, &mode);
+ if (ret)
+ return;
+
+ ret = lp55xx_read(chip, LP5562_REG_ENABLE, &exec);
+ if (ret)
+ return;
+
+ /* change operation mode to RUN only when each engine is loading */
+ if (LP5562_ENG1_IS_LOADING(mode)) {
+ mode = (mode & ~LP5562_MODE_ENG1_M) | LP5562_RUN_ENG1;
+ exec = (exec & ~LP5562_EXEC_ENG1_M) | LP5562_RUN_ENG1;
+ }
+
+ if (LP5562_ENG2_IS_LOADING(mode)) {
+ mode = (mode & ~LP5562_MODE_ENG2_M) | LP5562_RUN_ENG2;
+ exec = (exec & ~LP5562_EXEC_ENG2_M) | LP5562_RUN_ENG2;
+ }
+
+ if (LP5562_ENG3_IS_LOADING(mode)) {
+ mode = (mode & ~LP5562_MODE_ENG3_M) | LP5562_RUN_ENG3;
+ exec = (exec & ~LP5562_EXEC_ENG3_M) | LP5562_RUN_ENG3;
+ }
+
+ lp55xx_write(chip, LP5562_REG_OP_MODE, mode);
+ lp5562_wait_opmode_done();
+
+ lp55xx_update_bits(chip, LP5562_REG_ENABLE, LP5562_EXEC_M, exec);
+ lp5562_wait_enable_done();
+}
+
+static int lp5562_update_firmware(struct lp55xx_chip *chip,
+ const u8 *data, size_t size)
+{
+ enum lp55xx_engine_index idx = chip->engine_idx;
+ u8 pattern[LP5562_PROGRAM_LENGTH] = {0};
+ u8 addr[] = {
+ [LP55XX_ENGINE_1] = LP5562_REG_PROG_MEM_ENG1,
+ [LP55XX_ENGINE_2] = LP5562_REG_PROG_MEM_ENG2,
+ [LP55XX_ENGINE_3] = LP5562_REG_PROG_MEM_ENG3,
+ };
+ unsigned cmd;
+ char c[3];
+ int program_size;
+ int nrchars;
+ int offset = 0;
+ int ret;
+ int i;
+
+ /* clear program memory before updating */
+ for (i = 0; i < LP5562_PROGRAM_LENGTH; i++)
+ lp55xx_write(chip, addr[idx] + i, 0);
+
+ i = 0;
+ while ((offset < size - 1) && (i < LP5562_PROGRAM_LENGTH)) {
+ /* separate sscanf calls because the length specifier works only for %s */
+ ret = sscanf(data + offset, "%2s%n ", c, &nrchars);
+ if (ret != 1)
+ goto err;
+
+ ret = sscanf(c, "%2x", &cmd);
+ if (ret != 1)
+ goto err;
+
+ pattern[i] = (u8)cmd;
+ offset += nrchars;
+ i++;
+ }
+
+ /* Each instruction is 16 bits long. Check that the length is even */
+ if (i % 2)
+ goto err;
+
+ program_size = i;
+ for (i = 0; i < program_size; i++)
+ lp55xx_write(chip, addr[idx] + i, pattern[i]);
+
+ return 0;
+
+err:
+ dev_err(&chip->cl->dev, "wrong pattern format\n");
+ return -EINVAL;
+}
+
+static void lp5562_firmware_loaded(struct lp55xx_chip *chip)
+{
+ const struct firmware *fw = chip->fw;
+
+ if (fw->size > LP5562_PROGRAM_LENGTH) {
+ dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n",
+ fw->size);
+ return;
+ }
+
+ /*
+ * Program memory sequence
+ * 1) set engine mode to "LOAD"
+ * 2) write firmware data into program memory
+ */
+
+ lp5562_load_engine(chip);
+ lp5562_update_firmware(chip, fw->data, fw->size);
+}
+
+static int lp5562_post_init_device(struct lp55xx_chip *chip)
+{
+ int ret;
+ u8 cfg = LP5562_DEFAULT_CFG;
+
+ /* Set all PWMs to direct control mode */
+ ret = lp55xx_write(chip, LP5562_REG_OP_MODE, LP5562_CMD_DIRECT);
+ if (ret)
+ return ret;
+
+ lp5562_wait_opmode_done();
+
+ /* Update configuration for the clock setting */
+ if (!lp55xx_is_extclk_used(chip))
+ cfg |= LP5562_CLK_INT;
+
+ ret = lp55xx_write(chip, LP5562_REG_CONFIG, cfg);
+ if (ret)
+ return ret;
+
+ /* Initialize all channel PWMs to zero -> LEDs off */
+ lp55xx_write(chip, LP5562_REG_R_PWM, 0);
+ lp55xx_write(chip, LP5562_REG_G_PWM, 0);
+ lp55xx_write(chip, LP5562_REG_B_PWM, 0);
+ lp55xx_write(chip, LP5562_REG_W_PWM, 0);
+
+ /* Set LED map as register PWM by default */
+ lp55xx_write(chip, LP5562_REG_ENG_SEL, LP5562_ENG_SEL_PWM);
+
+ return 0;
+}
+
+static void lp5562_led_brightness_work(struct work_struct *work)
+{
+ struct lp55xx_led *led = container_of(work, struct lp55xx_led,
+ brightness_work);
+ struct lp55xx_chip *chip = led->chip;
+ u8 addr[] = {
+ LP5562_REG_R_PWM,
+ LP5562_REG_G_PWM,
+ LP5562_REG_B_PWM,
+ LP5562_REG_W_PWM,
+ };
+
+ mutex_lock(&chip->lock);
+ lp55xx_write(chip, addr[led->chan_nr], led->brightness);
+ mutex_unlock(&chip->lock);
+}
+
+static void lp5562_write_program_memory(struct lp55xx_chip *chip,
+ u8 base, const u8 *rgb, int size)
+{
+ int i;
+
+ if (!rgb || size <= 0)
+ return;
+
+ for (i = 0; i < size; i++)
+ lp55xx_write(chip, base + i, *(rgb + i));
+
+ lp55xx_write(chip, base + i, 0);
+ lp55xx_write(chip, base + i + 1, 0);
+}
+
+/* Check that each pattern fits within the engine program memory */
+static inline bool _is_pc_overflow(struct lp55xx_predef_pattern *ptn)
+{
+ return (ptn->size_r >= LP5562_PROGRAM_LENGTH ||
+ ptn->size_g >= LP5562_PROGRAM_LENGTH ||
+ ptn->size_b >= LP5562_PROGRAM_LENGTH);
+}
+
+static int lp5562_run_predef_led_pattern(struct lp55xx_chip *chip, int mode)
+{
+ struct lp55xx_predef_pattern *ptn;
+ int i;
+
+ if (mode == LP5562_PATTERN_OFF) {
+ lp5562_run_engine(chip, false);
+ return 0;
+ }
+
+ ptn = chip->pdata->patterns + (mode - 1);
+ if (!ptn || _is_pc_overflow(ptn)) {
+ dev_err(&chip->cl->dev, "invalid pattern data\n");
+ return -EINVAL;
+ }
+
+ lp5562_stop_engine(chip);
+
+ /* Set LED map as RGB */
+ lp55xx_write(chip, LP5562_REG_ENG_SEL, LP5562_ENG_SEL_RGB);
+
+ /* Load engines */
+ for (i = LP55XX_ENGINE_1; i <= LP55XX_ENGINE_3; i++) {
+ chip->engine_idx = i;
+ lp5562_load_engine(chip);
+ }
+
+ /* Clear program registers */
+ lp55xx_write(chip, LP5562_REG_PROG_MEM_ENG1, 0);
+ lp55xx_write(chip, LP5562_REG_PROG_MEM_ENG1 + 1, 0);
+ lp55xx_write(chip, LP5562_REG_PROG_MEM_ENG2, 0);
+ lp55xx_write(chip, LP5562_REG_PROG_MEM_ENG2 + 1, 0);
+ lp55xx_write(chip, LP5562_REG_PROG_MEM_ENG3, 0);
+ lp55xx_write(chip, LP5562_REG_PROG_MEM_ENG3 + 1, 0);
+
+ /* Program engines */
+ lp5562_write_program_memory(chip, LP5562_REG_PROG_MEM_ENG1,
+ ptn->r, ptn->size_r);
+ lp5562_write_program_memory(chip, LP5562_REG_PROG_MEM_ENG2,
+ ptn->g, ptn->size_g);
+ lp5562_write_program_memory(chip, LP5562_REG_PROG_MEM_ENG3,
+ ptn->b, ptn->size_b);
+
+ /* Run engines */
+ lp5562_run_engine(chip, true);
+
+ return 0;
+}
+
+static ssize_t lp5562_store_pattern(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+ struct lp55xx_predef_pattern *ptn = chip->pdata->patterns;
+ int num_patterns = chip->pdata->num_patterns;
+ unsigned long mode;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &mode);
+ if (ret)
+ return ret;
+
+ if (mode > num_patterns || !ptn)
+ return -EINVAL;
+
+ mutex_lock(&chip->lock);
+ ret = lp5562_run_predef_led_pattern(chip, mode);
+ mutex_unlock(&chip->lock);
+
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t lp5562_store_engine_mux(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+ u8 mask;
+ u8 val;
+
+ /* LED map
+ * R ... Engine 1 (fixed)
+ * G ... Engine 2 (fixed)
+ * B ... Engine 3 (fixed)
+ * W ... Engine 1 or 2 or 3
+ */
+
+ if (sysfs_streq(buf, "RGB")) {
+ mask = LP5562_ENG_FOR_RGB_M;
+ val = LP5562_ENG_SEL_RGB;
+ } else if (sysfs_streq(buf, "W")) {
+ enum lp55xx_engine_index idx = chip->engine_idx;
+
+ mask = LP5562_ENG_FOR_W_M;
+ switch (idx) {
+ case LP55XX_ENGINE_1:
+ val = LP5562_ENG1_FOR_W;
+ break;
+ case LP55XX_ENGINE_2:
+ val = LP5562_ENG2_FOR_W;
+ break;
+ case LP55XX_ENGINE_3:
+ val = LP5562_ENG3_FOR_W;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ } else {
+ dev_err(dev, "choose RGB or W\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&chip->lock);
+ lp55xx_update_bits(chip, LP5562_REG_ENG_SEL, mask, val);
+ mutex_unlock(&chip->lock);
+
+ return len;
+}
+
+static DEVICE_ATTR(led_pattern, S_IWUSR, NULL, lp5562_store_pattern);
+static DEVICE_ATTR(engine_mux, S_IWUSR, NULL, lp5562_store_engine_mux);
+
+static struct attribute *lp5562_attributes[] = {
+ &dev_attr_led_pattern.attr,
+ &dev_attr_engine_mux.attr,
+ NULL,
+};
+
+static const struct attribute_group lp5562_group = {
+ .attrs = lp5562_attributes,
+};
+
+/* Chip specific configurations */
+static struct lp55xx_device_config lp5562_cfg = {
+ .max_channel = LP5562_MAX_LEDS,
+ .reset = {
+ .addr = LP5562_REG_RESET,
+ .val = LP5562_RESET,
+ },
+ .enable = {
+ .addr = LP5562_REG_ENABLE,
+ .val = LP5562_ENABLE_DEFAULT,
+ },
+ .post_init_device = lp5562_post_init_device,
+ .set_led_current = lp5562_set_led_current,
+ .brightness_work_fn = lp5562_led_brightness_work,
+ .run_engine = lp5562_run_engine,
+ .firmware_cb = lp5562_firmware_loaded,
+ .dev_attr_group = &lp5562_group,
+};
+
+static int lp5562_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct lp55xx_chip *chip;
+ struct lp55xx_led *led;
+ struct lp55xx_platform_data *pdata = client->dev.platform_data;
+
+ if (!pdata) {
+ dev_err(&client->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ led = devm_kzalloc(&client->dev,
+ sizeof(*led) * pdata->num_channels, GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ chip->cl = client;
+ chip->pdata = pdata;
+ chip->cfg = &lp5562_cfg;
+
+ mutex_init(&chip->lock);
+
+ i2c_set_clientdata(client, led);
+
+ ret = lp55xx_init_device(chip);
+ if (ret)
+ goto err_init;
+
+ ret = lp55xx_register_leds(led, chip);
+ if (ret)
+ goto err_register_leds;
+
+ ret = lp55xx_register_sysfs(chip);
+ if (ret) {
+ dev_err(&client->dev, "registering sysfs failed\n");
+ goto err_register_sysfs;
+ }
+
+ return 0;
+
+err_register_sysfs:
+ lp55xx_unregister_leds(led, chip);
+err_register_leds:
+ lp55xx_deinit_device(chip);
+err_init:
+ return ret;
+}
+
+static int lp5562_remove(struct i2c_client *client)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(client);
+ struct lp55xx_chip *chip = led->chip;
+
+ lp5562_stop_engine(chip);
+
+ lp55xx_unregister_sysfs(chip);
+ lp55xx_unregister_leds(led, chip);
+ lp55xx_deinit_device(chip);
+
+ return 0;
+}
+
+static const struct i2c_device_id lp5562_id[] = {
+ { "lp5562", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lp5562_id);
+
+static struct i2c_driver lp5562_driver = {
+ .driver = {
+ .name = "lp5562",
+ },
+ .probe = lp5562_probe,
+ .remove = lp5562_remove,
+ .id_table = lp5562_id,
+};
+
+module_i2c_driver(lp5562_driver);
+
+MODULE_DESCRIPTION("Texas Instruments LP5562 LED Driver");
+MODULE_AUTHOR("Milo Kim");
+MODULE_LICENSE("GPL");
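The led_pattern attribute above indexes into the platform-supplied lp55xx_predef_pattern array (1..num_patterns, 0 stops the engines). A board-file sketch, with purely illustrative engine bytes and names:

	/* engine program bytes are illustrative only */
	static const u8 board_red_blink[] = {
		0x40, 0xff, 0x60, 0x00, 0x40, 0x00, 0x60, 0x00,
	};

	static struct lp55xx_predef_pattern board_patterns[] = {
		{
			.r	= board_red_blink,
			.size_r	= ARRAY_SIZE(board_red_blink),
		},
	};

	static struct lp55xx_platform_data lp5562_pdata = {
		/* .led_config / .num_channels etc. omitted */
		.patterns	= board_patterns,
		.num_patterns	= ARRAY_SIZE(board_patterns),
	};

Writing "1" to the device's led_pattern attribute then loads and runs that program via lp5562_run_predef_led_pattern().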
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
index d9eb841..ba34199 100644
--- a/drivers/leds/leds-lp55xx-common.c
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -1,5 +1,5 @@
/*
- * LP5521/LP5523/LP55231 Common Driver
+ * LP5521/LP5523/LP55231/LP5562 Common Driver
*
* Copyright 2012 Texas Instruments
*
@@ -12,6 +12,7 @@
* Derived from leds-lp5521.c, leds-lp5523.c
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/i2c.h>
@@ -21,6 +22,9 @@
#include "leds-lp55xx-common.h"
+/* External clock rate */
+#define LP55XX_CLK_32K 32768
+
static struct lp55xx_led *cdev_to_lp55xx_led(struct led_classdev *cdev)
{
return container_of(cdev, struct lp55xx_led, cdev);
@@ -80,7 +84,7 @@ static ssize_t lp55xx_show_current(struct device *dev,
{
struct lp55xx_led *led = dev_to_lp55xx_led(dev);
- return sprintf(buf, "%d\n", led->led_current);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", led->led_current);
}
static ssize_t lp55xx_store_current(struct device *dev,
@@ -113,7 +117,7 @@ static ssize_t lp55xx_show_max_current(struct device *dev,
{
struct lp55xx_led *led = dev_to_lp55xx_led(dev);
- return sprintf(buf, "%d\n", led->max_current);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", led->max_current);
}
static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, lp55xx_show_current,
@@ -357,6 +361,35 @@ int lp55xx_update_bits(struct lp55xx_chip *chip, u8 reg, u8 mask, u8 val)
}
EXPORT_SYMBOL_GPL(lp55xx_update_bits);
+bool lp55xx_is_extclk_used(struct lp55xx_chip *chip)
+{
+ struct clk *clk;
+ int err;
+
+ clk = devm_clk_get(&chip->cl->dev, "32k_clk");
+ if (IS_ERR(clk))
+ goto use_internal_clk;
+
+ err = clk_prepare_enable(clk);
+ if (err)
+ goto use_internal_clk;
+
+ if (clk_get_rate(clk) != LP55XX_CLK_32K) {
+ clk_disable_unprepare(clk);
+ goto use_internal_clk;
+ }
+
+ dev_info(&chip->cl->dev, "%dHz external clock used\n", LP55XX_CLK_32K);
+
+ chip->clk = clk;
+ return true;
+
+use_internal_clk:
+ dev_info(&chip->cl->dev, "internal clock used\n");
+ return false;
+}
+EXPORT_SYMBOL_GPL(lp55xx_is_extclk_used);
+
int lp55xx_init_device(struct lp55xx_chip *chip)
{
struct lp55xx_platform_data *pdata;
@@ -421,6 +454,9 @@ void lp55xx_deinit_device(struct lp55xx_chip *chip)
{
struct lp55xx_platform_data *pdata = chip->pdata;
+ if (chip->clk)
+ clk_disable_unprepare(chip->clk);
+
if (pdata->enable)
pdata->enable(0);
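lp55xx_is_extclk_used() resolves the clock by the connection id "32k_clk" against the chip's I2C device, so non-DT boards that really wire up a 32.768 kHz clock need a matching clkdev lookup. A rough sketch, with hypothetical clock and device names:

	#include <linux/clk.h>
	#include <linux/clkdev.h>
	#include <linux/err.h>

	static void __init board_lp55xx_clk_init(void)
	{
		/* "clk32k" stands in for whatever 32.768 kHz clock exists */
		struct clk *clk32k = clk_get(NULL, "clk32k");

		/* lets devm_clk_get(&client->dev, "32k_clk") find it;
		 * "1-0030" stands in for the chip's i2c device name */
		if (!IS_ERR(clk32k))
			clk_register_clkdev(clk32k, "32k_clk", "1-0030");
	}

If no such lookup (or only a clock at the wrong rate) is found, the code above simply falls back to the internal oscillator.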
diff --git a/drivers/leds/leds-lp55xx-common.h b/drivers/leds/leds-lp55xx-common.h
index ece4761..fa6a078 100644
--- a/drivers/leds/leds-lp55xx-common.h
+++ b/drivers/leds/leds-lp55xx-common.h
@@ -83,6 +83,7 @@ struct lp55xx_device_config {
*/
struct lp55xx_chip {
struct i2c_client *cl;
+ struct clk *clk;
struct lp55xx_platform_data *pdata;
struct mutex lock; /* lock for user-space interface */
int num_leds;
@@ -117,6 +118,9 @@ extern int lp55xx_read(struct lp55xx_chip *chip, u8 reg, u8 *val);
extern int lp55xx_update_bits(struct lp55xx_chip *chip, u8 reg,
u8 mask, u8 val);
+/* external clock detection */
+extern bool lp55xx_is_extclk_used(struct lp55xx_chip *chip);
+
/* common device init/deinit functions */
extern int lp55xx_init_device(struct lp55xx_chip *chip);
extern void lp55xx_deinit_device(struct lp55xx_chip *chip);
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
index c9b9e1f..ca48a7d5 100644
--- a/drivers/leds/leds-lt3593.c
+++ b/drivers/leds/leds-lt3593.c
@@ -106,8 +106,9 @@ static int create_lt3593_led(const struct gpio_led *template,
if (!template->retain_state_suspended)
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
- ret = devm_gpio_request_one(parent, template->gpio,
- GPIOF_DIR_OUT | state, template->name);
+ ret = devm_gpio_request_one(parent, template->gpio, state ?
+ GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
+ template->name);
if (ret < 0)
return ret;
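The point of this change (and the matching ones in leds-ns2 and leds-renesas-tpu below) is that the initial level is a separate flag bit, not the low bit: GPIOF_DIR_OUT is 0, so "GPIOF_DIR_OUT | 1" really sets GPIOF_DIR_IN and requests the line as an input. Roughly, from include/linux/gpio.h:

	#define GPIOF_DIR_OUT		(0 << 0)
	#define GPIOF_DIR_IN		(1 << 0)
	#define GPIOF_INIT_LOW		(0 << 1)
	#define GPIOF_INIT_HIGH		(1 << 1)
	#define GPIOF_OUT_INIT_LOW	(GPIOF_DIR_OUT | GPIOF_INIT_LOW)
	#define GPIOF_OUT_INIT_HIGH	(GPIOF_DIR_OUT | GPIOF_INIT_HIGH)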
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index d978171..70137b1 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -193,7 +193,8 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
enum ns2_led_modes mode;
ret = devm_gpio_request_one(&pdev->dev, template->cmd,
- GPIOF_DIR_OUT | gpio_get_value(template->cmd),
+ gpio_get_value(template->cmd) ?
+ GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
template->name);
if (ret) {
dev_err(&pdev->dev, "%s: failed to setup command GPIO\n",
@@ -202,7 +203,8 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
}
ret = devm_gpio_request_one(&pdev->dev, template->slow,
- GPIOF_DIR_OUT | gpio_get_value(template->slow),
+ gpio_get_value(template->slow) ?
+ GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
template->name);
if (ret) {
dev_err(&pdev->dev, "%s: failed to setup slow GPIO\n",
@@ -306,10 +308,21 @@ static const struct of_device_id of_ns2_leds_match[] = {
};
#endif /* CONFIG_OF_GPIO */
+struct ns2_led_priv {
+ int num_leds;
+ struct ns2_led_data leds_data[];
+};
+
+static inline int sizeof_ns2_led_priv(int num_leds)
+{
+ return sizeof(struct ns2_led_priv) +
+ (sizeof(struct ns2_led_data) * num_leds);
+}
+
static int ns2_led_probe(struct platform_device *pdev)
{
struct ns2_led_platform_data *pdata = pdev->dev.platform_data;
- struct ns2_led_data *leds_data;
+ struct ns2_led_priv *priv;
int i;
int ret;
@@ -330,21 +343,23 @@ static int ns2_led_probe(struct platform_device *pdev)
return -EINVAL;
#endif /* CONFIG_OF_GPIO */
- leds_data = devm_kzalloc(&pdev->dev, sizeof(struct ns2_led_data) *
- pdata->num_leds, GFP_KERNEL);
- if (!leds_data)
+ priv = devm_kzalloc(&pdev->dev,
+ sizeof_ns2_led_priv(pdata->num_leds), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
+ priv->num_leds = pdata->num_leds;
- for (i = 0; i < pdata->num_leds; i++) {
- ret = create_ns2_led(pdev, &leds_data[i], &pdata->leds[i]);
+ for (i = 0; i < priv->num_leds; i++) {
+ ret = create_ns2_led(pdev, &priv->leds_data[i],
+ &pdata->leds[i]);
if (ret < 0) {
for (i = i - 1; i >= 0; i--)
- delete_ns2_led(&leds_data[i]);
+ delete_ns2_led(&priv->leds_data[i]);
return ret;
}
}
- platform_set_drvdata(pdev, leds_data);
+ platform_set_drvdata(pdev, priv);
return 0;
}
@@ -352,13 +367,12 @@ static int ns2_led_probe(struct platform_device *pdev)
static int ns2_led_remove(struct platform_device *pdev)
{
int i;
- struct ns2_led_platform_data *pdata = pdev->dev.platform_data;
- struct ns2_led_data *leds_data;
+ struct ns2_led_priv *priv;
- leds_data = platform_get_drvdata(pdev);
+ priv = platform_get_drvdata(pdev);
- for (i = 0; i < pdata->num_leds; i++)
- delete_ns2_led(&leds_data[i]);
+ for (i = 0; i < priv->num_leds; i++)
+ delete_ns2_led(&priv->leds_data[i]);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index a1ea5f6..faf52c0 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -23,12 +23,16 @@
#include <linux/pwm.h>
#include <linux/leds_pwm.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
struct led_pwm_data {
struct led_classdev cdev;
struct pwm_device *pwm;
+ struct work_struct work;
unsigned int active_low;
unsigned int period;
+ int duty;
+ bool can_sleep;
};
struct led_pwm_priv {
@@ -36,6 +40,26 @@ struct led_pwm_priv {
struct led_pwm_data leds[0];
};
+static void __led_pwm_set(struct led_pwm_data *led_dat)
+{
+ int new_duty = led_dat->duty;
+
+ pwm_config(led_dat->pwm, new_duty, led_dat->period);
+
+ if (new_duty == 0)
+ pwm_disable(led_dat->pwm);
+ else
+ pwm_enable(led_dat->pwm);
+}
+
+static void led_pwm_work(struct work_struct *work)
+{
+ struct led_pwm_data *led_dat =
+ container_of(work, struct led_pwm_data, work);
+
+ __led_pwm_set(led_dat);
+}
+
static void led_pwm_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
@@ -44,13 +68,12 @@ static void led_pwm_set(struct led_classdev *led_cdev,
unsigned int max = led_dat->cdev.max_brightness;
unsigned int period = led_dat->period;
- if (brightness == 0) {
- pwm_config(led_dat->pwm, 0, period);
- pwm_disable(led_dat->pwm);
- } else {
- pwm_config(led_dat->pwm, brightness * period / max, period);
- pwm_enable(led_dat->pwm);
- }
+ led_dat->duty = brightness * period / max;
+
+ if (led_dat->can_sleep)
+ schedule_work(&led_dat->work);
+ else
+ __led_pwm_set(led_dat);
}
static inline size_t sizeof_pwm_leds_priv(int num_leds)
@@ -100,6 +123,10 @@ static struct led_pwm_priv *led_pwm_create_of(struct platform_device *pdev)
led_dat->cdev.brightness = LED_OFF;
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
+ led_dat->can_sleep = pwm_can_sleep(led_dat->pwm);
+ if (led_dat->can_sleep)
+ INIT_WORK(&led_dat->work, led_pwm_work);
+
ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register for %s\n",
@@ -153,6 +180,10 @@ static int led_pwm_probe(struct platform_device *pdev)
led_dat->cdev.max_brightness = cur_led->max_brightness;
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
+ led_dat->can_sleep = pwm_can_sleep(led_dat->pwm);
+ if (led_dat->can_sleep)
+ INIT_WORK(&led_dat->work, led_pwm_work);
+
ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
if (ret < 0)
goto err;
@@ -180,8 +211,11 @@ static int led_pwm_remove(struct platform_device *pdev)
struct led_pwm_priv *priv = platform_get_drvdata(pdev);
int i;
- for (i = 0; i < priv->num_leds; i++)
+ for (i = 0; i < priv->num_leds; i++) {
led_classdev_unregister(&priv->leds[i].cdev);
+ if (priv->leds[i].can_sleep)
+ cancel_work_sync(&priv->leds[i].work);
+ }
return 0;
}
diff --git a/drivers/leds/leds-renesas-tpu.c b/drivers/leds/leds-renesas-tpu.c
index d3c2b7e..9483f1c 100644
--- a/drivers/leds/leds-renesas-tpu.c
+++ b/drivers/leds/leds-renesas-tpu.c
@@ -205,7 +205,8 @@ static void r_tpu_set_pin(struct r_tpu_priv *p, enum r_tpu_pin new_state,
gpio_free(cfg->pin_gpio_fn);
if (new_state == R_TPU_PIN_GPIO)
- gpio_request_one(cfg->pin_gpio, GPIOF_DIR_OUT | !!brightness,
+ gpio_request_one(cfg->pin_gpio, !!brightness ?
+ GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
cfg->name);
if (new_state == R_TPU_PIN_GPIO_FN)
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index 070ba07..98fe021 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -85,6 +85,7 @@
#include <linux/gpio.h>
#include <linux/workqueue.h>
#include <linux/leds-tca6507.h>
+#include <linux/of.h>
/* LED select registers determine the source that drives LED outputs */
#define TCA6507_LS_LED_OFF 0x0 /* Output HI-Z (off) */
@@ -724,7 +725,6 @@ tca6507_led_dt_init(struct i2c_client *client)
return ERR_PTR(-ENODEV);
}
-#define of_tca6507_leds_match NULL
#endif
static int tca6507_probe(struct i2c_client *client,
@@ -813,7 +813,7 @@ static struct i2c_driver tca6507_driver = {
.driver = {
.name = "leds-tca6507",
.owner = THIS_MODULE,
- .of_match_table = of_tca6507_leds_match,
+ .of_match_table = of_match_ptr(of_tca6507_leds_match),
},
.probe = tca6507_probe,
.remove = tca6507_remove,
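of_match_ptr() already collapses to NULL when CONFIG_OF is not set, which is why the local "#define of_tca6507_leds_match NULL" fallback removed above is no longer needed. Approximately, from include/linux/of.h:

	#ifdef CONFIG_OF
	#define of_match_ptr(_ptr)	(_ptr)
	#else
	#define of_match_ptr(_ptr)	NULL
	#endif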
diff --git a/drivers/leds/leds-wm8350.c b/drivers/leds/leds-wm8350.c
index ed15157..8a181d5 100644
--- a/drivers/leds/leds-wm8350.c
+++ b/drivers/leds/leds-wm8350.c
@@ -129,7 +129,10 @@ static void wm8350_led_disable(struct wm8350_led *led)
ret = regulator_disable(led->isink);
if (ret != 0) {
dev_err(led->cdev.dev, "Failed to disable ISINK: %d\n", ret);
- regulator_enable(led->dcdc);
+ ret = regulator_enable(led->dcdc);
+ if (ret != 0)
+ dev_err(led->cdev.dev, "Failed to reenable DCDC: %d\n",
+ ret);
return;
}
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
new file mode 100644
index 0000000..49794b4
--- /dev/null
+++ b/drivers/leds/trigger/Kconfig
@@ -0,0 +1,111 @@
+menuconfig LEDS_TRIGGERS
+ bool "LED Trigger support"
+ depends on LEDS_CLASS
+ help
+ This option enables trigger support for the leds class.
+ These triggers allow kernel events to drive the LEDs and can
+ be configured via sysfs. If unsure, say Y.
+
+if LEDS_TRIGGERS
+
+config LEDS_TRIGGER_TIMER
+ tristate "LED Timer Trigger"
+ depends on LEDS_TRIGGERS
+ help
+ This allows LEDs to be controlled by a programmable timer
+ via sysfs. Some LED hardware can be programmed to start
+ blinking the LED without any further software interaction.
+ For more details read Documentation/leds/leds-class.txt.
+
+ If unsure, say Y.
+
+config LEDS_TRIGGER_ONESHOT
+ tristate "LED One-shot Trigger"
+ depends on LEDS_TRIGGERS
+ help
+ This allows LEDs to blink in one-shot pulses with parameters
+ controlled via sysfs. It's useful to notify the user on
+ sporadic events, when there are no clear begin and end trap points,
+ or on dense events, where this blinks the LED at constant rate if
+ rearmed continuously.
+
+ It also shows how to use the led_blink_set_oneshot() function.
+
+ If unsure, say Y.
+
+config LEDS_TRIGGER_IDE_DISK
+ bool "LED IDE Disk Trigger"
+ depends on IDE_GD_ATA
+ depends on LEDS_TRIGGERS
+ help
+ This allows LEDs to be controlled by IDE disk activity.
+ If unsure, say Y.
+
+config LEDS_TRIGGER_HEARTBEAT
+ tristate "LED Heartbeat Trigger"
+ depends on LEDS_TRIGGERS
+ help
+ This allows LEDs to be controlled by a CPU load average.
+ The flash frequency is a hyperbolic function of the 1-minute
+ load average.
+ If unsure, say Y.
+
+config LEDS_TRIGGER_BACKLIGHT
+ tristate "LED backlight Trigger"
+ depends on LEDS_TRIGGERS
+ help
+ This allows LEDs to be controlled as a backlight device: they
+ turn off and on when the display is blanked and unblanked.
+
+ If unsure, say N.
+
+config LEDS_TRIGGER_CPU
+ bool "LED CPU Trigger"
+ depends on LEDS_TRIGGERS
+ help
+ This allows LEDs to be controlled by active CPUs. This shows
+ the active CPUs across an array of LEDs so you can see which
+ CPUs are active on the system at any given moment.
+
+ If unsure, say N.
+
+config LEDS_TRIGGER_GPIO
+ tristate "LED GPIO Trigger"
+ depends on LEDS_TRIGGERS
+ depends on GPIOLIB
+ help
+ This allows LEDs to be controlled by gpio events. It's good
+ when using gpios as switches and triggering the needed LEDs
+ from there. One use case is n810's keypad LEDs that could
+ be triggered by this trigger when the user slides up to show the
+ keypad.
+
+ If unsure, say N.
+
+config LEDS_TRIGGER_DEFAULT_ON
+ tristate "LED Default ON Trigger"
+ depends on LEDS_TRIGGERS
+ help
+ This allows LEDs to be initialised in the ON state.
+ If unsure, say Y.
+
+comment "iptables trigger is under Netfilter config (LED target)"
+ depends on LEDS_TRIGGERS
+
+config LEDS_TRIGGER_TRANSIENT
+ tristate "LED Transient Trigger"
+ depends on LEDS_TRIGGERS
+ help
+ This allows one time activation of a transient state on
+ GPIO/PWM based hardware.
+ If unsure, say Y.
+
+config LEDS_TRIGGER_CAMERA
+ tristate "LED Camera Flash/Torch Trigger"
+ depends on LEDS_TRIGGERS
+ help
+ This allows LEDs to be controlled as a camera flash/torch device.
+ This enables direct flash/torch on/off by the driver from kernel space.
+ If unsure, say Y.
+
+endif # LEDS_TRIGGERS
diff --git a/drivers/leds/trigger/Makefile b/drivers/leds/trigger/Makefile
new file mode 100644
index 0000000..1abf48d
--- /dev/null
+++ b/drivers/leds/trigger/Makefile
@@ -0,0 +1,10 @@
+obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o
+obj-$(CONFIG_LEDS_TRIGGER_ONESHOT) += ledtrig-oneshot.o
+obj-$(CONFIG_LEDS_TRIGGER_IDE_DISK) += ledtrig-ide-disk.o
+obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o
+obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT) += ledtrig-backlight.o
+obj-$(CONFIG_LEDS_TRIGGER_GPIO) += ledtrig-gpio.o
+obj-$(CONFIG_LEDS_TRIGGER_CPU) += ledtrig-cpu.o
+obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
+obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT) += ledtrig-transient.o
+obj-$(CONFIG_LEDS_TRIGGER_CAMERA) += ledtrig-camera.o
diff --git a/drivers/leds/ledtrig-backlight.c b/drivers/leds/trigger/ledtrig-backlight.c
index 027a2b1..3c9c88a 100644
--- a/drivers/leds/ledtrig-backlight.c
+++ b/drivers/leds/trigger/ledtrig-backlight.c
@@ -16,7 +16,7 @@
#include <linux/init.h>
#include <linux/fb.h>
#include <linux/leds.h>
-#include "leds.h"
+#include "../leds.h"
#define BLANK 1
#define UNBLANK 0
diff --git a/drivers/leds/trigger/ledtrig-camera.c b/drivers/leds/trigger/ledtrig-camera.c
new file mode 100644
index 0000000..9bd73a8
--- /dev/null
+++ b/drivers/leds/trigger/ledtrig-camera.c
@@ -0,0 +1,57 @@
+/*
+ * Camera Flash and Torch On/Off Trigger
+ *
+ * based on ledtrig-ide-disk.c
+ *
+ * Copyright 2013 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+
+DEFINE_LED_TRIGGER(ledtrig_flash);
+DEFINE_LED_TRIGGER(ledtrig_torch);
+
+void ledtrig_flash_ctrl(bool on)
+{
+ enum led_brightness brt = on ? LED_FULL : LED_OFF;
+
+ led_trigger_event(ledtrig_flash, brt);
+}
+EXPORT_SYMBOL_GPL(ledtrig_flash_ctrl);
+
+void ledtrig_torch_ctrl(bool on)
+{
+ enum led_brightness brt = on ? LED_FULL : LED_OFF;
+
+ led_trigger_event(ledtrig_torch, brt);
+}
+EXPORT_SYMBOL_GPL(ledtrig_torch_ctrl);
+
+static int __init ledtrig_camera_init(void)
+{
+ led_trigger_register_simple("flash", &ledtrig_flash);
+ led_trigger_register_simple("torch", &ledtrig_torch);
+ return 0;
+}
+module_init(ledtrig_camera_init);
+
+static void __exit ledtrig_camera_exit(void)
+{
+ led_trigger_unregister_simple(ledtrig_torch);
+ led_trigger_unregister_simple(ledtrig_flash);
+}
+module_exit(ledtrig_camera_exit);
+
+MODULE_DESCRIPTION("LED Trigger for Camera Flash/Torch Control");
+MODULE_AUTHOR("Milo Kim");
+MODULE_LICENSE("GPL");
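This trigger is the counterpart of the default_trigger = "flash"/"torch" assignments added to leds-lm355x and leds-lm3642 earlier in this series: the LED driver only names the trigger, and a camera driver toggles it. A hedged usage sketch with hypothetical names:

	#include <linux/leds.h>

	/* camera driver side: strobe the flash LED around an exposure;
	 * ledtrig_flash_ctrl() is declared in <linux/leds.h> by this series */
	static void my_cam_capture_one_frame(void)
	{
		ledtrig_flash_ctrl(true);
		/* ... start the sensor exposure ... */
		ledtrig_flash_ctrl(false);
	}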
diff --git a/drivers/leds/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c
index 4239b39..118335e 100644
--- a/drivers/leds/ledtrig-cpu.c
+++ b/drivers/leds/trigger/ledtrig-cpu.c
@@ -26,7 +26,7 @@
#include <linux/percpu.h>
#include <linux/syscore_ops.h>
#include <linux/rwsem.h>
-#include "leds.h"
+#include "../leds.h"
#define MAX_NAME_LEN 8
diff --git a/drivers/leds/ledtrig-default-on.c b/drivers/leds/trigger/ledtrig-default-on.c
index eac1f1b..81a91be 100644
--- a/drivers/leds/ledtrig-default-on.c
+++ b/drivers/leds/trigger/ledtrig-default-on.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/leds.h>
-#include "leds.h"
+#include "../leds.h"
static void defon_trig_activate(struct led_classdev *led_cdev)
{
diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/trigger/ledtrig-gpio.c
index 72e3ebf..35812e3 100644
--- a/drivers/leds/ledtrig-gpio.c
+++ b/drivers/leds/trigger/ledtrig-gpio.c
@@ -17,7 +17,7 @@
#include <linux/workqueue.h>
#include <linux/leds.h>
#include <linux/slab.h>
-#include "leds.h"
+#include "../leds.h"
struct gpio_trig_data {
struct led_classdev *led;
diff --git a/drivers/leds/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c
index 1edc746..5c8464a 100644
--- a/drivers/leds/ledtrig-heartbeat.c
+++ b/drivers/leds/trigger/ledtrig-heartbeat.c
@@ -19,7 +19,7 @@
#include <linux/sched.h>
#include <linux/leds.h>
#include <linux/reboot.h>
-#include "leds.h"
+#include "../leds.h"
static int panic_heartbeats;
diff --git a/drivers/leds/ledtrig-ide-disk.c b/drivers/leds/trigger/ledtrig-ide-disk.c
index 2cd7c0c..2cd7c0c 100644
--- a/drivers/leds/ledtrig-ide-disk.c
+++ b/drivers/leds/trigger/ledtrig-ide-disk.c
diff --git a/drivers/leds/ledtrig-oneshot.c b/drivers/leds/trigger/ledtrig-oneshot.c
index 2c029aa..cb4c746 100644
--- a/drivers/leds/ledtrig-oneshot.c
+++ b/drivers/leds/trigger/ledtrig-oneshot.c
@@ -18,7 +18,7 @@
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/leds.h>
-#include "leds.h"
+#include "../leds.h"
#define DEFAULT_DELAY 100
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/trigger/ledtrig-timer.c
index f774d05..8d09327 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/trigger/ledtrig-timer.c
@@ -17,7 +17,6 @@
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/leds.h>
-#include "leds.h"
static ssize_t led_delay_on_show(struct device *dev,
struct device_attribute *attr, char *buf)
diff --git a/drivers/leds/ledtrig-transient.c b/drivers/leds/trigger/ledtrig-transient.c
index 398f104..e5abc00 100644
--- a/drivers/leds/ledtrig-transient.c
+++ b/drivers/leds/trigger/ledtrig-transient.c
@@ -25,7 +25,7 @@
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/leds.h>
-#include "leds.h"
+#include "../leds.h"
struct transient_trig_data {
int activate;
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig
index 89875ea..ee035ec 100644
--- a/drivers/lguest/Kconfig
+++ b/drivers/lguest/Kconfig
@@ -5,10 +5,9 @@ config LGUEST
---help---
This is a very simple module which allows you to run
multiple instances of the same Linux kernel, using the
- "lguest" command found in the Documentation/virtual/lguest
- directory.
+ "lguest" command found in the tools/lguest directory.
Note that "lguest" is pronounced to rhyme with "fell quest",
- not "rustyvisor". See Documentation/virtual/lguest/lguest.txt.
+ not "rustyvisor". See tools/lguest/lguest.txt.
If unsure, say N. If curious, say M. If masochistic, say Y.
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index a5ebc00..0bf1e4e 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -20,9 +20,9 @@
#include <asm/asm-offsets.h>
#include "lg.h"
-
+unsigned long switcher_addr;
+struct page **lg_switcher_pages;
static struct vm_struct *switcher_vma;
-static struct page **switcher_page;
/* This One Big lock protects all inter-guest data structures. */
DEFINE_MUTEX(lguest_lock);
@@ -52,13 +52,21 @@ static __init int map_switcher(void)
* easy.
*/
+ /* We assume Switcher text fits into a single page. */
+ if (end_switcher_text - start_switcher_text > PAGE_SIZE) {
+ printk(KERN_ERR "lguest: switcher text too large (%zu)\n",
+ end_switcher_text - start_switcher_text);
+ return -EINVAL;
+ }
+
/*
* We allocate an array of struct page pointers. map_vm_area() wants
* this, rather than just an array of pages.
*/
- switcher_page = kmalloc(sizeof(switcher_page[0])*TOTAL_SWITCHER_PAGES,
- GFP_KERNEL);
- if (!switcher_page) {
+ lg_switcher_pages = kmalloc(sizeof(lg_switcher_pages[0])
+ * TOTAL_SWITCHER_PAGES,
+ GFP_KERNEL);
+ if (!lg_switcher_pages) {
err = -ENOMEM;
goto out;
}
@@ -68,32 +76,29 @@ static __init int map_switcher(void)
* so we make sure they're zeroed.
*/
for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
- switcher_page[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
- if (!switcher_page[i]) {
+ lg_switcher_pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
+ if (!lg_switcher_pages[i]) {
err = -ENOMEM;
goto free_some_pages;
}
}
/*
- * First we check that the Switcher won't overlap the fixmap area at
- * the top of memory. It's currently nowhere near, but it could have
- * very strange effects if it ever happened.
+ * We place the Switcher underneath the fixmap area, which is the
+ * highest virtual address we can get. This is important, since we
+ * tell the Guest it can't access this memory, so we want its ceiling
+ * as high as possible.
*/
- if (SWITCHER_ADDR + (TOTAL_SWITCHER_PAGES+1)*PAGE_SIZE > FIXADDR_START){
- err = -ENOMEM;
- printk("lguest: mapping switcher would thwack fixmap\n");
- goto free_pages;
- }
+ switcher_addr = FIXADDR_START - (TOTAL_SWITCHER_PAGES+1)*PAGE_SIZE;
/*
- * Now we reserve the "virtual memory area" we want: 0xFFC00000
- * (SWITCHER_ADDR). We might not get it in theory, but in practice
- * it's worked so far. The end address needs +1 because __get_vm_area
- * allocates an extra guard page, so we need space for that.
+ * Now we reserve the "virtual memory area" we want. We might
+ * not get it in theory, but in practice it's worked so far.
+ * The end address needs +1 because __get_vm_area allocates an
+ * extra guard page, so we need space for that.
*/
switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
- VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
+ VM_ALLOC, switcher_addr, switcher_addr
+ (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
if (!switcher_vma) {
err = -ENOMEM;
@@ -103,12 +108,12 @@ static __init int map_switcher(void)
/*
* This code actually sets up the pages we've allocated to appear at
- * SWITCHER_ADDR. map_vm_area() takes the vma we allocated above, the
+ * switcher_addr. map_vm_area() takes the vma we allocated above, the
* kind of pages we're mapping (kernel pages), and a pointer to our
* array of struct pages. It increments that pointer, but we don't
* care.
*/
- pagep = switcher_page;
+ pagep = lg_switcher_pages;
err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep);
if (err) {
printk("lguest: map_vm_area failed: %i\n", err);
@@ -133,8 +138,8 @@ free_pages:
i = TOTAL_SWITCHER_PAGES;
free_some_pages:
for (--i; i >= 0; i--)
- __free_pages(switcher_page[i], 0);
- kfree(switcher_page);
+ __free_pages(lg_switcher_pages[i], 0);
+ kfree(lg_switcher_pages);
out:
return err;
}
@@ -149,8 +154,8 @@ static void unmap_switcher(void)
vunmap(switcher_vma->addr);
/* Now we just need to free the pages we copied the switcher into */
for (i = 0; i < TOTAL_SWITCHER_PAGES; i++)
- __free_pages(switcher_page[i], 0);
- kfree(switcher_page);
+ __free_pages(lg_switcher_pages[i], 0);
+ kfree(lg_switcher_pages);
}
/*H:032
@@ -323,15 +328,10 @@ static int __init init(void)
if (err)
goto out;
- /* Now we set up the pagetable implementation for the Guests. */
- err = init_pagetables(switcher_page, SHARED_SWITCHER_PAGES);
- if (err)
- goto unmap;
-
/* We might need to reserve an interrupt vector. */
err = init_interrupts();
if (err)
- goto free_pgtables;
+ goto unmap;
/* /dev/lguest needs to be registered. */
err = lguest_device_init();
@@ -346,8 +346,6 @@ static int __init init(void)
free_interrupts:
free_interrupts();
-free_pgtables:
- free_pagetables();
unmap:
unmap_switcher();
out:
@@ -359,7 +357,6 @@ static void __exit fini(void)
{
lguest_device_remove();
free_interrupts();
- free_pagetables();
unmap_switcher();
lguest_arch_host_fini();
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 295df06..2eef40b 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -14,11 +14,10 @@
#include <asm/lguest.h>
-void free_pagetables(void);
-int init_pagetables(struct page **switcher_page, unsigned int pages);
-
struct pgdir {
unsigned long gpgdir;
+ bool switcher_mapped;
+ int last_host_cpu;
pgd_t *pgdir;
};
@@ -124,6 +123,7 @@ bool lguest_address_ok(const struct lguest *lg,
unsigned long addr, unsigned long len);
void __lgread(struct lg_cpu *, void *, unsigned long, unsigned);
void __lgwrite(struct lg_cpu *, unsigned long, const void *, unsigned);
+extern struct page **lg_switcher_pages;
/*H:035
* Using memory-copy operations like that is usually inconvient, so we
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index ff4a0bc..4263f4c 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -250,13 +250,13 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
*/
static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
{
- /* We have a limited number the number of CPUs in the lguest struct. */
+ /* We have a limited number of CPUs in the lguest struct. */
if (id >= ARRAY_SIZE(cpu->lg->cpus))
return -EINVAL;
/* Set up this CPU's id, and pointer back to the lguest struct. */
cpu->id = id;
- cpu->lg = container_of((cpu - id), struct lguest, cpus[0]);
+ cpu->lg = container_of(cpu, struct lguest, cpus[id]);
cpu->lg->nr_cpus++;
/* Each CPU has a timer it can set. */
@@ -270,7 +270,7 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
if (!cpu->regs_page)
return -ENOMEM;
- /* We actually put the registers at the bottom of the page. */
+ /* We actually put the registers at the end of the page. */
cpu->regs = (void *)cpu->regs_page + PAGE_SIZE - sizeof(*cpu->regs);
/*
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 864baab..699187a 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -7,7 +7,7 @@
* converted Guest pages when running the Guest.
:*/
-/* Copyright (C) Rusty Russell IBM Corporation 2006.
+/* Copyright (C) Rusty Russell IBM Corporation 2013.
* GPL v2 and any later version */
#include <linux/mm.h>
#include <linux/gfp.h>
@@ -62,22 +62,11 @@
* will need the last pmd entry of the last pmd page.
*/
#ifdef CONFIG_X86_PAE
-#define SWITCHER_PMD_INDEX (PTRS_PER_PMD - 1)
-#define RESERVE_MEM 2U
#define CHECK_GPGD_MASK _PAGE_PRESENT
#else
-#define RESERVE_MEM 4U
#define CHECK_GPGD_MASK _PAGE_TABLE
#endif
-/*
- * We actually need a separate PTE page for each CPU. Remember that after the
- * Switcher code itself comes two pages for each CPU, and we don't want this
- * CPU's guest to see the pages of any other CPU.
- */
-static DEFINE_PER_CPU(pte_t *, switcher_pte_pages);
-#define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu)
-
/*H:320
* The page table code is curly enough to need helper functions to keep it
* clear and clean. The kernel itself provides many of them; one advantage
@@ -95,13 +84,6 @@ static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
{
unsigned int index = pgd_index(vaddr);
-#ifndef CONFIG_X86_PAE
- /* We kill any Guest trying to touch the Switcher addresses. */
- if (index >= SWITCHER_PGD_INDEX) {
- kill_guest(cpu, "attempt to access switcher pages");
- index = 0;
- }
-#endif
/* Return a pointer index'th pgd entry for the i'th page table. */
return &cpu->lg->pgdirs[i].pgdir[index];
}
@@ -117,13 +99,6 @@ static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
unsigned int index = pmd_index(vaddr);
pmd_t *page;
- /* We kill any Guest trying to touch the Switcher addresses. */
- if (pgd_index(vaddr) == SWITCHER_PGD_INDEX &&
- index >= SWITCHER_PMD_INDEX) {
- kill_guest(cpu, "attempt to access switcher pages");
- index = 0;
- }
-
/* You should never call this if the PGD entry wasn't valid */
BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
@@ -275,122 +250,177 @@ static void release_pte(pte_t pte)
}
/*:*/
-static void check_gpte(struct lg_cpu *cpu, pte_t gpte)
+static bool check_gpte(struct lg_cpu *cpu, pte_t gpte)
{
if ((pte_flags(gpte) & _PAGE_PSE) ||
- pte_pfn(gpte) >= cpu->lg->pfn_limit)
+ pte_pfn(gpte) >= cpu->lg->pfn_limit) {
kill_guest(cpu, "bad page table entry");
+ return false;
+ }
+ return true;
}
-static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
+static bool check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
{
if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) ||
- (pgd_pfn(gpgd) >= cpu->lg->pfn_limit))
+ (pgd_pfn(gpgd) >= cpu->lg->pfn_limit)) {
kill_guest(cpu, "bad page directory entry");
+ return false;
+ }
+ return true;
}
#ifdef CONFIG_X86_PAE
-static void check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
+static bool check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
{
if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
- (pmd_pfn(gpmd) >= cpu->lg->pfn_limit))
+ (pmd_pfn(gpmd) >= cpu->lg->pfn_limit)) {
kill_guest(cpu, "bad page middle directory entry");
+ return false;
+ }
+ return true;
}
#endif
-/*H:330
- * (i) Looking up a page table entry when the Guest faults.
- *
- * We saw this call in run_guest(): when we see a page fault in the Guest, we
- * come here. That's because we only set up the shadow page tables lazily as
- * they're needed, so we get page faults all the time and quietly fix them up
- * and return to the Guest without it knowing.
+/*H:331
+ * This is the core routine to walk the shadow page tables and find the page
+ * table entry for a specific address.
*
- * If we fixed up the fault (ie. we mapped the address), this routine returns
- * true. Otherwise, it was a real fault and we need to tell the Guest.
+ * If allocate is set, then we allocate any missing levels, setting the flags
+ * on the new page directory and mid-level directories using the arguments
+ * (which are copied from the Guest's page table entries).
*/
-bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
+static pte_t *find_spte(struct lg_cpu *cpu, unsigned long vaddr, bool allocate,
+ int pgd_flags, int pmd_flags)
{
- pgd_t gpgd;
pgd_t *spgd;
- unsigned long gpte_ptr;
- pte_t gpte;
- pte_t *spte;
-
/* Mid level for PAE. */
#ifdef CONFIG_X86_PAE
pmd_t *spmd;
- pmd_t gpmd;
#endif
- /* First step: get the top-level Guest page table entry. */
- if (unlikely(cpu->linear_pages)) {
- /* Faking up a linear mapping. */
- gpgd = __pgd(CHECK_GPGD_MASK);
- } else {
- gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
- /* Toplevel not present? We can't map it in. */
- if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
- return false;
- }
-
- /* Now look at the matching shadow entry. */
+ /* Get top level entry. */
spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
/* No shadow entry: allocate a new shadow PTE page. */
- unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
+ unsigned long ptepage;
+
+ /* If they didn't want us to allocate anything, stop. */
+ if (!allocate)
+ return NULL;
+
+ ptepage = get_zeroed_page(GFP_KERNEL);
/*
* This is not really the Guest's fault, but killing it is
* simple for this corner case.
*/
if (!ptepage) {
kill_guest(cpu, "out of memory allocating pte page");
- return false;
+ return NULL;
}
- /* We check that the Guest pgd is OK. */
- check_gpgd(cpu, gpgd);
/*
* And we copy the flags to the shadow PGD entry. The page
* number in the shadow PGD is the page we just allocated.
*/
- set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags(gpgd)));
+ set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags));
}
+ /*
+ * Intel's Physical Address Extension actually uses three levels of
+ * page tables, so we need to look in the mid-level.
+ */
#ifdef CONFIG_X86_PAE
- if (unlikely(cpu->linear_pages)) {
- /* Faking up a linear mapping. */
- gpmd = __pmd(_PAGE_TABLE);
- } else {
- gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
- /* Middle level not present? We can't map it in. */
- if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
- return false;
- }
-
- /* Now look at the matching shadow entry. */
+ /* Now look at the mid-level shadow entry. */
spmd = spmd_addr(cpu, *spgd, vaddr);
if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
/* No shadow entry: allocate a new shadow PTE page. */
- unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
+ unsigned long ptepage;
+
+ /* If they didn't want us to allocate anything, stop. */
+ if (!allocate)
+ return NULL;
+
+ ptepage = get_zeroed_page(GFP_KERNEL);
/*
* This is not really the Guest's fault, but killing it is
* simple for this corner case.
*/
if (!ptepage) {
- kill_guest(cpu, "out of memory allocating pte page");
- return false;
+ kill_guest(cpu, "out of memory allocating pmd page");
+ return NULL;
}
- /* We check that the Guest pmd is OK. */
- check_gpmd(cpu, gpmd);
-
/*
* And we copy the flags to the shadow PMD entry. The page
* number in the shadow PMD is the page we just allocated.
*/
- set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd)));
+ set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags));
+ }
+#endif
+
+ /* Get the pointer to the shadow PTE entry we're going to set. */
+ return spte_addr(cpu, *spgd, vaddr);
+}
+
+/*H:330
+ * (i) Looking up a page table entry when the Guest faults.
+ *
+ * We saw this call in run_guest(): when we see a page fault in the Guest, we
+ * come here. That's because we only set up the shadow page tables lazily as
+ * they're needed, so we get page faults all the time and quietly fix them up
+ * and return to the Guest without it knowing.
+ *
+ * If we fixed up the fault (ie. we mapped the address), this routine returns
+ * true. Otherwise, it was a real fault and we need to tell the Guest.
+ */
+bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
+{
+ unsigned long gpte_ptr;
+ pte_t gpte;
+ pte_t *spte;
+ pmd_t gpmd;
+ pgd_t gpgd;
+
+ /* We never demand page the Switcher, so trying is a mistake. */
+ if (vaddr >= switcher_addr)
+ return false;
+
+ /* First step: get the top-level Guest page table entry. */
+ if (unlikely(cpu->linear_pages)) {
+ /* Faking up a linear mapping. */
+ gpgd = __pgd(CHECK_GPGD_MASK);
+ } else {
+ gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
+ /* Toplevel not present? We can't map it in. */
+ if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
+ return false;
+
+ /*
+ * This kills the Guest if it has weird flags or tries to
+ * refer to a "physical" address outside the bounds.
+ */
+ if (!check_gpgd(cpu, gpgd))
+ return false;
+ }
+
+ /* This "mid-level" entry is only used for non-linear, PAE mode. */
+ gpmd = __pmd(_PAGE_TABLE);
+
+#ifdef CONFIG_X86_PAE
+ if (likely(!cpu->linear_pages)) {
+ gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
+ /* Middle level not present? We can't map it in. */
+ if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
+ return false;
+
+ /*
+ * This kills the Guest if it has weird flags or tries to
+ * refer to a "physical" address outside the bounds.
+ */
+ if (!check_gpmd(cpu, gpmd))
+ return false;
}
/*
@@ -433,7 +463,8 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
* Check that the Guest PTE flags are OK, and the page number is below
* the pfn_limit (ie. not mapping the Launcher binary).
*/
- check_gpte(cpu, gpte);
+ if (!check_gpte(cpu, gpte))
+ return false;
/* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
gpte = pte_mkyoung(gpte);
@@ -441,7 +472,9 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
gpte = pte_mkdirty(gpte);
/* Get the pointer to the shadow PTE entry we're going to set. */
- spte = spte_addr(cpu, *spgd, vaddr);
+ spte = find_spte(cpu, vaddr, true, pgd_flags(gpgd), pmd_flags(gpmd));
+ if (!spte)
+ return false;
/*
* If there was a valid shadow PTE entry here before, we release it.
@@ -493,29 +526,23 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
*/
static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
{
- pgd_t *spgd;
+ pte_t *spte;
unsigned long flags;
-#ifdef CONFIG_X86_PAE
- pmd_t *spmd;
-#endif
- /* Look at the current top level entry: is it present? */
- spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
- if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
+ /* You can't put your stack in the Switcher! */
+ if (vaddr >= switcher_addr)
return false;
-#ifdef CONFIG_X86_PAE
- spmd = spmd_addr(cpu, *spgd, vaddr);
- if (!(pmd_flags(*spmd) & _PAGE_PRESENT))
+ /* If there's no shadow PTE, it's not writable. */
+ spte = find_spte(cpu, vaddr, false, 0, 0);
+ if (!spte)
return false;
-#endif
/*
* Check the flags on the pte entry itself: it must be present and
* writable.
*/
- flags = pte_flags(*(spte_addr(cpu, *spgd, vaddr)));
-
+ flags = pte_flags(*spte);
return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
}
@@ -678,9 +705,6 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
int *blank_pgdir)
{
unsigned int next;
-#ifdef CONFIG_X86_PAE
- pmd_t *pmd_table;
-#endif
/*
* We pick one entry at random to throw out. Choosing the Least
@@ -695,29 +719,11 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
if (!cpu->lg->pgdirs[next].pgdir)
next = cpu->cpu_pgd;
else {
-#ifdef CONFIG_X86_PAE
/*
- * In PAE mode, allocate a pmd page and populate the
- * last pgd entry.
+ * This is a blank page, so there are no kernel
+ * mappings: caller must map the stack!
*/
- pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL);
- if (!pmd_table) {
- free_page((long)cpu->lg->pgdirs[next].pgdir);
- set_pgd(cpu->lg->pgdirs[next].pgdir, __pgd(0));
- next = cpu->cpu_pgd;
- } else {
- set_pgd(cpu->lg->pgdirs[next].pgdir +
- SWITCHER_PGD_INDEX,
- __pgd(__pa(pmd_table) | _PAGE_PRESENT));
- /*
- * This is a blank page, so there are no kernel
- * mappings: caller must map the stack!
- */
- *blank_pgdir = 1;
- }
-#else
*blank_pgdir = 1;
-#endif
}
}
/* Record which Guest toplevel this shadows. */
@@ -725,9 +731,50 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
/* Release all the non-kernel mappings. */
flush_user_mappings(cpu->lg, next);
+ /* This hasn't run on any CPU at all. */
+ cpu->lg->pgdirs[next].last_host_cpu = -1;
+
return next;
}
+/*H:501
+ * We do need the Switcher code mapped at all times, so we allocate that
+ * part of the Guest page table here. We map the Switcher code immediately,
+ * but defer mapping of the guest register page and IDT/LDT etc page until
+ * just before we run the guest in map_switcher_in_guest().
+ *
+ * We *could* do this setup in map_switcher_in_guest(), but at that point
+ * we have interrupts disabled, and allocating pages like that is fraught: we
+ * can't sleep if we need to free up some memory.
+ */
+static bool allocate_switcher_mapping(struct lg_cpu *cpu)
+{
+ int i;
+
+ for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
+ pte_t *pte = find_spte(cpu, switcher_addr + i * PAGE_SIZE, true,
+ CHECK_GPGD_MASK, _PAGE_TABLE);
+ if (!pte)
+ return false;
+
+ /*
+ * Map the switcher page if not already there. It might
+ * already be there because we call allocate_switcher_mapping()
+ * in guest_set_pgd() just in case it did discard our Switcher
+ * mapping, but it probably didn't.
+ */
+ if (i == 0 && !(pte_flags(*pte) & _PAGE_PRESENT)) {
+ /* Get a reference to the Switcher page. */
+ get_page(lg_switcher_pages[0]);
+ /* Create a read-only, executable, kernel-style PTE */
+ set_pte(pte,
+ mk_pte(lg_switcher_pages[0], PAGE_KERNEL_RX));
+ }
+ }
+ cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped = true;
+ return true;
+}
+
/*H:470
* Finally, a routine which throws away everything: all PGD entries in all
* the shadow page tables, including the Guest's kernel mappings. This is used
@@ -738,28 +785,16 @@ static void release_all_pagetables(struct lguest *lg)
unsigned int i, j;
/* Every shadow pagetable this Guest has */
- for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
- if (lg->pgdirs[i].pgdir) {
-#ifdef CONFIG_X86_PAE
- pgd_t *spgd;
- pmd_t *pmdpage;
- unsigned int k;
-
- /* Get the last pmd page. */
- spgd = lg->pgdirs[i].pgdir + SWITCHER_PGD_INDEX;
- pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
-
- /*
- * And release the pmd entries of that pmd page,
- * except for the switcher pmd.
- */
- for (k = 0; k < SWITCHER_PMD_INDEX; k++)
- release_pmd(&pmdpage[k]);
-#endif
- /* Every PGD entry except the Switcher at the top */
- for (j = 0; j < SWITCHER_PGD_INDEX; j++)
- release_pgd(lg->pgdirs[i].pgdir + j);
- }
+ for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) {
+ if (!lg->pgdirs[i].pgdir)
+ continue;
+
+ /* Every PGD entry. */
+ for (j = 0; j < PTRS_PER_PGD; j++)
+ release_pgd(lg->pgdirs[i].pgdir + j);
+ lg->pgdirs[i].switcher_mapped = false;
+ lg->pgdirs[i].last_host_cpu = -1;
+ }
}
/*
@@ -773,6 +808,9 @@ void guest_pagetable_clear_all(struct lg_cpu *cpu)
release_all_pagetables(cpu->lg);
/* We need the Guest kernel stack mapped again. */
pin_stack_pages(cpu);
+ /* And we need Switcher allocated. */
+ if (!allocate_switcher_mapping(cpu))
+ kill_guest(cpu, "Cannot populate switcher mapping");
}
/*H:430
@@ -808,9 +846,17 @@ void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
newpgdir = new_pgdir(cpu, pgtable, &repin);
/* Change the current pgd index to the new one. */
cpu->cpu_pgd = newpgdir;
- /* If it was completely blank, we map in the Guest kernel stack */
+ /*
+ * If it was completely blank, we map in the Guest kernel stack and
+ * the Switcher.
+ */
if (repin)
pin_stack_pages(cpu);
+
+ if (!cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped) {
+ if (!allocate_switcher_mapping(cpu))
+ kill_guest(cpu, "Cannot populate switcher mapping");
+ }
}
/*:*/
@@ -865,7 +911,8 @@ static void do_set_pte(struct lg_cpu *cpu, int idx,
* micro-benchmark.
*/
if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
- check_gpte(cpu, gpte);
+ if (!check_gpte(cpu, gpte))
+ return;
set_pte(spte,
gpte_to_spte(cpu, gpte,
pte_flags(gpte) & _PAGE_DIRTY));
@@ -897,6 +944,12 @@ static void do_set_pte(struct lg_cpu *cpu, int idx,
void guest_set_pte(struct lg_cpu *cpu,
unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
{
+ /* We don't let you remap the Switcher; we need it to get back! */
+ if (vaddr >= switcher_addr) {
+ kill_guest(cpu, "attempt to set pte into Switcher pages");
+ return;
+ }
+
/*
* Kernel mappings must be changed on all top levels. Slow, but doesn't
* happen often.
@@ -933,14 +986,23 @@ void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
{
int pgdir;
- if (idx >= SWITCHER_PGD_INDEX)
+ if (idx > PTRS_PER_PGD) {
+ kill_guest(&lg->cpus[0], "Attempt to set pgd %u/%u",
+ idx, PTRS_PER_PGD);
return;
+ }
/* If they're talking about a page table we have a shadow for... */
pgdir = find_pgdir(lg, gpgdir);
- if (pgdir < ARRAY_SIZE(lg->pgdirs))
+ if (pgdir < ARRAY_SIZE(lg->pgdirs)) {
/* ... throw it away. */
release_pgd(lg->pgdirs[pgdir].pgdir + idx);
+ /* That might have been the Switcher mapping, remap it. */
+ if (!allocate_switcher_mapping(&lg->cpus[0])) {
+ kill_guest(&lg->cpus[0],
+ "Cannot populate switcher mapping");
+ }
+ }
}
#ifdef CONFIG_X86_PAE
@@ -958,6 +1020,9 @@ void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
* we will populate on future faults. The Guest doesn't have any actual
* pagetables yet, so we set linear_pages to tell demand_page() to fake it
* for the moment.
+ *
+ * We do need the Switcher to be mapped at all times, so we allocate that
+ * part of the Guest page table here.
*/
int init_guest_pagetable(struct lguest *lg)
{
@@ -971,21 +1036,34 @@ int init_guest_pagetable(struct lguest *lg)
/* We start with a linear mapping until the initialize. */
cpu->linear_pages = true;
+
+ /* Allocate the page tables for the Switcher. */
+ if (!allocate_switcher_mapping(cpu)) {
+ release_all_pagetables(lg);
+ return -ENOMEM;
+ }
+
return 0;
}
/*H:508 When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
void page_table_guest_data_init(struct lg_cpu *cpu)
{
+ /*
+ * We tell the Guest that it can't use the virtual addresses
+ * used by the Switcher. This trick is equivalent to 4GB -
+ * switcher_addr.
+ */
+ u32 top = ~switcher_addr + 1;
+
/* We get the kernel address: above this is all kernel memory. */
if (get_user(cpu->lg->kernel_address,
- &cpu->lg->lguest_data->kernel_address)
+ &cpu->lg->lguest_data->kernel_address)
/*
- * We tell the Guest that it can't use the top 2 or 4 MB
- * of virtual addresses used by the Switcher.
+ * We tell the Guest that it can't use the top virtual
+ * addresses (used by the Switcher).
*/
- || put_user(RESERVE_MEM * 1024 * 1024,
- &cpu->lg->lguest_data->reserve_mem)) {
+ || put_user(top, &cpu->lg->lguest_data->reserve_mem)) {
kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
return;
}
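A hedged aside on the "~switcher_addr + 1" line above: in 32-bit two's-complement arithmetic, ~x + 1 == -x, which read as an unsigned 32-bit value is 2^32 - x, i.e. exactly "4GB - switcher_addr". A minimal check, using a made-up address for illustration only:

	u32 switcher = 0xffc00000;	/* hypothetical switcher_addr value */
	u32 top = ~switcher + 1;	/* == 0x00400000 == 4GB - 0xffc00000 */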
@@ -995,12 +1073,7 @@ void page_table_guest_data_init(struct lg_cpu *cpu)
* "pgd_index(lg->kernel_address)". This assumes it won't hit the
* Switcher mappings, so check that now.
*/
-#ifdef CONFIG_X86_PAE
- if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX &&
- pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX)
-#else
- if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX)
-#endif
+ if (cpu->lg->kernel_address >= switcher_addr)
kill_guest(cpu, "bad kernel address %#lx",
cpu->lg->kernel_address);
}
@@ -1017,102 +1090,96 @@ void free_guest_pagetable(struct lguest *lg)
free_page((long)lg->pgdirs[i].pgdir);
}
-/*H:480
- * (vi) Mapping the Switcher when the Guest is about to run.
- *
- * The Switcher and the two pages for this CPU need to be visible in the
- * Guest (and not the pages for other CPUs). We have the appropriate PTE pages
- * for each CPU already set up, we just need to hook them in now we know which
- * Guest is about to run on this CPU.
+/*H:481
+ * This clears the Switcher mappings for cpu #i.
*/
-void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
+static void remove_switcher_percpu_map(struct lg_cpu *cpu, unsigned int i)
{
- pte_t *switcher_pte_page = __this_cpu_read(switcher_pte_pages);
- pte_t regs_pte;
+ unsigned long base = switcher_addr + PAGE_SIZE + i * PAGE_SIZE*2;
+ pte_t *pte;
-#ifdef CONFIG_X86_PAE
- pmd_t switcher_pmd;
- pmd_t *pmd_table;
-
- switcher_pmd = pfn_pmd(__pa(switcher_pte_page) >> PAGE_SHIFT,
- PAGE_KERNEL_EXEC);
-
- /* Figure out where the pmd page is, by reading the PGD, and converting
- * it to a virtual address. */
- pmd_table = __va(pgd_pfn(cpu->lg->
- pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX])
- << PAGE_SHIFT);
- /* Now write it into the shadow page table. */
- set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd);
-#else
- pgd_t switcher_pgd;
+ /* Clear the mappings for both pages. */
+ pte = find_spte(cpu, base, false, 0, 0);
+ release_pte(*pte);
+ set_pte(pte, __pte(0));
- /*
- * Make the last PGD entry for this Guest point to the Switcher's PTE
- * page for this CPU (with appropriate flags).
- */
- switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL_EXEC);
-
- cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;
-
-#endif
- /*
- * We also change the Switcher PTE page. When we're running the Guest,
- * we want the Guest's "regs" page to appear where the first Switcher
- * page for this CPU is. This is an optimization: when the Switcher
- * saves the Guest registers, it saves them into the first page of this
- * CPU's "struct lguest_pages": if we make sure the Guest's register
- * page is already mapped there, we don't have to copy them out
- * again.
- */
- regs_pte = pfn_pte(__pa(cpu->regs_page) >> PAGE_SHIFT, PAGE_KERNEL);
- set_pte(&switcher_pte_page[pte_index((unsigned long)pages)], regs_pte);
+ pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
+ release_pte(*pte);
+ set_pte(pte, __pte(0));
}
-/*:*/
-static void free_switcher_pte_pages(void)
-{
- unsigned int i;
-
- for_each_possible_cpu(i)
- free_page((long)switcher_pte_page(i));
-}
-
-/*H:520
- * Setting up the Switcher PTE page for given CPU is fairly easy, given
- * the CPU number and the "struct page"s for the Switcher code itself.
+/*H:480
+ * (vi) Mapping the Switcher when the Guest is about to run.
+ *
+ * The Switcher and the two pages for this CPU need to be visible in the Guest
+ * (and not the pages for other CPUs).
*
- * Currently the Switcher is less than a page long, so "pages" is always 1.
+ * The pages for the pagetables have all been allocated before: we just need
+ * to make sure the actual PTEs are up-to-date for the CPU we're about to run
+ * on.
*/
-static __init void populate_switcher_pte_page(unsigned int cpu,
- struct page *switcher_page[],
- unsigned int pages)
+void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
{
- unsigned int i;
- pte_t *pte = switcher_pte_page(cpu);
+ unsigned long base;
+ struct page *percpu_switcher_page, *regs_page;
+ pte_t *pte;
+ struct pgdir *pgdir = &cpu->lg->pgdirs[cpu->cpu_pgd];
+
+ /* Switcher page should always be mapped by now! */
+ BUG_ON(!pgdir->switcher_mapped);
+
+ /*
+ * Remember that we have two pages for each Host CPU, so we can run a
+ * Guest on each CPU without them interfering. We need to make sure
+ * those pages are mapped correctly in the Guest, but since we usually
+ * run on the same CPU, we cache that, and only update the mappings
+ * when we move.
+ */
+ if (pgdir->last_host_cpu == raw_smp_processor_id())
+ return;
- /* The first entries are easy: they map the Switcher code. */
- for (i = 0; i < pages; i++) {
- set_pte(&pte[i], mk_pte(switcher_page[i],
- __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
+ /* -1 means unknown so we remove everything. */
+ if (pgdir->last_host_cpu == -1) {
+ unsigned int i;
+ for_each_possible_cpu(i)
+ remove_switcher_percpu_map(cpu, i);
+ } else {
+ /* We know exactly what CPU mapping to remove. */
+ remove_switcher_percpu_map(cpu, pgdir->last_host_cpu);
}
- /* The only other thing we map is this CPU's pair of pages. */
- i = pages + cpu*2;
-
- /* First page (Guest registers) is writable from the Guest */
- set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]),
- __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW)));
+ /*
+ * When we're running the Guest, we want the Guest's "regs" page to
+ * appear where the first Switcher page for this CPU is. This is an
+ * optimization: when the Switcher saves the Guest registers, it saves
+ * them into the first page of this CPU's "struct lguest_pages": if we
+ * make sure the Guest's register page is already mapped there, we
+ * don't have to copy them out again.
+ */
+ /* Find the shadow PTE for this regs page. */
+ base = switcher_addr + PAGE_SIZE
+ + raw_smp_processor_id() * sizeof(struct lguest_pages);
+ pte = find_spte(cpu, base, false, 0, 0);
+ regs_page = pfn_to_page(__pa(cpu->regs_page) >> PAGE_SHIFT);
+ get_page(regs_page);
+ set_pte(pte, mk_pte(regs_page, __pgprot(__PAGE_KERNEL & ~_PAGE_GLOBAL)));
/*
- * The second page contains the "struct lguest_ro_state", and is
- * read-only.
+ * We map the second page of the struct lguest_pages read-only in
+ * the Guest: the IDT, GDT and other things it's not supposed to
+ * change.
*/
- set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]),
- __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
+ pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
+ percpu_switcher_page
+ = lg_switcher_pages[1 + raw_smp_processor_id()*2 + 1];
+ get_page(percpu_switcher_page);
+ set_pte(pte, mk_pte(percpu_switcher_page,
+ __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL)));
+
+ pgdir->last_host_cpu = raw_smp_processor_id();
}
-/*
+/*H:490
* We've made it through the page table code. Perhaps our tired brains are
* still processing the details, or perhaps we're simply glad it's over.
*
@@ -1124,29 +1191,3 @@ static __init void populate_switcher_pte_page(unsigned int cpu,
*
* There is just one file remaining in the Host.
*/
-
-/*H:510
- * At boot or module load time, init_pagetables() allocates and populates
- * the Switcher PTE page for each CPU.
- */
-__init int init_pagetables(struct page **switcher_page, unsigned int pages)
-{
- unsigned int i;
-
- for_each_possible_cpu(i) {
- switcher_pte_page(i) = (pte_t *)get_zeroed_page(GFP_KERNEL);
- if (!switcher_pte_page(i)) {
- free_switcher_pte_pages();
- return -ENOMEM;
- }
- populate_switcher_pte_page(i, switcher_page, pages);
- }
- return 0;
-}
-/*:*/
-
-/* Cleaning up simply involves freeing the PTE page for each CPU. */
-void free_pagetables(void)
-{
- free_switcher_pte_pages();
-}
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 4af12e1..f0a3347 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -59,14 +59,13 @@ static struct {
/* Offset from where switcher.S was compiled to where we've copied it */
static unsigned long switcher_offset(void)
{
- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
+ return switcher_addr - (unsigned long)start_switcher_text;
}
-/* This cpu's struct lguest_pages. */
+/* This cpu's struct lguest_pages (after the Switcher text page) */
static struct lguest_pages *lguest_pages(unsigned int cpu)
{
- return &(((struct lguest_pages *)
- (SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
+ return &(((struct lguest_pages *)(switcher_addr + PAGE_SIZE))[cpu]);
}
static DEFINE_PER_CPU(struct lg_cpu *, lg_last_cpu);
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 9c6b964..b3b2d36 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -120,11 +120,7 @@ static void smu_start_cmd(void)
DPRINTK("SMU: starting cmd %x, %d bytes data\n", cmd->cmd,
cmd->data_len);
- DPRINTK("SMU: data buffer: %02x %02x %02x %02x %02x %02x %02x %02x\n",
- ((u8 *)cmd->data_buf)[0], ((u8 *)cmd->data_buf)[1],
- ((u8 *)cmd->data_buf)[2], ((u8 *)cmd->data_buf)[3],
- ((u8 *)cmd->data_buf)[4], ((u8 *)cmd->data_buf)[5],
- ((u8 *)cmd->data_buf)[6], ((u8 *)cmd->data_buf)[7]);
+ DPRINTK("SMU: data buffer: %8ph\n", cmd->data_buf);
/* Fill the SMU command buffer */
smu->cmd_buf->cmd = cmd->cmd;
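A hedged aside on the %*ph printk extension used in the smu.c hunk above and the via-pmu.c hunk below: it hex-dumps a small buffer (up to 64 bytes), with the byte count given either as a literal width in the format or as a separate argument:

	u8 buf[8] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x11, 0x22, 0x33 };

	pr_debug("fixed width:   %8ph\n", buf);		/* de ad be ef 00 11 22 33 */
	pr_debug("runtime width: %*ph\n", 4, buf);	/* de ad be ef */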
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 22b8ce4..283e1b5 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -750,8 +750,9 @@ done_battery_state_smart(struct adb_request* req)
voltage = (req->reply[8] << 8) | req->reply[9];
break;
default:
- printk(KERN_WARNING "pmu.c : unrecognized battery info, len: %d, %02x %02x %02x %02x\n",
- req->reply_len, req->reply[0], req->reply[1], req->reply[2], req->reply[3]);
+ pr_warn("pmu.c: unrecognized battery info, "
+ "len: %d, %4ph\n", req->reply_len,
+ req->reply);
break;
}
}
@@ -869,7 +870,7 @@ static int pmu_battery_proc_show(struct seq_file *m, void *v)
static int pmu_battery_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, pmu_battery_proc_show, PDE(inode)->data);
+ return single_open(file, pmu_battery_proc_show, PDE_DATA(inode));
}
static const struct file_operations pmu_battery_proc_fops = {
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 4d8d90b..3bfc8f1 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -174,6 +174,8 @@ config MD_FAULTY
If unsure, say N.
+source "drivers/md/bcache/Kconfig"
+
config BLK_DEV_DM
tristate "Device mapper support"
---help---
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 7ceeaef..1439fd4 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_MD_RAID10) += raid10.o
obj-$(CONFIG_MD_RAID456) += raid456.o
obj-$(CONFIG_MD_MULTIPATH) += multipath.o
obj-$(CONFIG_MD_FAULTY) += faulty.o
+obj-$(CONFIG_BCACHE) += bcache/
obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
obj-$(CONFIG_DM_BUFIO) += dm-bufio.o
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
new file mode 100644
index 0000000..05c220d
--- /dev/null
+++ b/drivers/md/bcache/Kconfig
@@ -0,0 +1,42 @@
+
+config BCACHE
+ tristate "Block device as cache"
+ select CLOSURES
+ ---help---
+ Allows a block device to be used as cache for other devices; uses
+ a btree for indexing and the layout is optimized for SSDs.
+
+ See Documentation/bcache.txt for details.
+
+config BCACHE_DEBUG
+ bool "Bcache debugging"
+ depends on BCACHE
+ ---help---
+ Don't select this option unless you're a developer
+
+ Enables extra debugging tools (primarily a fuzz tester)
+
+config BCACHE_EDEBUG
+ bool "Extended runtime checks"
+ depends on BCACHE
+ ---help---
+ Don't select this option unless you're a developer
+
+ Enables extra runtime checks which significantly affect performance
+
+config BCACHE_CLOSURES_DEBUG
+ bool "Debug closures"
+ depends on BCACHE
+ select DEBUG_FS
+ ---help---
+ Keeps all active closures in a linked list and provides a debugfs
+ interface to list them, which makes it possible to see asynchronous
+ operations that get stuck.
+
+# cgroup code needs to be updated:
+#
+#config CGROUP_BCACHE
+# bool "Cgroup controls for bcache"
+# depends on BCACHE && BLK_CGROUP
+# ---help---
+# TODO
diff --git a/drivers/md/bcache/Makefile b/drivers/md/bcache/Makefile
new file mode 100644
index 0000000..0e9c825
--- /dev/null
+++ b/drivers/md/bcache/Makefile
@@ -0,0 +1,7 @@
+
+obj-$(CONFIG_BCACHE) += bcache.o
+
+bcache-y := alloc.o btree.o bset.o io.o journal.o writeback.o\
+ movinggc.o request.o super.o sysfs.o debug.o util.o trace.o stats.o closure.o
+
+CFLAGS_request.o += -Iblock
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
new file mode 100644
index 0000000..048f294
--- /dev/null
+++ b/drivers/md/bcache/alloc.c
@@ -0,0 +1,599 @@
+/*
+ * Primary bucket allocation code
+ *
+ * Copyright 2012 Google, Inc.
+ *
+ * Allocation in bcache is done in terms of buckets:
+ *
+ * Each bucket has an associated 8 bit gen; this gen corresponds to the gen in
+ * btree pointers - they must match for the pointer to be considered valid.
+ *
+ * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
+ * bucket simply by incrementing its gen.
+ *
+ * The gens (along with the priorities; it's really the gens that are important but
+ * the code is named as if it's the priorities) are written in an arbitrary list
+ * of buckets on disk, with a pointer to them in the journal header.
+ *
+ * When we invalidate a bucket, we have to write its new gen to disk and wait
+ * for that write to complete before we use it - otherwise after a crash we
+ * could have pointers that appeared to be good but pointed to data that had
+ * been overwritten.
+ *
+ * Since the gens and priorities are all stored contiguously on disk, we can
+ * batch this up: We fill up the free_inc list with freshly invalidated buckets,
+ * call prio_write(), and when prio_write() finishes we pull buckets off the
+ * free_inc list and optionally discard them.
+ *
+ * free_inc isn't the only freelist - if it was, we'd often have to sleep while
+ * priorities and gens were being written before we could allocate. c->free is a
+ * smaller freelist, and buckets on that list are always ready to be used.
+ *
+ * If we've got discards enabled, that happens when a bucket moves from the
+ * free_inc list to the free list.
+ *
+ * There is another freelist, because sometimes we have buckets that we know
+ * have nothing pointing into them - these we can reuse without waiting for
+ * priorities to be rewritten. These come from freed btree nodes and buckets
+ * that garbage collection discovered no longer had valid keys pointing into
+ * them (because they were overwritten). That's the unused list - buckets on the
+ * unused list move to the free list, optionally being discarded in the process.
+ *
+ * It's also important to ensure that gens don't wrap around - with respect to
+ * either the oldest gen in the btree or the gen on disk. This is quite
+ * difficult to do in practice, but we explicitly guard against it anyways - if
+ * a bucket is in danger of wrapping around we simply skip invalidating it that
+ * time around, and we garbage collect or rewrite the priorities sooner than we
+ * would have otherwise.
+ *
+ * bch_bucket_alloc() allocates a single bucket from a specific cache.
+ *
+ * bch_bucket_alloc_set() allocates one or more buckets from different caches
+ * out of a cache set.
+ *
+ * free_some_buckets() drives all the processes described above. It's called
+ * from bch_bucket_alloc() and a few other places that need to make sure free
+ * buckets are ready.
+ *
+ * invalidate_buckets_(lru|fifo)() find buckets that are available to be
+ * invalidated, and then invalidate them and stick them on the free_inc list -
+ * in either lru or fifo order.
+ */
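A hedged sketch of the gen mechanism described above (illustrative helpers, not the driver's real accessors): a pointer carries an 8-bit gen, and it is only considered valid while that gen matches the gen of the bucket it points into, so bumping the bucket's gen invalidates every outstanding pointer at once.

	static bool sketch_ptr_valid(uint8_t ptr_gen, const struct bucket *b)
	{
		return ptr_gen == b->gen;
	}

	static void sketch_reuse_bucket(struct cache *ca, struct bucket *b)
	{
		bch_inc_gen(ca, b);	/* old pointers now fail the gen check */
		/* the new gen must hit disk (prio_write()) before reuse */
	}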
+
+#include "bcache.h"
+#include "btree.h"
+
+#include <linux/random.h>
+
+#define MAX_IN_FLIGHT_DISCARDS 8U
+
+/* Bucket heap / gen */
+
+uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
+{
+ uint8_t ret = ++b->gen;
+
+ ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
+ WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);
+
+ if (CACHE_SYNC(&ca->set->sb)) {
+ ca->need_save_prio = max(ca->need_save_prio,
+ bucket_disk_gen(b));
+ WARN_ON_ONCE(ca->need_save_prio > BUCKET_DISK_GEN_MAX);
+ }
+
+ return ret;
+}
+
+void bch_rescale_priorities(struct cache_set *c, int sectors)
+{
+ struct cache *ca;
+ struct bucket *b;
+ unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
+ unsigned i;
+ int r;
+
+ atomic_sub(sectors, &c->rescale);
+
+ do {
+ r = atomic_read(&c->rescale);
+
+ if (r >= 0)
+ return;
+ } while (atomic_cmpxchg(&c->rescale, r, r + next) != r);
+
+ mutex_lock(&c->bucket_lock);
+
+ c->min_prio = USHRT_MAX;
+
+ for_each_cache(ca, c, i)
+ for_each_bucket(b, ca)
+ if (b->prio &&
+ b->prio != BTREE_PRIO &&
+ !atomic_read(&b->pin)) {
+ b->prio--;
+ c->min_prio = min(c->min_prio, b->prio);
+ }
+
+ mutex_unlock(&c->bucket_lock);
+}
+
+/* Discard/TRIM */
+
+struct discard {
+ struct list_head list;
+ struct work_struct work;
+ struct cache *ca;
+ long bucket;
+
+ struct bio bio;
+ struct bio_vec bv;
+};
+
+static void discard_finish(struct work_struct *w)
+{
+ struct discard *d = container_of(w, struct discard, work);
+ struct cache *ca = d->ca;
+ char buf[BDEVNAME_SIZE];
+
+ if (!test_bit(BIO_UPTODATE, &d->bio.bi_flags)) {
+ pr_notice("discard error on %s, disabling",
+ bdevname(ca->bdev, buf));
+ d->ca->discard = 0;
+ }
+
+ mutex_lock(&ca->set->bucket_lock);
+
+ fifo_push(&ca->free, d->bucket);
+ list_add(&d->list, &ca->discards);
+ atomic_dec(&ca->discards_in_flight);
+
+ mutex_unlock(&ca->set->bucket_lock);
+
+ closure_wake_up(&ca->set->bucket_wait);
+ wake_up(&ca->set->alloc_wait);
+
+ closure_put(&ca->set->cl);
+}
+
+static void discard_endio(struct bio *bio, int error)
+{
+ struct discard *d = container_of(bio, struct discard, bio);
+ schedule_work(&d->work);
+}
+
+static void do_discard(struct cache *ca, long bucket)
+{
+ struct discard *d = list_first_entry(&ca->discards,
+ struct discard, list);
+
+ list_del(&d->list);
+ d->bucket = bucket;
+
+ atomic_inc(&ca->discards_in_flight);
+ closure_get(&ca->set->cl);
+
+ bio_init(&d->bio);
+
+ d->bio.bi_sector = bucket_to_sector(ca->set, d->bucket);
+ d->bio.bi_bdev = ca->bdev;
+ d->bio.bi_rw = REQ_WRITE|REQ_DISCARD;
+ d->bio.bi_max_vecs = 1;
+ d->bio.bi_io_vec = d->bio.bi_inline_vecs;
+ d->bio.bi_size = bucket_bytes(ca);
+ d->bio.bi_end_io = discard_endio;
+ bio_set_prio(&d->bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
+
+ submit_bio(0, &d->bio);
+}
+
+/* Allocation */
+
+static inline bool can_inc_bucket_gen(struct bucket *b)
+{
+ return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX &&
+ bucket_disk_gen(b) < BUCKET_DISK_GEN_MAX;
+}
+
+bool bch_bucket_add_unused(struct cache *ca, struct bucket *b)
+{
+ BUG_ON(GC_MARK(b) || GC_SECTORS_USED(b));
+
+ if (fifo_used(&ca->free) > ca->watermark[WATERMARK_MOVINGGC] &&
+ CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO)
+ return false;
+
+ b->prio = 0;
+
+ if (can_inc_bucket_gen(b) &&
+ fifo_push(&ca->unused, b - ca->buckets)) {
+ atomic_inc(&b->pin);
+ return true;
+ }
+
+ return false;
+}
+
+static bool can_invalidate_bucket(struct cache *ca, struct bucket *b)
+{
+ return GC_MARK(b) == GC_MARK_RECLAIMABLE &&
+ !atomic_read(&b->pin) &&
+ can_inc_bucket_gen(b);
+}
+
+static void invalidate_one_bucket(struct cache *ca, struct bucket *b)
+{
+ bch_inc_gen(ca, b);
+ b->prio = INITIAL_PRIO;
+ atomic_inc(&b->pin);
+ fifo_push(&ca->free_inc, b - ca->buckets);
+}
+
+#define bucket_prio(b) \
+ (((unsigned) (b->prio - ca->set->min_prio)) * GC_SECTORS_USED(b))
+
+#define bucket_max_cmp(l, r) (bucket_prio(l) < bucket_prio(r))
+#define bucket_min_cmp(l, r) (bucket_prio(l) > bucket_prio(r))
+
+static void invalidate_buckets_lru(struct cache *ca)
+{
+ struct bucket *b;
+ ssize_t i;
+
+ ca->heap.used = 0;
+
+ for_each_bucket(b, ca) {
+ /*
+ * If we fill up the unused list, if we then return before
+ * adding anything to the free_inc list we'll skip writing
+ * prios/gens and just go back to allocating from the unused
+ * list:
+ */
+ if (fifo_full(&ca->unused))
+ return;
+
+ if (!can_invalidate_bucket(ca, b))
+ continue;
+
+ if (!GC_SECTORS_USED(b) &&
+ bch_bucket_add_unused(ca, b))
+ continue;
+
+ if (!heap_full(&ca->heap))
+ heap_add(&ca->heap, b, bucket_max_cmp);
+ else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
+ ca->heap.data[0] = b;
+ heap_sift(&ca->heap, 0, bucket_max_cmp);
+ }
+ }
+
+ for (i = ca->heap.used / 2 - 1; i >= 0; --i)
+ heap_sift(&ca->heap, i, bucket_min_cmp);
+
+ while (!fifo_full(&ca->free_inc)) {
+ if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
+ /*
+ * We don't want to be calling invalidate_buckets()
+ * multiple times when it can't do anything
+ */
+ ca->invalidate_needs_gc = 1;
+ bch_queue_gc(ca->set);
+ return;
+ }
+
+ invalidate_one_bucket(ca, b);
+ }
+}
+
+static void invalidate_buckets_fifo(struct cache *ca)
+{
+ struct bucket *b;
+ size_t checked = 0;
+
+ while (!fifo_full(&ca->free_inc)) {
+ if (ca->fifo_last_bucket < ca->sb.first_bucket ||
+ ca->fifo_last_bucket >= ca->sb.nbuckets)
+ ca->fifo_last_bucket = ca->sb.first_bucket;
+
+ b = ca->buckets + ca->fifo_last_bucket++;
+
+ if (can_invalidate_bucket(ca, b))
+ invalidate_one_bucket(ca, b);
+
+ if (++checked >= ca->sb.nbuckets) {
+ ca->invalidate_needs_gc = 1;
+ bch_queue_gc(ca->set);
+ return;
+ }
+ }
+}
+
+static void invalidate_buckets_random(struct cache *ca)
+{
+ struct bucket *b;
+ size_t checked = 0;
+
+ while (!fifo_full(&ca->free_inc)) {
+ size_t n;
+ get_random_bytes(&n, sizeof(n));
+
+ n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
+ n += ca->sb.first_bucket;
+
+ b = ca->buckets + n;
+
+ if (can_invalidate_bucket(ca, b))
+ invalidate_one_bucket(ca, b);
+
+ if (++checked >= ca->sb.nbuckets / 2) {
+ ca->invalidate_needs_gc = 1;
+ bch_queue_gc(ca->set);
+ return;
+ }
+ }
+}
+
+static void invalidate_buckets(struct cache *ca)
+{
+ if (ca->invalidate_needs_gc)
+ return;
+
+ switch (CACHE_REPLACEMENT(&ca->sb)) {
+ case CACHE_REPLACEMENT_LRU:
+ invalidate_buckets_lru(ca);
+ break;
+ case CACHE_REPLACEMENT_FIFO:
+ invalidate_buckets_fifo(ca);
+ break;
+ case CACHE_REPLACEMENT_RANDOM:
+ invalidate_buckets_random(ca);
+ break;
+ }
+
+ pr_debug("free %zu/%zu free_inc %zu/%zu unused %zu/%zu",
+ fifo_used(&ca->free), ca->free.size,
+ fifo_used(&ca->free_inc), ca->free_inc.size,
+ fifo_used(&ca->unused), ca->unused.size);
+}
+
+#define allocator_wait(ca, cond) \
+do { \
+ DEFINE_WAIT(__wait); \
+ \
+ while (1) { \
+ prepare_to_wait(&ca->set->alloc_wait, \
+ &__wait, TASK_INTERRUPTIBLE); \
+ if (cond) \
+ break; \
+ \
+ mutex_unlock(&(ca)->set->bucket_lock); \
+ if (test_bit(CACHE_SET_STOPPING_2, &ca->set->flags)) { \
+ finish_wait(&ca->set->alloc_wait, &__wait); \
+ closure_return(cl); \
+ } \
+ \
+ schedule(); \
+ mutex_lock(&(ca)->set->bucket_lock); \
+ } \
+ \
+ finish_wait(&ca->set->alloc_wait, &__wait); \
+} while (0)
+
+void bch_allocator_thread(struct closure *cl)
+{
+ struct cache *ca = container_of(cl, struct cache, alloc);
+
+ mutex_lock(&ca->set->bucket_lock);
+
+ while (1) {
+ /*
+ * First, we pull buckets off of the unused and free_inc lists,
+ * possibly issue discards to them, then we add the bucket to
+ * the free list:
+ */
+ while (1) {
+ long bucket;
+
+ if ((!atomic_read(&ca->set->prio_blocked) ||
+ !CACHE_SYNC(&ca->set->sb)) &&
+ !fifo_empty(&ca->unused))
+ fifo_pop(&ca->unused, bucket);
+ else if (!fifo_empty(&ca->free_inc))
+ fifo_pop(&ca->free_inc, bucket);
+ else
+ break;
+
+ allocator_wait(ca, (int) fifo_free(&ca->free) >
+ atomic_read(&ca->discards_in_flight));
+
+ if (ca->discard) {
+ allocator_wait(ca, !list_empty(&ca->discards));
+ do_discard(ca, bucket);
+ } else {
+ fifo_push(&ca->free, bucket);
+ closure_wake_up(&ca->set->bucket_wait);
+ }
+ }
+
+ /*
+ * We've run out of free buckets, we need to find some buckets
+ * we can invalidate. First, invalidate them in memory and add
+ * them to the free_inc list:
+ */
+
+ allocator_wait(ca, ca->set->gc_mark_valid &&
+ (ca->need_save_prio > 64 ||
+ !ca->invalidate_needs_gc));
+ invalidate_buckets(ca);
+
+ /*
+ * Now, we write their new gens to disk so we can start writing
+ * new stuff to them:
+ */
+ allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
+ if (CACHE_SYNC(&ca->set->sb) &&
+ (!fifo_empty(&ca->free_inc) ||
+ ca->need_save_prio > 64))
+ bch_prio_write(ca);
+ }
+}
+
+long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl)
+{
+ long r = -1;
+again:
+ wake_up(&ca->set->alloc_wait);
+
+ if (fifo_used(&ca->free) > ca->watermark[watermark] &&
+ fifo_pop(&ca->free, r)) {
+ struct bucket *b = ca->buckets + r;
+#ifdef CONFIG_BCACHE_EDEBUG
+ size_t iter;
+ long i;
+
+ for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
+ BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
+
+ fifo_for_each(i, &ca->free, iter)
+ BUG_ON(i == r);
+ fifo_for_each(i, &ca->free_inc, iter)
+ BUG_ON(i == r);
+ fifo_for_each(i, &ca->unused, iter)
+ BUG_ON(i == r);
+#endif
+ BUG_ON(atomic_read(&b->pin) != 1);
+
+ SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
+
+ if (watermark <= WATERMARK_METADATA) {
+ SET_GC_MARK(b, GC_MARK_METADATA);
+ b->prio = BTREE_PRIO;
+ } else {
+ SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+ b->prio = INITIAL_PRIO;
+ }
+
+ return r;
+ }
+
+ pr_debug("alloc failure: blocked %i free %zu free_inc %zu unused %zu",
+ atomic_read(&ca->set->prio_blocked), fifo_used(&ca->free),
+ fifo_used(&ca->free_inc), fifo_used(&ca->unused));
+
+ if (cl) {
+ closure_wait(&ca->set->bucket_wait, cl);
+
+ if (closure_blocking(cl)) {
+ mutex_unlock(&ca->set->bucket_lock);
+ closure_sync(cl);
+ mutex_lock(&ca->set->bucket_lock);
+ goto again;
+ }
+ }
+
+ return -1;
+}
+
+void bch_bucket_free(struct cache_set *c, struct bkey *k)
+{
+ unsigned i;
+
+ for (i = 0; i < KEY_PTRS(k); i++) {
+ struct bucket *b = PTR_BUCKET(c, k, i);
+
+ SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+ SET_GC_SECTORS_USED(b, 0);
+ bch_bucket_add_unused(PTR_CACHE(c, k, i), b);
+ }
+}
+
+int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
+ struct bkey *k, int n, struct closure *cl)
+{
+ int i;
+
+ lockdep_assert_held(&c->bucket_lock);
+ BUG_ON(!n || n > c->caches_loaded || n > 8);
+
+ bkey_init(k);
+
+ /* sort by free space/prio of oldest data in caches */
+
+ for (i = 0; i < n; i++) {
+ struct cache *ca = c->cache_by_alloc[i];
+ long b = bch_bucket_alloc(ca, watermark, cl);
+
+ if (b == -1)
+ goto err;
+
+ k->ptr[i] = PTR(ca->buckets[b].gen,
+ bucket_to_sector(c, b),
+ ca->sb.nr_this_dev);
+
+ SET_KEY_PTRS(k, i + 1);
+ }
+
+ return 0;
+err:
+ bch_bucket_free(c, k);
+ __bkey_put(c, k);
+ return -1;
+}
+
+int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
+ struct bkey *k, int n, struct closure *cl)
+{
+ int ret;
+ mutex_lock(&c->bucket_lock);
+ ret = __bch_bucket_alloc_set(c, watermark, k, n, cl);
+ mutex_unlock(&c->bucket_lock);
+ return ret;
+}
+
+/* Init */
+
+void bch_cache_allocator_exit(struct cache *ca)
+{
+ struct discard *d;
+
+ while (!list_empty(&ca->discards)) {
+ d = list_first_entry(&ca->discards, struct discard, list);
+ cancel_work_sync(&d->work);
+ list_del(&d->list);
+ kfree(d);
+ }
+}
+
+int bch_cache_allocator_init(struct cache *ca)
+{
+ unsigned i;
+
+ /*
+ * Reserve:
+ * Prio/gen writes first
+ * Then 8 for btree allocations
+ * Then half for the moving garbage collector
+ */
+
+ ca->watermark[WATERMARK_PRIO] = 0;
+
+ ca->watermark[WATERMARK_METADATA] = prio_buckets(ca);
+
+ ca->watermark[WATERMARK_MOVINGGC] = 8 +
+ ca->watermark[WATERMARK_METADATA];
+
+ ca->watermark[WATERMARK_NONE] = ca->free.size / 2 +
+ ca->watermark[WATERMARK_MOVINGGC];
+
+ for (i = 0; i < MAX_IN_FLIGHT_DISCARDS; i++) {
+ struct discard *d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ d->ca = ca;
+ INIT_WORK(&d->work, discard_finish);
+ list_add(&d->list, &ca->discards);
+ }
+
+ return 0;
+}
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
new file mode 100644
index 0000000..340146d
--- /dev/null
+++ b/drivers/md/bcache/bcache.h
@@ -0,0 +1,1259 @@
+#ifndef _BCACHE_H
+#define _BCACHE_H
+
+/*
+ * SOME HIGH LEVEL CODE DOCUMENTATION:
+ *
+ * Bcache mostly works with cache sets, cache devices, and backing devices.
+ *
+ * Support for multiple cache devices hasn't quite been finished off yet, but
+ * it's about 95% plumbed through. A cache set and its cache devices is sort of
+ * like a md raid array and its component devices. Most of the code doesn't care
+ * about individual cache devices, the main abstraction is the cache set.
+ *
+ * Multiple cache devices is intended to give us the ability to mirror dirty
+ * cached data and metadata, without mirroring clean cached data.
+ *
+ * Backing devices are different, in that they have a lifetime independent of a
+ * cache set. When you register a newly formatted backing device it'll come up
+ * in passthrough mode, and then you can attach and detach a backing device from
+ * a cache set at runtime - while it's mounted and in use. Detaching implicitly
+ * invalidates any cached data for that backing device.
+ *
+ * A cache set can have multiple (many) backing devices attached to it.
+ *
+ * There's also flash only volumes - this is the reason for the distinction
+ * between struct cached_dev and struct bcache_device. A flash only volume
+ * works much like a bcache device that has a backing device, except the
+ * "cached" data is always dirty. The end result is that we get thin
+ * provisioning with very little additional code.
+ *
+ * Flash only volumes work but they're not production ready because the moving
+ * garbage collector needs more work. More on that later.
+ *
+ * BUCKETS/ALLOCATION:
+ *
+ * Bcache is primarily designed for caching, which means that in normal
+ * operation all of our available space will be allocated. Thus, we need an
+ * efficient way of deleting things from the cache so we can write new things to
+ * it.
+ *
+ * To do this, we first divide the cache device up into buckets. A bucket is the
+ * unit of allocation; they're typically around 1 MB - anywhere from 128 KB to 2 MB+
+ * works efficiently.
+ *
+ * Each bucket has a 16 bit priority, and an 8 bit generation associated with
+ * it. The gens and priorities for all the buckets are stored contiguously and
+ * packed on disk (in a linked list of buckets - aside from the superblock, all
+ * of bcache's metadata is stored in buckets).
+ *
+ * The priority is used to implement an LRU. We reset a bucket's priority when
+ * we allocate it or on a cache hit, and every so often we decrement the priority
+ * of each bucket. It could be used to implement something more sophisticated,
+ * if anyone ever gets around to it.
+ *
+ * The generation is used for invalidating buckets. Each pointer also has an 8
+ * bit generation embedded in it; for a pointer to be considered valid, its gen
+ * must match the gen of the bucket it points into. Thus, to reuse a bucket all
+ * we have to do is increment its gen (and write its new gen to disk; we batch
+ * this up).
+ *
+ * Bcache is entirely COW - we never write twice to a bucket, even buckets that
+ * contain metadata (including btree nodes).
+ *
+ * THE BTREE:
+ *
+ * Bcache is in large part designed around the btree.
+ *
+ * At a high level, the btree is just an index of key -> ptr tuples.
+ *
+ * Keys represent extents, and thus have a size field. Keys also have a variable
+ * number of pointers attached to them (potentially zero, which is handy for
+ * invalidating the cache).
+ *
+ * The key itself is an inode:offset pair. The inode number corresponds to a
+ * backing device or a flash only volume. The offset is the ending offset of the
+ * extent within the inode - not the starting offset; this makes lookups
+ * slightly more convenient.
+ *
+ * Pointers contain the cache device id, the offset on that device, and an 8 bit
+ * generation number. More on the gen later.
+ *
+ * Index lookups are not fully abstracted - cache lookups in particular are
+ * still somewhat mixed in with the btree code, but things are headed in that
+ * direction.
+ *
+ * Updates are fairly well abstracted, though. There are two different ways of
+ * updating the btree; insert and replace.
+ *
+ * BTREE_INSERT will just take a list of keys and insert them into the btree -
+ * overwriting (possibly only partially) any extents they overlap with. This is
+ * used to update the index after a write.
+ *
+ * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
+ * overwriting a key that matches another given key. This is used for inserting
+ * data into the cache after a cache miss, and for background writeback, and for
+ * the moving garbage collector.
+ *
+ * There is no "delete" operation; deleting things from the index is
+ * accomplished either by invalidating pointers (by incrementing a bucket's
+ * gen) or by inserting a key with 0 pointers - which will overwrite anything
+ * previously present at that location in the index.
+ *
+ * This means that there are always stale/invalid keys in the btree. They're
+ * filtered out by the code that iterates through a btree node, and removed when
+ * a btree node is rewritten.
+ *
+ * BTREE NODES:
+ *
+ * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
+ * free smaller than a bucket - so, that's how big our btree nodes are.
+ *
+ * (If buckets are really big we'll only use part of the bucket for a btree node
+ * - no less than 1/4th - but a bucket still contains no more than a single
+ * btree node. I'd actually like to change this, but for now we rely on the
+ * bucket's gen for deleting btree nodes when we rewrite/split a node.)
+ *
+ * Anyways, btree nodes are big - big enough to be inefficient with a textbook
+ * btree implementation.
+ *
+ * The way this is solved is that btree nodes are internally log structured; we
+ * can append new keys to an existing btree node without rewriting it. This
+ * means each set of keys we write is sorted, but the node is not.
+ *
+ * We maintain this log structure in memory - keeping 1Mb of keys sorted would
+ * be expensive, and we have to distinguish between the keys we have written and
+ * the keys we haven't. So to do a lookup in a btree node, we have to search
+ * each sorted set. But we do merge written sets together lazily, so the cost of
+ * these extra searches is quite low (normally most of the keys in a btree node
+ * will be in one big set, and then there'll be one or two sets that are much
+ * smaller).
+ *
+ * This log structure makes bcache's btree more of a hybrid between a
+ * conventional btree and a compacting data structure, with some of the
+ * advantages of both.
+ *
+ * GARBAGE COLLECTION:
+ *
+ * We can't just invalidate any bucket - it might contain dirty data or
+ * metadata. If it once contained dirty data, other writes might overwrite it
+ * later, leaving no valid pointers into that bucket in the index.
+ *
+ * Thus, the primary purpose of garbage collection is to find buckets to reuse.
+ * It also counts how much valid data each bucket currently contains, so that
+ * allocation can reuse buckets sooner when they've been mostly overwritten.
+ *
+ * It also does some things that are really internal to the btree
+ * implementation. If a btree node contains pointers that are stale by more than
+ * some threshold, it rewrites the btree node to avoid the bucket's generation
+ * wrapping around. It also merges adjacent btree nodes if they're empty enough.
+ *
+ * THE JOURNAL:
+ *
+ * Bcache's journal is not necessary for consistency; we always strictly
+ * order metadata writes so that the btree and everything else is consistent on
+ * disk in the event of an unclean shutdown, and in fact bcache had writeback
+ * caching (with recovery from unclean shutdown) before journalling was
+ * implemented.
+ *
+ * Rather, the journal is purely a performance optimization; we can't complete a
+ * write until we've updated the index on disk, otherwise the cache would be
+ * inconsistent in the event of an unclean shutdown. This means that without the
+ * journal, on random write workloads we constantly have to update all the leaf
+ * nodes in the btree, and those writes will be mostly empty (appending at most
+ * a few keys each) - highly inefficient in terms of amount of metadata writes,
+ * and it puts more strain on the various btree resorting/compacting code.
+ *
+ * The journal is just a log of keys we've inserted; on startup we just reinsert
+ * all the keys in the open journal entries. That means that when we're updating
+ * a node in the btree, we can wait until a 4k block of keys fills up before
+ * writing them out.
+ *
+ * For simplicity, we only journal updates to leaf nodes; updates to parent
+ * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
+ * the complexity to deal with journalling them (in particular, journal replay)
+ * - updates to non leaf nodes just happen synchronously (see btree_split()).
+ */
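A hedged illustration of the "ending offset" convention described above; the struct and helper below are made up for illustration and are not the real bkey accessors:

	struct sketch_extent_key {
		uint64_t inode;
		uint64_t offset;	/* ending offset of the extent, in sectors */
		uint64_t size;		/* length, in sectors */
	};

	/* An 8-sector extent ending at sector 1008 of inode 5 starts at 1000. */
	static uint64_t sketch_key_start(const struct sketch_extent_key *k)
	{
		return k->offset - k->size;
	}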
+
+#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
+
+#include <linux/bio.h>
+#include <linux/blktrace_api.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "util.h"
+#include "closure.h"
+
+struct bucket {
+ atomic_t pin;
+ uint16_t prio;
+ uint8_t gen;
+ uint8_t disk_gen;
+ uint8_t last_gc; /* Most out of date gen in the btree */
+ uint8_t gc_gen;
+ uint16_t gc_mark;
+};
+
+/*
+ * I'd use bitfields for these, but I don't trust the compiler not to screw me
+ * as multiple threads touch struct bucket without locking
+ */
+
+BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
+#define GC_MARK_RECLAIMABLE 0
+#define GC_MARK_DIRTY 1
+#define GC_MARK_METADATA 2
+BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14);
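A hedged guess at what a BITMASK(NAME, type, field, offset, size) accessor pair expands to; the real macro lives in util.h (not part of this hunk), so the helpers below are illustrative only:

	/* GC_MARK uses bits 0..1 of bucket->gc_mark; GC_SECTORS_USED uses bits 2..15. */
	static unsigned sketch_gc_mark(const struct bucket *b)
	{
		return (b->gc_mark >> 0) & ((1U << 2) - 1);
	}

	static void sketch_set_gc_mark(struct bucket *b, unsigned v)
	{
		b->gc_mark &= ~(((1U << 2) - 1) << 0);
		b->gc_mark |= (v & ((1U << 2) - 1)) << 0;
	}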
+
+struct bkey {
+ uint64_t high;
+ uint64_t low;
+ uint64_t ptr[];
+};
+
+/* Enough for a key with 6 pointers */
+#define BKEY_PAD 8
+
+#define BKEY_PADDED(key) \
+ union { struct bkey key; uint64_t key ## _pad[BKEY_PAD]; }
+
+/* Version 0: Cache device
+ * Version 1: Backing device
+ * Version 2: Seed pointer into btree node checksum
+ * Version 3: Cache device with new UUID format
+ * Version 4: Backing device with data offset
+ */
+#define BCACHE_SB_VERSION_CDEV 0
+#define BCACHE_SB_VERSION_BDEV 1
+#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3
+#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4
+#define BCACHE_SB_MAX_VERSION 4
+
+#define SB_SECTOR 8
+#define SB_SIZE 4096
+#define SB_LABEL_SIZE 32
+#define SB_JOURNAL_BUCKETS 256U
+/* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */
+#define MAX_CACHES_PER_SET 8
+
+#define BDEV_DATA_START_DEFAULT 16 /* sectors */
+
+struct cache_sb {
+ uint64_t csum;
+ uint64_t offset; /* sector where this sb was written */
+ uint64_t version;
+
+ uint8_t magic[16];
+
+ uint8_t uuid[16];
+ union {
+ uint8_t set_uuid[16];
+ uint64_t set_magic;
+ };
+ uint8_t label[SB_LABEL_SIZE];
+
+ uint64_t flags;
+ uint64_t seq;
+ uint64_t pad[8];
+
+ union {
+ struct {
+ /* Cache devices */
+ uint64_t nbuckets; /* device size */
+
+ uint16_t block_size; /* sectors */
+ uint16_t bucket_size; /* sectors */
+
+ uint16_t nr_in_set;
+ uint16_t nr_this_dev;
+ };
+ struct {
+ /* Backing devices */
+ uint64_t data_offset;
+
+ /*
+ * block_size from the cache device section is still used by
+ * backing devices, so don't add anything here until we fix
+ * things to not need it for backing devices anymore
+ */
+ };
+ };
+
+ uint32_t last_mount; /* time_t */
+
+ uint16_t first_bucket;
+ union {
+ uint16_t njournal_buckets;
+ uint16_t keys;
+ };
+ uint64_t d[SB_JOURNAL_BUCKETS]; /* journal buckets */
+};
+
+BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1);
+BITMASK(CACHE_DISCARD, struct cache_sb, flags, 1, 1);
+BITMASK(CACHE_REPLACEMENT, struct cache_sb, flags, 2, 3);
+#define CACHE_REPLACEMENT_LRU 0U
+#define CACHE_REPLACEMENT_FIFO 1U
+#define CACHE_REPLACEMENT_RANDOM 2U
+
+BITMASK(BDEV_CACHE_MODE, struct cache_sb, flags, 0, 4);
+#define CACHE_MODE_WRITETHROUGH 0U
+#define CACHE_MODE_WRITEBACK 1U
+#define CACHE_MODE_WRITEAROUND 2U
+#define CACHE_MODE_NONE 3U
+BITMASK(BDEV_STATE, struct cache_sb, flags, 61, 2);
+#define BDEV_STATE_NONE 0U
+#define BDEV_STATE_CLEAN 1U
+#define BDEV_STATE_DIRTY 2U
+#define BDEV_STATE_STALE 3U
+
+/* Version 1: Seed pointer into btree node checksum
+ */
+#define BCACHE_BSET_VERSION 1
+
+/*
+ * This is the on disk format for btree nodes - a btree node on disk is a list
+ * of these; within each set the keys are sorted
+ */
+struct bset {
+ uint64_t csum;
+ uint64_t magic;
+ uint64_t seq;
+ uint32_t version;
+ uint32_t keys;
+
+ union {
+ struct bkey start[0];
+ uint64_t d[0];
+ };
+};
+
+/*
+ * On disk format for priorities and gens - see super.c near prio_write() for
+ * more.
+ */
+struct prio_set {
+ uint64_t csum;
+ uint64_t magic;
+ uint64_t seq;
+ uint32_t version;
+ uint32_t pad;
+
+ uint64_t next_bucket;
+
+ struct bucket_disk {
+ uint16_t prio;
+ uint8_t gen;
+ } __attribute((packed)) data[];
+};
+
+struct uuid_entry {
+ union {
+ struct {
+ uint8_t uuid[16];
+ uint8_t label[32];
+ uint32_t first_reg;
+ uint32_t last_reg;
+ uint32_t invalidated;
+
+ uint32_t flags;
+ /* Size of flash only volumes */
+ uint64_t sectors;
+ };
+
+ uint8_t pad[128];
+ };
+};
+
+BITMASK(UUID_FLASH_ONLY, struct uuid_entry, flags, 0, 1);
+
+#include "journal.h"
+#include "stats.h"
+struct search;
+struct btree;
+struct keybuf;
+
+struct keybuf_key {
+ struct rb_node node;
+ BKEY_PADDED(key);
+ void *private;
+};
+
+typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
+
+struct keybuf {
+ keybuf_pred_fn *key_predicate;
+
+ struct bkey last_scanned;
+ spinlock_t lock;
+
+ /*
+ * Beginning and end of range in rb tree - so that we can skip taking
+ * lock and checking the rb tree when we need to check for overlapping
+ * keys.
+ */
+ struct bkey start;
+ struct bkey end;
+
+ struct rb_root keys;
+
+#define KEYBUF_NR 100
+ DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
+};
+
+struct bio_split_pool {
+ struct bio_set *bio_split;
+ mempool_t *bio_split_hook;
+};
+
+struct bio_split_hook {
+ struct closure cl;
+ struct bio_split_pool *p;
+ struct bio *bio;
+ bio_end_io_t *bi_end_io;
+ void *bi_private;
+};
+
+struct bcache_device {
+ struct closure cl;
+
+ struct kobject kobj;
+
+ struct cache_set *c;
+ unsigned id;
+#define BCACHEDEVNAME_SIZE 12
+ char name[BCACHEDEVNAME_SIZE];
+
+ struct gendisk *disk;
+
+ /* If nonzero, we're closing */
+ atomic_t closing;
+
+ /* If nonzero, we're detaching/unregistering from cache set */
+ atomic_t detaching;
+
+ atomic_long_t sectors_dirty;
+ unsigned long sectors_dirty_gc;
+ unsigned long sectors_dirty_last;
+ long sectors_dirty_derivative;
+
+ mempool_t *unaligned_bvec;
+ struct bio_set *bio_split;
+
+ unsigned data_csum:1;
+
+ int (*cache_miss)(struct btree *, struct search *,
+ struct bio *, unsigned);
+ int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long);
+
+ struct bio_split_pool bio_split_hook;
+};
+
+struct io {
+ /* Used to track sequential IO so it can be skipped */
+ struct hlist_node hash;
+ struct list_head lru;
+
+ unsigned long jiffies;
+ unsigned sequential;
+ sector_t last;
+};
+
+struct cached_dev {
+ struct list_head list;
+ struct bcache_device disk;
+ struct block_device *bdev;
+
+ struct cache_sb sb;
+ struct bio sb_bio;
+ struct bio_vec sb_bv[1];
+ struct closure_with_waitlist sb_write;
+
+ /* Refcount on the cache set. Always nonzero when we're caching. */
+ atomic_t count;
+ struct work_struct detach;
+
+ /*
+ * Device might not be running if it's dirty and the cache set hasn't
+ * showed up yet.
+ */
+ atomic_t running;
+
+ /*
+ * Writes take a shared lock from start to finish; scanning for dirty
+ * data to refill the rb tree requires an exclusive lock.
+ */
+ struct rw_semaphore writeback_lock;
+
+ /*
+ * Nonzero, and writeback has a refcount (d->count), iff there is dirty
+ * data in the cache. Protected by writeback_lock; must have an
+ * shared lock to set and exclusive lock to clear.
+ */
+ atomic_t has_dirty;
+
+ struct ratelimit writeback_rate;
+ struct delayed_work writeback_rate_update;
+
+ /*
+ * Internal to the writeback code, so read_dirty() can keep track of
+ * where it's at.
+ */
+ sector_t last_read;
+
+ /* Number of writeback bios in flight */
+ atomic_t in_flight;
+ struct closure_with_timer writeback;
+ struct closure_waitlist writeback_wait;
+
+ struct keybuf writeback_keys;
+
+ /* For tracking sequential IO */
+#define RECENT_IO_BITS 7
+#define RECENT_IO (1 << RECENT_IO_BITS)
+ struct io io[RECENT_IO];
+ struct hlist_head io_hash[RECENT_IO + 1];
+ struct list_head io_lru;
+ spinlock_t io_lock;
+
+ struct cache_accounting accounting;
+
+ /* The rest of this all shows up in sysfs */
+ unsigned sequential_cutoff;
+ unsigned readahead;
+
+ unsigned sequential_merge:1;
+ unsigned verify:1;
+
+ unsigned writeback_metadata:1;
+ unsigned writeback_running:1;
+ unsigned char writeback_percent;
+ unsigned writeback_delay;
+
+ int writeback_rate_change;
+ int64_t writeback_rate_derivative;
+ uint64_t writeback_rate_target;
+
+ unsigned writeback_rate_update_seconds;
+ unsigned writeback_rate_d_term;
+ unsigned writeback_rate_p_term_inverse;
+ unsigned writeback_rate_d_smooth;
+};
+
+enum alloc_watermarks {
+ WATERMARK_PRIO,
+ WATERMARK_METADATA,
+ WATERMARK_MOVINGGC,
+ WATERMARK_NONE,
+ WATERMARK_MAX
+};
+
+struct cache {
+ struct cache_set *set;
+ struct cache_sb sb;
+ struct bio sb_bio;
+ struct bio_vec sb_bv[1];
+
+ struct kobject kobj;
+ struct block_device *bdev;
+
+ unsigned watermark[WATERMARK_MAX];
+
+ struct closure alloc;
+ struct workqueue_struct *alloc_workqueue;
+
+ struct closure prio;
+ struct prio_set *disk_buckets;
+
+ /*
+ * When allocating new buckets, prio_write() gets first dibs - since we
+ * may not be able to allocate at all without writing priorities and gens.
+ * prio_buckets[] contains the last buckets we wrote priorities to (so
+ * gc can mark them as metadata), prio_next[] contains the buckets
+ * allocated for the next prio write.
+ */
+ uint64_t *prio_buckets;
+ uint64_t *prio_last_buckets;
+
+ /*
+ * free: Buckets that are ready to be used
+ *
+ * free_inc: Incoming buckets - these are buckets that currently have
+ * cached data in them, and we can't reuse them until after we write
+ * their new gen to disk. After prio_write() finishes writing the new
+ * gens/prios, they'll be moved to the free list (and possibly discarded
+ * in the process)
+ *
+ * unused: GC found nothing pointing into these buckets (possibly
+ * because all the data they contained was overwritten), so we only
+ * need to discard them before they can be moved to the free list.
+ */
+ DECLARE_FIFO(long, free);
+ DECLARE_FIFO(long, free_inc);
+ DECLARE_FIFO(long, unused);
+
+ size_t fifo_last_bucket;
+
+ /* Allocation stuff: */
+ struct bucket *buckets;
+
+ DECLARE_HEAP(struct bucket *, heap);
+
+ /*
+ * max(gen - disk_gen) for all buckets. When it gets too big we have to
+ * call prio_write() to keep gens from wrapping.
+ */
+ uint8_t need_save_prio;
+ unsigned gc_move_threshold;
+
+ /*
+ * If nonzero, we know we aren't going to find any buckets to invalidate
+ * until a gc finishes - otherwise we could pointlessly burn a ton of
+ * cpu
+ */
+ unsigned invalidate_needs_gc:1;
+
+ bool discard; /* Get rid of? */
+
+ /*
+ * We preallocate structs for issuing discards to buckets, and keep them
+ * on this list when they're not in use; do_discard() issues discards
+ * whenever there's work to do and is called by free_some_buckets() and
+ * when a discard finishes.
+ */
+ atomic_t discards_in_flight;
+ struct list_head discards;
+
+ struct journal_device journal;
+
+ /* The rest of this all shows up in sysfs */
+#define IO_ERROR_SHIFT 20
+ atomic_t io_errors;
+ atomic_t io_count;
+
+ atomic_long_t meta_sectors_written;
+ atomic_long_t btree_sectors_written;
+ atomic_long_t sectors_written;
+
+ struct bio_split_pool bio_split_hook;
+};
+
+struct gc_stat {
+ size_t nodes;
+ size_t key_bytes;
+
+ size_t nkeys;
+ uint64_t data; /* sectors */
+ uint64_t dirty; /* sectors */
+ unsigned in_use; /* percent */
+};
+
+/*
+ * Flag bits, for how the cache set is shutting down, and what phase it's at:
+ *
+ * CACHE_SET_UNREGISTERING means we're not just shutting down, we're detaching
+ * all the backing devices first (their cached data gets invalidated, and they
+ * won't automatically reattach).
+ *
+ * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
+ * we'll continue to run normally for awhile with CACHE_SET_STOPPING set (i.e.
+ * flushing dirty data).
+ *
+ * CACHE_SET_STOPPING_2 gets set at the last phase, when it's time to shut down
+ * the allocation thread.
+ */
+#define CACHE_SET_UNREGISTERING 0
+#define CACHE_SET_STOPPING 1
+#define CACHE_SET_STOPPING_2 2
+
+struct cache_set {
+ struct closure cl;
+
+ struct list_head list;
+ struct kobject kobj;
+ struct kobject internal;
+ struct dentry *debug;
+ struct cache_accounting accounting;
+
+ unsigned long flags;
+
+ struct cache_sb sb;
+
+ struct cache *cache[MAX_CACHES_PER_SET];
+ struct cache *cache_by_alloc[MAX_CACHES_PER_SET];
+ int caches_loaded;
+
+ struct bcache_device **devices;
+ struct list_head cached_devs;
+ uint64_t cached_dev_sectors;
+ struct closure caching;
+
+ struct closure_with_waitlist sb_write;
+
+ mempool_t *search;
+ mempool_t *bio_meta;
+ struct bio_set *bio_split;
+
+ /* For the btree cache */
+ struct shrinker shrink;
+
+ /* For the allocator itself */
+ wait_queue_head_t alloc_wait;
+
+ /* For the btree cache and anything allocation related */
+ struct mutex bucket_lock;
+
+ /* log2(bucket_size), in sectors */
+ unsigned short bucket_bits;
+
+ /* log2(block_size), in sectors */
+ unsigned short block_bits;
+
+ /*
+ * Default number of pages for a new btree node - may be less than a
+ * full bucket
+ */
+ unsigned btree_pages;
+
+ /*
+ * Lists of struct btrees; lru is the list for structs that have memory
+ * allocated for actual btree node, freed is for structs that do not.
+ *
+ * We never free a struct btree, except on shutdown - we just put it on
+ * the btree_cache_freed list and reuse it later. This simplifies the
+ * code, and it doesn't cost us much memory as the memory usage is
+ * dominated by buffers that hold the actual btree node data and those
+ * can be freed - and the number of struct btrees allocated is
+ * effectively bounded.
+ *
+ * btree_cache_freeable effectively is a small cache - we use it because
+ * high order page allocations can be rather expensive, and it's quite
+ * common to delete and allocate btree nodes in quick succession. It
+ * should never grow past ~2-3 nodes in practice.
+ */
+ struct list_head btree_cache;
+ struct list_head btree_cache_freeable;
+ struct list_head btree_cache_freed;
+
+ /* Number of elements in btree_cache + btree_cache_freeable lists */
+ unsigned bucket_cache_used;
+
+ /*
+ * If we need to allocate memory for a new btree node and that
+ * allocation fails, we can cannibalize another node in the btree cache
+ * to satisfy the allocation. However, only one thread can be doing this
+ * at a time, for obvious reasons - try_harder and try_wait are
+ * basically a lock for this that we can wait on asynchronously. The
+ * btree_root() macro releases the lock when it returns.
+ */
+ struct closure *try_harder;
+ struct closure_waitlist try_wait;
+ uint64_t try_harder_start;
+
+ /*
+ * When we free a btree node, we increment the gen of the bucket the
+	 * node is in - but we can't rewrite the prios and gens until we've
+ * finished whatever it is we were doing, otherwise after a crash the
+ * btree node would be freed but for say a split, we might not have the
+ * pointers to the new nodes inserted into the btree yet.
+ *
+ * This is a refcount that blocks prio_write() until the new keys are
+ * written.
+ */
+ atomic_t prio_blocked;
+ struct closure_waitlist bucket_wait;
+
+ /*
+ * For any bio we don't skip we subtract the number of sectors from
+ * rescale; when it hits 0 we rescale all the bucket priorities.
+ */
+ atomic_t rescale;
+ /*
+ * When we invalidate buckets, we use both the priority and the amount
+ * of good data to determine which buckets to reuse first - to weight
+ * those together consistently we keep track of the smallest nonzero
+ * priority of any bucket.
+ */
+ uint16_t min_prio;
+
+ /*
+ * max(gen - gc_gen) for all buckets. When it gets too big we have to gc
+ * to keep gens from wrapping around.
+ */
+ uint8_t need_gc;
+ struct gc_stat gc_stats;
+ size_t nbuckets;
+
+ struct closure_with_waitlist gc;
+ /* Where in the btree gc currently is */
+ struct bkey gc_done;
+
+ /*
+ * The allocation code needs gc_mark in struct bucket to be correct, but
+ * it's not while a gc is in progress. Protected by bucket_lock.
+ */
+ int gc_mark_valid;
+
+ /* Counts how many sectors bio_insert has added to the cache */
+ atomic_t sectors_to_gc;
+
+ struct closure moving_gc;
+ struct closure_waitlist moving_gc_wait;
+ struct keybuf moving_gc_keys;
+ /* Number of moving GC bios in flight */
+ atomic_t in_flight;
+
+ struct btree *root;
+
+#ifdef CONFIG_BCACHE_DEBUG
+ struct btree *verify_data;
+ struct mutex verify_lock;
+#endif
+
+ unsigned nr_uuids;
+ struct uuid_entry *uuids;
+ BKEY_PADDED(uuid_bucket);
+ struct closure_with_waitlist uuid_write;
+
+ /*
+ * A btree node on disk could have too many bsets for an iterator to fit
+ * on the stack - this is a single element mempool for btree_read_work()
+ */
+ struct mutex fill_lock;
+ struct btree_iter *fill_iter;
+
+ /*
+ * btree_sort() is a merge sort and requires temporary space - single
+ * element mempool
+ */
+ struct mutex sort_lock;
+ struct bset *sort;
+
+ /* List of buckets we're currently writing data to */
+ struct list_head data_buckets;
+ spinlock_t data_bucket_lock;
+
+ struct journal journal;
+
+#define CONGESTED_MAX 1024
+ unsigned congested_last_us;
+ atomic_t congested;
+
+ /* The rest of this all shows up in sysfs */
+ unsigned congested_read_threshold_us;
+ unsigned congested_write_threshold_us;
+
+ spinlock_t sort_time_lock;
+ struct time_stats sort_time;
+ struct time_stats btree_gc_time;
+ struct time_stats btree_split_time;
+ spinlock_t btree_read_time_lock;
+ struct time_stats btree_read_time;
+ struct time_stats try_harder_time;
+
+ atomic_long_t cache_read_races;
+ atomic_long_t writeback_keys_done;
+ atomic_long_t writeback_keys_failed;
+ unsigned error_limit;
+ unsigned error_decay;
+ unsigned short journal_delay_ms;
+ unsigned verify:1;
+ unsigned key_merging_disabled:1;
+ unsigned gc_always_rewrite:1;
+ unsigned shrinker_disabled:1;
+ unsigned copy_gc_enabled:1;
+
+#define BUCKET_HASH_BITS 12
+ struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS];
+};
+
+static inline bool key_merging_disabled(struct cache_set *c)
+{
+#ifdef CONFIG_BCACHE_DEBUG
+ return c->key_merging_disabled;
+#else
+ return 0;
+#endif
+}
+
+static inline bool SB_IS_BDEV(const struct cache_sb *sb)
+{
+ return sb->version == BCACHE_SB_VERSION_BDEV
+ || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET;
+}
+
+struct bbio {
+ unsigned submit_time_us;
+ union {
+ struct bkey key;
+ uint64_t _pad[3];
+ /*
+ * We only need pad = 3 here because we only ever carry around a
+ * single pointer - i.e. the pointer we're doing io to/from.
+ */
+ };
+ struct bio bio;
+};
+
+static inline unsigned local_clock_us(void)
+{
+ return local_clock() >> 10;
+}
+
+#define MAX_BSETS 4U
+
+#define BTREE_PRIO USHRT_MAX
+#define INITIAL_PRIO 32768
+
+#define btree_bytes(c) ((c)->btree_pages * PAGE_SIZE)
+#define btree_blocks(b) \
+ ((unsigned) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
+
+#define btree_default_blocks(c) \
+ ((unsigned) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
+
+#define bucket_pages(c) ((c)->sb.bucket_size / PAGE_SECTORS)
+#define bucket_bytes(c) ((c)->sb.bucket_size << 9)
+#define block_bytes(c) ((c)->sb.block_size << 9)
+
+#define __set_bytes(i, k) (sizeof(*(i)) + (k) * sizeof(uint64_t))
+#define set_bytes(i) __set_bytes(i, i->keys)
+
+#define __set_blocks(i, k, c) DIV_ROUND_UP(__set_bytes(i, k), block_bytes(c))
+#define set_blocks(i, c) __set_blocks(i, (i)->keys, c)
+
+#define node(i, j) ((struct bkey *) ((i)->d + (j)))
+#define end(i) node(i, (i)->keys)
+
+#define index(i, b) \
+ ((size_t) (((void *) i - (void *) (b)->sets[0].data) / \
+ block_bytes(b->c)))
+
+#define btree_data_space(b) (PAGE_SIZE << (b)->page_order)
+
+#define prios_per_bucket(c) \
+ ((bucket_bytes(c) - sizeof(struct prio_set)) / \
+ sizeof(struct bucket_disk))
+#define prio_buckets(c) \
+ DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c))
+
+#define JSET_MAGIC 0x245235c1a3625032ULL
+#define PSET_MAGIC 0x6750e15f87337f91ULL
+#define BSET_MAGIC 0x90135c78b99e07f5ULL
+
+#define jset_magic(c) ((c)->sb.set_magic ^ JSET_MAGIC)
+#define pset_magic(c) ((c)->sb.set_magic ^ PSET_MAGIC)
+#define bset_magic(c) ((c)->sb.set_magic ^ BSET_MAGIC)
+
+/* Bkey fields: all units are in sectors */
+
+#define KEY_FIELD(name, field, offset, size) \
+ BITMASK(name, struct bkey, field, offset, size)
+
+#define PTR_FIELD(name, offset, size) \
+ static inline uint64_t name(const struct bkey *k, unsigned i) \
+ { return (k->ptr[i] >> offset) & ~(((uint64_t) ~0) << size); } \
+ \
+ static inline void SET_##name(struct bkey *k, unsigned i, uint64_t v)\
+ { \
+ k->ptr[i] &= ~(~((uint64_t) ~0 << size) << offset); \
+ k->ptr[i] |= v << offset; \
+ }
+
+KEY_FIELD(KEY_PTRS, high, 60, 3)
+KEY_FIELD(HEADER_SIZE, high, 58, 2)
+KEY_FIELD(KEY_CSUM, high, 56, 2)
+KEY_FIELD(KEY_PINNED, high, 55, 1)
+KEY_FIELD(KEY_DIRTY, high, 36, 1)
+
+KEY_FIELD(KEY_SIZE, high, 20, 16)
+KEY_FIELD(KEY_INODE, high, 0, 20)
+
+/* Next time I change the on disk format, KEY_OFFSET() won't be 64 bits */
+
+static inline uint64_t KEY_OFFSET(const struct bkey *k)
+{
+ return k->low;
+}
+
+static inline void SET_KEY_OFFSET(struct bkey *k, uint64_t v)
+{
+ k->low = v;
+}
+
+PTR_FIELD(PTR_DEV, 51, 12)
+PTR_FIELD(PTR_OFFSET, 8, 43)
+PTR_FIELD(PTR_GEN, 0, 8)
+
+#define PTR_CHECK_DEV ((1 << 12) - 1)
+
+#define PTR(gen, offset, dev) \
+ ((((uint64_t) dev) << 51) | ((uint64_t) offset) << 8 | gen)
+
+static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
+{
+ return s >> c->bucket_bits;
+}
+
+static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
+{
+ return ((sector_t) b) << c->bucket_bits;
+}
+
+static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
+{
+ return s & (c->sb.bucket_size - 1);
+}
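+
+/*
+ * A quick worked example of the conversions above (numbers purely
+ * illustrative): with 512 KiB buckets, sb.bucket_size is 1024 sectors and
+ * bucket_bits is 10, so sector_to_bucket(c, 5000) == 4 (that bucket spans
+ * sectors 4096..5119), bucket_to_sector(c, 4) == 4096 and
+ * bucket_remainder(c, 5000) == 904.
+ */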
+
+static inline struct cache *PTR_CACHE(struct cache_set *c,
+ const struct bkey *k,
+ unsigned ptr)
+{
+ return c->cache[PTR_DEV(k, ptr)];
+}
+
+static inline size_t PTR_BUCKET_NR(struct cache_set *c,
+ const struct bkey *k,
+ unsigned ptr)
+{
+ return sector_to_bucket(c, PTR_OFFSET(k, ptr));
+}
+
+static inline struct bucket *PTR_BUCKET(struct cache_set *c,
+ const struct bkey *k,
+ unsigned ptr)
+{
+ return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
+}
+
+/* Btree key macros */
+
+/*
+ * The high bit being set is a relic from when we used it to do binary
+ * searches - it told you where a key started. It's not used anymore,
+ * and can probably be safely dropped.
+ */
+#define KEY(dev, sector, len) \
+((struct bkey) { \
+ .high = (1ULL << 63) | ((uint64_t) (len) << 20) | (dev), \
+ .low = (sector) \
+})
+
+static inline void bkey_init(struct bkey *k)
+{
+ *k = KEY(0, 0, 0);
+}
+
+#define KEY_START(k) (KEY_OFFSET(k) - KEY_SIZE(k))
+#define START_KEY(k) KEY(KEY_INODE(k), KEY_START(k), 0)
+#define MAX_KEY KEY(~(~0 << 20), ((uint64_t) ~0) >> 1, 0)
+#define ZERO_KEY KEY(0, 0, 0)
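+
+/*
+ * Purely illustrative sketch of how the key macros compose (the values here
+ * are made up for the example): KEY(1, 16, 8) builds a key for inode 1 ending
+ * at sector 16 and covering 8 sectors, so KEY_START() of it is 8 and
+ * START_KEY() is KEY(1, 8, 0). Attaching a pointer to generation 3 at sector
+ * offset 1024 on cache device 0 would look roughly like
+ *
+ *	BKEY_PADDED(key) tmp;		(padded so there's room for the pointer)
+ *
+ *	tmp.key = KEY(1, 16, 8);
+ *	tmp.key.ptr[0] = PTR(3, 1024, 0);
+ *	SET_KEY_PTRS(&tmp.key, 1);
+ *
+ * after which PTR_GEN(&tmp.key, 0) == 3, PTR_OFFSET(&tmp.key, 0) == 1024 and
+ * PTR_DEV(&tmp.key, 0) == 0.
+ */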
+
+/*
+ * This is used for various on disk data structures - cache_sb, prio_set, bset,
+ * jset: The checksum is _always_ the first 8 bytes of these structs
+ */
+#define csum_set(i) \
+ bch_crc64(((void *) (i)) + sizeof(uint64_t), \
+ ((void *) end(i)) - (((void *) (i)) + sizeof(uint64_t)))
+
+/* Error handling macros */
+
+#define btree_bug(b, ...) \
+do { \
+ if (bch_cache_set_error((b)->c, __VA_ARGS__)) \
+ dump_stack(); \
+} while (0)
+
+#define cache_bug(c, ...) \
+do { \
+ if (bch_cache_set_error(c, __VA_ARGS__)) \
+ dump_stack(); \
+} while (0)
+
+#define btree_bug_on(cond, b, ...) \
+do { \
+ if (cond) \
+ btree_bug(b, __VA_ARGS__); \
+} while (0)
+
+#define cache_bug_on(cond, c, ...) \
+do { \
+ if (cond) \
+ cache_bug(c, __VA_ARGS__); \
+} while (0)
+
+#define cache_set_err_on(cond, c, ...) \
+do { \
+ if (cond) \
+ bch_cache_set_error(c, __VA_ARGS__); \
+} while (0)
+
+/* Looping macros */
+
+#define for_each_cache(ca, cs, iter) \
+ for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)
+
+#define for_each_bucket(b, ca) \
+ for (b = (ca)->buckets + (ca)->sb.first_bucket; \
+ b < (ca)->buckets + (ca)->sb.nbuckets; b++)
+
+static inline void __bkey_put(struct cache_set *c, struct bkey *k)
+{
+ unsigned i;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
+}
+
+/* Blktrace macros */
+
+#define blktrace_msg(c, fmt, ...) \
+do { \
+ struct request_queue *q = bdev_get_queue(c->bdev); \
+ if (q) \
+ blk_add_trace_msg(q, fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define blktrace_msg_all(s, fmt, ...) \
+do { \
+ struct cache *_c; \
+ unsigned i; \
+ for_each_cache(_c, (s), i) \
+ blktrace_msg(_c, fmt, ##__VA_ARGS__); \
+} while (0)
+
+static inline void cached_dev_put(struct cached_dev *dc)
+{
+ if (atomic_dec_and_test(&dc->count))
+ schedule_work(&dc->detach);
+}
+
+static inline bool cached_dev_get(struct cached_dev *dc)
+{
+ if (!atomic_inc_not_zero(&dc->count))
+ return false;
+
+ /* Paired with the mb in cached_dev_attach */
+ smp_mb__after_atomic_inc();
+ return true;
+}
+
+/*
+ * bucket_gc_gen() returns the difference between the bucket's current gen and
+ * the oldest gen of any pointer into that bucket in the btree (last_gc).
+ *
+ * bucket_disk_gen() returns the difference between the current gen and the gen
+ * on disk; they're both used to make sure gens don't wrap around.
+ */
+
+static inline uint8_t bucket_gc_gen(struct bucket *b)
+{
+ return b->gen - b->last_gc;
+}
+
+static inline uint8_t bucket_disk_gen(struct bucket *b)
+{
+ return b->gen - b->disk_gen;
+}
+
+#define BUCKET_GC_GEN_MAX 96U
+#define BUCKET_DISK_GEN_MAX 64U
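+
+/*
+ * Example of the gen arithmetic (illustrative values): gens are 8 bit and
+ * allowed to wrap, so if b->gen has wrapped around to 5 while b->last_gc is
+ * still 250, bucket_gc_gen(b) is (uint8_t) (5 - 250) == 11. Once that
+ * difference approaches BUCKET_GC_GEN_MAX we have to gc so last_gc catches up,
+ * and likewise bucket_disk_gen() approaching BUCKET_DISK_GEN_MAX forces a
+ * prio_write().
+ */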
+
+#define kobj_attribute_write(n, fn) \
+ static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn)
+
+#define kobj_attribute_rw(n, show, store) \
+ static struct kobj_attribute ksysfs_##n = \
+ __ATTR(n, S_IWUSR|S_IRUSR, show, store)
+
+/* Forward declarations */
+
+void bch_writeback_queue(struct cached_dev *);
+void bch_writeback_add(struct cached_dev *, unsigned);
+
+void bch_count_io_errors(struct cache *, int, const char *);
+void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
+ int, const char *);
+void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
+void bch_bbio_free(struct bio *, struct cache_set *);
+struct bio *bch_bbio_alloc(struct cache_set *);
+
+struct bio *bch_bio_split(struct bio *, int, gfp_t, struct bio_set *);
+void bch_generic_make_request(struct bio *, struct bio_split_pool *);
+void __bch_submit_bbio(struct bio *, struct cache_set *);
+void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
+
+uint8_t bch_inc_gen(struct cache *, struct bucket *);
+void bch_rescale_priorities(struct cache_set *, int);
+bool bch_bucket_add_unused(struct cache *, struct bucket *);
+void bch_allocator_thread(struct closure *);
+
+long bch_bucket_alloc(struct cache *, unsigned, struct closure *);
+void bch_bucket_free(struct cache_set *, struct bkey *);
+
+int __bch_bucket_alloc_set(struct cache_set *, unsigned,
+ struct bkey *, int, struct closure *);
+int bch_bucket_alloc_set(struct cache_set *, unsigned,
+ struct bkey *, int, struct closure *);
+
+__printf(2, 3)
+bool bch_cache_set_error(struct cache_set *, const char *, ...);
+
+void bch_prio_write(struct cache *);
+void bch_write_bdev_super(struct cached_dev *, struct closure *);
+
+extern struct workqueue_struct *bcache_wq, *bch_gc_wq;
+extern const char * const bch_cache_modes[];
+extern struct mutex bch_register_lock;
+extern struct list_head bch_cache_sets;
+
+extern struct kobj_type bch_cached_dev_ktype;
+extern struct kobj_type bch_flash_dev_ktype;
+extern struct kobj_type bch_cache_set_ktype;
+extern struct kobj_type bch_cache_set_internal_ktype;
+extern struct kobj_type bch_cache_ktype;
+
+void bch_cached_dev_release(struct kobject *);
+void bch_flash_dev_release(struct kobject *);
+void bch_cache_set_release(struct kobject *);
+void bch_cache_release(struct kobject *);
+
+int bch_uuid_write(struct cache_set *);
+void bcache_write_super(struct cache_set *);
+
+int bch_flash_dev_create(struct cache_set *c, uint64_t size);
+
+int bch_cached_dev_attach(struct cached_dev *, struct cache_set *);
+void bch_cached_dev_detach(struct cached_dev *);
+void bch_cached_dev_run(struct cached_dev *);
+void bcache_device_stop(struct bcache_device *);
+
+void bch_cache_set_unregister(struct cache_set *);
+void bch_cache_set_stop(struct cache_set *);
+
+struct cache_set *bch_cache_set_alloc(struct cache_sb *);
+void bch_btree_cache_free(struct cache_set *);
+int bch_btree_cache_alloc(struct cache_set *);
+void bch_writeback_init_cached_dev(struct cached_dev *);
+void bch_moving_init_cache_set(struct cache_set *);
+
+void bch_cache_allocator_exit(struct cache *ca);
+int bch_cache_allocator_init(struct cache *ca);
+
+void bch_debug_exit(void);
+int bch_debug_init(struct kobject *);
+void bch_writeback_exit(void);
+int bch_writeback_init(void);
+void bch_request_exit(void);
+int bch_request_init(void);
+void bch_btree_exit(void);
+int bch_btree_init(void);
+
+#endif /* _BCACHE_H */
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
new file mode 100644
index 0000000..cb4578a
--- /dev/null
+++ b/drivers/md/bcache/bset.c
@@ -0,0 +1,1192 @@
+/*
+ * Code for working with individual keys, and sorted sets of keys with in a
+ * btree node
+ *
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+
+#include <linux/random.h>
+#include <linux/prefetch.h>
+
+/* Keylists */
+
+void bch_keylist_copy(struct keylist *dest, struct keylist *src)
+{
+ *dest = *src;
+
+ if (src->list == src->d) {
+ size_t n = (uint64_t *) src->top - src->d;
+ dest->top = (struct bkey *) &dest->d[n];
+ dest->list = dest->d;
+ }
+}
+
+int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
+{
+ unsigned oldsize = (uint64_t *) l->top - l->list;
+ unsigned newsize = oldsize + 2 + nptrs;
+ uint64_t *new;
+
+ /* The journalling code doesn't handle the case where the keys to insert
+	 * are bigger than an empty write: if we just return -ENOMEM here,
+ * bio_insert() and bio_invalidate() will insert the keys created so far
+ * and finish the rest when the keylist is empty.
+ */
+ if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
+ return -ENOMEM;
+
+ newsize = roundup_pow_of_two(newsize);
+
+ if (newsize <= KEYLIST_INLINE ||
+ roundup_pow_of_two(oldsize) == newsize)
+ return 0;
+
+ new = krealloc(l->list == l->d ? NULL : l->list,
+ sizeof(uint64_t) * newsize, GFP_NOIO);
+
+ if (!new)
+ return -ENOMEM;
+
+ if (l->list == l->d)
+ memcpy(new, l->list, sizeof(uint64_t) * KEYLIST_INLINE);
+
+ l->list = new;
+ l->top = (struct bkey *) (&l->list[oldsize]);
+
+ return 0;
+}
+
+struct bkey *bch_keylist_pop(struct keylist *l)
+{
+ struct bkey *k = l->bottom;
+
+ if (k == l->top)
+ return NULL;
+
+ while (bkey_next(k) != l->top)
+ k = bkey_next(k);
+
+ return l->top = k;
+}
+
+/* Pointer validation */
+
+bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k)
+{
+ unsigned i;
+
+ if (level && (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k)))
+ goto bad;
+
+ if (!level && KEY_SIZE(k) > KEY_OFFSET(k))
+ goto bad;
+
+ if (!KEY_SIZE(k))
+ return true;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(c, k, i)) {
+ struct cache *ca = PTR_CACHE(c, k, i);
+ size_t bucket = PTR_BUCKET_NR(c, k, i);
+ size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
+
+ if (KEY_SIZE(k) + r > c->sb.bucket_size ||
+ bucket < ca->sb.first_bucket ||
+ bucket >= ca->sb.nbuckets)
+ goto bad;
+ }
+
+ return false;
+bad:
+ cache_bug(c, "spotted bad key %s: %s", pkey(k), bch_ptr_status(c, k));
+ return true;
+}
+
+bool bch_ptr_bad(struct btree *b, const struct bkey *k)
+{
+ struct bucket *g;
+ unsigned i, stale;
+
+ if (!bkey_cmp(k, &ZERO_KEY) ||
+ !KEY_PTRS(k) ||
+ bch_ptr_invalid(b, k))
+ return true;
+
+ if (KEY_PTRS(k) && PTR_DEV(k, 0) == PTR_CHECK_DEV)
+ return true;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(b->c, k, i)) {
+ g = PTR_BUCKET(b->c, k, i);
+ stale = ptr_stale(b->c, k, i);
+
+ btree_bug_on(stale > 96, b,
+ "key too stale: %i, need_gc %u",
+ stale, b->c->need_gc);
+
+ btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
+ b, "stale dirty pointer");
+
+ if (stale)
+ return true;
+
+#ifdef CONFIG_BCACHE_EDEBUG
+ if (!mutex_trylock(&b->c->bucket_lock))
+ continue;
+
+ if (b->level) {
+ if (KEY_DIRTY(k) ||
+ g->prio != BTREE_PRIO ||
+ (b->c->gc_mark_valid &&
+ GC_MARK(g) != GC_MARK_METADATA))
+ goto bug;
+
+ } else {
+ if (g->prio == BTREE_PRIO)
+ goto bug;
+
+ if (KEY_DIRTY(k) &&
+ b->c->gc_mark_valid &&
+ GC_MARK(g) != GC_MARK_DIRTY)
+ goto bug;
+ }
+ mutex_unlock(&b->c->bucket_lock);
+#endif
+ }
+
+ return false;
+#ifdef CONFIG_BCACHE_EDEBUG
+bug:
+ mutex_unlock(&b->c->bucket_lock);
+ btree_bug(b,
+"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+ pkey(k), PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
+ g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
+ return true;
+#endif
+}
+
+/* Key/pointer manipulation */
+
+void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
+ unsigned i)
+{
+ BUG_ON(i > KEY_PTRS(src));
+
+ /* Only copy the header, key, and one pointer. */
+ memcpy(dest, src, 2 * sizeof(uint64_t));
+ dest->ptr[0] = src->ptr[i];
+ SET_KEY_PTRS(dest, 1);
+ /* We didn't copy the checksum so clear that bit. */
+ SET_KEY_CSUM(dest, 0);
+}
+
+bool __bch_cut_front(const struct bkey *where, struct bkey *k)
+{
+ unsigned i, len = 0;
+
+ if (bkey_cmp(where, &START_KEY(k)) <= 0)
+ return false;
+
+ if (bkey_cmp(where, k) < 0)
+ len = KEY_OFFSET(k) - KEY_OFFSET(where);
+ else
+ bkey_copy_key(k, where);
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len);
+
+ BUG_ON(len > KEY_SIZE(k));
+ SET_KEY_SIZE(k, len);
+ return true;
+}
+
+bool __bch_cut_back(const struct bkey *where, struct bkey *k)
+{
+ unsigned len = 0;
+
+ if (bkey_cmp(where, k) >= 0)
+ return false;
+
+ BUG_ON(KEY_INODE(where) != KEY_INODE(k));
+
+ if (bkey_cmp(where, &START_KEY(k)) > 0)
+ len = KEY_OFFSET(where) - KEY_START(k);
+
+ bkey_copy_key(k, where);
+
+ BUG_ON(len > KEY_SIZE(k));
+ SET_KEY_SIZE(k, len);
+ return true;
+}
+
+static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
+{
+ return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
+ ~((uint64_t)1 << 63);
+}
+
+/* Tries to merge l and r: l should be lower than r
+ * Returns true if we were able to merge. If we did merge, l will be the merged
+ * key, r will be untouched.
+ */
+bool bch_bkey_try_merge(struct btree *b, struct bkey *l, struct bkey *r)
+{
+ unsigned i;
+
+ if (key_merging_disabled(b->c))
+ return false;
+
+ if (KEY_PTRS(l) != KEY_PTRS(r) ||
+ KEY_DIRTY(l) != KEY_DIRTY(r) ||
+ bkey_cmp(l, &START_KEY(r)))
+ return false;
+
+ for (i = 0; i < KEY_PTRS(l); i++)
+ if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
+ PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
+ return false;
+
+ /* Keys with no pointers aren't restricted to one bucket and could
+ * overflow KEY_SIZE
+ */
+ if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
+ SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
+ SET_KEY_SIZE(l, USHRT_MAX);
+
+ bch_cut_front(l, r);
+ return false;
+ }
+
+ if (KEY_CSUM(l)) {
+ if (KEY_CSUM(r))
+ l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
+ else
+ SET_KEY_CSUM(l, 0);
+ }
+
+ SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
+ SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));
+
+ return true;
+}
+
+/* Binary tree stuff for auxiliary search trees */
+
+static unsigned inorder_next(unsigned j, unsigned size)
+{
+ if (j * 2 + 1 < size) {
+ j = j * 2 + 1;
+
+ while (j * 2 < size)
+ j *= 2;
+ } else
+ j >>= ffz(j) + 1;
+
+ return j;
+}
+
+static unsigned inorder_prev(unsigned j, unsigned size)
+{
+ if (j * 2 < size) {
+ j = j * 2;
+
+ while (j * 2 + 1 < size)
+ j = j * 2 + 1;
+ } else
+ j >>= ffs(j);
+
+ return j;
+}
+
+/* I have no idea why this code works... and I'm the one who wrote it
+ *
+ * However, I do know what it does:
+ * Given a binary tree constructed in an array (i.e. how you normally implement
+ * a heap), it converts a node in the tree - referenced by array index - to the
+ * index it would have if you did an inorder traversal.
+ *
+ * Also tested for every j and size, up to a size somewhere around 6 million.
+ *
+ * The binary tree starts at array index 1, not 0
+ * extra is a function of size:
+ * extra = (size - rounddown_pow_of_two(size - 1)) << 1;
+ */
+static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
+{
+ unsigned b = fls(j);
+ unsigned shift = fls(size - 1) - b;
+
+ j ^= 1U << (b - 1);
+ j <<= 1;
+ j |= 1;
+ j <<= shift;
+
+ if (j > extra)
+ j -= (j - extra) >> 1;
+
+ return j;
+}
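+
+/*
+ * Worked example (sizes chosen only to illustrate): for size == 7 there are 6
+ * nodes at array indices 1..6, extra == (7 - 4) << 1 == 6, and an inorder
+ * traversal of that tree visits 4, 2, 5, 1, 6, 3 - so __to_inorder() maps
+ * 1 -> 4, 2 -> 2, 3 -> 6, 4 -> 1, 5 -> 3 and 6 -> 5, with __inorder_to_tree()
+ * being the inverse mapping.
+ */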
+
+static unsigned to_inorder(unsigned j, struct bset_tree *t)
+{
+ return __to_inorder(j, t->size, t->extra);
+}
+
+static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
+{
+ unsigned shift;
+
+ if (j > extra)
+ j += j - extra;
+
+ shift = ffs(j);
+
+ j >>= shift;
+ j |= roundup_pow_of_two(size) >> shift;
+
+ return j;
+}
+
+static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
+{
+ return __inorder_to_tree(j, t->size, t->extra);
+}
+
+#if 0
+void inorder_test(void)
+{
+ unsigned long done = 0;
+ ktime_t start = ktime_get();
+
+ for (unsigned size = 2;
+ size < 65536000;
+ size++) {
+ unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1;
+ unsigned i = 1, j = rounddown_pow_of_two(size - 1);
+
+ if (!(size % 4096))
+ printk(KERN_NOTICE "loop %u, %llu per us\n", size,
+ done / ktime_us_delta(ktime_get(), start));
+
+ while (1) {
+ if (__inorder_to_tree(i, size, extra) != j)
+ panic("size %10u j %10u i %10u", size, j, i);
+
+ if (__to_inorder(j, size, extra) != i)
+ panic("size %10u j %10u i %10u", size, j, i);
+
+ if (j == rounddown_pow_of_two(size) - 1)
+ break;
+
+ BUG_ON(inorder_prev(inorder_next(j, size), size) != j);
+
+ j = inorder_next(j, size);
+ i++;
+ }
+
+ done += size - 1;
+ }
+}
+#endif
+
+/*
+ * Cacheline/offset <-> bkey pointer arithmetic:
+ *
+ * t->tree is a binary search tree in an array; each node corresponds to a key
+ * in one cacheline in t->set (BSET_CACHELINE bytes).
+ *
+ * This means we don't have to store the full index of the key that a node in
+ * the binary tree points to; to_inorder() gives us the cacheline, and then
+ * bkey_float->m gives us the offset within that cacheline, in units of 8 bytes.
+ *
+ * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
+ * make this work.
+ *
+ * To construct the bfloat for an arbitrary key we need to know what the key
+ * immediately preceding it is: we have to check if the two keys differ in the
+ * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
+ * of the previous key so we can walk backwards to it from t->tree[j]'s key.
+ */
+
+static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline,
+ unsigned offset)
+{
+ return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
+}
+
+static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
+{
+ return ((void *) k - (void *) t->data) / BSET_CACHELINE;
+}
+
+static unsigned bkey_to_cacheline_offset(struct bkey *k)
+{
+ return ((size_t) k & (BSET_CACHELINE - 1)) / sizeof(uint64_t);
+}
+
+static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
+{
+ return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
+}
+
+static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
+{
+ return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
+}
+
+/*
+ * For the write set - the one we're currently inserting keys into - we don't
+ * maintain a full search tree, we just keep a simple lookup table in t->prev.
+ */
+static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
+{
+ return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
+}
+
+static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
+{
+#ifdef CONFIG_X86_64
+ asm("shrd %[shift],%[high],%[low]"
+ : [low] "+Rm" (low)
+ : [high] "R" (high),
+ [shift] "ci" (shift)
+ : "cc");
+#else
+ low >>= shift;
+ low |= (high << 1) << (63U - shift);
+#endif
+ return low;
+}
+
+static inline unsigned bfloat_mantissa(const struct bkey *k,
+ struct bkey_float *f)
+{
+ const uint64_t *p = &k->low - (f->exponent >> 6);
+ return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
+}
+
+static void make_bfloat(struct bset_tree *t, unsigned j)
+{
+ struct bkey_float *f = &t->tree[j];
+ struct bkey *m = tree_to_bkey(t, j);
+ struct bkey *p = tree_to_prev_bkey(t, j);
+
+ struct bkey *l = is_power_of_2(j)
+ ? t->data->start
+ : tree_to_prev_bkey(t, j >> ffs(j));
+
+ struct bkey *r = is_power_of_2(j + 1)
+ ? node(t->data, t->data->keys - bkey_u64s(&t->end))
+ : tree_to_bkey(t, j >> (ffz(j) + 1));
+
+ BUG_ON(m < l || m > r);
+ BUG_ON(bkey_next(p) != m);
+
+ if (KEY_INODE(l) != KEY_INODE(r))
+ f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
+ else
+ f->exponent = fls64(r->low ^ l->low);
+
+ f->exponent = max_t(int, f->exponent - BKEY_MANTISSA_BITS, 0);
+
+ /*
+ * Setting f->exponent = 127 flags this node as failed, and causes the
+ * lookup code to fall back to comparing against the original key.
+ */
+
+ if (bfloat_mantissa(m, f) != bfloat_mantissa(p, f))
+ f->mantissa = bfloat_mantissa(m, f) - 1;
+ else
+ f->exponent = 127;
+}
+
+static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
+{
+ if (t != b->sets) {
+ unsigned j = roundup(t[-1].size,
+ 64 / sizeof(struct bkey_float));
+
+ t->tree = t[-1].tree + j;
+ t->prev = t[-1].prev + j;
+ }
+
+ while (t < b->sets + MAX_BSETS)
+ t++->size = 0;
+}
+
+static void bset_build_unwritten_tree(struct btree *b)
+{
+ struct bset_tree *t = b->sets + b->nsets;
+
+ bset_alloc_tree(b, t);
+
+ if (t->tree != b->sets->tree + bset_tree_space(b)) {
+ t->prev[0] = bkey_to_cacheline_offset(t->data->start);
+ t->size = 1;
+ }
+}
+
+static void bset_build_written_tree(struct btree *b)
+{
+ struct bset_tree *t = b->sets + b->nsets;
+ struct bkey *k = t->data->start;
+ unsigned j, cacheline = 1;
+
+ bset_alloc_tree(b, t);
+
+ t->size = min_t(unsigned,
+ bkey_to_cacheline(t, end(t->data)),
+ b->sets->tree + bset_tree_space(b) - t->tree);
+
+ if (t->size < 2) {
+ t->size = 0;
+ return;
+ }
+
+ t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;
+
+ /* First we figure out where the first key in each cacheline is */
+ for (j = inorder_next(0, t->size);
+ j;
+ j = inorder_next(j, t->size)) {
+ while (bkey_to_cacheline(t, k) != cacheline)
+ k = bkey_next(k);
+
+ t->prev[j] = bkey_u64s(k);
+ k = bkey_next(k);
+ cacheline++;
+ t->tree[j].m = bkey_to_cacheline_offset(k);
+ }
+
+ while (bkey_next(k) != end(t->data))
+ k = bkey_next(k);
+
+ t->end = *k;
+
+ /* Then we build the tree */
+ for (j = inorder_next(0, t->size);
+ j;
+ j = inorder_next(j, t->size))
+ make_bfloat(t, j);
+}
+
+void bch_bset_fix_invalidated_key(struct btree *b, struct bkey *k)
+{
+ struct bset_tree *t;
+ unsigned inorder, j = 1;
+
+ for (t = b->sets; t <= &b->sets[b->nsets]; t++)
+ if (k < end(t->data))
+ goto found_set;
+
+ BUG();
+found_set:
+ if (!t->size || !bset_written(b, t))
+ return;
+
+ inorder = bkey_to_cacheline(t, k);
+
+ if (k == t->data->start)
+ goto fix_left;
+
+ if (bkey_next(k) == end(t->data)) {
+ t->end = *k;
+ goto fix_right;
+ }
+
+ j = inorder_to_tree(inorder, t);
+
+ if (j &&
+ j < t->size &&
+ k == tree_to_bkey(t, j))
+fix_left: do {
+ make_bfloat(t, j);
+ j = j * 2;
+ } while (j < t->size);
+
+ j = inorder_to_tree(inorder + 1, t);
+
+ if (j &&
+ j < t->size &&
+ k == tree_to_prev_bkey(t, j))
+fix_right: do {
+ make_bfloat(t, j);
+ j = j * 2 + 1;
+ } while (j < t->size);
+}
+
+void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
+{
+ struct bset_tree *t = &b->sets[b->nsets];
+ unsigned shift = bkey_u64s(k);
+ unsigned j = bkey_to_cacheline(t, k);
+
+ /* We're getting called from btree_split() or btree_gc, just bail out */
+ if (!t->size)
+ return;
+
+ /* k is the key we just inserted; we need to find the entry in the
+ * lookup table for the first key that is strictly greater than k:
+ * it's either k's cacheline or the next one
+ */
+ if (j < t->size &&
+ table_to_bkey(t, j) <= k)
+ j++;
+
+ /* Adjust all the lookup table entries, and find a new key for any that
+ * have gotten too big
+ */
+ for (; j < t->size; j++) {
+ t->prev[j] += shift;
+
+ if (t->prev[j] > 7) {
+ k = table_to_bkey(t, j - 1);
+
+ while (k < cacheline_to_bkey(t, j, 0))
+ k = bkey_next(k);
+
+ t->prev[j] = bkey_to_cacheline_offset(k);
+ }
+ }
+
+ if (t->size == b->sets->tree + bset_tree_space(b) - t->tree)
+ return;
+
+ /* Possibly add a new entry to the end of the lookup table */
+
+ for (k = table_to_bkey(t, t->size - 1);
+ k != end(t->data);
+ k = bkey_next(k))
+ if (t->size == bkey_to_cacheline(t, k)) {
+ t->prev[t->size] = bkey_to_cacheline_offset(k);
+ t->size++;
+ }
+}
+
+void bch_bset_init_next(struct btree *b)
+{
+ struct bset *i = write_block(b);
+
+ if (i != b->sets[0].data) {
+ b->sets[++b->nsets].data = i;
+ i->seq = b->sets[0].data->seq;
+ } else
+ get_random_bytes(&i->seq, sizeof(uint64_t));
+
+ i->magic = bset_magic(b->c);
+ i->version = 0;
+ i->keys = 0;
+
+ bset_build_unwritten_tree(b);
+}
+
+struct bset_search_iter {
+ struct bkey *l, *r;
+};
+
+static struct bset_search_iter bset_search_write_set(struct btree *b,
+ struct bset_tree *t,
+ const struct bkey *search)
+{
+ unsigned li = 0, ri = t->size;
+
+ BUG_ON(!b->nsets &&
+ t->size < bkey_to_cacheline(t, end(t->data)));
+
+ while (li + 1 != ri) {
+ unsigned m = (li + ri) >> 1;
+
+ if (bkey_cmp(table_to_bkey(t, m), search) > 0)
+ ri = m;
+ else
+ li = m;
+ }
+
+ return (struct bset_search_iter) {
+ table_to_bkey(t, li),
+ ri < t->size ? table_to_bkey(t, ri) : end(t->data)
+ };
+}
+
+static struct bset_search_iter bset_search_tree(struct btree *b,
+ struct bset_tree *t,
+ const struct bkey *search)
+{
+ struct bkey *l, *r;
+ struct bkey_float *f;
+ unsigned inorder, j, n = 1;
+
+ do {
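+		/*
+		 * Prefetch the cacheline holding the descendants four levels
+		 * below this node (indices n << 4 and up); the mask below
+		 * clamps p to 0 when that index would run past the end of
+		 * t->tree, so the prefetch always stays in bounds.
+		 */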
+ unsigned p = n << 4;
+ p &= ((int) (p - t->size)) >> 31;
+
+ prefetch(&t->tree[p]);
+
+ j = n;
+ f = &t->tree[j];
+
+ /*
+ * n = (f->mantissa > bfloat_mantissa())
+ * ? j * 2
+ * : j * 2 + 1;
+ *
+ * We need to subtract 1 from f->mantissa for the sign bit trick
+ * to work - that's done in make_bfloat()
+ */
+ if (likely(f->exponent != 127))
+ n = j * 2 + (((unsigned)
+ (f->mantissa -
+ bfloat_mantissa(search, f))) >> 31);
+ else
+ n = (bkey_cmp(tree_to_bkey(t, j), search) > 0)
+ ? j * 2
+ : j * 2 + 1;
+ } while (n < t->size);
+
+ inorder = to_inorder(j, t);
+
+ /*
+ * n would have been the node we recursed to - the low bit tells us if
+ * we recursed left or recursed right.
+ */
+ if (n & 1) {
+ l = cacheline_to_bkey(t, inorder, f->m);
+
+ if (++inorder != t->size) {
+ f = &t->tree[inorder_next(j, t->size)];
+ r = cacheline_to_bkey(t, inorder, f->m);
+ } else
+ r = end(t->data);
+ } else {
+ r = cacheline_to_bkey(t, inorder, f->m);
+
+ if (--inorder) {
+ f = &t->tree[inorder_prev(j, t->size)];
+ l = cacheline_to_bkey(t, inorder, f->m);
+ } else
+ l = t->data->start;
+ }
+
+ return (struct bset_search_iter) {l, r};
+}
+
+struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
+ const struct bkey *search)
+{
+ struct bset_search_iter i;
+
+ /*
+	 * First we search for a cacheline, then we do a linear search within
+	 * that cacheline.
+	 *
+	 * To search for the cacheline, there are three different possibilities:
+ * * The set is too small to have a search tree, so we just do a linear
+ * search over the whole set.
+ * * The set is the one we're currently inserting into; keeping a full
+ * auxiliary search tree up to date would be too expensive, so we
+ * use a much simpler lookup table to do a binary search -
+ * bset_search_write_set().
+ * * Or we use the auxiliary search tree we constructed earlier -
+ * bset_search_tree()
+ */
+
+ if (unlikely(!t->size)) {
+ i.l = t->data->start;
+ i.r = end(t->data);
+ } else if (bset_written(b, t)) {
+ /*
+ * Each node in the auxiliary search tree covers a certain range
+ * of bits, and keys above and below the set it covers might
+ * differ outside those bits - so we have to special case the
+ * start and end - handle that here:
+ */
+
+ if (unlikely(bkey_cmp(search, &t->end) >= 0))
+ return end(t->data);
+
+ if (unlikely(bkey_cmp(search, t->data->start) < 0))
+ return t->data->start;
+
+ i = bset_search_tree(b, t, search);
+ } else
+ i = bset_search_write_set(b, t, search);
+
+#ifdef CONFIG_BCACHE_EDEBUG
+ BUG_ON(bset_written(b, t) &&
+ i.l != t->data->start &&
+ bkey_cmp(tree_to_prev_bkey(t,
+ inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
+ search) > 0);
+
+ BUG_ON(i.r != end(t->data) &&
+ bkey_cmp(i.r, search) <= 0);
+#endif
+
+ while (likely(i.l != i.r) &&
+ bkey_cmp(i.l, search) <= 0)
+ i.l = bkey_next(i.l);
+
+ return i.l;
+}
+
+/* Btree iterator */
+
+static inline bool btree_iter_cmp(struct btree_iter_set l,
+ struct btree_iter_set r)
+{
+ int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));
+
+ return c ? c > 0 : l.k < r.k;
+}
+
+static inline bool btree_iter_end(struct btree_iter *iter)
+{
+ return !iter->used;
+}
+
+void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
+ struct bkey *end)
+{
+ if (k != end)
+ BUG_ON(!heap_add(iter,
+ ((struct btree_iter_set) { k, end }),
+ btree_iter_cmp));
+}
+
+struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
+ struct bkey *search, struct bset_tree *start)
+{
+ struct bkey *ret = NULL;
+ iter->size = ARRAY_SIZE(iter->data);
+ iter->used = 0;
+
+ for (; start <= &b->sets[b->nsets]; start++) {
+ ret = bch_bset_search(b, start, search);
+ bch_btree_iter_push(iter, ret, end(start->data));
+ }
+
+ return ret;
+}
+
+struct bkey *bch_btree_iter_next(struct btree_iter *iter)
+{
+ struct btree_iter_set unused;
+ struct bkey *ret = NULL;
+
+ if (!btree_iter_end(iter)) {
+ ret = iter->data->k;
+ iter->data->k = bkey_next(iter->data->k);
+
+ if (iter->data->k > iter->data->end) {
+ WARN_ONCE(1, "bset was corrupt!\n");
+ iter->data->k = iter->data->end;
+ }
+
+ if (iter->data->k == iter->data->end)
+ heap_pop(iter, unused, btree_iter_cmp);
+ else
+ heap_sift(iter, 0, btree_iter_cmp);
+ }
+
+ return ret;
+}
+
+struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
+ struct btree *b, ptr_filter_fn fn)
+{
+ struct bkey *ret;
+
+ do {
+ ret = bch_btree_iter_next(iter);
+ } while (ret && fn(b, ret));
+
+ return ret;
+}
+
+struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search)
+{
+ struct btree_iter iter;
+
+ bch_btree_iter_init(b, &iter, search);
+ return bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
+}
+
+/* Mergesort */
+
+static void btree_sort_fixup(struct btree_iter *iter)
+{
+ while (iter->used > 1) {
+ struct btree_iter_set *top = iter->data, *i = top + 1;
+ struct bkey *k;
+
+ if (iter->used > 2 &&
+ btree_iter_cmp(i[0], i[1]))
+ i++;
+
+ for (k = i->k;
+ k != i->end && bkey_cmp(top->k, &START_KEY(k)) > 0;
+ k = bkey_next(k))
+ if (top->k > i->k)
+ __bch_cut_front(top->k, k);
+ else if (KEY_SIZE(k))
+ bch_cut_back(&START_KEY(k), top->k);
+
+ if (top->k < i->k || k == i->k)
+ break;
+
+ heap_sift(iter, i - top, btree_iter_cmp);
+ }
+}
+
+static void btree_mergesort(struct btree *b, struct bset *out,
+ struct btree_iter *iter,
+ bool fixup, bool remove_stale)
+{
+ struct bkey *k, *last = NULL;
+ bool (*bad)(struct btree *, const struct bkey *) = remove_stale
+ ? bch_ptr_bad
+ : bch_ptr_invalid;
+
+ while (!btree_iter_end(iter)) {
+ if (fixup && !b->level)
+ btree_sort_fixup(iter);
+
+ k = bch_btree_iter_next(iter);
+ if (bad(b, k))
+ continue;
+
+ if (!last) {
+ last = out->start;
+ bkey_copy(last, k);
+ } else if (b->level ||
+ !bch_bkey_try_merge(b, last, k)) {
+ last = bkey_next(last);
+ bkey_copy(last, k);
+ }
+ }
+
+ out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;
+
+ pr_debug("sorted %i keys", out->keys);
+ bch_check_key_order(b, out);
+}
+
+static void __btree_sort(struct btree *b, struct btree_iter *iter,
+ unsigned start, unsigned order, bool fixup)
+{
+ uint64_t start_time;
+ bool remove_stale = !b->written;
+ struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
+ order);
+ if (!out) {
+ mutex_lock(&b->c->sort_lock);
+ out = b->c->sort;
+ order = ilog2(bucket_pages(b->c));
+ }
+
+ start_time = local_clock();
+
+ btree_mergesort(b, out, iter, fixup, remove_stale);
+ b->nsets = start;
+
+ if (!fixup && !start && b->written)
+ bch_btree_verify(b, out);
+
+ if (!start && order == b->page_order) {
+ /*
+ * Our temporary buffer is the same size as the btree node's
+		 * buffer, so we can just swap buffers instead of doing a big
+ * memcpy()
+ */
+
+ out->magic = bset_magic(b->c);
+ out->seq = b->sets[0].data->seq;
+ out->version = b->sets[0].data->version;
+ swap(out, b->sets[0].data);
+
+ if (b->c->sort == b->sets[0].data)
+ b->c->sort = out;
+ } else {
+ b->sets[start].data->keys = out->keys;
+ memcpy(b->sets[start].data->start, out->start,
+ (void *) end(out) - (void *) out->start);
+ }
+
+ if (out == b->c->sort)
+ mutex_unlock(&b->c->sort_lock);
+ else
+ free_pages((unsigned long) out, order);
+
+ if (b->written)
+ bset_build_written_tree(b);
+
+ if (!start) {
+ spin_lock(&b->c->sort_time_lock);
+ bch_time_stats_update(&b->c->sort_time, start_time);
+ spin_unlock(&b->c->sort_time_lock);
+ }
+}
+
+void bch_btree_sort_partial(struct btree *b, unsigned start)
+{
+ size_t oldsize = 0, order = b->page_order, keys = 0;
+ struct btree_iter iter;
+ __bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);
+
+ BUG_ON(b->sets[b->nsets].data == write_block(b) &&
+ (b->sets[b->nsets].size || b->nsets));
+
+ if (b->written)
+ oldsize = bch_count_data(b);
+
+ if (start) {
+ unsigned i;
+
+ for (i = start; i <= b->nsets; i++)
+ keys += b->sets[i].data->keys;
+
+ order = roundup_pow_of_two(__set_bytes(b->sets->data,
+ keys)) / PAGE_SIZE;
+ if (order)
+ order = ilog2(order);
+ }
+
+ __btree_sort(b, &iter, start, order, false);
+
+ EBUG_ON(b->written && bch_count_data(b) != oldsize);
+}
+
+void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter)
+{
+ BUG_ON(!b->written);
+ __btree_sort(b, iter, 0, b->page_order, true);
+}
+
+void bch_btree_sort_into(struct btree *b, struct btree *new)
+{
+ uint64_t start_time = local_clock();
+
+ struct btree_iter iter;
+ bch_btree_iter_init(b, &iter, NULL);
+
+ btree_mergesort(b, new->sets->data, &iter, false, true);
+
+ spin_lock(&b->c->sort_time_lock);
+ bch_time_stats_update(&b->c->sort_time, start_time);
+ spin_unlock(&b->c->sort_time_lock);
+
+ bkey_copy_key(&new->key, &b->key);
+ new->sets->size = 0;
+}
+
+void bch_btree_sort_lazy(struct btree *b)
+{
+ if (b->nsets) {
+ unsigned i, j, keys = 0, total;
+
+ for (i = 0; i <= b->nsets; i++)
+ keys += b->sets[i].data->keys;
+
+ total = keys;
+
+ for (j = 0; j < b->nsets; j++) {
+ if (keys * 2 < total ||
+ keys < 1000) {
+ bch_btree_sort_partial(b, j);
+ return;
+ }
+
+ keys -= b->sets[j].data->keys;
+ }
+
+ /* Must sort if b->nsets == 3 or we'll overflow */
+ if (b->nsets >= (MAX_BSETS - 1) - b->level) {
+ bch_btree_sort(b);
+ return;
+ }
+ }
+
+ bset_build_written_tree(b);
+}
+
+/* Sysfs stuff */
+
+struct bset_stats {
+ size_t nodes;
+ size_t sets_written, sets_unwritten;
+ size_t bytes_written, bytes_unwritten;
+ size_t floats, failed;
+};
+
+static int bch_btree_bset_stats(struct btree *b, struct btree_op *op,
+ struct bset_stats *stats)
+{
+ struct bkey *k;
+ unsigned i;
+
+ stats->nodes++;
+
+ for (i = 0; i <= b->nsets; i++) {
+ struct bset_tree *t = &b->sets[i];
+ size_t bytes = t->data->keys * sizeof(uint64_t);
+ size_t j;
+
+ if (bset_written(b, t)) {
+ stats->sets_written++;
+ stats->bytes_written += bytes;
+
+ stats->floats += t->size - 1;
+
+ for (j = 1; j < t->size; j++)
+ if (t->tree[j].exponent == 127)
+ stats->failed++;
+ } else {
+ stats->sets_unwritten++;
+ stats->bytes_unwritten += bytes;
+ }
+ }
+
+ if (b->level) {
+ struct btree_iter iter;
+
+ for_each_key_filter(b, k, &iter, bch_ptr_bad) {
+ int ret = btree(bset_stats, k, b, op, stats);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int bch_bset_print_stats(struct cache_set *c, char *buf)
+{
+ struct btree_op op;
+ struct bset_stats t;
+ int ret;
+
+ bch_btree_op_init_stack(&op);
+ memset(&t, 0, sizeof(struct bset_stats));
+
+ ret = btree_root(bset_stats, c, &op, &t);
+ if (ret)
+ return ret;
+
+ return snprintf(buf, PAGE_SIZE,
+ "btree nodes: %zu\n"
+ "written sets: %zu\n"
+ "unwritten sets: %zu\n"
+ "written key bytes: %zu\n"
+ "unwritten key bytes: %zu\n"
+ "floats: %zu\n"
+ "failed: %zu\n",
+ t.nodes,
+ t.sets_written, t.sets_unwritten,
+ t.bytes_written, t.bytes_unwritten,
+ t.floats, t.failed);
+}
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
new file mode 100644
index 0000000..57a9cff
--- /dev/null
+++ b/drivers/md/bcache/bset.h
@@ -0,0 +1,379 @@
+#ifndef _BCACHE_BSET_H
+#define _BCACHE_BSET_H
+
+/*
+ * BKEYS:
+ *
+ * A bkey contains a key, a size field, a variable number of pointers, and some
+ * ancillary flag bits.
+ *
+ * We use two different functions for validating bkeys, bch_ptr_invalid() and
+ * bch_ptr_bad().
+ *
+ * bch_ptr_invalid() primarily filters out keys and pointers that would be
+ * invalid due to some sort of bug, whereas bch_ptr_bad() filters out keys and
+ * pointers that occur in normal practice but don't point to real data.
+ *
+ * The one exception to the rule that ptr_invalid() filters out invalid keys is
+ * that it also filters out keys of size 0 - these are keys that have been
+ * completely overwritten. It'd be safe to delete these in memory while leaving
+ * them on disk, just unnecessary work - so we filter them out when resorting
+ * instead.
+ *
+ * We can't filter out stale keys when we're resorting, because garbage
+ * collection needs to find them to ensure bucket gens don't wrap around -
+ * unless we're rewriting the btree node those stale keys still exist on disk.
+ *
+ * We also implement functions here for removing some number of sectors from the
+ * front or the back of a bkey - this is mainly used for fixing overlapping
+ * extents, by removing the overlapping sectors from the older key.
+ *
+ * BSETS:
+ *
+ * A bset is an array of bkeys laid out contiguously in memory in sorted order,
+ * along with a header. A btree node is made up of a number of these, written at
+ * different times.
+ *
+ * There could be many of them on disk, but we never allow there to be more than
+ * 4 in memory - we lazily resort as needed.
+ *
+ * We implement code here for creating and maintaining auxiliary search trees
+ * (described below) for searching an individual bset, and on top of that we
+ * implement a btree iterator.
+ *
+ * BTREE ITERATOR:
+ *
+ * Most of the code in bcache doesn't care about an individual bset - it needs
+ * to search entire btree nodes and iterate over them in sorted order.
+ *
+ * The btree iterator code serves both functions; it iterates through the keys
+ * in a btree node in sorted order, starting from either keys after a specific
+ * point (if you pass it a search key) or the start of the btree node.
+ *
+ * AUXILIARY SEARCH TREES:
+ *
+ * Since keys are variable length, we can't use a binary search on a bset - we
+ * wouldn't be able to find the start of the next key. But binary searches are
+ * slow anyway, due to terrible cache behaviour; bcache originally used binary
+ * searches and that code topped out at under 50k lookups/second.
+ *
+ * So we need to construct some sort of lookup table. Since we only insert keys
+ * into the last (unwritten) set, most of the keys within a given btree node are
+ * usually in sets that are mostly constant. We use two different types of
+ * lookup tables to take advantage of this.
+ *
+ * Both lookup tables share in common that they don't index every key in the
+ * set; they index one key every BSET_CACHELINE bytes, and then a linear search
+ * is used for the rest.
+ *
+ * For sets that have been written to disk and are no longer being inserted
+ * into, we construct a binary search tree in an array - traversing a binary
+ * search tree in an array gives excellent locality of reference and is very
+ * fast, since both children of any node are adjacent to each other in memory
+ * (and their grandchildren, and great grandchildren...) - this means
+ * prefetching can be used to great effect.
+ *
+ * It's quite useful performance wise to keep these nodes small - not just
+ * because they're more likely to be in L2, but also because we can prefetch
+ * more nodes on a single cacheline and thus prefetch more iterations in advance
+ * when traversing this tree.
+ *
+ * Nodes in the auxiliary search tree must contain both a key to compare against
+ * (we don't want to fetch the key from the set, that would defeat the purpose),
+ * and a pointer to the key. We use a few tricks to compress both of these.
+ *
+ * To compress the pointer, we take advantage of the fact that one node in the
+ * search tree corresponds to precisely BSET_CACHELINE bytes in the set. We have
+ * a function (to_inorder()) that takes the index of a node in a binary tree and
+ * returns what its index would be in an inorder traversal, so we only have to
+ * store the low bits of the offset.
+ *
+ * The key is 84 bits (KEY_INODE plus the key's offset on the device). To
+ * compress that, we take advantage of the fact that when we're traversing the
+ * search tree at every iteration we know that both our search key and the key
+ * we're looking for lie within some range - bounded by our previous
+ * comparisons. (We special case the start of a search so that this is true even
+ * at the root of the tree).
+ *
+ * So if we know the key we're looking for is between a and b, and a and b
+ * don't differ above bit 50, we don't need to check anything higher than
+ * bit 50.
+ *
+ * We don't usually need the rest of the bits, either; we only need enough bits
+ * to partition the key range we're currently checking. Consider key n - the
+ * key our auxiliary search tree node corresponds to, and key p, the key
+ * immediately preceding n. The lowest bit we need to store in the auxiliary
+ * search tree is the highest bit that differs between n and p.
+ *
+ * Note that this could be bit 0 - we might sometimes need all 84 bits to do the
+ * comparison. But we'd really like our nodes in the auxiliary search tree to be
+ * of fixed size.
+ *
+ * The solution is to make them fixed size, and when we're constructing a node
+ * check if p and n differed in the bits we needed them to. If they don't we
+ * flag that node, and when doing lookups we fallback to comparing against the
+ * real key. As long as this doesn't happen too often (and it seems to reliably
+ * happen a bit less than 1% of the time), we win - even on failures, that key
+ * is then more likely to be in cache than if we were doing binary searches all
+ * the way, since we're touching so much less memory.
+ *
+ * The keys in the auxiliary search tree are stored in (software) floating
+ * point, with an exponent and a mantissa. The exponent needs to be big enough
+ * to address all the bits in the original key, but the number of bits in the
+ * mantissa is somewhat arbitrary; more bits just gets us fewer failures.
+ *
+ * We need 7 bits for the exponent and 3 bits for the key's offset (since keys
+ * are 8 byte aligned); using 22 bits for the mantissa means a node is 4 bytes.
+ * We need one node per 128 bytes in the btree node, which means the auxiliary
+ * search trees take up 3% as much memory as the btree itself.
+ *
+ * Constructing these auxiliary search trees is moderately expensive, and we
+ * don't want to be constantly rebuilding the search tree for the last set
+ * whenever we insert another key into it. For the unwritten set, we use a much
+ * simpler lookup table - it's just a flat array, so index i in the lookup table
+ * corresponds to the i'th range of BSET_CACHELINE bytes in the set. Indexing
+ * within each byte range works the same as with the auxiliary search trees.
+ *
+ * These are much easier to keep up to date when we insert a key - we do it
+ * somewhat lazily; when we shift a key up we usually just increment the pointer
+ * to it, only when it would overflow do we go to the trouble of finding the
+ * first key in that range of bytes again.
+ */
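+
+/*
+ * A small worked example of the compression described above (the bit position
+ * is made up for illustration): if the two keys bounding a node's search range
+ * agree on the inode and first differ at bit 39 of the offset, the node stores
+ * exponent 40 - BKEY_MANTISSA_BITS == 18 and a mantissa holding bits 18..39 of
+ * its key - enough to decide whether to recurse left or right without touching
+ * the bset itself, falling back to a full comparison only for the flagged
+ * nodes where the preceding key didn't differ in those bits.
+ */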
+
+/* Btree key comparison/iteration */
+
+struct btree_iter {
+ size_t size, used;
+ struct btree_iter_set {
+ struct bkey *k, *end;
+ } data[MAX_BSETS];
+};
+
+struct bset_tree {
+ /*
+ * We construct a binary tree in an array as if the array
+ * started at 1, so that things line up on the same cachelines
+ * better: see comments in bset.c at cacheline_to_bkey() for
+ * details
+ */
+
+ /* size of the binary tree and prev array */
+ unsigned size;
+
+ /* function of size - precalculated for to_inorder() */
+ unsigned extra;
+
+ /* copy of the last key in the set */
+ struct bkey end;
+ struct bkey_float *tree;
+
+ /*
+ * The nodes in the bset tree point to specific keys - this
+ * array holds the sizes of the previous key.
+ *
+ * Conceptually it's a member of struct bkey_float, but we want
+ * to keep bkey_float to 4 bytes and prev isn't used in the fast
+ * path.
+ */
+ uint8_t *prev;
+
+ /* The actual btree node, with pointers to each sorted set */
+ struct bset *data;
+};
+
+static __always_inline int64_t bkey_cmp(const struct bkey *l,
+ const struct bkey *r)
+{
+ return unlikely(KEY_INODE(l) != KEY_INODE(r))
+ ? (int64_t) KEY_INODE(l) - (int64_t) KEY_INODE(r)
+ : (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
+}
+
+static inline size_t bkey_u64s(const struct bkey *k)
+{
+ BUG_ON(KEY_CSUM(k) > 1);
+ return 2 + KEY_PTRS(k) + (KEY_CSUM(k) ? 1 : 0);
+}
+
+static inline size_t bkey_bytes(const struct bkey *k)
+{
+ return bkey_u64s(k) * sizeof(uint64_t);
+}
+
+static inline void bkey_copy(struct bkey *dest, const struct bkey *src)
+{
+ memcpy(dest, src, bkey_bytes(src));
+}
+
+static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
+{
+ if (!src)
+ src = &KEY(0, 0, 0);
+
+ SET_KEY_INODE(dest, KEY_INODE(src));
+ SET_KEY_OFFSET(dest, KEY_OFFSET(src));
+}
+
+static inline struct bkey *bkey_next(const struct bkey *k)
+{
+ uint64_t *d = (void *) k;
+ return (struct bkey *) (d + bkey_u64s(k));
+}
+
+/* Keylists */
+
+struct keylist {
+ struct bkey *top;
+ union {
+ uint64_t *list;
+ struct bkey *bottom;
+ };
+
+ /* Enough room for btree_split's keys without realloc */
+#define KEYLIST_INLINE 16
+ uint64_t d[KEYLIST_INLINE];
+};
+
+static inline void bch_keylist_init(struct keylist *l)
+{
+ l->top = (void *) (l->list = l->d);
+}
+
+static inline void bch_keylist_push(struct keylist *l)
+{
+ l->top = bkey_next(l->top);
+}
+
+static inline void bch_keylist_add(struct keylist *l, struct bkey *k)
+{
+ bkey_copy(l->top, k);
+ bch_keylist_push(l);
+}
+
+static inline bool bch_keylist_empty(struct keylist *l)
+{
+ return l->top == (void *) l->list;
+}
+
+static inline void bch_keylist_free(struct keylist *l)
+{
+ if (l->list != l->d)
+ kfree(l->list);
+}
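+
+/*
+ * Rough sketch of typical keylist usage (the surrounding names here are
+ * hypothetical, only the keylist calls are real):
+ *
+ *	struct keylist keys;
+ *	struct bkey *k;
+ *
+ *	bch_keylist_init(&keys);
+ *	bch_keylist_add(&keys, &some_key);	(copies the key onto the list)
+ *
+ *	while ((k = bch_keylist_pop(&keys)))
+ *		use(k);				(keys come back newest first)
+ *
+ *	bch_keylist_free(&keys);	(frees the list if it was realloc'd)
+ */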
+
+void bch_keylist_copy(struct keylist *, struct keylist *);
+struct bkey *bch_keylist_pop(struct keylist *);
+int bch_keylist_realloc(struct keylist *, int, struct cache_set *);
+
+void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
+ unsigned);
+bool __bch_cut_front(const struct bkey *, struct bkey *);
+bool __bch_cut_back(const struct bkey *, struct bkey *);
+
+static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
+{
+ BUG_ON(bkey_cmp(where, k) > 0);
+ return __bch_cut_front(where, k);
+}
+
+static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
+{
+ BUG_ON(bkey_cmp(where, &START_KEY(k)) < 0);
+ return __bch_cut_back(where, k);
+}
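+
+/*
+ * Worked example of cutting (values purely illustrative): take a key built as
+ * KEY(1, 16, 8), i.e. an extent for inode 1 covering sectors 8..15. Cutting
+ * the front at KEY(1, 12, 0) leaves sectors 12..15: KEY_OFFSET() stays 16,
+ * KEY_SIZE() drops to 4 and each PTR_OFFSET() advances by the 4 sectors that
+ * were dropped. Cutting the back of the original key at KEY(1, 12, 0) instead
+ * keeps sectors 8..11, moving KEY_OFFSET() to 12 with the pointers untouched.
+ */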
+
+const char *bch_ptr_status(struct cache_set *, const struct bkey *);
+bool __bch_ptr_invalid(struct cache_set *, int level, const struct bkey *);
+bool bch_ptr_bad(struct btree *, const struct bkey *);
+
+static inline uint8_t gen_after(uint8_t a, uint8_t b)
+{
+ uint8_t r = a - b;
+ return r > 128U ? 0 : r;
+}
+
+static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
+ unsigned i)
+{
+ return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
+}
+
+static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
+ unsigned i)
+{
+ return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
+}
+
+
+typedef bool (*ptr_filter_fn)(struct btree *, const struct bkey *);
+
+struct bkey *bch_next_recurse_key(struct btree *, struct bkey *);
+struct bkey *bch_btree_iter_next(struct btree_iter *);
+struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
+ struct btree *, ptr_filter_fn);
+
+void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
+struct bkey *__bch_btree_iter_init(struct btree *, struct btree_iter *,
+ struct bkey *, struct bset_tree *);
+
+/* 32 bits total: */
+#define BKEY_MID_BITS 3
+#define BKEY_EXPONENT_BITS 7
+#define BKEY_MANTISSA_BITS 22
+#define BKEY_MANTISSA_MASK ((1 << BKEY_MANTISSA_BITS) - 1)
+
+struct bkey_float {
+ unsigned exponent:BKEY_EXPONENT_BITS;
+ unsigned m:BKEY_MID_BITS;
+ unsigned mantissa:BKEY_MANTISSA_BITS;
+} __packed;
+
+/*
+ * BSET_CACHELINE was originally intended to match the hardware cacheline size -
+ * it used to be 64, but I realized the lookup code would touch slightly less
+ * memory if it was 128.
+ *
+ * It defines the number of bytes (in struct bset) per struct bkey_float in
+ * the auxiliary search tree - when we're done searching the bkey_float tree we
+ * have this many bytes left that we do a linear search over.
+ *
+ * Since (after level 5) every level of the bset_tree is on a new cacheline,
+ * we're touching one fewer cacheline in the bset tree in exchange for one more
+ * cacheline in the linear search - but the linear search might stop before it
+ * gets to the second cacheline.
+ */
+
+#define BSET_CACHELINE 128
+#define bset_tree_space(b) (btree_data_space(b) / BSET_CACHELINE)
+
+#define bset_tree_bytes(b) (bset_tree_space(b) * sizeof(struct bkey_float))
+#define bset_prev_bytes(b) (bset_tree_space(b) * sizeof(uint8_t))
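+
+/*
+ * For a sense of scale (illustrative numbers only): with a 256 KiB btree node,
+ * bset_tree_space() is 256 KiB / 128 == 2048, so the auxiliary search tree
+ * costs 2048 * sizeof(struct bkey_float) == 8 KiB and the prev array another
+ * 2 KiB - which is where the ~3% figure mentioned earlier in this file comes
+ * from.
+ */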
+
+void bch_bset_init_next(struct btree *);
+
+void bch_bset_fix_invalidated_key(struct btree *, struct bkey *);
+void bch_bset_fix_lookup_table(struct btree *, struct bkey *);
+
+struct bkey *__bch_bset_search(struct btree *, struct bset_tree *,
+ const struct bkey *);
+
+static inline struct bkey *bch_bset_search(struct btree *b, struct bset_tree *t,
+ const struct bkey *search)
+{
+ return search ? __bch_bset_search(b, t, search) : t->data->start;
+}
+
+bool bch_bkey_try_merge(struct btree *, struct bkey *, struct bkey *);
+void bch_btree_sort_lazy(struct btree *);
+void bch_btree_sort_into(struct btree *, struct btree *);
+void bch_btree_sort_and_fix_extents(struct btree *, struct btree_iter *);
+void bch_btree_sort_partial(struct btree *, unsigned);
+
+static inline void bch_btree_sort(struct btree *b)
+{
+ bch_btree_sort_partial(b, 0);
+}
+
+int bch_bset_print_stats(struct cache_set *, char *);
+
+#endif
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
new file mode 100644
index 0000000..7a5658f
--- /dev/null
+++ b/drivers/md/bcache/btree.c
@@ -0,0 +1,2503 @@
+/*
+ * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
+ *
+ * Uses a block device as cache for other block devices; optimized for SSDs.
+ * All allocation is done in buckets, which should match the erase block size
+ * of the device.
+ *
+ * Buckets containing cached data are kept on a heap sorted by priority;
+ * bucket priority is increased on cache hit, and periodically all the buckets
+ * on the heap have their priority scaled down. This currently is just used as
+ * an LRU but in the future should allow for more intelligent heuristics.
+ *
+ * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
+ * counter. Garbage collection is used to remove stale pointers.
+ *
+ * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
+ * as keys are inserted we only sort the pages that have not yet been written.
+ * When garbage collection is run, we resort the entire node.
+ *
+ * All configuration is done via sysfs; see Documentation/bcache.txt.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "request.h"
+
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/hash.h>
+#include <linux/prefetch.h>
+#include <linux/random.h>
+#include <linux/rcupdate.h>
+#include <trace/events/bcache.h>
+
+/*
+ * Todo:
+ * register_bcache: Return errors out to userspace correctly
+ *
+ * Writeback: don't undirty key until after a cache flush
+ *
+ * Create an iterator for key pointers
+ *
+ * On btree write error, mark bucket such that it won't be freed from the cache
+ *
+ * Journalling:
+ * Check for bad keys in replay
+ * Propagate barriers
+ * Refcount journal entries in journal_replay
+ *
+ * Garbage collection:
+ * Finish incremental gc
+ * Gc should free old UUIDs, data for invalid UUIDs
+ *
+ * Provide a way to list backing device UUIDs we have data cached for, and
+ * probably how long it's been since we've seen them, and a way to invalidate
+ * dirty data for devices that will never be attached again
+ *
+ * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
+ * that based on that and how much dirty data we have we can keep writeback
+ * from being starved
+ *
+ * Add a tracepoint or somesuch to watch for writeback starvation
+ *
+ * When btree depth > 1 and splitting an interior node, we have to make sure
+ * alloc_bucket() cannot fail. This should be true but is not completely
+ * obvious.
+ *
+ * Make sure all allocations get charged to the root cgroup
+ *
+ * Plugging?
+ *
+ * If data write is less than hard sector size of ssd, round up offset in open
+ * bucket to the next whole sector
+ *
+ * Also lookup by cgroup in get_open_bucket()
+ *
+ * Superblock needs to be fleshed out for multiple cache devices
+ *
+ * Add a sysfs tunable for the number of writeback IOs in flight
+ *
+ * Add a sysfs tunable for the number of open data buckets
+ *
+ * IO tracking: Can we track when one process is doing io on behalf of another?
+ * IO tracking: Don't use just an average, weigh more recent stuff higher
+ *
+ * Test module load/unload
+ */
+
+static const char * const op_types[] = {
+ "insert", "replace"
+};
+
+static const char *op_type(struct btree_op *op)
+{
+ return op_types[op->type];
+}
+
+#define MAX_NEED_GC 64
+#define MAX_SAVE_PRIO 72
+
+#define PTR_DIRTY_BIT (((uint64_t) 1 << 36))
+
+#define PTR_HASH(c, k) \
+ (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
+
+struct workqueue_struct *bch_gc_wq;
+static struct workqueue_struct *btree_io_wq;
+
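+/*
+ * Initialize a btree_op for use from the stack. op->lock is the maximum btree
+ * level at which we take a write lock (bch_btree_node_get() write locks a node
+ * when its level is <= op->lock); -1 means read locks only.
+ */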
+void bch_btree_op_init_stack(struct btree_op *op)
+{
+ memset(op, 0, sizeof(struct btree_op));
+ closure_init_stack(&op->cl);
+ op->lock = -1;
+ bch_keylist_init(&op->keys);
+}
+
+/* Btree key manipulation */
+
+static void bkey_put(struct cache_set *c, struct bkey *k, int level)
+{
+ if ((level && KEY_OFFSET(k)) || !level)
+ __bkey_put(c, k);
+}
+
+/* Btree IO */
+
+static uint64_t btree_csum_set(struct btree *b, struct bset *i)
+{
+ uint64_t crc = b->key.ptr[0];
+ void *data = (void *) i + 8, *end = end(i);
+
+ crc = bch_crc64_update(crc, data, end - data);
+ return crc ^ 0xffffffffffffffffULL;
+}
+
+static void btree_bio_endio(struct bio *bio, int error)
+{
+ struct closure *cl = bio->bi_private;
+ struct btree *b = container_of(cl, struct btree, io.cl);
+
+ if (error)
+ set_btree_node_io_error(b);
+
+ bch_bbio_count_io_errors(b->c, bio, error, (bio->bi_rw & WRITE)
+ ? "writing btree" : "reading btree");
+ closure_put(cl);
+}
+
+static void btree_bio_init(struct btree *b)
+{
+ BUG_ON(b->bio);
+ b->bio = bch_bbio_alloc(b->c);
+
+ b->bio->bi_end_io = btree_bio_endio;
+ b->bio->bi_private = &b->io.cl;
+}
+
+void bch_btree_read_done(struct closure *cl)
+{
+ struct btree *b = container_of(cl, struct btree, io.cl);
+ struct bset *i = b->sets[0].data;
+ struct btree_iter *iter = b->c->fill_iter;
+ const char *err = "bad btree header";
+ BUG_ON(b->nsets || b->written);
+
+ bch_bbio_free(b->bio, b->c);
+ b->bio = NULL;
+
+ mutex_lock(&b->c->fill_lock);
+ iter->used = 0;
+
+ if (btree_node_io_error(b) ||
+ !i->seq)
+ goto err;
+
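+ /*
+ * A btree node on disk is a sequence of bsets that all share the seq of
+ * the first one; keep accumulating sets until we hit a block with a
+ * different seq or run out of space in the node.
+ */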
+ for (;
+ b->written < btree_blocks(b) && i->seq == b->sets[0].data->seq;
+ i = write_block(b)) {
+ err = "unsupported bset version";
+ if (i->version > BCACHE_BSET_VERSION)
+ goto err;
+
+ err = "bad btree header";
+ if (b->written + set_blocks(i, b->c) > btree_blocks(b))
+ goto err;
+
+ err = "bad magic";
+ if (i->magic != bset_magic(b->c))
+ goto err;
+
+ err = "bad checksum";
+ switch (i->version) {
+ case 0:
+ if (i->csum != csum_set(i))
+ goto err;
+ break;
+ case BCACHE_BSET_VERSION:
+ if (i->csum != btree_csum_set(b, i))
+ goto err;
+ break;
+ }
+
+ err = "empty set";
+ if (i != b->sets[0].data && !i->keys)
+ goto err;
+
+ bch_btree_iter_push(iter, i->start, end(i));
+
+ b->written += set_blocks(i, b->c);
+ }
+
+ err = "corrupted btree";
+ for (i = write_block(b);
+ index(i, b) < btree_blocks(b);
+ i = ((void *) i) + block_bytes(b->c))
+ if (i->seq == b->sets[0].data->seq)
+ goto err;
+
+ bch_btree_sort_and_fix_extents(b, iter);
+
+ i = b->sets[0].data;
+ err = "short btree key";
+ if (b->sets[0].size &&
+ bkey_cmp(&b->key, &b->sets[0].end) < 0)
+ goto err;
+
+ if (b->written < btree_blocks(b))
+ bch_bset_init_next(b);
+out:
+
+ mutex_unlock(&b->c->fill_lock);
+
+ spin_lock(&b->c->btree_read_time_lock);
+ bch_time_stats_update(&b->c->btree_read_time, b->io_start_time);
+ spin_unlock(&b->c->btree_read_time_lock);
+
+ smp_wmb(); /* read_done is our write lock */
+ set_btree_node_read_done(b);
+
+ closure_return(cl);
+err:
+ set_btree_node_io_error(b);
+ bch_cache_set_error(b->c, "%s at bucket %zu, block %zu, %u keys",
+ err, PTR_BUCKET_NR(b->c, &b->key, 0),
+ index(i, b), i->keys);
+ goto out;
+}
+
+void bch_btree_read(struct btree *b)
+{
+ BUG_ON(b->nsets || b->written);
+
+ if (!closure_trylock(&b->io.cl, &b->c->cl))
+ BUG();
+
+ b->io_start_time = local_clock();
+
+ btree_bio_init(b);
+ b->bio->bi_rw = REQ_META|READ_SYNC;
+ b->bio->bi_size = KEY_SIZE(&b->key) << 9;
+
+ bch_bio_map(b->bio, b->sets[0].data);
+
+ pr_debug("%s", pbtree(b));
+ trace_bcache_btree_read(b->bio);
+ bch_submit_bbio(b->bio, b->c, &b->key, 0);
+
+ continue_at(&b->io.cl, bch_btree_read_done, system_wq);
+}
+
+static void btree_complete_write(struct btree *b, struct btree_write *w)
+{
+ if (w->prio_blocked &&
+ !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
+ wake_up(&b->c->alloc_wait);
+
+ if (w->journal) {
+ atomic_dec_bug(w->journal);
+ __closure_wake_up(&b->c->journal.wait);
+ }
+
+ if (w->owner)
+ closure_put(w->owner);
+
+ w->prio_blocked = 0;
+ w->journal = NULL;
+ w->owner = NULL;
+}
+
+static void __btree_write_done(struct closure *cl)
+{
+ struct btree *b = container_of(cl, struct btree, io.cl);
+ struct btree_write *w = btree_prev_write(b);
+
+ bch_bbio_free(b->bio, b->c);
+ b->bio = NULL;
+ btree_complete_write(b, w);
+
+ if (btree_node_dirty(b))
+ queue_delayed_work(btree_io_wq, &b->work,
+ msecs_to_jiffies(30000));
+
+ closure_return(cl);
+}
+
+static void btree_write_done(struct closure *cl)
+{
+ struct btree *b = container_of(cl, struct btree, io.cl);
+ struct bio_vec *bv;
+ int n;
+
+ __bio_for_each_segment(bv, b->bio, n, 0)
+ __free_page(bv->bv_page);
+
+ __btree_write_done(cl);
+}
+
+static void do_btree_write(struct btree *b)
+{
+ struct closure *cl = &b->io.cl;
+ struct bset *i = b->sets[b->nsets].data;
+ BKEY_PADDED(key) k;
+
+ i->version = BCACHE_BSET_VERSION;
+ i->csum = btree_csum_set(b, i);
+
+ btree_bio_init(b);
+ b->bio->bi_rw = REQ_META|WRITE_SYNC;
+ b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c);
+ bch_bio_map(b->bio, i);
+
+ bkey_copy(&k.key, &b->key);
+ SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i));
+
+ if (!bch_bio_alloc_pages(b->bio, GFP_NOIO)) {
+ int j;
+ struct bio_vec *bv;
+ void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
+
+ bio_for_each_segment(bv, b->bio, j)
+ memcpy(page_address(bv->bv_page),
+ base + j * PAGE_SIZE, PAGE_SIZE);
+
+ trace_bcache_btree_write(b->bio);
+ bch_submit_bbio(b->bio, b->c, &k.key, 0);
+
+ continue_at(cl, btree_write_done, NULL);
+ } else {
+ b->bio->bi_vcnt = 0;
+ bch_bio_map(b->bio, i);
+
+ trace_bcache_btree_write(b->bio);
+ bch_submit_bbio(b->bio, b->c, &k.key, 0);
+
+ closure_sync(cl);
+ __btree_write_done(cl);
+ }
+}
+
+static void __btree_write(struct btree *b)
+{
+ struct bset *i = b->sets[b->nsets].data;
+
+ BUG_ON(current->bio_list);
+
+ closure_lock(&b->io, &b->c->cl);
+ cancel_delayed_work(&b->work);
+
+ clear_bit(BTREE_NODE_dirty, &b->flags);
+ change_bit(BTREE_NODE_write_idx, &b->flags);
+
+ bch_check_key_order(b, i);
+ BUG_ON(b->written && !i->keys);
+
+ do_btree_write(b);
+
+ pr_debug("%s block %i keys %i", pbtree(b), b->written, i->keys);
+
+ b->written += set_blocks(i, b->c);
+ atomic_long_add(set_blocks(i, b->c) * b->c->sb.block_size,
+ &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
+
+ bch_btree_sort_lazy(b);
+
+ if (b->written < btree_blocks(b))
+ bch_bset_init_next(b);
+}
+
+static void btree_write_work(struct work_struct *w)
+{
+ struct btree *b = container_of(to_delayed_work(w), struct btree, work);
+
+ down_write(&b->lock);
+
+ if (btree_node_dirty(b))
+ __btree_write(b);
+ up_write(&b->lock);
+}
+
+void bch_btree_write(struct btree *b, bool now, struct btree_op *op)
+{
+ struct bset *i = b->sets[b->nsets].data;
+ struct btree_write *w = btree_current_write(b);
+
+ BUG_ON(b->written &&
+ (b->written >= btree_blocks(b) ||
+ i->seq != b->sets[0].data->seq ||
+ !i->keys));
+
+ if (!btree_node_dirty(b)) {
+ set_btree_node_dirty(b);
+ queue_delayed_work(btree_io_wq, &b->work,
+ msecs_to_jiffies(30000));
+ }
+
+ w->prio_blocked += b->prio_blocked;
+ b->prio_blocked = 0;
+
+ if (op && op->journal && !b->level) {
+ if (w->journal &&
+ journal_pin_cmp(b->c, w, op)) {
+ atomic_dec_bug(w->journal);
+ w->journal = NULL;
+ }
+
+ if (!w->journal) {
+ w->journal = op->journal;
+ atomic_inc(w->journal);
+ }
+ }
+
+ if (current->bio_list)
+ return;
+
+ /* Force write if set is too big */
+ if (now ||
+ b->level ||
+ set_bytes(i) > PAGE_SIZE - 48) {
+ if (op && now) {
+ /* Must wait on multiple writes */
+ BUG_ON(w->owner);
+ w->owner = &op->cl;
+ closure_get(&op->cl);
+ }
+
+ __btree_write(b);
+ }
+ BUG_ON(!b->written);
+}
+
+/*
+ * Btree in memory cache - allocation/freeing
+ * mca -> memory cache
+ */
+
+static void mca_reinit(struct btree *b)
+{
+ unsigned i;
+
+ b->flags = 0;
+ b->written = 0;
+ b->nsets = 0;
+
+ for (i = 0; i < MAX_BSETS; i++)
+ b->sets[i].size = 0;
+ /*
+ * Second loop starts at 1 because b->sets[0].data is the memory we
+ * allocated
+ */
+ for (i = 1; i < MAX_BSETS; i++)
+ b->sets[i].data = NULL;
+}
+
+#define mca_reserve(c) (((c->root && c->root->level) \
+ ? c->root->level : 1) * 8 + 16)
+#define mca_can_free(c) \
+ max_t(int, 0, c->bucket_cache_used - mca_reserve(c))
+
+static void mca_data_free(struct btree *b)
+{
+ struct bset_tree *t = b->sets;
+ BUG_ON(!closure_is_unlocked(&b->io.cl));
+
+ if (bset_prev_bytes(b) < PAGE_SIZE)
+ kfree(t->prev);
+ else
+ free_pages((unsigned long) t->prev,
+ get_order(bset_prev_bytes(b)));
+
+ if (bset_tree_bytes(b) < PAGE_SIZE)
+ kfree(t->tree);
+ else
+ free_pages((unsigned long) t->tree,
+ get_order(bset_tree_bytes(b)));
+
+ free_pages((unsigned long) t->data, b->page_order);
+
+ t->prev = NULL;
+ t->tree = NULL;
+ t->data = NULL;
+ list_move(&b->list, &b->c->btree_cache_freed);
+ b->c->bucket_cache_used--;
+}
+
+static void mca_bucket_free(struct btree *b)
+{
+ BUG_ON(btree_node_dirty(b));
+
+ b->key.ptr[0] = 0;
+ hlist_del_init_rcu(&b->hash);
+ list_move(&b->list, &b->c->btree_cache_freeable);
+}
+
+static unsigned btree_order(struct bkey *k)
+{
+ return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
+}
+
+static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
+{
+ struct bset_tree *t = b->sets;
+ BUG_ON(t->data);
+
+ b->page_order = max_t(unsigned,
+ ilog2(b->c->btree_pages),
+ btree_order(k));
+
+ t->data = (void *) __get_free_pages(gfp, b->page_order);
+ if (!t->data)
+ goto err;
+
+ t->tree = bset_tree_bytes(b) < PAGE_SIZE
+ ? kmalloc(bset_tree_bytes(b), gfp)
+ : (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
+ if (!t->tree)
+ goto err;
+
+ t->prev = bset_prev_bytes(b) < PAGE_SIZE
+ ? kmalloc(bset_prev_bytes(b), gfp)
+ : (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
+ if (!t->prev)
+ goto err;
+
+ list_move(&b->list, &b->c->btree_cache);
+ b->c->bucket_cache_used++;
+ return;
+err:
+ mca_data_free(b);
+}
+
+static struct btree *mca_bucket_alloc(struct cache_set *c,
+ struct bkey *k, gfp_t gfp)
+{
+ struct btree *b = kzalloc(sizeof(struct btree), gfp);
+ if (!b)
+ return NULL;
+
+ init_rwsem(&b->lock);
+ lockdep_set_novalidate_class(&b->lock);
+ INIT_LIST_HEAD(&b->list);
+ INIT_DELAYED_WORK(&b->work, btree_write_work);
+ b->c = c;
+ closure_init_unlocked(&b->io);
+
+ mca_data_alloc(b, k, gfp);
+ return b;
+}
+
+static int mca_reap(struct btree *b, struct closure *cl, unsigned min_order)
+{
+ lockdep_assert_held(&b->c->bucket_lock);
+
+ if (!down_write_trylock(&b->lock))
+ return -ENOMEM;
+
+ if (b->page_order < min_order) {
+ rw_unlock(true, b);
+ return -ENOMEM;
+ }
+
+ BUG_ON(btree_node_dirty(b) && !b->sets[0].data);
+
+ if (cl && btree_node_dirty(b))
+ bch_btree_write(b, true, NULL);
+
+ if (cl)
+ closure_wait_event_async(&b->io.wait, cl,
+ atomic_read(&b->io.cl.remaining) == -1);
+
+ if (btree_node_dirty(b) ||
+ !closure_is_unlocked(&b->io.cl) ||
+ work_pending(&b->work.work)) {
+ rw_unlock(true, b);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
+{
+ struct cache_set *c = container_of(shrink, struct cache_set, shrink);
+ struct btree *b, *t;
+ unsigned long i, nr = sc->nr_to_scan;
+
+ if (c->shrinker_disabled)
+ return 0;
+
+ if (c->try_harder)
+ return 0;
+
+ /*
+ * If nr == 0, we're supposed to return the number of items we have
+ * cached. Not allowed to return -1.
+ */
+ if (!nr)
+ return mca_can_free(c) * c->btree_pages;
+
+ /* Return -1 if we can't do anything right now */
+ if (sc->gfp_mask & __GFP_WAIT)
+ mutex_lock(&c->bucket_lock);
+ else if (!mutex_trylock(&c->bucket_lock))
+ return -1;
+
+ nr /= c->btree_pages;
+ nr = min_t(unsigned long, nr, mca_can_free(c));
+
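+ /*
+ * Reclaim from the freeable list first - those nodes' buckets are
+ * already gone, only the in-memory data remains - but skip the first
+ * few entries so they can be reused without reallocating.
+ */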
+ i = 0;
+ list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
+ if (!nr)
+ break;
+
+ if (++i > 3 &&
+ !mca_reap(b, NULL, 0)) {
+ mca_data_free(b);
+ rw_unlock(true, b);
+ --nr;
+ }
+ }
+
+ /*
+ * Can happen right when we first start up, before we've read in any
+ * btree nodes
+ */
+ if (list_empty(&c->btree_cache))
+ goto out;
+
+ for (i = 0; nr && i < c->bucket_cache_used; i++) {
+ b = list_first_entry(&c->btree_cache, struct btree, list);
+ list_rotate_left(&c->btree_cache);
+
+ if (!b->accessed &&
+ !mca_reap(b, NULL, 0)) {
+ mca_bucket_free(b);
+ mca_data_free(b);
+ rw_unlock(true, b);
+ --nr;
+ } else
+ b->accessed = 0;
+ }
+out:
+ nr = mca_can_free(c) * c->btree_pages;
+ mutex_unlock(&c->bucket_lock);
+ return nr;
+}
+
+void bch_btree_cache_free(struct cache_set *c)
+{
+ struct btree *b;
+ struct closure cl;
+ closure_init_stack(&cl);
+
+ if (c->shrink.list.next)
+ unregister_shrinker(&c->shrink);
+
+ mutex_lock(&c->bucket_lock);
+
+#ifdef CONFIG_BCACHE_DEBUG
+ if (c->verify_data)
+ list_move(&c->verify_data->list, &c->btree_cache);
+#endif
+
+ list_splice(&c->btree_cache_freeable,
+ &c->btree_cache);
+
+ while (!list_empty(&c->btree_cache)) {
+ b = list_first_entry(&c->btree_cache, struct btree, list);
+
+ if (btree_node_dirty(b))
+ btree_complete_write(b, btree_current_write(b));
+ clear_bit(BTREE_NODE_dirty, &b->flags);
+
+ mca_data_free(b);
+ }
+
+ while (!list_empty(&c->btree_cache_freed)) {
+ b = list_first_entry(&c->btree_cache_freed,
+ struct btree, list);
+ list_del(&b->list);
+ cancel_delayed_work_sync(&b->work);
+ kfree(b);
+ }
+
+ mutex_unlock(&c->bucket_lock);
+}
+
+int bch_btree_cache_alloc(struct cache_set *c)
+{
+ unsigned i;
+
+ /* XXX: doesn't check for errors */
+
+ closure_init_unlocked(&c->gc);
+
+ for (i = 0; i < mca_reserve(c); i++)
+ mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
+
+ list_splice_init(&c->btree_cache,
+ &c->btree_cache_freeable);
+
+#ifdef CONFIG_BCACHE_DEBUG
+ mutex_init(&c->verify_lock);
+
+ c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
+
+ if (c->verify_data &&
+ c->verify_data->sets[0].data)
+ list_del_init(&c->verify_data->list);
+ else
+ c->verify_data = NULL;
+#endif
+
+ c->shrink.shrink = bch_mca_shrink;
+ c->shrink.seeks = 4;
+ c->shrink.batch = c->btree_pages * 2;
+ register_shrinker(&c->shrink);
+
+ return 0;
+}
+
+/* Btree in memory cache - hash table */
+
+static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
+{
+ return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
+}
+
+static struct btree *mca_find(struct cache_set *c, struct bkey *k)
+{
+ struct btree *b;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
+ if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
+ goto out;
+ b = NULL;
+out:
+ rcu_read_unlock();
+ return b;
+}
+
+static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k,
+ int level, struct closure *cl)
+{
+ int ret = -ENOMEM;
+ struct btree *i;
+
+ if (!cl)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Trying to free up some memory - i.e. reuse some btree nodes - may
+ * require initiating IO to flush the dirty part of the node. If we're
+ * running under generic_make_request(), that IO will never finish and
+ * we would deadlock. Returning -EAGAIN causes the cache lookup code to
+ * punt to workqueue and retry.
+ */
+ if (current->bio_list)
+ return ERR_PTR(-EAGAIN);
+
+ if (c->try_harder && c->try_harder != cl) {
+ closure_wait_event_async(&c->try_wait, cl, !c->try_harder);
+ return ERR_PTR(-EAGAIN);
+ }
+
+ /* XXX: tracepoint */
+ c->try_harder = cl;
+ c->try_harder_start = local_clock();
+retry:
+ list_for_each_entry_reverse(i, &c->btree_cache, list) {
+ int r = mca_reap(i, cl, btree_order(k));
+ if (!r)
+ return i;
+ if (r != -ENOMEM)
+ ret = r;
+ }
+
+ if (ret == -EAGAIN &&
+ closure_blocking(cl)) {
+ mutex_unlock(&c->bucket_lock);
+ closure_sync(cl);
+ mutex_lock(&c->bucket_lock);
+ goto retry;
+ }
+
+ return ERR_PTR(ret);
+}
+
+/*
+ * We can only have one thread cannibalizing other cached btree nodes at a time,
+ * or we'll deadlock. We use an open-coded mutex to ensure that, which
+ * cannibalize_bucket() will take. This means every time we unlock the root of
+ * the btree, we need to release this lock if we have it held.
+ */
+void bch_cannibalize_unlock(struct cache_set *c, struct closure *cl)
+{
+ if (c->try_harder == cl) {
+ bch_time_stats_update(&c->try_harder_time, c->try_harder_start);
+ c->try_harder = NULL;
+ __closure_wake_up(&c->try_wait);
+ }
+}
+
+static struct btree *mca_alloc(struct cache_set *c, struct bkey *k,
+ int level, struct closure *cl)
+{
+ struct btree *b;
+
+ lockdep_assert_held(&c->bucket_lock);
+
+ if (mca_find(c, k))
+ return NULL;
+
+ /* btree_free() doesn't free memory; it sticks the node on the end of
+ * the list. Check if there are any freed nodes there:
+ */
+ list_for_each_entry(b, &c->btree_cache_freeable, list)
+ if (!mca_reap(b, NULL, btree_order(k)))
+ goto out;
+
+ /* We never free struct btree itself, just the memory that holds the on
+ * disk node. Check the freed list before allocating a new one:
+ */
+ list_for_each_entry(b, &c->btree_cache_freed, list)
+ if (!mca_reap(b, NULL, 0)) {
+ mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
+ if (!b->sets[0].data)
+ goto err;
+ else
+ goto out;
+ }
+
+ b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
+ if (!b)
+ goto err;
+
+ BUG_ON(!down_write_trylock(&b->lock));
+ if (!b->sets->data)
+ goto err;
+out:
+ BUG_ON(!closure_is_unlocked(&b->io.cl));
+
+ bkey_copy(&b->key, k);
+ list_move(&b->list, &c->btree_cache);
+ hlist_del_init_rcu(&b->hash);
+ hlist_add_head_rcu(&b->hash, mca_hash(c, k));
+
+ lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
+ b->level = level;
+
+ mca_reinit(b);
+
+ return b;
+err:
+ if (b)
+ rw_unlock(true, b);
+
+ b = mca_cannibalize(c, k, level, cl);
+ if (!IS_ERR(b))
+ goto out;
+
+ return b;
+}
+
+/**
+ * bch_btree_node_get - find a btree node in the cache and lock it, reading it
+ * in from disk if necessary.
+ *
+ * If IO is necessary, it uses the closure embedded in struct btree_op to wait;
+ * if that closure is in non-blocking mode, this will return -EAGAIN.
+ *
+ * The btree node will have either a read or a write lock held, depending on
+ * level and op->lock.
+ */
+struct btree *bch_btree_node_get(struct cache_set *c, struct bkey *k,
+ int level, struct btree_op *op)
+{
+ int i = 0;
+ bool write = level <= op->lock;
+ struct btree *b;
+
+ BUG_ON(level < 0);
+retry:
+ b = mca_find(c, k);
+
+ if (!b) {
+ mutex_lock(&c->bucket_lock);
+ b = mca_alloc(c, k, level, &op->cl);
+ mutex_unlock(&c->bucket_lock);
+
+ if (!b)
+ goto retry;
+ if (IS_ERR(b))
+ return b;
+
+ bch_btree_read(b);
+
+ if (!write)
+ downgrade_write(&b->lock);
+ } else {
+ rw_lock(write, b, level);
+ if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
+ rw_unlock(write, b);
+ goto retry;
+ }
+ BUG_ON(b->level != level);
+ }
+
+ b->accessed = 1;
+
+ for (; i <= b->nsets && b->sets[i].size; i++) {
+ prefetch(b->sets[i].tree);
+ prefetch(b->sets[i].data);
+ }
+
+ for (; i <= b->nsets; i++)
+ prefetch(b->sets[i].data);
+
+ if (!closure_wait_event(&b->io.wait, &op->cl,
+ btree_node_read_done(b))) {
+ rw_unlock(write, b);
+ b = ERR_PTR(-EAGAIN);
+ } else if (btree_node_io_error(b)) {
+ rw_unlock(write, b);
+ b = ERR_PTR(-EIO);
+ } else
+ BUG_ON(!b->written);
+
+ return b;
+}
+
+static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
+{
+ struct btree *b;
+
+ mutex_lock(&c->bucket_lock);
+ b = mca_alloc(c, k, level, NULL);
+ mutex_unlock(&c->bucket_lock);
+
+ if (!IS_ERR_OR_NULL(b)) {
+ bch_btree_read(b);
+ rw_unlock(true, b);
+ }
+}
+
+/* Btree alloc */
+
+static void btree_node_free(struct btree *b, struct btree_op *op)
+{
+ unsigned i;
+
+ /*
+ * The BUG_ON() in btree_node_get() implies that we must have a write
+ * lock on the parent to free or even invalidate a node
+ */
+ BUG_ON(op->lock <= b->level);
+ BUG_ON(b == b->c->root);
+ pr_debug("bucket %s", pbtree(b));
+
+ if (btree_node_dirty(b))
+ btree_complete_write(b, btree_current_write(b));
+ clear_bit(BTREE_NODE_dirty, &b->flags);
+
+ if (b->prio_blocked &&
+ !atomic_sub_return(b->prio_blocked, &b->c->prio_blocked))
+ wake_up(&b->c->alloc_wait);
+
+ b->prio_blocked = 0;
+
+ cancel_delayed_work(&b->work);
+
+ mutex_lock(&b->c->bucket_lock);
+
+ for (i = 0; i < KEY_PTRS(&b->key); i++) {
+ BUG_ON(atomic_read(&PTR_BUCKET(b->c, &b->key, i)->pin));
+
+ bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
+ PTR_BUCKET(b->c, &b->key, i));
+ }
+
+ bch_bucket_free(b->c, &b->key);
+ mca_bucket_free(b);
+ mutex_unlock(&b->c->bucket_lock);
+}
+
+struct btree *bch_btree_node_alloc(struct cache_set *c, int level,
+ struct closure *cl)
+{
+ BKEY_PADDED(key) k;
+ struct btree *b = ERR_PTR(-EAGAIN);
+
+ mutex_lock(&c->bucket_lock);
+retry:
+ if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, cl))
+ goto err;
+
+ SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
+
+ b = mca_alloc(c, &k.key, level, cl);
+ if (IS_ERR(b))
+ goto err_free;
+
+ if (!b) {
+ cache_bug(c,
+ "Tried to allocate bucket that was in btree cache");
+ __bkey_put(c, &k.key);
+ goto retry;
+ }
+
+ set_btree_node_read_done(b);
+ b->accessed = 1;
+ bch_bset_init_next(b);
+
+ mutex_unlock(&c->bucket_lock);
+ return b;
+err_free:
+ bch_bucket_free(c, &k.key);
+ __bkey_put(c, &k.key);
+err:
+ mutex_unlock(&c->bucket_lock);
+ return b;
+}
+
+static struct btree *btree_node_alloc_replacement(struct btree *b,
+ struct closure *cl)
+{
+ struct btree *n = bch_btree_node_alloc(b->c, b->level, cl);
+ if (!IS_ERR_OR_NULL(n))
+ bch_btree_sort_into(b, n);
+
+ return n;
+}
+
+/* Garbage collection */
+
+uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
+{
+ uint8_t stale = 0;
+ unsigned i;
+ struct bucket *g;
+
+ /*
+ * ptr_invalid() can't return true for the keys that mark btree nodes as
+ * freed, but since ptr_bad() returns true we'll never actually use them
+ * for anything and thus we don't want to mark their pointers here
+ */
+ if (!bkey_cmp(k, &ZERO_KEY))
+ return stale;
+
+ for (i = 0; i < KEY_PTRS(k); i++) {
+ if (!ptr_available(c, k, i))
+ continue;
+
+ g = PTR_BUCKET(c, k, i);
+
+ if (gen_after(g->gc_gen, PTR_GEN(k, i)))
+ g->gc_gen = PTR_GEN(k, i);
+
+ if (ptr_stale(c, k, i)) {
+ stale = max(stale, ptr_stale(c, k, i));
+ continue;
+ }
+
+ cache_bug_on(GC_MARK(g) &&
+ (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
+ c, "inconsistent ptrs: mark = %llu, level = %i",
+ GC_MARK(g), level);
+
+ if (level)
+ SET_GC_MARK(g, GC_MARK_METADATA);
+ else if (KEY_DIRTY(k))
+ SET_GC_MARK(g, GC_MARK_DIRTY);
+
+ /* guard against overflow */
+ SET_GC_SECTORS_USED(g, min_t(unsigned,
+ GC_SECTORS_USED(g) + KEY_SIZE(k),
+ (1 << 14) - 1));
+
+ BUG_ON(!GC_SECTORS_USED(g));
+ }
+
+ return stale;
+}
+
+#define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
+
+static int btree_gc_mark_node(struct btree *b, unsigned *keys,
+ struct gc_stat *gc)
+{
+ uint8_t stale = 0;
+ unsigned last_dev = -1;
+ struct bcache_device *d = NULL;
+ struct bkey *k;
+ struct btree_iter iter;
+ struct bset_tree *t;
+
+ gc->nodes++;
+
+ for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
+ if (last_dev != KEY_INODE(k)) {
+ last_dev = KEY_INODE(k);
+
+ d = KEY_INODE(k) < b->c->nr_uuids
+ ? b->c->devices[last_dev]
+ : NULL;
+ }
+
+ stale = max(stale, btree_mark_key(b, k));
+
+ if (bch_ptr_bad(b, k))
+ continue;
+
+ *keys += bkey_u64s(k);
+
+ gc->key_bytes += bkey_u64s(k);
+ gc->nkeys++;
+
+ gc->data += KEY_SIZE(k);
+ if (KEY_DIRTY(k)) {
+ gc->dirty += KEY_SIZE(k);
+ if (d)
+ d->sectors_dirty_gc += KEY_SIZE(k);
+ }
+ }
+
+ for (t = b->sets; t <= &b->sets[b->nsets]; t++)
+ btree_bug_on(t->size &&
+ bset_written(b, t) &&
+ bkey_cmp(&b->key, &t->end) < 0,
+ b, "found short btree key in gc");
+
+ return stale;
+}
+
+static struct btree *btree_gc_alloc(struct btree *b, struct bkey *k,
+ struct btree_op *op)
+{
+ /*
+ * We block priorities from being written for the duration of garbage
+ * collection, so we can't sleep in btree_alloc() ->
+ * bch_bucket_alloc_set(), or we'd risk deadlock - so we don't pass it
+ * our closure.
+ */
+ struct btree *n = btree_node_alloc_replacement(b, NULL);
+
+ if (!IS_ERR_OR_NULL(n)) {
+ swap(b, n);
+
+ memcpy(k->ptr, b->key.ptr,
+ sizeof(uint64_t) * KEY_PTRS(&b->key));
+
+ __bkey_put(b->c, &b->key);
+ atomic_inc(&b->c->prio_blocked);
+ b->prio_blocked++;
+
+ btree_node_free(n, op);
+ up_write(&n->lock);
+ }
+
+ return b;
+}
+
+/*
+ * Leaving this at 2 until we've got incremental garbage collection done; it
+ * could be higher (and has been tested with 4) except that garbage collection
+ * could take much longer, adversely affecting latency.
+ */
+#define GC_MERGE_NODES 2U
+
+struct gc_merge_info {
+ struct btree *b;
+ struct bkey *k;
+ unsigned keys;
+};
+
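+/*
+ * Try to merge the last GC_MERGE_NODES sibling nodes we've visited into one
+ * fewer node: if their combined keys fit, keys are repacked so that the node
+ * at r[0] ends up empty, and that node is then freed.
+ */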
+static void btree_gc_coalesce(struct btree *b, struct btree_op *op,
+ struct gc_stat *gc, struct gc_merge_info *r)
+{
+ unsigned nodes = 0, keys = 0, blocks;
+ int i;
+
+ while (nodes < GC_MERGE_NODES && r[nodes].b)
+ keys += r[nodes++].keys;
+
+ blocks = btree_default_blocks(b->c) * 2 / 3;
+
+ if (nodes < 2 ||
+ __set_blocks(b->sets[0].data, keys, b->c) > blocks * (nodes - 1))
+ return;
+
+ for (i = nodes - 1; i >= 0; --i) {
+ if (r[i].b->written)
+ r[i].b = btree_gc_alloc(r[i].b, r[i].k, op);
+
+ if (r[i].b->written)
+ return;
+ }
+
+ for (i = nodes - 1; i > 0; --i) {
+ struct bset *n1 = r[i].b->sets->data;
+ struct bset *n2 = r[i - 1].b->sets->data;
+ struct bkey *k, *last = NULL;
+
+ keys = 0;
+
+ if (i == 1) {
+ /*
+ * Last node we're not getting rid of - we're getting
+ * rid of the node at r[0]. Have to try and fit all of
+ * the remaining keys into this node; we can't ensure
+ * they will always fit due to rounding and variable
+ * length keys (shouldn't be possible in practice,
+ * though)
+ */
+ if (__set_blocks(n1, n1->keys + r->keys,
+ b->c) > btree_blocks(r[i].b))
+ return;
+
+ keys = n2->keys;
+ last = &r->b->key;
+ } else
+ for (k = n2->start;
+ k < end(n2);
+ k = bkey_next(k)) {
+ if (__set_blocks(n1, n1->keys + keys +
+ bkey_u64s(k), b->c) > blocks)
+ break;
+
+ last = k;
+ keys += bkey_u64s(k);
+ }
+
+ BUG_ON(__set_blocks(n1, n1->keys + keys,
+ b->c) > btree_blocks(r[i].b));
+
+ if (last) {
+ bkey_copy_key(&r[i].b->key, last);
+ bkey_copy_key(r[i].k, last);
+ }
+
+ memcpy(end(n1),
+ n2->start,
+ (void *) node(n2, keys) - (void *) n2->start);
+
+ n1->keys += keys;
+
+ memmove(n2->start,
+ node(n2, keys),
+ (void *) end(n2) - (void *) node(n2, keys));
+
+ n2->keys -= keys;
+
+ r[i].keys = n1->keys;
+ r[i - 1].keys = n2->keys;
+ }
+
+ btree_node_free(r->b, op);
+ up_write(&r->b->lock);
+
+ pr_debug("coalesced %u nodes", nodes);
+
+ gc->nodes--;
+ nodes--;
+
+ memmove(&r[0], &r[1], sizeof(struct gc_merge_info) * nodes);
+ memset(&r[nodes], 0, sizeof(struct gc_merge_info));
+}
+
+static int btree_gc_recurse(struct btree *b, struct btree_op *op,
+ struct closure *writes, struct gc_stat *gc)
+{
+ void write(struct btree *r)
+ {
+ if (!r->written)
+ bch_btree_write(r, true, op);
+ else if (btree_node_dirty(r)) {
+ BUG_ON(btree_current_write(r)->owner);
+ btree_current_write(r)->owner = writes;
+ closure_get(writes);
+
+ bch_btree_write(r, true, NULL);
+ }
+
+ up_write(&r->lock);
+ }
+
+ int ret = 0, stale;
+ unsigned i;
+ struct gc_merge_info r[GC_MERGE_NODES];
+
+ memset(r, 0, sizeof(r));
+
+ while ((r->k = bch_next_recurse_key(b, &b->c->gc_done))) {
+ r->b = bch_btree_node_get(b->c, r->k, b->level - 1, op);
+
+ if (IS_ERR(r->b)) {
+ ret = PTR_ERR(r->b);
+ break;
+ }
+
+ r->keys = 0;
+ stale = btree_gc_mark_node(r->b, &r->keys, gc);
+
+ if (!b->written &&
+ (r->b->level || stale > 10 ||
+ b->c->gc_always_rewrite))
+ r->b = btree_gc_alloc(r->b, r->k, op);
+
+ if (r->b->level)
+ ret = btree_gc_recurse(r->b, op, writes, gc);
+
+ if (ret) {
+ write(r->b);
+ break;
+ }
+
+ bkey_copy_key(&b->c->gc_done, r->k);
+
+ if (!b->written)
+ btree_gc_coalesce(b, op, gc, r);
+
+ if (r[GC_MERGE_NODES - 1].b)
+ write(r[GC_MERGE_NODES - 1].b);
+
+ memmove(&r[1], &r[0],
+ sizeof(struct gc_merge_info) * (GC_MERGE_NODES - 1));
+
+ /* When we've got incremental GC working, we'll want to do
+ * if (should_resched())
+ * return -EAGAIN;
+ */
+ cond_resched();
+#if 0
+ if (need_resched()) {
+ ret = -EAGAIN;
+ break;
+ }
+#endif
+ }
+
+ for (i = 1; i < GC_MERGE_NODES && r[i].b; i++)
+ write(r[i].b);
+
+ /* Might have freed some children, must remove their keys */
+ if (!b->written)
+ bch_btree_sort(b);
+
+ return ret;
+}
+
+static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
+ struct closure *writes, struct gc_stat *gc)
+{
+ struct btree *n = NULL;
+ unsigned keys = 0;
+ int ret = 0, stale = btree_gc_mark_node(b, &keys, gc);
+
+ if (b->level || stale > 10)
+ n = btree_node_alloc_replacement(b, NULL);
+
+ if (!IS_ERR_OR_NULL(n))
+ swap(b, n);
+
+ if (b->level)
+ ret = btree_gc_recurse(b, op, writes, gc);
+
+ if (!b->written || btree_node_dirty(b)) {
+ atomic_inc(&b->c->prio_blocked);
+ b->prio_blocked++;
+ bch_btree_write(b, true, n ? op : NULL);
+ }
+
+ if (!IS_ERR_OR_NULL(n)) {
+ closure_sync(&op->cl);
+ bch_btree_set_root(b);
+ btree_node_free(n, op);
+ rw_unlock(true, b);
+ }
+
+ return ret;
+}
+
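+/*
+ * Snapshot each bucket's generation into gc_gen and reset the GC marks so the
+ * marking pass starts from a clean slate; per-device dirty sector counters are
+ * zeroed as well.
+ */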
+static void btree_gc_start(struct cache_set *c)
+{
+ struct cache *ca;
+ struct bucket *b;
+ struct bcache_device **d;
+ unsigned i;
+
+ if (!c->gc_mark_valid)
+ return;
+
+ mutex_lock(&c->bucket_lock);
+
+ c->gc_mark_valid = 0;
+ c->gc_done = ZERO_KEY;
+
+ for_each_cache(ca, c, i)
+ for_each_bucket(b, ca) {
+ b->gc_gen = b->gen;
+ if (!atomic_read(&b->pin))
+ SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+ }
+
+ for (d = c->devices;
+ d < c->devices + c->nr_uuids;
+ d++)
+ if (*d)
+ (*d)->sectors_dirty_gc = 0;
+
+ mutex_unlock(&c->bucket_lock);
+}
+
+size_t bch_btree_gc_finish(struct cache_set *c)
+{
+ size_t available = 0;
+ struct bucket *b;
+ struct cache *ca;
+ struct bcache_device **d;
+ unsigned i;
+
+ mutex_lock(&c->bucket_lock);
+
+ set_gc_sectors(c);
+ c->gc_mark_valid = 1;
+ c->need_gc = 0;
+
+ if (c->root)
+ for (i = 0; i < KEY_PTRS(&c->root->key); i++)
+ SET_GC_MARK(PTR_BUCKET(c, &c->root->key, i),
+ GC_MARK_METADATA);
+
+ for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
+ SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
+ GC_MARK_METADATA);
+
+ for_each_cache(ca, c, i) {
+ uint64_t *i;
+
+ ca->invalidate_needs_gc = 0;
+
+ for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
+ SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
+
+ for (i = ca->prio_buckets;
+ i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
+ SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
+
+ for_each_bucket(b, ca) {
+ b->last_gc = b->gc_gen;
+ c->need_gc = max(c->need_gc, bucket_gc_gen(b));
+
+ if (!atomic_read(&b->pin) &&
+ GC_MARK(b) == GC_MARK_RECLAIMABLE) {
+ available++;
+ if (!GC_SECTORS_USED(b))
+ bch_bucket_add_unused(ca, b);
+ }
+ }
+ }
+
+ for (d = c->devices;
+ d < c->devices + c->nr_uuids;
+ d++)
+ if (*d) {
+ unsigned long last =
+ atomic_long_read(&((*d)->sectors_dirty));
+ long difference = (*d)->sectors_dirty_gc - last;
+
+ pr_debug("sectors dirty off by %li", difference);
+
+ (*d)->sectors_dirty_last += difference;
+
+ atomic_long_set(&((*d)->sectors_dirty),
+ (*d)->sectors_dirty_gc);
+ }
+
+ mutex_unlock(&c->bucket_lock);
+ return available;
+}
+
+static void bch_btree_gc(struct closure *cl)
+{
+ struct cache_set *c = container_of(cl, struct cache_set, gc.cl);
+ int ret;
+ unsigned long available;
+ struct gc_stat stats;
+ struct closure writes;
+ struct btree_op op;
+
+ uint64_t start_time = local_clock();
+ trace_bcache_gc_start(c->sb.set_uuid);
+ blktrace_msg_all(c, "Starting gc");
+
+ memset(&stats, 0, sizeof(struct gc_stat));
+ closure_init_stack(&writes);
+ bch_btree_op_init_stack(&op);
+ op.lock = SHRT_MAX;
+
+ btree_gc_start(c);
+
+ ret = btree_root(gc_root, c, &op, &writes, &stats);
+ closure_sync(&op.cl);
+ closure_sync(&writes);
+
+ if (ret) {
+ blktrace_msg_all(c, "Stopped gc");
+ pr_warn("gc failed!");
+
+ continue_at(cl, bch_btree_gc, bch_gc_wq);
+ }
+
+ /* Possibly wait for new UUIDs or whatever to hit disk */
+ bch_journal_meta(c, &op.cl);
+ closure_sync(&op.cl);
+
+ available = bch_btree_gc_finish(c);
+
+ bch_time_stats_update(&c->btree_gc_time, start_time);
+
+ stats.key_bytes *= sizeof(uint64_t);
+ stats.dirty <<= 9;
+ stats.data <<= 9;
+ stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets;
+ memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
+ blktrace_msg_all(c, "Finished gc");
+
+ trace_bcache_gc_end(c->sb.set_uuid);
+ wake_up(&c->alloc_wait);
+
+ continue_at(cl, bch_moving_gc, bch_gc_wq);
+}
+
+void bch_queue_gc(struct cache_set *c)
+{
+ closure_trylock_call(&c->gc.cl, bch_btree_gc, bch_gc_wq, &c->cl);
+}
+
+/* Initial partial gc */
+
+static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
+ unsigned long **seen)
+{
+ int ret;
+ unsigned i;
+ struct bkey *k;
+ struct bucket *g;
+ struct btree_iter iter;
+
+ for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
+ for (i = 0; i < KEY_PTRS(k); i++) {
+ if (!ptr_available(b->c, k, i))
+ continue;
+
+ g = PTR_BUCKET(b->c, k, i);
+
+ if (!__test_and_set_bit(PTR_BUCKET_NR(b->c, k, i),
+ seen[PTR_DEV(k, i)]) ||
+ !ptr_stale(b->c, k, i)) {
+ g->gen = PTR_GEN(k, i);
+
+ if (b->level)
+ g->prio = BTREE_PRIO;
+ else if (g->prio == BTREE_PRIO)
+ g->prio = INITIAL_PRIO;
+ }
+ }
+
+ btree_mark_key(b, k);
+ }
+
+ if (b->level) {
+ k = bch_next_recurse_key(b, &ZERO_KEY);
+
+ while (k) {
+ struct bkey *p = bch_next_recurse_key(b, k);
+ if (p)
+ btree_node_prefetch(b->c, p, b->level - 1);
+
+ ret = btree(check_recurse, k, b, op, seen);
+ if (ret)
+ return ret;
+
+ k = p;
+ }
+ }
+
+ return 0;
+}
+
+int bch_btree_check(struct cache_set *c, struct btree_op *op)
+{
+ int ret = -ENOMEM;
+ unsigned i;
+ unsigned long *seen[MAX_CACHES_PER_SET];
+
+ memset(seen, 0, sizeof(seen));
+
+ for (i = 0; c->cache[i]; i++) {
+ size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8);
+ seen[i] = kmalloc(n, GFP_KERNEL);
+ if (!seen[i])
+ goto err;
+
+ /* Disables the seen array until prio_read() uses it too */
+ memset(seen[i], 0xFF, n);
+ }
+
+ ret = btree_root(check_recurse, c, op, seen);
+err:
+ for (i = 0; i < MAX_CACHES_PER_SET; i++)
+ kfree(seen[i]);
+ return ret;
+}
+
+/* Btree insertion */
+
+static void shift_keys(struct btree *b, struct bkey *where, struct bkey *insert)
+{
+ struct bset *i = b->sets[b->nsets].data;
+
+ memmove((uint64_t *) where + bkey_u64s(insert),
+ where,
+ (void *) end(i) - (void *) where);
+
+ i->keys += bkey_u64s(insert);
+ bkey_copy(where, insert);
+ bch_bset_fix_lookup_table(b, where);
+}
+
+static bool fix_overlapping_extents(struct btree *b,
+ struct bkey *insert,
+ struct btree_iter *iter,
+ struct btree_op *op)
+{
+ void subtract_dirty(struct bkey *k, int sectors)
+ {
+ struct bcache_device *d = b->c->devices[KEY_INODE(k)];
+
+ if (KEY_DIRTY(k) && d)
+ atomic_long_sub(sectors, &d->sectors_dirty);
+ }
+
+ unsigned old_size, sectors_found = 0;
+
+ while (1) {
+ struct bkey *k = bch_btree_iter_next(iter);
+ if (!k ||
+ bkey_cmp(&START_KEY(k), insert) >= 0)
+ break;
+
+ if (bkey_cmp(k, &START_KEY(insert)) <= 0)
+ continue;
+
+ old_size = KEY_SIZE(k);
+
+ /*
+ * We might overlap with 0 size extents; we can't skip these
+ * because if they're in the set we're inserting to we have to
+ * adjust them so they don't overlap with the key we're
+ * inserting. But we don't want to check them for BTREE_REPLACE
+ * operations.
+ */
+
+ if (op->type == BTREE_REPLACE &&
+ KEY_SIZE(k)) {
+ /*
+ * k might have been split since we inserted/found the
+ * key we're replacing
+ */
+ unsigned i;
+ uint64_t offset = KEY_START(k) -
+ KEY_START(&op->replace);
+
+ /* But it must be a subset of the replace key */
+ if (KEY_START(k) < KEY_START(&op->replace) ||
+ KEY_OFFSET(k) > KEY_OFFSET(&op->replace))
+ goto check_failed;
+
+ /* We didn't find a key that we were supposed to */
+ if (KEY_START(k) > KEY_START(insert) + sectors_found)
+ goto check_failed;
+
+ if (KEY_PTRS(&op->replace) != KEY_PTRS(k))
+ goto check_failed;
+
+ /* skip past gen */
+ offset <<= 8;
+
+ BUG_ON(!KEY_PTRS(&op->replace));
+
+ for (i = 0; i < KEY_PTRS(&op->replace); i++)
+ if (k->ptr[i] != op->replace.ptr[i] + offset)
+ goto check_failed;
+
+ sectors_found = KEY_OFFSET(k) - KEY_START(insert);
+ }
+
+ if (bkey_cmp(insert, k) < 0 &&
+ bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
+ /*
+ * We overlapped in the middle of an existing key: that
+ * means we have to split the old key. But we have to do
+ * slightly different things depending on whether the
+ * old key has been written out yet.
+ */
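+ /*
+ * Schematically (k is the existing key, insert the new one):
+ *
+ *   before:  |-------------------- k --------------------|
+ *   insert:            |-------- insert --------|
+ *   after:   |--- k ---|                        |-- top -|
+ */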
+
+ struct bkey *top;
+
+ subtract_dirty(k, KEY_SIZE(insert));
+
+ if (bkey_written(b, k)) {
+ /*
+ * We insert a new key to cover the top of the
+ * old key, and the old key is modified in place
+ * to represent the bottom split.
+ *
+ * It's completely arbitrary whether the new key
+ * is the top or the bottom, but it has to match
+ * up with what btree_sort_fixup() does - it
+ * doesn't check for this kind of overlap, it
+ * depends on us inserting a new key for the top
+ * here.
+ */
+ top = bch_bset_search(b, &b->sets[b->nsets],
+ insert);
+ shift_keys(b, top, k);
+ } else {
+ BKEY_PADDED(key) temp;
+ bkey_copy(&temp.key, k);
+ shift_keys(b, k, &temp.key);
+ top = bkey_next(k);
+ }
+
+ bch_cut_front(insert, top);
+ bch_cut_back(&START_KEY(insert), k);
+ bch_bset_fix_invalidated_key(b, k);
+ return false;
+ }
+
+ if (bkey_cmp(insert, k) < 0) {
+ bch_cut_front(insert, k);
+ } else {
+ if (bkey_written(b, k) &&
+ bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
+ /*
+ * Completely overwrote, so we don't have to
+ * invalidate the binary search tree
+ */
+ bch_cut_front(k, k);
+ } else {
+ __bch_cut_back(&START_KEY(insert), k);
+ bch_bset_fix_invalidated_key(b, k);
+ }
+ }
+
+ subtract_dirty(k, old_size - KEY_SIZE(k));
+ }
+
+check_failed:
+ if (op->type == BTREE_REPLACE) {
+ if (!sectors_found) {
+ op->insert_collision = true;
+ return true;
+ } else if (sectors_found < KEY_SIZE(insert)) {
+ SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
+ (KEY_SIZE(insert) - sectors_found));
+ SET_KEY_SIZE(insert, sectors_found);
+ }
+ }
+
+ return false;
+}
+
+static bool btree_insert_key(struct btree *b, struct btree_op *op,
+ struct bkey *k)
+{
+ struct bset *i = b->sets[b->nsets].data;
+ struct bkey *m, *prev;
+ const char *status = "insert";
+
+ BUG_ON(bkey_cmp(k, &b->key) > 0);
+ BUG_ON(b->level && !KEY_PTRS(k));
+ BUG_ON(!b->level && !KEY_OFFSET(k));
+
+ if (!b->level) {
+ struct btree_iter iter;
+ struct bkey search = KEY(KEY_INODE(k), KEY_START(k), 0);
+
+ /*
+ * bset_search() returns the first key that is strictly greater
+ * than the search key - but for back merging, we want to find
+ * the first key that is greater than or equal to KEY_START(k) -
+ * unless KEY_START(k) is 0.
+ */
+ if (KEY_OFFSET(&search))
+ SET_KEY_OFFSET(&search, KEY_OFFSET(&search) - 1);
+
+ prev = NULL;
+ m = bch_btree_iter_init(b, &iter, &search);
+
+ if (fix_overlapping_extents(b, k, &iter, op))
+ return false;
+
+ while (m != end(i) &&
+ bkey_cmp(k, &START_KEY(m)) > 0)
+ prev = m, m = bkey_next(m);
+
+ if (key_merging_disabled(b->c))
+ goto insert;
+
+ /* prev is in the tree, if we merge we're done */
+ status = "back merging";
+ if (prev &&
+ bch_bkey_try_merge(b, prev, k))
+ goto merged;
+
+ status = "overwrote front";
+ if (m != end(i) &&
+ KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
+ goto copy;
+
+ status = "front merge";
+ if (m != end(i) &&
+ bch_bkey_try_merge(b, k, m))
+ goto copy;
+ } else
+ m = bch_bset_search(b, &b->sets[b->nsets], k);
+
+insert: shift_keys(b, m, k);
+copy: bkey_copy(m, k);
+merged:
+ bch_check_keys(b, "%s for %s at %s: %s", status,
+ op_type(op), pbtree(b), pkey(k));
+ bch_check_key_order_msg(b, i, "%s for %s at %s: %s", status,
+ op_type(op), pbtree(b), pkey(k));
+
+ if (b->level && !KEY_OFFSET(k))
+ b->prio_blocked++;
+
+ pr_debug("%s for %s at %s: %s", status,
+ op_type(op), pbtree(b), pkey(k));
+
+ return true;
+}
+
+bool bch_btree_insert_keys(struct btree *b, struct btree_op *op)
+{
+ bool ret = false;
+ struct bkey *k;
+ unsigned oldsize = bch_count_data(b);
+
+ while ((k = bch_keylist_pop(&op->keys))) {
+ bkey_put(b->c, k, b->level);
+ ret |= btree_insert_key(b, op, k);
+ }
+
+ BUG_ON(bch_count_data(b) < oldsize);
+ return ret;
+}
+
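+/*
+ * Insert a placeholder "check key" covering the range of @bio into a leaf
+ * node: a single pointer aimed at PTR_CHECK_DEV with random contents, used so
+ * a later BTREE_REPLACE insert can tell whether anything else touched this
+ * range in the meantime. The node's read lock is traded for a write lock;
+ * returns false if the node changed or would need to split while it was
+ * unlocked.
+ */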
+bool bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
+ struct bio *bio)
+{
+ bool ret = false;
+ uint64_t btree_ptr = b->key.ptr[0];
+ unsigned long seq = b->seq;
+ BKEY_PADDED(k) tmp;
+
+ rw_unlock(false, b);
+ rw_lock(true, b, b->level);
+
+ if (b->key.ptr[0] != btree_ptr ||
+ b->seq != seq + 1 ||
+ should_split(b))
+ goto out;
+
+ op->replace = KEY(op->inode, bio_end(bio), bio_sectors(bio));
+
+ SET_KEY_PTRS(&op->replace, 1);
+ get_random_bytes(&op->replace.ptr[0], sizeof(uint64_t));
+
+ SET_PTR_DEV(&op->replace, 0, PTR_CHECK_DEV);
+
+ bkey_copy(&tmp.k, &op->replace);
+
+ BUG_ON(op->type != BTREE_INSERT);
+ BUG_ON(!btree_insert_key(b, op, &tmp.k));
+ bch_btree_write(b, false, NULL);
+ ret = true;
+out:
+ downgrade_write(&b->lock);
+ return ret;
+}
+
+static int btree_split(struct btree *b, struct btree_op *op)
+{
+ bool split, root = b == b->c->root;
+ struct btree *n1, *n2 = NULL, *n3 = NULL;
+ uint64_t start_time = local_clock();
+
+ if (b->level)
+ set_closure_blocking(&op->cl);
+
+ n1 = btree_node_alloc_replacement(b, &op->cl);
+ if (IS_ERR(n1))
+ goto err;
+
+ split = set_blocks(n1->sets[0].data, n1->c) > (btree_blocks(b) * 4) / 5;
+
+ pr_debug("%ssplitting at %s keys %i", split ? "" : "not ",
+ pbtree(b), n1->sets[0].data->keys);
+
+ if (split) {
+ unsigned keys = 0;
+
+ n2 = bch_btree_node_alloc(b->c, b->level, &op->cl);
+ if (IS_ERR(n2))
+ goto err_free1;
+
+ if (root) {
+ n3 = bch_btree_node_alloc(b->c, b->level + 1, &op->cl);
+ if (IS_ERR(n3))
+ goto err_free2;
+ }
+
+ bch_btree_insert_keys(n1, op);
+
+ /* Has to be a linear search because we don't have an auxiliary
+ * search tree yet
+ */
+
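+ /*
+ * n1 keeps roughly the first 3/5 of the keys; the rest are copied
+ * into n2.
+ */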
+ while (keys < (n1->sets[0].data->keys * 3) / 5)
+ keys += bkey_u64s(node(n1->sets[0].data, keys));
+
+ bkey_copy_key(&n1->key, node(n1->sets[0].data, keys));
+ keys += bkey_u64s(node(n1->sets[0].data, keys));
+
+ n2->sets[0].data->keys = n1->sets[0].data->keys - keys;
+ n1->sets[0].data->keys = keys;
+
+ memcpy(n2->sets[0].data->start,
+ end(n1->sets[0].data),
+ n2->sets[0].data->keys * sizeof(uint64_t));
+
+ bkey_copy_key(&n2->key, &b->key);
+
+ bch_keylist_add(&op->keys, &n2->key);
+ bch_btree_write(n2, true, op);
+ rw_unlock(true, n2);
+ } else
+ bch_btree_insert_keys(n1, op);
+
+ bch_keylist_add(&op->keys, &n1->key);
+ bch_btree_write(n1, true, op);
+
+ if (n3) {
+ bkey_copy_key(&n3->key, &MAX_KEY);
+ bch_btree_insert_keys(n3, op);
+ bch_btree_write(n3, true, op);
+
+ closure_sync(&op->cl);
+ bch_btree_set_root(n3);
+ rw_unlock(true, n3);
+ } else if (root) {
+ op->keys.top = op->keys.bottom;
+ closure_sync(&op->cl);
+ bch_btree_set_root(n1);
+ } else {
+ unsigned i;
+
+ bkey_copy(op->keys.top, &b->key);
+ bkey_copy_key(op->keys.top, &ZERO_KEY);
+
+ for (i = 0; i < KEY_PTRS(&b->key); i++) {
+ uint8_t g = PTR_BUCKET(b->c, &b->key, i)->gen + 1;
+
+ SET_PTR_GEN(op->keys.top, i, g);
+ }
+
+ bch_keylist_push(&op->keys);
+ closure_sync(&op->cl);
+ atomic_inc(&b->c->prio_blocked);
+ }
+
+ rw_unlock(true, n1);
+ btree_node_free(b, op);
+
+ bch_time_stats_update(&b->c->btree_split_time, start_time);
+
+ return 0;
+err_free2:
+ __bkey_put(n2->c, &n2->key);
+ btree_node_free(n2, op);
+ rw_unlock(true, n2);
+err_free1:
+ __bkey_put(n1->c, &n1->key);
+ btree_node_free(n1, op);
+ rw_unlock(true, n1);
+err:
+ if (n3 == ERR_PTR(-EAGAIN) ||
+ n2 == ERR_PTR(-EAGAIN) ||
+ n1 == ERR_PTR(-EAGAIN))
+ return -EAGAIN;
+
+ pr_warn("couldn't split");
+ return -ENOMEM;
+}
+
+static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
+ struct keylist *stack_keys)
+{
+ if (b->level) {
+ int ret;
+ struct bkey *insert = op->keys.bottom;
+ struct bkey *k = bch_next_recurse_key(b, &START_KEY(insert));
+
+ if (!k) {
+ btree_bug(b, "no key to recurse on at level %i/%i",
+ b->level, b->c->root->level);
+
+ op->keys.top = op->keys.bottom;
+ return -EIO;
+ }
+
+ if (bkey_cmp(insert, k) > 0) {
+ unsigned i;
+
+ if (op->type == BTREE_REPLACE) {
+ __bkey_put(b->c, insert);
+ op->keys.top = op->keys.bottom;
+ op->insert_collision = true;
+ return 0;
+ }
+
+ for (i = 0; i < KEY_PTRS(insert); i++)
+ atomic_inc(&PTR_BUCKET(b->c, insert, i)->pin);
+
+ bkey_copy(stack_keys->top, insert);
+
+ bch_cut_back(k, insert);
+ bch_cut_front(k, stack_keys->top);
+
+ bch_keylist_push(stack_keys);
+ }
+
+ ret = btree(insert_recurse, k, b, op, stack_keys);
+ if (ret)
+ return ret;
+ }
+
+ if (!bch_keylist_empty(&op->keys)) {
+ if (should_split(b)) {
+ if (op->lock <= b->c->root->level) {
+ BUG_ON(b->level);
+ op->lock = b->c->root->level + 1;
+ return -EINTR;
+ }
+ return btree_split(b, op);
+ }
+
+ BUG_ON(write_block(b) != b->sets[b->nsets].data);
+
+ if (bch_btree_insert_keys(b, op))
+ bch_btree_write(b, false, op);
+ }
+
+ return 0;
+}
+
+int bch_btree_insert(struct btree_op *op, struct cache_set *c)
+{
+ int ret = 0;
+ struct keylist stack_keys;
+
+ /*
+ * Don't want to block with the btree locked unless we have to,
+ * otherwise we get deadlocks with try_harder and between split/gc
+ */
+ clear_closure_blocking(&op->cl);
+
+ BUG_ON(bch_keylist_empty(&op->keys));
+ bch_keylist_copy(&stack_keys, &op->keys);
+ bch_keylist_init(&op->keys);
+
+ while (!bch_keylist_empty(&stack_keys) ||
+ !bch_keylist_empty(&op->keys)) {
+ if (bch_keylist_empty(&op->keys)) {
+ bch_keylist_add(&op->keys,
+ bch_keylist_pop(&stack_keys));
+ op->lock = 0;
+ }
+
+ ret = btree_root(insert_recurse, c, op, &stack_keys);
+
+ if (ret == -EAGAIN) {
+ ret = 0;
+ closure_sync(&op->cl);
+ } else if (ret) {
+ struct bkey *k;
+
+ pr_err("error %i trying to insert key for %s",
+ ret, op_type(op));
+
+ while ((k = bch_keylist_pop(&stack_keys) ?:
+ bch_keylist_pop(&op->keys)))
+ bkey_put(c, k, 0);
+ }
+ }
+
+ bch_keylist_free(&stack_keys);
+
+ if (op->journal)
+ atomic_dec_bug(op->journal);
+ op->journal = NULL;
+ return ret;
+}
+
+void bch_btree_set_root(struct btree *b)
+{
+ unsigned i;
+
+ BUG_ON(!b->written);
+
+ for (i = 0; i < KEY_PTRS(&b->key); i++)
+ BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
+
+ mutex_lock(&b->c->bucket_lock);
+ list_del_init(&b->list);
+ mutex_unlock(&b->c->bucket_lock);
+
+ b->c->root = b;
+ __bkey_put(b->c, &b->key);
+
+ bch_journal_meta(b->c, NULL);
+ pr_debug("%s for %pf", pbtree(b), __builtin_return_address(0));
+}
+
+/* Cache lookup */
+
+static int submit_partial_cache_miss(struct btree *b, struct btree_op *op,
+ struct bkey *k)
+{
+ struct search *s = container_of(op, struct search, op);
+ struct bio *bio = &s->bio.bio;
+ int ret = 0;
+
+ while (!ret &&
+ !op->lookup_done) {
+ unsigned sectors = INT_MAX;
+
+ if (KEY_INODE(k) == op->inode) {
+ if (KEY_START(k) <= bio->bi_sector)
+ break;
+
+ sectors = min_t(uint64_t, sectors,
+ KEY_START(k) - bio->bi_sector);
+ }
+
+ ret = s->d->cache_miss(b, s, bio, sectors);
+ }
+
+ return ret;
+}
+
+/*
+ * Read from a single key, handling the initial cache miss if the key starts in
+ * the middle of the bio
+ */
+static int submit_partial_cache_hit(struct btree *b, struct btree_op *op,
+ struct bkey *k)
+{
+ struct search *s = container_of(op, struct search, op);
+ struct bio *bio = &s->bio.bio;
+ unsigned ptr;
+ struct bio *n;
+
+ int ret = submit_partial_cache_miss(b, op, k);
+ if (ret || op->lookup_done)
+ return ret;
+
+ /* XXX: figure out best pointer - for multiple cache devices */
+ ptr = 0;
+
+ PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
+
+ while (!op->lookup_done &&
+ KEY_INODE(k) == op->inode &&
+ bio->bi_sector < KEY_OFFSET(k)) {
+ struct bkey *bio_key;
+ sector_t sector = PTR_OFFSET(k, ptr) +
+ (bio->bi_sector - KEY_START(k));
+ unsigned sectors = min_t(uint64_t, INT_MAX,
+ KEY_OFFSET(k) - bio->bi_sector);
+
+ n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+ if (!n)
+ return -EAGAIN;
+
+ if (n == bio)
+ op->lookup_done = true;
+
+ bio_key = &container_of(n, struct bbio, bio)->key;
+
+ /*
+ * The bucket we're reading from might be reused while our bio
+ * is in flight, and we could then end up reading the wrong
+ * data.
+ *
+ * We guard against this by checking (in cache_read_endio()) if
+ * the pointer is stale again; if so, we treat it as an error
+ * and reread from the backing device (but we don't pass that
+ * error up anywhere).
+ */
+
+ bch_bkey_copy_single_ptr(bio_key, k, ptr);
+ SET_PTR_OFFSET(bio_key, 0, sector);
+
+ n->bi_end_io = bch_cache_read_endio;
+ n->bi_private = &s->cl;
+
+ trace_bcache_cache_hit(n);
+ __bch_submit_bbio(n, b->c);
+ }
+
+ return 0;
+}
+
+int bch_btree_search_recurse(struct btree *b, struct btree_op *op)
+{
+ struct search *s = container_of(op, struct search, op);
+ struct bio *bio = &s->bio.bio;
+
+ int ret = 0;
+ struct bkey *k;
+ struct btree_iter iter;
+ bch_btree_iter_init(b, &iter, &KEY(op->inode, bio->bi_sector, 0));
+
+ pr_debug("at %s searching for %u:%llu", pbtree(b), op->inode,
+ (uint64_t) bio->bi_sector);
+
+ do {
+ k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
+ if (!k) {
+ /*
+ * b->key would be exactly what we want, except that
+ * pointers to btree nodes have nonzero size - we
+ * wouldn't go far enough
+ */
+
+ ret = submit_partial_cache_miss(b, op,
+ &KEY(KEY_INODE(&b->key),
+ KEY_OFFSET(&b->key), 0));
+ break;
+ }
+
+ ret = b->level
+ ? btree(search_recurse, k, b, op)
+ : submit_partial_cache_hit(b, op, k);
+ } while (!ret &&
+ !op->lookup_done);
+
+ return ret;
+}
+
+/* Keybuf code */
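+
+/*
+ * A keybuf holds a window of keys found by scanning the btree with a
+ * caller-supplied predicate: bch_refill_keybuf() fills it starting from
+ * last_scanned, bch_keybuf_next() hands out the next unclaimed key, and
+ * bch_keybuf_del() drops a key once the caller is done with it.
+ */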
+
+static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
+{
+ /* Overlapping keys compare equal */
+ if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
+ return -1;
+ if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
+ return 1;
+ return 0;
+}
+
+static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
+ struct keybuf_key *r)
+{
+ return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
+}
+
+static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op,
+ struct keybuf *buf, struct bkey *end)
+{
+ struct btree_iter iter;
+ bch_btree_iter_init(b, &iter, &buf->last_scanned);
+
+ while (!array_freelist_empty(&buf->freelist)) {
+ struct bkey *k = bch_btree_iter_next_filter(&iter, b,
+ bch_ptr_bad);
+
+ if (!b->level) {
+ if (!k) {
+ buf->last_scanned = b->key;
+ break;
+ }
+
+ buf->last_scanned = *k;
+ if (bkey_cmp(&buf->last_scanned, end) >= 0)
+ break;
+
+ if (buf->key_predicate(buf, k)) {
+ struct keybuf_key *w;
+
+ pr_debug("%s", pkey(k));
+
+ spin_lock(&buf->lock);
+
+ w = array_alloc(&buf->freelist);
+
+ w->private = NULL;
+ bkey_copy(&w->key, k);
+
+ if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
+ array_free(&buf->freelist, w);
+
+ spin_unlock(&buf->lock);
+ }
+ } else {
+ if (!k)
+ break;
+
+ btree(refill_keybuf, k, b, op, buf, end);
+ /*
+ * Might get an error here, but can't really do anything
+ * and it'll get logged elsewhere. Just read what we
+ * can.
+ */
+
+ if (bkey_cmp(&buf->last_scanned, end) >= 0)
+ break;
+
+ cond_resched();
+ }
+ }
+
+ return 0;
+}
+
+void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
+ struct bkey *end)
+{
+ struct bkey start = buf->last_scanned;
+ struct btree_op op;
+ bch_btree_op_init_stack(&op);
+
+ cond_resched();
+
+ btree_root(refill_keybuf, c, &op, buf, end);
+ closure_sync(&op.cl);
+
+ pr_debug("found %s keys from %llu:%llu to %llu:%llu",
+ RB_EMPTY_ROOT(&buf->keys) ? "no" :
+ array_freelist_empty(&buf->freelist) ? "some" : "a few",
+ KEY_INODE(&start), KEY_OFFSET(&start),
+ KEY_INODE(&buf->last_scanned), KEY_OFFSET(&buf->last_scanned));
+
+ spin_lock(&buf->lock);
+
+ if (!RB_EMPTY_ROOT(&buf->keys)) {
+ struct keybuf_key *w;
+ w = RB_FIRST(&buf->keys, struct keybuf_key, node);
+ buf->start = START_KEY(&w->key);
+
+ w = RB_LAST(&buf->keys, struct keybuf_key, node);
+ buf->end = w->key;
+ } else {
+ buf->start = MAX_KEY;
+ buf->end = MAX_KEY;
+ }
+
+ spin_unlock(&buf->lock);
+}
+
+static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
+{
+ rb_erase(&w->node, &buf->keys);
+ array_free(&buf->freelist, w);
+}
+
+void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
+{
+ spin_lock(&buf->lock);
+ __bch_keybuf_del(buf, w);
+ spin_unlock(&buf->lock);
+}
+
+bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
+ struct bkey *end)
+{
+ bool ret = false;
+ struct keybuf_key *p, *w, s;
+ s.key = *start;
+
+ if (bkey_cmp(end, &buf->start) <= 0 ||
+ bkey_cmp(start, &buf->end) >= 0)
+ return false;
+
+ spin_lock(&buf->lock);
+ w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
+
+ while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
+ p = w;
+ w = RB_NEXT(w, node);
+
+ if (p->private)
+ ret = true;
+ else
+ __bch_keybuf_del(buf, p);
+ }
+
+ spin_unlock(&buf->lock);
+ return ret;
+}
+
+struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
+{
+ struct keybuf_key *w;
+ spin_lock(&buf->lock);
+
+ w = RB_FIRST(&buf->keys, struct keybuf_key, node);
+
+ while (w && w->private)
+ w = RB_NEXT(w, node);
+
+ if (w)
+ w->private = ERR_PTR(-EINTR);
+
+ spin_unlock(&buf->lock);
+ return w;
+}
+
+struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
+ struct keybuf *buf,
+ struct bkey *end)
+{
+ struct keybuf_key *ret;
+
+ while (1) {
+ ret = bch_keybuf_next(buf);
+ if (ret)
+ break;
+
+ if (bkey_cmp(&buf->last_scanned, end) >= 0) {
+ pr_debug("scan finished");
+ break;
+ }
+
+ bch_refill_keybuf(c, buf, end);
+ }
+
+ return ret;
+}
+
+void bch_keybuf_init(struct keybuf *buf, keybuf_pred_fn *fn)
+{
+ buf->key_predicate = fn;
+ buf->last_scanned = MAX_KEY;
+ buf->keys = RB_ROOT;
+
+ spin_lock_init(&buf->lock);
+ array_allocator_init(&buf->freelist);
+}
+
+void bch_btree_exit(void)
+{
+ if (btree_io_wq)
+ destroy_workqueue(btree_io_wq);
+ if (bch_gc_wq)
+ destroy_workqueue(bch_gc_wq);
+}
+
+int __init bch_btree_init(void)
+{
+ if (!(bch_gc_wq = create_singlethread_workqueue("bch_btree_gc")) ||
+ !(btree_io_wq = create_singlethread_workqueue("bch_btree_io")))
+ return -ENOMEM;
+
+ return 0;
+}
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
new file mode 100644
index 0000000..af4a709
--- /dev/null
+++ b/drivers/md/bcache/btree.h
@@ -0,0 +1,405 @@
+#ifndef _BCACHE_BTREE_H
+#define _BCACHE_BTREE_H
+
+/*
+ * THE BTREE:
+ *
+ * At a high level, bcache's btree is a relatively standard b+ tree. All keys and
+ * pointers are in the leaves; interior nodes only have pointers to the child
+ * nodes.
+ *
+ * In the interior nodes, a struct bkey always points to a child btree node, and
+ * the key is the highest key in the child node - except that the highest key in
+ * an interior node is always MAX_KEY. The size field refers to the size on disk
+ * of the child node - this would allow us to have variable sized btree nodes
+ * (handy for keeping the depth of the btree 1 by expanding just the root).
+ *
+ * Btree nodes are themselves log structured, but this is hidden fairly
+ * thoroughly. Btree nodes on disk will in practice have extents that overlap
+ * (because they were written at different times), but in memory we never have
+ * overlapping extents - when we read in a btree node from disk, the first thing
+ * we do is resort all the sets of keys with a mergesort, and in the same pass
+ * we check for overlapping extents and adjust them appropriately.
+ *
+ * struct btree_op is a central interface to the btree code. It's used for
+ * specifying read vs. write locking, and the embedded closure is used for
+ * waiting on IO or reserve memory.
+ *
+ * BTREE CACHE:
+ *
+ * Btree nodes are cached in memory; traversing the btree might require reading
+ * in btree nodes which is handled mostly transparently.
+ *
+ * bch_btree_node_get() looks up a btree node in the cache and reads it in from
+ * disk if necessary. This function is almost never called directly though - the
+ * btree() macro is used to get a btree node, call some function on it, and
+ * unlock the node after the function returns.
+ *
+ * The root is special cased - it's taken out of the cache's lru (thus pinning
+ * it in memory), so we can find the root of the btree by just dereferencing a
+ * pointer instead of looking it up in the cache. This makes locking a bit
+ * tricky, since the root pointer is protected by the lock in the btree node it
+ * points to - the btree_root() macro handles this.
+ *
+ * In various places we must be able to allocate memory for multiple btree nodes
+ * in order to make forward progress. To do this we use the btree cache itself
+ * as a reserve; if __get_free_pages() fails, we'll find a node in the btree
+ * cache we can reuse. We can't allow more than one thread to be doing this at a
+ * time, so there's a lock, implemented by a pointer to the btree_op closure -
+ * this allows the btree_root() macro to implicitly release this lock.
+ *
+ * BTREE IO:
+ *
+ * Btree nodes never have to be explicitly read in; bch_btree_node_get() handles
+ * this.
+ *
+ * For writing, we have two btree_write structs embedded in struct btree - one
+ * write in flight, and one being set up, and we toggle between them.
+ *
+ * Writing is done with a single function - bch_btree_write() really serves two
+ * different purposes and should be broken up into two different functions. When
+ * passing now = false, it merely indicates that the node is now dirty - calling
+ * it ensures that the dirty keys will be written at some point in the future.
+ *
+ * When passing now = true, bch_btree_write() causes a write to happen
+ * "immediately" (if there was already a write in flight, it'll cause the write
+ * to happen as soon as the previous write completes). It returns immediately
+ * though - but it takes a refcount on the closure in struct btree_op you passed
+ * to it, so a closure_sync() later can be used to wait for the write to
+ * complete.
+ *
+ * This is handy because btree_split() and garbage collection can issue writes
+ * in parallel, reducing the amount of time they have to hold write locks.
+ *
+ * LOCKING:
+ *
+ * When traversing the btree, we may need write locks starting at some level -
+ * inserting a key into the btree will typically only require a write lock on
+ * the leaf node.
+ *
+ * This is specified with the lock field in struct btree_op; lock = 0 means we
+ * take write locks at level <= 0, i.e. only leaf nodes. bch_btree_node_get()
+ * checks this field and returns the node with the appropriate lock held.
+ *
+ * If, after traversing the btree, the insertion code discovers it has to split
+ * then it must restart from the root and take new locks - to do this it changes
+ * the lock field and returns -EINTR, which causes the btree_root() macro to
+ * loop.
+ *
+ * Handling cache misses requires a different mechanism for upgrading to a write
+ * lock. We do cache lookups with only a read lock held, but if we get a cache
+ * miss and we wish to insert this data into the cache, we have to insert a
+ * placeholder key to detect races - otherwise, we could race with a write and
+ * overwrite the data that was just written to the cache with stale data from
+ * the backing device.
+ *
+ * For this we use a sequence number that write locks and unlocks increment - to
+ * insert the check key it unlocks the btree node and then takes a write lock,
+ * and fails if the sequence number doesn't match.
+ */
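+
+/*
+ * A minimal sketch of the two bch_btree_write() modes described under BTREE IO
+ * above; illustrative only, actual call sites differ:
+ *
+ *	bch_btree_write(b, false, op);	// just mark the node dirty; the
+ *					// keys get flushed at some later point
+ *
+ *	bch_btree_write(b, true, op);	// kick off a write now...
+ *	closure_sync(&op->cl);		// ...and wait for it to complete
+ */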
+
+#include "bset.h"
+#include "debug.h"
+
+struct btree_write {
+ struct closure *owner;
+ atomic_t *journal;
+
+ /* If btree_split() frees a btree node, it writes a new pointer to that
+ * btree node indicating it was freed; it takes a refcount on
+ * c->prio_blocked because we can't write the gens until the new
+ * pointer is on disk. This allows btree_write_endio() to release the
+ * refcount that btree_split() took.
+ */
+ int prio_blocked;
+};
+
+struct btree {
+ /* Hottest entries first */
+ struct hlist_node hash;
+
+ /* Key/pointer for this btree node */
+ BKEY_PADDED(key);
+
+ /* Single bit - set when accessed, cleared by shrinker */
+ unsigned long accessed;
+ unsigned long seq;
+ struct rw_semaphore lock;
+ struct cache_set *c;
+
+ unsigned long flags;
+ uint16_t written; /* would be nice to kill */
+ uint8_t level;
+ uint8_t nsets;
+ uint8_t page_order;
+
+ /*
+ * Set of sorted keys - the real btree node - plus a binary search tree
+ *
+ * sets[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
+ * to the memory we have allocated for this btree node. Additionally,
+ * set[0]->data points to the entire btree node as it exists on disk.
+ */
+ struct bset_tree sets[MAX_BSETS];
+
+ /* Used to refcount bio splits, also protects b->bio */
+ struct closure_with_waitlist io;
+
+ /* Gets transferred to w->prio_blocked - see the comment there */
+ int prio_blocked;
+
+ struct list_head list;
+ struct delayed_work work;
+
+ uint64_t io_start_time;
+ struct btree_write writes[2];
+ struct bio *bio;
+};
+
+#define BTREE_FLAG(flag) \
+static inline bool btree_node_ ## flag(struct btree *b) \
+{ return test_bit(BTREE_NODE_ ## flag, &b->flags); } \
+ \
+static inline void set_btree_node_ ## flag(struct btree *b) \
+{ set_bit(BTREE_NODE_ ## flag, &b->flags); } \
+
+enum btree_flags {
+ BTREE_NODE_read_done,
+ BTREE_NODE_io_error,
+ BTREE_NODE_dirty,
+ BTREE_NODE_write_idx,
+};
+
+BTREE_FLAG(read_done);
+BTREE_FLAG(io_error);
+BTREE_FLAG(dirty);
+BTREE_FLAG(write_idx);
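+
+/*
+ * e.g. BTREE_FLAG(dirty) above generates btree_node_dirty(b) and
+ * set_btree_node_dirty(b), which test/set BTREE_NODE_dirty in b->flags.
+ */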
+
+static inline struct btree_write *btree_current_write(struct btree *b)
+{
+ return b->writes + btree_node_write_idx(b);
+}
+
+static inline struct btree_write *btree_prev_write(struct btree *b)
+{
+ return b->writes + (btree_node_write_idx(b) ^ 1);
+}
+
+static inline unsigned bset_offset(struct btree *b, struct bset *i)
+{
+ return (((size_t) i) - ((size_t) b->sets->data)) >> 9;
+}
+
+static inline struct bset *write_block(struct btree *b)
+{
+ return ((void *) b->sets[0].data) + b->written * block_bytes(b->c);
+}
+
+static inline bool bset_written(struct btree *b, struct bset_tree *t)
+{
+ return t->data < write_block(b);
+}
+
+static inline bool bkey_written(struct btree *b, struct bkey *k)
+{
+ return k < write_block(b)->start;
+}
+
+static inline void set_gc_sectors(struct cache_set *c)
+{
+ atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 8);
+}
+
+static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
+{
+ return __bch_ptr_invalid(b->c, b->level, k);
+}
+
+static inline struct bkey *bch_btree_iter_init(struct btree *b,
+ struct btree_iter *iter,
+ struct bkey *search)
+{
+ return __bch_btree_iter_init(b, iter, search, b->sets);
+}
+
+/* Looping macros */
+
+#define for_each_cached_btree(b, c, iter) \
+ for (iter = 0; \
+ iter < ARRAY_SIZE((c)->bucket_hash); \
+ iter++) \
+ hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash)
+
+#define for_each_key_filter(b, k, iter, filter) \
+ for (bch_btree_iter_init((b), (iter), NULL); \
+ ((k) = bch_btree_iter_next_filter((iter), b, filter));)
+
+#define for_each_key(b, k, iter) \
+ for (bch_btree_iter_init((b), (iter), NULL); \
+ ((k) = bch_btree_iter_next(iter));)
+
+/* Recursing down the btree */
+
+struct btree_op {
+ struct closure cl;
+ struct cache_set *c;
+
+ /* Journal entry we have a refcount on */
+ atomic_t *journal;
+
+ /* Bio to be inserted into the cache */
+ struct bio *cache_bio;
+
+ unsigned inode;
+
+ uint16_t write_prio;
+
+ /* Btree level at which we start taking write locks */
+ short lock;
+
+ /* Btree insertion type */
+ enum {
+ BTREE_INSERT,
+ BTREE_REPLACE
+ } type:8;
+
+ unsigned csum:1;
+ unsigned skip:1;
+ unsigned flush_journal:1;
+
+ unsigned insert_data_done:1;
+ unsigned lookup_done:1;
+ unsigned insert_collision:1;
+
+ /* Anything after this point won't get zeroed in do_bio_hook() */
+
+ /* Keys to be inserted */
+ struct keylist keys;
+ BKEY_PADDED(replace);
+};
+
+void bch_btree_op_init_stack(struct btree_op *);
+
+static inline void rw_lock(bool w, struct btree *b, int level)
+{
+ w ? down_write_nested(&b->lock, level + 1)
+ : down_read_nested(&b->lock, level + 1);
+ if (w)
+ b->seq++;
+}
+
+static inline void rw_unlock(bool w, struct btree *b)
+{
+#ifdef CONFIG_BCACHE_EDEBUG
+ unsigned i;
+
+ if (w &&
+ b->key.ptr[0] &&
+ btree_node_read_done(b))
+ for (i = 0; i <= b->nsets; i++)
+ bch_check_key_order(b, b->sets[i].data);
+#endif
+
+ if (w)
+ b->seq++;
+ (w ? up_write : up_read)(&b->lock);
+}
+
+#define insert_lock(s, b) ((b)->level <= (s)->lock)
+
+/*
+ * These macros are for recursing down the btree - they handle the details of
+ * locking and looking up nodes in the cache for you. They're best treated as
+ * mere syntax when reading code that uses them.
+ *
+ * op->lock determines whether we take a read or a write lock at a given depth.
+ * If you've got a read lock and find that you need a write lock (i.e. you're
+ * going to have to split), set op->lock and return -EINTR; btree_root() will
+ * call you again and you'll have the correct lock.
+ */
+
+/**
+ * btree - recurse down the btree on a specified key
+ * @fn: function to call, which will be passed the child node
+ * @key: key to recurse on
+ * @b: parent btree node
+ * @op: pointer to struct btree_op
+ */
+#define btree(fn, key, b, op, ...) \
+({ \
+ int _r, l = (b)->level - 1; \
+ bool _w = l <= (op)->lock; \
+ struct btree *_b = bch_btree_node_get((b)->c, key, l, op); \
+ if (!IS_ERR(_b)) { \
+ _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
+ rw_unlock(_w, _b); \
+ } else \
+ _r = PTR_ERR(_b); \
+ _r; \
+})
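+
+/*
+ * For example, the keybuf refill code in btree.c recurses on a key found in an
+ * interior node with
+ *
+ *	btree(refill_keybuf, k, b, op, buf, end);
+ *
+ * which looks the child up with bch_btree_node_get(), calls
+ * bch_btree_refill_keybuf() on it and drops the child's lock again before the
+ * expression yields the return value.
+ */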
+
+/**
+ * btree_root - call a function on the root of the btree
+ * @fn: function to call, which will be passed the child node
+ * @c: cache set
+ * @op: pointer to struct btree_op
+ */
+#define btree_root(fn, c, op, ...) \
+({ \
+ int _r = -EINTR; \
+ do { \
+ struct btree *_b = (c)->root; \
+ bool _w = insert_lock(op, _b); \
+ rw_lock(_w, _b, _b->level); \
+ if (_b == (c)->root && \
+ _w == insert_lock(op, _b)) \
+ _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
+ rw_unlock(_w, _b); \
+ bch_cannibalize_unlock(c, &(op)->cl); \
+ } while (_r == -EINTR); \
+ \
+ _r; \
+})
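+
+/*
+ * For example, bch_refill_keybuf() in btree.c starts its traversal with
+ *
+ *	btree_root(refill_keybuf, c, &op, buf, end);
+ *
+ * and the do/while above transparently retries whenever the callee returns
+ * -EINTR (e.g. after raising op->lock so that write locks are taken higher up
+ * on the next pass).
+ */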
+
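+/*
+ * A node should be split when it is (nearly) full: either every block
+ * allocated to it has already been written, or the still unwritten bset plus
+ * roughly 15 more keys would no longer fit in the node.
+ */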
+static inline bool should_split(struct btree *b)
+{
+ struct bset *i = write_block(b);
+ return b->written >= btree_blocks(b) ||
+ (i->seq == b->sets[0].data->seq &&
+ b->written + __set_blocks(i, i->keys + 15, b->c)
+ > btree_blocks(b));
+}
+
+void bch_btree_read_done(struct closure *);
+void bch_btree_read(struct btree *);
+void bch_btree_write(struct btree *b, bool now, struct btree_op *op);
+
+void bch_cannibalize_unlock(struct cache_set *, struct closure *);
+void bch_btree_set_root(struct btree *);
+struct btree *bch_btree_node_alloc(struct cache_set *, int, struct closure *);
+struct btree *bch_btree_node_get(struct cache_set *, struct bkey *,
+ int, struct btree_op *);
+
+bool bch_btree_insert_keys(struct btree *, struct btree_op *);
+bool bch_btree_insert_check_key(struct btree *, struct btree_op *,
+ struct bio *);
+int bch_btree_insert(struct btree_op *, struct cache_set *);
+
+int bch_btree_search_recurse(struct btree *, struct btree_op *);
+
+void bch_queue_gc(struct cache_set *);
+size_t bch_btree_gc_finish(struct cache_set *);
+void bch_moving_gc(struct closure *);
+int bch_btree_check(struct cache_set *, struct btree_op *);
+uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
+
+void bch_keybuf_init(struct keybuf *, keybuf_pred_fn *);
+void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *);
+bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
+ struct bkey *);
+void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
+struct keybuf_key *bch_keybuf_next(struct keybuf *);
+struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *,
+ struct keybuf *, struct bkey *);
+
+#endif
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
new file mode 100644
index 0000000..bd05a9a
--- /dev/null
+++ b/drivers/md/bcache/closure.c
@@ -0,0 +1,345 @@
+/*
+ * Asynchronous refcounty things
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+
+#include "closure.h"
+
+void closure_queue(struct closure *cl)
+{
+ struct workqueue_struct *wq = cl->wq;
+ if (wq) {
+ INIT_WORK(&cl->work, cl->work.func);
+ BUG_ON(!queue_work(wq, &cl->work));
+ } else
+ cl->fn(cl);
+}
+EXPORT_SYMBOL_GPL(closure_queue);
+
+#define CL_FIELD(type, field) \
+ case TYPE_ ## type: \
+ return &container_of(cl, struct type, cl)->field
+
+static struct closure_waitlist *closure_waitlist(struct closure *cl)
+{
+ switch (cl->type) {
+ CL_FIELD(closure_with_waitlist, wait);
+ CL_FIELD(closure_with_waitlist_and_timer, wait);
+ default:
+ return NULL;
+ }
+}
+
+static struct timer_list *closure_timer(struct closure *cl)
+{
+ switch (cl->type) {
+ CL_FIELD(closure_with_timer, timer);
+ CL_FIELD(closure_with_waitlist_and_timer, timer);
+ default:
+ return NULL;
+ }
+}
+
+static inline void closure_put_after_sub(struct closure *cl, int flags)
+{
+ int r = flags & CLOSURE_REMAINING_MASK;
+
+ BUG_ON(flags & CLOSURE_GUARD_MASK);
+ BUG_ON(!r && (flags & ~(CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING)));
+
+ /* Must deliver precisely one wakeup */
+ if (r == 1 && (flags & CLOSURE_SLEEPING))
+ wake_up_process(cl->task);
+
+ if (!r) {
+ if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
+ /* CLOSURE_BLOCKING might be set - clear it */
+ atomic_set(&cl->remaining,
+ CLOSURE_REMAINING_INITIALIZER);
+ closure_queue(cl);
+ } else {
+ struct closure *parent = cl->parent;
+ struct closure_waitlist *wait = closure_waitlist(cl);
+
+ closure_debug_destroy(cl);
+
+ atomic_set(&cl->remaining, -1);
+
+ if (wait)
+ closure_wake_up(wait);
+
+ if (cl->fn)
+ cl->fn(cl);
+
+ if (parent)
+ closure_put(parent);
+ }
+ }
+}
+
+/* For clearing flags with the same atomic op as a put */
+void closure_sub(struct closure *cl, int v)
+{
+ closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
+}
+EXPORT_SYMBOL_GPL(closure_sub);
+
+void closure_put(struct closure *cl)
+{
+ closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
+}
+EXPORT_SYMBOL_GPL(closure_put);
+
+static void set_waiting(struct closure *cl, unsigned long f)
+{
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+ cl->waiting_on = f;
+#endif
+}
+
+void __closure_wake_up(struct closure_waitlist *wait_list)
+{
+ struct llist_node *list;
+ struct closure *cl;
+ struct llist_node *reverse = NULL;
+
+ list = llist_del_all(&wait_list->list);
+
+ /* We first reverse the list to preserve FIFO ordering and fairness */
+
+ while (list) {
+ struct llist_node *t = list;
+ list = llist_next(list);
+
+ t->next = reverse;
+ reverse = t;
+ }
+
+ /* Then do the wakeups */
+
+ while (reverse) {
+ cl = container_of(reverse, struct closure, list);
+ reverse = llist_next(reverse);
+
+ set_waiting(cl, 0);
+ closure_sub(cl, CLOSURE_WAITING + 1);
+ }
+}
+EXPORT_SYMBOL_GPL(__closure_wake_up);
+
+bool closure_wait(struct closure_waitlist *list, struct closure *cl)
+{
+ if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
+ return false;
+
+ set_waiting(cl, _RET_IP_);
+ atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
+ llist_add(&cl->list, &list->list);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(closure_wait);
+
+/**
+ * closure_sync() - sleep until a closure has nothing left to wait on
+ *
+ * Sleeps until the refcount hits 1 - the thread that's running the closure owns
+ * the last refcount.
+ */
+void closure_sync(struct closure *cl)
+{
+ while (1) {
+ __closure_start_sleep(cl);
+ closure_set_ret_ip(cl);
+
+ if ((atomic_read(&cl->remaining) &
+ CLOSURE_REMAINING_MASK) == 1)
+ break;
+
+ schedule();
+ }
+
+ __closure_end_sleep(cl);
+}
+EXPORT_SYMBOL_GPL(closure_sync);
+
+/**
+ * closure_trylock() - try to acquire the closure, without waiting
+ * @cl: closure to lock
+ *
+ * Returns true if the closure was successfully locked.
+ */
+bool closure_trylock(struct closure *cl, struct closure *parent)
+{
+ if (atomic_cmpxchg(&cl->remaining, -1,
+ CLOSURE_REMAINING_INITIALIZER) != -1)
+ return false;
+
+ closure_set_ret_ip(cl);
+
+ smp_mb();
+ cl->parent = parent;
+ if (parent)
+ closure_get(parent);
+
+ closure_debug_create(cl);
+ return true;
+}
+EXPORT_SYMBOL_GPL(closure_trylock);
+
+void __closure_lock(struct closure *cl, struct closure *parent,
+ struct closure_waitlist *wait_list)
+{
+ struct closure wait;
+ closure_init_stack(&wait);
+
+ while (1) {
+ if (closure_trylock(cl, parent))
+ return;
+
+ closure_wait_event_sync(wait_list, &wait,
+ atomic_read(&cl->remaining) == -1);
+ }
+}
+EXPORT_SYMBOL_GPL(__closure_lock);
+
+static void closure_delay_timer_fn(unsigned long data)
+{
+ struct closure *cl = (struct closure *) data;
+ closure_sub(cl, CLOSURE_TIMER + 1);
+}
+
+void do_closure_timer_init(struct closure *cl)
+{
+ struct timer_list *timer = closure_timer(cl);
+
+ init_timer(timer);
+ timer->data = (unsigned long) cl;
+ timer->function = closure_delay_timer_fn;
+}
+EXPORT_SYMBOL_GPL(do_closure_timer_init);
+
+bool __closure_delay(struct closure *cl, unsigned long delay,
+ struct timer_list *timer)
+{
+ if (atomic_read(&cl->remaining) & CLOSURE_TIMER)
+ return false;
+
+ BUG_ON(timer_pending(timer));
+
+ timer->expires = jiffies + delay;
+
+ atomic_add(CLOSURE_TIMER + 1, &cl->remaining);
+ add_timer(timer);
+ return true;
+}
+EXPORT_SYMBOL_GPL(__closure_delay);
+
+void __closure_flush(struct closure *cl, struct timer_list *timer)
+{
+ if (del_timer(timer))
+ closure_sub(cl, CLOSURE_TIMER + 1);
+}
+EXPORT_SYMBOL_GPL(__closure_flush);
+
+void __closure_flush_sync(struct closure *cl, struct timer_list *timer)
+{
+ if (del_timer_sync(timer))
+ closure_sub(cl, CLOSURE_TIMER + 1);
+}
+EXPORT_SYMBOL_GPL(__closure_flush_sync);
+
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+
+static LIST_HEAD(closure_list);
+static DEFINE_SPINLOCK(closure_list_lock);
+
+void closure_debug_create(struct closure *cl)
+{
+ unsigned long flags;
+
+ BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE);
+ cl->magic = CLOSURE_MAGIC_ALIVE;
+
+ spin_lock_irqsave(&closure_list_lock, flags);
+ list_add(&cl->all, &closure_list);
+ spin_unlock_irqrestore(&closure_list_lock, flags);
+}
+EXPORT_SYMBOL_GPL(closure_debug_create);
+
+void closure_debug_destroy(struct closure *cl)
+{
+ unsigned long flags;
+
+ BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
+ cl->magic = CLOSURE_MAGIC_DEAD;
+
+ spin_lock_irqsave(&closure_list_lock, flags);
+ list_del(&cl->all);
+ spin_unlock_irqrestore(&closure_list_lock, flags);
+}
+EXPORT_SYMBOL_GPL(closure_debug_destroy);
+
+static struct dentry *debug;
+
+#define work_data_bits(work) ((unsigned long *)(&(work)->data))
+
+static int debug_seq_show(struct seq_file *f, void *data)
+{
+ struct closure *cl;
+ spin_lock_irq(&closure_list_lock);
+
+ list_for_each_entry(cl, &closure_list, all) {
+ int r = atomic_read(&cl->remaining);
+
+ seq_printf(f, "%p: %pF -> %pf p %p r %i ",
+ cl, (void *) cl->ip, cl->fn, cl->parent,
+ r & CLOSURE_REMAINING_MASK);
+
+ seq_printf(f, "%s%s%s%s%s%s\n",
+ test_bit(WORK_STRUCT_PENDING,
+ work_data_bits(&cl->work)) ? "Q" : "",
+ r & CLOSURE_RUNNING ? "R" : "",
+ r & CLOSURE_BLOCKING ? "B" : "",
+ r & CLOSURE_STACK ? "S" : "",
+ r & CLOSURE_SLEEPING ? "Sl" : "",
+ r & CLOSURE_TIMER ? "T" : "");
+
+ if (r & CLOSURE_WAITING)
+ seq_printf(f, " W %pF\n",
+ (void *) cl->waiting_on);
+
+ seq_printf(f, "\n");
+ }
+
+ spin_unlock_irq(&closure_list_lock);
+ return 0;
+}
+
+static int debug_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_seq_show, NULL);
+}
+
+static const struct file_operations debug_ops = {
+ .owner = THIS_MODULE,
+ .open = debug_seq_open,
+ .read = seq_read,
+ .release = single_release
+};
+
+void __init closure_debug_init(void)
+{
+ debug = debugfs_create_file("closures", 0400, NULL, NULL, &debug_ops);
+}
+
+#endif
+
+MODULE_AUTHOR("Kent Overstreet <koverstreet@google.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
new file mode 100644
index 0000000..0003992
--- /dev/null
+++ b/drivers/md/bcache/closure.h
@@ -0,0 +1,672 @@
+#ifndef _LINUX_CLOSURE_H
+#define _LINUX_CLOSURE_H
+
+#include <linux/llist.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+
+/*
+ * Closure is perhaps the most overused and abused term in computer science, but
+ * since I've been unable to come up with anything better you're stuck with it
+ * again.
+ *
+ * What are closures?
+ *
+ * They embed a refcount. The basic idea is they count "things that are in
+ * progress" - in flight bios, some other thread that's doing something else -
+ * anything you might want to wait on.
+ *
+ * The refcount may be manipulated with closure_get() and closure_put().
+ * closure_put() is where many of the interesting things happen, when it causes
+ * the refcount to go to 0.
+ *
+ * Closures can be used to wait on things both synchronously and asynchronously,
+ * and synchronous and asynchronous use can be mixed without restriction. To
+ * wait synchronously, use closure_sync() - you will sleep until your closure's
+ * refcount hits 1.
+ *
+ * To wait asynchronously, use
+ * continue_at(cl, next_function, workqueue);
+ *
+ * passing it, as you might expect, the function to run when nothing is pending
+ * and the workqueue to run that function out of.
+ *
+ * continue_at() also, critically, is a macro that returns the calling function.
+ * There's good reason for this.
+ *
+ * To use closures asynchronously safely, they must always have a refcount while
+ * they are running, owned by the thread that is running them. Otherwise, suppose
+ * you submit some bios and wish to have a function run when they all complete:
+ *
+ * foo_endio(struct bio *bio, int error)
+ * {
+ * closure_put(cl);
+ * }
+ *
+ * closure_init(cl);
+ *
+ * do_stuff();
+ * closure_get(cl);
+ * bio1->bi_endio = foo_endio;
+ * bio_submit(bio1);
+ *
+ * do_more_stuff();
+ * closure_get(cl);
+ * bio2->bi_endio = foo_endio;
+ * bio_submit(bio2);
+ *
+ * continue_at(cl, complete_some_read, system_wq);
+ *
+ * If the closure's refcount started at 0, complete_some_read() could run before the
+ * second bio was submitted - which is almost always not what you want! More
+ * importantly, it wouldn't be possible to say whether the original thread or
+ * complete_some_read()'s thread owned the closure - and whatever state it was
+ * associated with!
+ *
+ * So, closure_init() initializes a closure's refcount to 1 - and when a
+ * closure_fn is run, the refcount will be reset to 1 first.
+ *
+ * Then, the rule is - if you got the refcount with closure_get(), release it
+ * with closure_put() (i.e, in a bio->bi_endio function). If you have a refcount
+ * on a closure because you called closure_init() or you were run out of a
+ * closure - _always_ use continue_at(). Doing so consistently will help
+ * eliminate an entire class of particularly pernicious races.
+ *
+ * For a closure to wait on an arbitrary event, we need to introduce waitlists:
+ *
+ * struct closure_waitlist list;
+ * closure_wait_event(list, cl, condition);
+ * closure_wake_up(wait_list);
+ *
+ * These work analogously to wait_event() and wake_up() - except that instead of
+ * operating on the current thread (for wait_event()) and lists of threads, they
+ * operate on an explicit closure and lists of closures.
+ *
+ * Because it's a closure we can now wait either synchronously or
+ * asynchronously. closure_wait_event() returns the current value of the
+ * condition, and if it returned false continue_at() or closure_sync() can be
+ * used to wait for it to become true.
+ *
+ * It's useful for waiting on things when you can't sleep in the context in
+ * which you must check the condition (perhaps a spinlock held, or you might be
+ * beneath generic_make_request() - in which case you can't sleep on IO).
+ *
+ * closure_wait_event() will wait either synchronously or asynchronously,
+ * depending on whether the closure is in blocking mode or not. You can pick a
+ * mode explicitly with closure_wait_event_sync() and
+ * closure_wait_event_async(), which do just what you might expect.
+ *
+ * Lastly, you might have a wait list dedicated to a specific event, and have no
+ * need for specifying the condition - you just want to wait until someone runs
+ * closure_wake_up() on the appropriate wait list. In that case, just use
+ * closure_wait(). It will return either true or false, depending on whether the
+ * closure was already on a wait list or not - a closure can only be on one wait
+ * list at a time.
+ *
+ * Parents:
+ *
+ * closure_init() takes two arguments - it takes the closure to initialize, and
+ * a (possibly null) parent.
+ *
+ * If parent is non null, the new closure holds a refcount on it for its lifetime;
+ * a closure is considered to be "finished" when its refcount hits 0 and the
+ * function to run is null. Hence
+ *
+ * continue_at(cl, NULL, NULL);
+ *
+ * returns up the (spaghetti) stack of closures, precisely like normal return
+ * returns up the C stack. continue_at() with non null fn is better thought of
+ * as doing a tail call.
+ *
+ * All this implies that a closure should typically be embedded in a particular
+ * struct (which its refcount will normally control the lifetime of), and that
+ * struct can very much be thought of as a stack frame.
+ *
+ * Locking:
+ *
+ * Closures are based on work items but they can be thought of as more like
+ * threads - in that like threads and unlike work items they have a well
+ * defined lifetime; they are created (with closure_init()) and eventually
+ * complete after a continue_at(cl, NULL, NULL).
+ *
+ * Suppose you've got some larger structure with a closure embedded in it that's
+ * used for periodically doing garbage collection. You only want one garbage
+ * collection happening at a time, so the natural thing to do is protect it with
+ * a lock. However, it's difficult to use a lock protecting a closure correctly
+ * because the unlock should come after the last continue_at() (additionally, if
+ * you're using the closure asynchronously a mutex won't work since a mutex has
+ * to be unlocked by the same process that locked it).
+ *
+ * So to make it less error prone and more efficient, we also have the ability
+ * to use closures as locks:
+ *
+ * closure_init_unlocked();
+ * closure_trylock();
+ *
+ * That's all we need for trylock() - the last closure_put() implicitly unlocks
+ * it for you. But for closure_lock(), we also need a wait list:
+ *
+ * struct closure_with_waitlist frobnicator_cl;
+ *
+ * closure_init_unlocked(&frobnicator_cl);
+ * closure_lock(&frobnicator_cl);
+ *
+ * A closure_with_waitlist embeds a closure and a wait list - much like struct
+ * delayed_work embeds a work item and a timer_list. The important thing is, use
+ * it exactly like you would a regular closure and closure_put() will magically
+ * handle everything for you.
+ *
+ * We've got closures that embed timers, too. They're called, appropriately
+ * enough:
+ * struct closure_with_timer;
+ *
+ * This gives you access to closure_delay(). It takes a refcount for a specified
+ * number of jiffies - you could then call closure_sync() (for a slightly
+ * convoluted version of msleep()) or continue_at() - which gives you the same
+ * effect as using a delayed work item, except you can reuse the work_struct
+ * already embedded in struct closure.
+ *
+ * Lastly, there's struct closure_with_waitlist_and_timer. It does what you
+ * probably expect, if you happen to need the features of both. (You don't
+ * really want to know how all this is implemented, but if I've done my job
+ * right you shouldn't have to care).
+ */
+
+struct closure;
+typedef void (closure_fn) (struct closure *);
+
+struct closure_waitlist {
+ struct llist_head list;
+};
+
+enum closure_type {
+ TYPE_closure = 0,
+ TYPE_closure_with_waitlist = 1,
+ TYPE_closure_with_timer = 2,
+ TYPE_closure_with_waitlist_and_timer = 3,
+ MAX_CLOSURE_TYPE = 3,
+};
+
+enum closure_state {
+ /*
+ * CLOSURE_BLOCKING: Causes closure_wait_event() to block, instead of
+ * waiting asynchronously
+ *
+ * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
+ * the thread that owns the closure, and cleared by the thread that's
+ * waking up the closure.
+ *
+ * CLOSURE_SLEEPING: Must be set before a thread uses a closure to sleep
+ * - indicates that cl->task is valid and closure_put() may wake it up.
+ * Only set or cleared by the thread that owns the closure.
+ *
+ * CLOSURE_TIMER: Analogous to CLOSURE_WAITING, indicates that a closure
+ * has an outstanding timer. Must be set by the thread that owns the
+ * closure, and cleared by the timer function when the timer goes off.
+ *
+ * The rest are for debugging and don't affect behaviour:
+ *
+ * CLOSURE_RUNNING: Set when a closure is running (i.e. by
+ * closure_init() and when closure_put() runs the next function), and
+ * must be cleared before remaining hits 0. Primarily to help guard
+ * against incorrect usage and accidentally transferring references.
+ * continue_at() and closure_return() clear it for you, if you're doing
+ * something unusual you can use closure_set_dead() which also helps
+ * annotate where references are being transferred.
+ *
+ * CLOSURE_STACK: Sanity check - remaining should never hit 0 on a
+ * closure with this flag set
+ */
+
+ CLOSURE_BITS_START = (1 << 19),
+ CLOSURE_DESTRUCTOR = (1 << 19),
+ CLOSURE_BLOCKING = (1 << 21),
+ CLOSURE_WAITING = (1 << 23),
+ CLOSURE_SLEEPING = (1 << 25),
+ CLOSURE_TIMER = (1 << 27),
+ CLOSURE_RUNNING = (1 << 29),
+ CLOSURE_STACK = (1 << 31),
+};
+
+#define CLOSURE_GUARD_MASK \
+ ((CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING|CLOSURE_WAITING| \
+ CLOSURE_SLEEPING|CLOSURE_TIMER|CLOSURE_RUNNING|CLOSURE_STACK) << 1)
+
+#define CLOSURE_REMAINING_MASK (CLOSURE_BITS_START - 1)
+#define CLOSURE_REMAINING_INITIALIZER (1|CLOSURE_RUNNING)
+
+struct closure {
+ union {
+ struct {
+ struct workqueue_struct *wq;
+ struct task_struct *task;
+ struct llist_node list;
+ closure_fn *fn;
+ };
+ struct work_struct work;
+ };
+
+ struct closure *parent;
+
+ atomic_t remaining;
+
+ enum closure_type type;
+
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+#define CLOSURE_MAGIC_DEAD 0xc054dead
+#define CLOSURE_MAGIC_ALIVE 0xc054a11e
+
+ unsigned magic;
+ struct list_head all;
+ unsigned long ip;
+ unsigned long waiting_on;
+#endif
+};
+
+struct closure_with_waitlist {
+ struct closure cl;
+ struct closure_waitlist wait;
+};
+
+struct closure_with_timer {
+ struct closure cl;
+ struct timer_list timer;
+};
+
+struct closure_with_waitlist_and_timer {
+ struct closure cl;
+ struct closure_waitlist wait;
+ struct timer_list timer;
+};
+
+extern unsigned invalid_closure_type(void);
+
+#define __CLOSURE_TYPE(cl, _t) \
+ __builtin_types_compatible_p(typeof(cl), struct _t) \
+ ? TYPE_ ## _t : \
+
+#define __closure_type(cl) \
+( \
+ __CLOSURE_TYPE(cl, closure) \
+ __CLOSURE_TYPE(cl, closure_with_waitlist) \
+ __CLOSURE_TYPE(cl, closure_with_timer) \
+ __CLOSURE_TYPE(cl, closure_with_waitlist_and_timer) \
+ invalid_closure_type() \
+)
+
+void closure_sub(struct closure *cl, int v);
+void closure_put(struct closure *cl);
+void closure_queue(struct closure *cl);
+void __closure_wake_up(struct closure_waitlist *list);
+bool closure_wait(struct closure_waitlist *list, struct closure *cl);
+void closure_sync(struct closure *cl);
+
+bool closure_trylock(struct closure *cl, struct closure *parent);
+void __closure_lock(struct closure *cl, struct closure *parent,
+ struct closure_waitlist *wait_list);
+
+void do_closure_timer_init(struct closure *cl);
+bool __closure_delay(struct closure *cl, unsigned long delay,
+ struct timer_list *timer);
+void __closure_flush(struct closure *cl, struct timer_list *timer);
+void __closure_flush_sync(struct closure *cl, struct timer_list *timer);
+
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+
+void closure_debug_init(void);
+void closure_debug_create(struct closure *cl);
+void closure_debug_destroy(struct closure *cl);
+
+#else
+
+static inline void closure_debug_init(void) {}
+static inline void closure_debug_create(struct closure *cl) {}
+static inline void closure_debug_destroy(struct closure *cl) {}
+
+#endif
+
+static inline void closure_set_ip(struct closure *cl)
+{
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+ cl->ip = _THIS_IP_;
+#endif
+}
+
+static inline void closure_set_ret_ip(struct closure *cl)
+{
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+ cl->ip = _RET_IP_;
+#endif
+}
+
+static inline void closure_get(struct closure *cl)
+{
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+ BUG_ON((atomic_inc_return(&cl->remaining) &
+ CLOSURE_REMAINING_MASK) <= 1);
+#else
+ atomic_inc(&cl->remaining);
+#endif
+}
+
+static inline void closure_set_stopped(struct closure *cl)
+{
+ atomic_sub(CLOSURE_RUNNING, &cl->remaining);
+}
+
+static inline bool closure_is_stopped(struct closure *cl)
+{
+ return !(atomic_read(&cl->remaining) & CLOSURE_RUNNING);
+}
+
+static inline bool closure_is_unlocked(struct closure *cl)
+{
+ return atomic_read(&cl->remaining) == -1;
+}
+
+static inline void do_closure_init(struct closure *cl, struct closure *parent,
+ bool running)
+{
+ switch (cl->type) {
+ case TYPE_closure_with_timer:
+ case TYPE_closure_with_waitlist_and_timer:
+ do_closure_timer_init(cl);
+ default:
+ break;
+ }
+
+ cl->parent = parent;
+ if (parent)
+ closure_get(parent);
+
+ if (running) {
+ closure_debug_create(cl);
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+ } else
+ atomic_set(&cl->remaining, -1);
+
+ closure_set_ip(cl);
+}
+
+/*
+ * Hack to get at the embedded closure if there is one, by doing an unsafe cast:
+ * the result of __closure_type() is thrown away, it's used merely for type
+ * checking.
+ */
+#define __to_internal_closure(cl) \
+({ \
+ BUILD_BUG_ON(__closure_type(*cl) > MAX_CLOSURE_TYPE); \
+ (struct closure *) cl; \
+})
+
+#define closure_init_type(cl, parent, running) \
+do { \
+ struct closure *_cl = __to_internal_closure(cl); \
+ _cl->type = __closure_type(*(cl)); \
+ do_closure_init(_cl, parent, running); \
+} while (0)
+
+/**
+ * __closure_init() - Initialize a closure, skipping the memset()
+ *
+ * May be used instead of closure_init() when memory has already been zeroed.
+ */
+#define __closure_init(cl, parent) \
+ closure_init_type(cl, parent, true)
+
+/**
+ * closure_init() - Initialize a closure, setting the refcount to 1
+ * @cl: closure to initialize
+ * @parent: parent of the new closure. cl will take a refcount on it for its
+ * lifetime; may be NULL.
+ */
+#define closure_init(cl, parent) \
+do { \
+ memset((cl), 0, sizeof(*(cl))); \
+ __closure_init(cl, parent); \
+} while (0)
+
+static inline void closure_init_stack(struct closure *cl)
+{
+ memset(cl, 0, sizeof(struct closure));
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|
+ CLOSURE_BLOCKING|CLOSURE_STACK);
+}
+
+/**
+ * closure_init_unlocked() - Initialize a closure but leave it unlocked.
+ * @cl: closure to initialize
+ *
+ * For when the closure will be used as a lock. The closure may not be used
+ * until after a closure_lock() or closure_trylock().
+ */
+#define closure_init_unlocked(cl) \
+do { \
+ memset((cl), 0, sizeof(*(cl))); \
+ closure_init_type(cl, NULL, false); \
+} while (0)
+
+/**
+ * closure_lock() - lock and initialize a closure.
+ * @cl: the closure to lock
+ * @parent: the new parent for this closure
+ *
+ * The closure must be of one of the types that has a waitlist (otherwise we
+ * wouldn't be able to sleep on contention).
+ *
+ * @parent has exactly the same meaning as in closure_init(); if non null, the
+ * closure will take a reference on @parent which will be released when it is
+ * unlocked.
+ */
+#define closure_lock(cl, parent) \
+ __closure_lock(__to_internal_closure(cl), parent, &(cl)->wait)
+
+/**
+ * closure_delay() - delay some number of jiffies
+ * @cl: the closure that will sleep
+ * @delay: the delay in jiffies
+ *
+ * Takes a refcount on @cl which will be released after @delay jiffies; this may
+ * be used to have a function run after a delay with continue_at(), or
+ * closure_sync() may be used for a convoluted version of msleep().
+ */
+#define closure_delay(cl, delay) \
+ __closure_delay(__to_internal_closure(cl), delay, &(cl)->timer)
+
+#define closure_flush(cl) \
+ __closure_flush(__to_internal_closure(cl), &(cl)->timer)
+
+#define closure_flush_sync(cl) \
+ __closure_flush_sync(__to_internal_closure(cl), &(cl)->timer)
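+
+/*
+ * A minimal sketch of closure_delay() + continue_at(), per the closure_delay()
+ * comment above.  The closure is assumed to live inside some longer lived
+ * object (continue_at() hands it to a workqueue, so it must not be on the
+ * stack); do_work_later() and foo_wq are hypothetical names:
+ *
+ *	struct foo { struct closure_with_timer cwt; ... } *d;
+ *
+ *	closure_init(&d->cwt, NULL);
+ *	closure_delay(&d->cwt, HZ);	// take a refcount for about a second
+ *	continue_at(&d->cwt.cl, do_work_later, foo_wq);
+ */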
+
+static inline void __closure_end_sleep(struct closure *cl)
+{
+ __set_current_state(TASK_RUNNING);
+
+ if (atomic_read(&cl->remaining) & CLOSURE_SLEEPING)
+ atomic_sub(CLOSURE_SLEEPING, &cl->remaining);
+}
+
+static inline void __closure_start_sleep(struct closure *cl)
+{
+ closure_set_ip(cl);
+ cl->task = current;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+
+ if (!(atomic_read(&cl->remaining) & CLOSURE_SLEEPING))
+ atomic_add(CLOSURE_SLEEPING, &cl->remaining);
+}
+
+/**
+ * closure_blocking() - returns true if the closure is in blocking mode.
+ *
+ * If a closure is in blocking mode, closure_wait_event() will sleep until the
+ * condition is true instead of waiting asynchronously.
+ */
+static inline bool closure_blocking(struct closure *cl)
+{
+ return atomic_read(&cl->remaining) & CLOSURE_BLOCKING;
+}
+
+/**
+ * set_closure_blocking() - put a closure in blocking mode.
+ *
+ * If a closure is in blocking mode, closure_wait_event() will sleep until the
+ * condition is true instead of waiting asynchronously.
+ *
+ * Not thread safe - can only be called by the thread running the closure.
+ */
+static inline void set_closure_blocking(struct closure *cl)
+{
+ if (!closure_blocking(cl))
+ atomic_add(CLOSURE_BLOCKING, &cl->remaining);
+}
+
+/*
+ * Not thread safe - can only be called by the thread running the closure.
+ */
+static inline void clear_closure_blocking(struct closure *cl)
+{
+ if (closure_blocking(cl))
+ atomic_sub(CLOSURE_BLOCKING, &cl->remaining);
+}
+
+/**
+ * closure_wake_up() - wake up all closures on a wait list.
+ */
+static inline void closure_wake_up(struct closure_waitlist *list)
+{
+ smp_mb();
+ __closure_wake_up(list);
+}
+
+/*
+ * Wait on an event, synchronously or asynchronously - analogous to wait_event()
+ * but for closures.
+ *
+ * The loop is oddly structured so as to avoid a race; we must check the
+ * condition again after we've added ourself to the waitlist. We know if we were
+ * already on the waitlist because closure_wait() returns false; thus, we only
+ * schedule or break if closure_wait() returns false. If it returns true, we
+ * just loop again - rechecking the condition.
+ *
+ * The __closure_wake_up() is necessary because we may race with the event
+ * becoming true; i.e. we see event false -> wait -> recheck condition, but the
+ * thread that made the event true may have called closure_wake_up() before we
+ * added ourself to the wait list.
+ *
+ * We have to call closure_sync() at the end instead of just
+ * __closure_end_sleep() because a different thread might've called
+ * closure_wake_up() before us and gotten preempted before they dropped the
+ * refcount on our closure. If this was a stack allocated closure, that would be
+ * bad.
+ */
+#define __closure_wait_event(list, cl, condition, _block) \
+({ \
+ bool block = _block; \
+ typeof(condition) ret; \
+ \
+ while (1) { \
+ ret = (condition); \
+ if (ret) { \
+ __closure_wake_up(list); \
+ if (block) \
+ closure_sync(cl); \
+ \
+ break; \
+ } \
+ \
+ if (block) \
+ __closure_start_sleep(cl); \
+ \
+ if (!closure_wait(list, cl)) { \
+ if (!block) \
+ break; \
+ \
+ schedule(); \
+ } \
+ } \
+ \
+ ret; \
+})
+
+/**
+ * closure_wait_event() - wait on a condition, synchronously or asynchronously.
+ * @list: the wait list to wait on
+ * @cl: the closure that is doing the waiting
+ * @condition: a C expression for the event to wait for
+ *
+ * If the closure is in blocking mode, sleeps until the @condition evaluates to
+ * true - exactly like wait_event().
+ *
+ * If the closure is not in blocking mode, waits asynchronously; if the
+ * condition is currently false the @cl is put onto @list and returns. @list
+ * owns a refcount on @cl; closure_sync() or continue_at() may be used later to
+ * wait for another thread to wake up @list, which drops the refcount on @cl.
+ *
+ * Returns the value of @condition; @cl will be on @list iff @condition was
+ * false.
+ *
+ * closure_wake_up(@list) must be called after changing any variable that could
+ * cause @condition to become true.
+ */
+#define closure_wait_event(list, cl, condition) \
+ __closure_wait_event(list, cl, condition, closure_blocking(cl))
+
+#define closure_wait_event_async(list, cl, condition) \
+ __closure_wait_event(list, cl, condition, false)
+
+#define closure_wait_event_sync(list, cl, condition) \
+ __closure_wait_event(list, cl, condition, true)
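+
+/*
+ * For example, bch_btree_verify() in debug.c waits for outstanding btree node
+ * IO with a stack closure; closure_init_stack() puts it in blocking mode, so
+ * the wait is synchronous:
+ *
+ *	struct closure cl;
+ *	closure_init_stack(&cl);
+ *
+ *	closure_wait_event(&b->io.wait, &cl,
+ *			   atomic_read(&b->io.cl.remaining) == -1);
+ */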
+
+static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
+ struct workqueue_struct *wq)
+{
+ BUG_ON(object_is_on_stack(cl));
+ closure_set_ip(cl);
+ cl->fn = fn;
+ cl->wq = wq;
+ /* between atomic_dec() in closure_put() */
+ smp_mb__before_atomic_dec();
+}
+
+#define continue_at(_cl, _fn, _wq) \
+do { \
+ set_closure_fn(_cl, _fn, _wq); \
+ closure_sub(_cl, CLOSURE_RUNNING + 1); \
+ return; \
+} while (0)
+
+#define closure_return(_cl) continue_at((_cl), NULL, NULL)
+
+#define continue_at_nobarrier(_cl, _fn, _wq) \
+do { \
+ set_closure_fn(_cl, _fn, _wq); \
+	closure_queue(_cl);						\
+ return; \
+} while (0)
+
+#define closure_return_with_destructor(_cl, _destructor) \
+do { \
+ set_closure_fn(_cl, _destructor, NULL); \
+ closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1); \
+ return; \
+} while (0)
+
+static inline void closure_call(struct closure *cl, closure_fn fn,
+ struct workqueue_struct *wq,
+ struct closure *parent)
+{
+ closure_init(cl, parent);
+ continue_at_nobarrier(cl, fn, wq);
+}
+
+static inline void closure_trylock_call(struct closure *cl, closure_fn fn,
+ struct workqueue_struct *wq,
+ struct closure *parent)
+{
+ if (closure_trylock(cl, parent))
+ continue_at_nobarrier(cl, fn, wq);
+}
+
+#endif /* _LINUX_CLOSURE_H */
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
new file mode 100644
index 0000000..89fd520
--- /dev/null
+++ b/drivers/md/bcache/debug.c
@@ -0,0 +1,565 @@
+/*
+ * Assorted bcache debug code
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "request.h"
+
+#include <linux/console.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/seq_file.h>
+
+static struct dentry *debug;
+
+const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
+{
+ unsigned i;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(c, k, i)) {
+ struct cache *ca = PTR_CACHE(c, k, i);
+ size_t bucket = PTR_BUCKET_NR(c, k, i);
+ size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
+
+ if (KEY_SIZE(k) + r > c->sb.bucket_size)
+ return "bad, length too big";
+ if (bucket < ca->sb.first_bucket)
+ return "bad, short offset";
+ if (bucket >= ca->sb.nbuckets)
+ return "bad, offset past end of device";
+ if (ptr_stale(c, k, i))
+ return "stale";
+ }
+
+ if (!bkey_cmp(k, &ZERO_KEY))
+ return "bad, null key";
+ if (!KEY_PTRS(k))
+ return "bad, no pointers";
+ if (!KEY_SIZE(k))
+ return "zeroed key";
+ return "";
+}
+
+struct keyprint_hack bch_pkey(const struct bkey *k)
+{
+ unsigned i = 0;
+ struct keyprint_hack r;
+ char *out = r.s, *end = r.s + KEYHACK_SIZE;
+
+#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
+
+ p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_OFFSET(k), KEY_SIZE(k));
+
+ if (KEY_PTRS(k))
+ while (1) {
+ p("%llu:%llu gen %llu",
+ PTR_DEV(k, i), PTR_OFFSET(k, i), PTR_GEN(k, i));
+
+ if (++i == KEY_PTRS(k))
+ break;
+
+ p(", ");
+ }
+
+ p("]");
+
+ if (KEY_DIRTY(k))
+ p(" dirty");
+ if (KEY_CSUM(k))
+ p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
+#undef p
+ return r;
+}
+
+struct keyprint_hack bch_pbtree(const struct btree *b)
+{
+ struct keyprint_hack r;
+
+ snprintf(r.s, 40, "%zu level %i/%i", PTR_BUCKET_NR(b->c, &b->key, 0),
+ b->level, b->c->root ? b->c->root->level : -1);
+ return r;
+}
+
+#if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG)
+
+static bool skipped_backwards(struct btree *b, struct bkey *k)
+{
+ return bkey_cmp(k, (!b->level)
+ ? &START_KEY(bkey_next(k))
+ : bkey_next(k)) > 0;
+}
+
+static void dump_bset(struct btree *b, struct bset *i)
+{
+ struct bkey *k;
+ unsigned j;
+
+ for (k = i->start; k < end(i); k = bkey_next(k)) {
+ printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b),
+ (uint64_t *) k - i->d, i->keys, pkey(k));
+
+ for (j = 0; j < KEY_PTRS(k); j++) {
+ size_t n = PTR_BUCKET_NR(b->c, k, j);
+ printk(" bucket %zu", n);
+
+ if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
+ printk(" prio %i",
+ PTR_BUCKET(b->c, k, j)->prio);
+ }
+
+ printk(" %s\n", bch_ptr_status(b->c, k));
+
+ if (bkey_next(k) < end(i) &&
+ skipped_backwards(b, k))
+ printk(KERN_ERR "Key skipped backwards\n");
+ }
+}
+
+#endif
+
+#ifdef CONFIG_BCACHE_DEBUG
+
+void bch_btree_verify(struct btree *b, struct bset *new)
+{
+ struct btree *v = b->c->verify_data;
+ struct closure cl;
+ closure_init_stack(&cl);
+
+ if (!b->c->verify)
+ return;
+
+ closure_wait_event(&b->io.wait, &cl,
+ atomic_read(&b->io.cl.remaining) == -1);
+
+ mutex_lock(&b->c->verify_lock);
+
+ bkey_copy(&v->key, &b->key);
+ v->written = 0;
+ v->level = b->level;
+
+ bch_btree_read(v);
+ closure_wait_event(&v->io.wait, &cl,
+			   atomic_read(&v->io.cl.remaining) == -1);
+
+ if (new->keys != v->sets[0].data->keys ||
+ memcmp(new->start,
+ v->sets[0].data->start,
+ (void *) end(new) - (void *) new->start)) {
+ unsigned i, j;
+
+ console_lock();
+
+ printk(KERN_ERR "*** original memory node:\n");
+ for (i = 0; i <= b->nsets; i++)
+ dump_bset(b, b->sets[i].data);
+
+ printk(KERN_ERR "*** sorted memory node:\n");
+ dump_bset(b, new);
+
+ printk(KERN_ERR "*** on disk node:\n");
+ dump_bset(v, v->sets[0].data);
+
+ for (j = 0; j < new->keys; j++)
+ if (new->d[j] != v->sets[0].data->d[j])
+ break;
+
+ console_unlock();
+ panic("verify failed at %u\n", j);
+ }
+
+ mutex_unlock(&b->c->verify_lock);
+}
+
+static void data_verify_endio(struct bio *bio, int error)
+{
+ struct closure *cl = bio->bi_private;
+ closure_put(cl);
+}
+
+void bch_data_verify(struct search *s)
+{
+ char name[BDEVNAME_SIZE];
+ struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+ struct closure *cl = &s->cl;
+ struct bio *check;
+ struct bio_vec *bv;
+ int i;
+
+ if (!s->unaligned_bvec)
+ bio_for_each_segment(bv, s->orig_bio, i)
+ bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
+
+ check = bio_clone(s->orig_bio, GFP_NOIO);
+ if (!check)
+ return;
+
+ if (bch_bio_alloc_pages(check, GFP_NOIO))
+ goto out_put;
+
+ check->bi_rw = READ_SYNC;
+ check->bi_private = cl;
+ check->bi_end_io = data_verify_endio;
+
+ closure_bio_submit(check, cl, &dc->disk);
+ closure_sync(cl);
+
+ bio_for_each_segment(bv, s->orig_bio, i) {
+ void *p1 = kmap(bv->bv_page);
+ void *p2 = kmap(check->bi_io_vec[i].bv_page);
+
+ if (memcmp(p1 + bv->bv_offset,
+ p2 + bv->bv_offset,
+ bv->bv_len))
+ printk(KERN_ERR
+ "bcache (%s): verify failed at sector %llu\n",
+ bdevname(dc->bdev, name),
+ (uint64_t) s->orig_bio->bi_sector);
+
+ kunmap(bv->bv_page);
+ kunmap(check->bi_io_vec[i].bv_page);
+ }
+
+ __bio_for_each_segment(bv, check, i, 0)
+ __free_page(bv->bv_page);
+out_put:
+ bio_put(check);
+}
+
+#endif
+
+#ifdef CONFIG_BCACHE_EDEBUG
+
+unsigned bch_count_data(struct btree *b)
+{
+ unsigned ret = 0;
+ struct btree_iter iter;
+ struct bkey *k;
+
+ if (!b->level)
+ for_each_key(b, k, &iter)
+ ret += KEY_SIZE(k);
+ return ret;
+}
+
+static void vdump_bucket_and_panic(struct btree *b, const char *fmt,
+ va_list args)
+{
+ unsigned i;
+
+ console_lock();
+
+ for (i = 0; i <= b->nsets; i++)
+ dump_bset(b, b->sets[i].data);
+
+ vprintk(fmt, args);
+
+ console_unlock();
+
+ panic("at %s\n", pbtree(b));
+}
+
+void bch_check_key_order_msg(struct btree *b, struct bset *i,
+ const char *fmt, ...)
+{
+ struct bkey *k;
+
+ if (!i->keys)
+ return;
+
+ for (k = i->start; bkey_next(k) < end(i); k = bkey_next(k))
+ if (skipped_backwards(b, k)) {
+ va_list args;
+ va_start(args, fmt);
+
+ vdump_bucket_and_panic(b, fmt, args);
+ va_end(args);
+ }
+}
+
+void bch_check_keys(struct btree *b, const char *fmt, ...)
+{
+ va_list args;
+ struct bkey *k, *p = NULL;
+ struct btree_iter iter;
+
+ if (b->level)
+ return;
+
+ for_each_key(b, k, &iter) {
+ if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0) {
+ printk(KERN_ERR "Keys out of order:\n");
+ goto bug;
+ }
+
+ if (bch_ptr_invalid(b, k))
+ continue;
+
+ if (p && bkey_cmp(p, &START_KEY(k)) > 0) {
+ printk(KERN_ERR "Overlapping keys:\n");
+ goto bug;
+ }
+ p = k;
+ }
+ return;
+bug:
+ va_start(args, fmt);
+ vdump_bucket_and_panic(b, fmt, args);
+ va_end(args);
+}
+
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+
+/* XXX: cache set refcounting */
+
+struct dump_iterator {
+ char buf[PAGE_SIZE];
+ size_t bytes;
+ struct cache_set *c;
+ struct keybuf keys;
+};
+
+static bool dump_pred(struct keybuf *buf, struct bkey *k)
+{
+ return true;
+}
+
+static ssize_t bch_dump_read(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct dump_iterator *i = file->private_data;
+ ssize_t ret = 0;
+
+ while (size) {
+ struct keybuf_key *w;
+ unsigned bytes = min(i->bytes, size);
+
+ int err = copy_to_user(buf, i->buf, bytes);
+ if (err)
+ return err;
+
+ ret += bytes;
+ buf += bytes;
+ size -= bytes;
+ i->bytes -= bytes;
+ memmove(i->buf, i->buf + bytes, i->bytes);
+
+ if (i->bytes)
+ break;
+
+ w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY);
+ if (!w)
+ break;
+
+ i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", pkey(&w->key));
+ bch_keybuf_del(&i->keys, w);
+ }
+
+ return ret;
+}
+
+static int bch_dump_open(struct inode *inode, struct file *file)
+{
+ struct cache_set *c = inode->i_private;
+ struct dump_iterator *i;
+
+ i = kzalloc(sizeof(struct dump_iterator), GFP_KERNEL);
+ if (!i)
+ return -ENOMEM;
+
+ file->private_data = i;
+ i->c = c;
+ bch_keybuf_init(&i->keys, dump_pred);
+ i->keys.last_scanned = KEY(0, 0, 0);
+
+ return 0;
+}
+
+static int bch_dump_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static const struct file_operations cache_set_debug_ops = {
+ .owner = THIS_MODULE,
+ .open = bch_dump_open,
+ .read = bch_dump_read,
+ .release = bch_dump_release
+};
+
+void bch_debug_init_cache_set(struct cache_set *c)
+{
+ if (!IS_ERR_OR_NULL(debug)) {
+ char name[50];
+ snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);
+
+ c->debug = debugfs_create_file(name, 0400, debug, c,
+ &cache_set_debug_ops);
+ }
+}
+
+#endif
+
+/* Fuzz tester has rotted: */
+#if 0
+
+static ssize_t btree_fuzz(struct kobject *k, struct kobj_attribute *a,
+ const char *buffer, size_t size)
+{
+ void dump(struct btree *b)
+ {
+ struct bset *i;
+
+ for (i = b->sets[0].data;
+ index(i, b) < btree_blocks(b) &&
+ i->seq == b->sets[0].data->seq;
+ i = ((void *) i) + set_blocks(i, b->c) * block_bytes(b->c))
+ dump_bset(b, i);
+ }
+
+ struct cache_sb *sb;
+ struct cache_set *c;
+ struct btree *all[3], *b, *fill, *orig;
+ int j;
+
+ struct btree_op op;
+ bch_btree_op_init_stack(&op);
+
+ sb = kzalloc(sizeof(struct cache_sb), GFP_KERNEL);
+ if (!sb)
+ return -ENOMEM;
+
+ sb->bucket_size = 128;
+ sb->block_size = 4;
+
+ c = bch_cache_set_alloc(sb);
+ if (!c)
+ return -ENOMEM;
+
+ for (j = 0; j < 3; j++) {
+ BUG_ON(list_empty(&c->btree_cache));
+ all[j] = list_first_entry(&c->btree_cache, struct btree, list);
+ list_del_init(&all[j]->list);
+
+ all[j]->key = KEY(0, 0, c->sb.bucket_size);
+ bkey_copy_key(&all[j]->key, &MAX_KEY);
+ }
+
+ b = all[0];
+ fill = all[1];
+ orig = all[2];
+
+ while (1) {
+ for (j = 0; j < 3; j++)
+ all[j]->written = all[j]->nsets = 0;
+
+ bch_bset_init_next(b);
+
+ while (1) {
+ struct bset *i = write_block(b);
+ struct bkey *k = op.keys.top;
+ unsigned rand;
+
+ bkey_init(k);
+ rand = get_random_int();
+
+ op.type = rand & 1
+ ? BTREE_INSERT
+ : BTREE_REPLACE;
+ rand >>= 1;
+
+ SET_KEY_SIZE(k, bucket_remainder(c, rand));
+ rand >>= c->bucket_bits;
+ rand &= 1024 * 512 - 1;
+ rand += c->sb.bucket_size;
+ SET_KEY_OFFSET(k, rand);
+#if 0
+ SET_KEY_PTRS(k, 1);
+#endif
+ bch_keylist_push(&op.keys);
+ bch_btree_insert_keys(b, &op);
+
+ if (should_split(b) ||
+ set_blocks(i, b->c) !=
+ __set_blocks(i, i->keys + 15, b->c)) {
+ i->csum = csum_set(i);
+
+ memcpy(write_block(fill),
+ i, set_bytes(i));
+
+ b->written += set_blocks(i, b->c);
+ fill->written = b->written;
+ if (b->written == btree_blocks(b))
+ break;
+
+ bch_btree_sort_lazy(b);
+ bch_bset_init_next(b);
+ }
+ }
+
+ memcpy(orig->sets[0].data,
+ fill->sets[0].data,
+ btree_bytes(c));
+
+ bch_btree_sort(b);
+ fill->written = 0;
+ bch_btree_read_done(&fill->io.cl);
+
+ if (b->sets[0].data->keys != fill->sets[0].data->keys ||
+ memcmp(b->sets[0].data->start,
+ fill->sets[0].data->start,
+ b->sets[0].data->keys * sizeof(uint64_t))) {
+ struct bset *i = b->sets[0].data;
+ struct bkey *k, *l;
+
+ for (k = i->start,
+ l = fill->sets[0].data->start;
+ k < end(i);
+ k = bkey_next(k), l = bkey_next(l))
+ if (bkey_cmp(k, l) ||
+ KEY_SIZE(k) != KEY_SIZE(l))
+ pr_err("key %zi differs: %s != %s",
+ (uint64_t *) k - i->d,
+ pkey(k), pkey(l));
+
+ for (j = 0; j < 3; j++) {
+ pr_err("**** Set %i ****", j);
+ dump(all[j]);
+ }
+ panic("\n");
+ }
+
+ pr_info("fuzz complete: %i keys", b->sets[0].data->keys);
+ }
+}
+
+kobj_attribute_write(fuzz, btree_fuzz);
+#endif
+
+void bch_debug_exit(void)
+{
+ if (!IS_ERR_OR_NULL(debug))
+ debugfs_remove_recursive(debug);
+}
+
+int __init bch_debug_init(struct kobject *kobj)
+{
+ int ret = 0;
+#if 0
+ ret = sysfs_create_file(kobj, &ksysfs_fuzz.attr);
+ if (ret)
+ return ret;
+#endif
+
+ debug = debugfs_create_dir("bcache", NULL);
+ return ret;
+}
diff --git a/drivers/md/bcache/debug.h b/drivers/md/bcache/debug.h
new file mode 100644
index 0000000..f9378a2
--- /dev/null
+++ b/drivers/md/bcache/debug.h
@@ -0,0 +1,54 @@
+#ifndef _BCACHE_DEBUG_H
+#define _BCACHE_DEBUG_H
+
+/* Btree/bkey debug printing */
+
+#define KEYHACK_SIZE 80
+struct keyprint_hack {
+ char s[KEYHACK_SIZE];
+};
+
+struct keyprint_hack bch_pkey(const struct bkey *k);
+struct keyprint_hack bch_pbtree(const struct btree *b);
+#define pkey(k) (&bch_pkey(k).s[0])
+#define pbtree(b) (&bch_pbtree(b).s[0])
+
+#ifdef CONFIG_BCACHE_EDEBUG
+
+unsigned bch_count_data(struct btree *);
+void bch_check_key_order_msg(struct btree *, struct bset *, const char *, ...);
+void bch_check_keys(struct btree *, const char *, ...);
+
+#define bch_check_key_order(b, i) \
+ bch_check_key_order_msg(b, i, "keys out of order")
+#define EBUG_ON(cond) BUG_ON(cond)
+
+#else /* EDEBUG */
+
+#define bch_count_data(b) 0
+#define bch_check_key_order(b, i) do {} while (0)
+#define bch_check_key_order_msg(b, i, ...) do {} while (0)
+#define bch_check_keys(b, ...) do {} while (0)
+#define EBUG_ON(cond) do {} while (0)
+
+#endif
+
+#ifdef CONFIG_BCACHE_DEBUG
+
+void bch_btree_verify(struct btree *, struct bset *);
+void bch_data_verify(struct search *);
+
+#else /* DEBUG */
+
+static inline void bch_btree_verify(struct btree *b, struct bset *i) {}
+static inline void bch_data_verify(struct search *s) {};
+
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+void bch_debug_init_cache_set(struct cache_set *);
+#else
+static inline void bch_debug_init_cache_set(struct cache_set *c) {}
+#endif
+
+#endif
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
new file mode 100644
index 0000000..48efd4d
--- /dev/null
+++ b/drivers/md/bcache/io.c
@@ -0,0 +1,397 @@
+/*
+ * Some low level IO code, and hacks for various block layer limitations
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "bset.h"
+#include "debug.h"
+
+static void bch_bi_idx_hack_endio(struct bio *bio, int error)
+{
+ struct bio *p = bio->bi_private;
+
+ bio_endio(p, error);
+ bio_put(bio);
+}
+
+static void bch_generic_make_request_hack(struct bio *bio)
+{
+ if (bio->bi_idx) {
+ struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
+
+ memcpy(clone->bi_io_vec,
+ bio_iovec(bio),
+ bio_segments(bio) * sizeof(struct bio_vec));
+
+ clone->bi_sector = bio->bi_sector;
+ clone->bi_bdev = bio->bi_bdev;
+ clone->bi_rw = bio->bi_rw;
+ clone->bi_vcnt = bio_segments(bio);
+ clone->bi_size = bio->bi_size;
+
+ clone->bi_private = bio;
+ clone->bi_end_io = bch_bi_idx_hack_endio;
+
+ bio = clone;
+ }
+
+ /*
+ * Hack, since drivers that clone bios clone up to bi_max_vecs, but our
+ * bios might have had more than that (before we split them per device
+ * limitations).
+ *
+ * To be taken out once immutable bvec stuff is in.
+ */
+ bio->bi_max_vecs = bio->bi_vcnt;
+
+ generic_make_request(bio);
+}
+
+/**
+ * bch_bio_split - split a bio
+ * @bio: bio to split
+ * @sectors: number of sectors to split from the front of @bio
+ * @gfp: gfp mask
+ * @bs: bio set to allocate from
+ *
+ * Allocates and returns a new bio which represents @sectors from the start of
+ * @bio, and updates @bio to represent the remaining sectors.
+ *
+ * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio
+ * unchanged.
+ *
+ * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
+ * bvec boundary; it is the caller's responsibility to ensure that @bio is not
+ * freed before the split.
+ *
+ * If bch_bio_split() is running under generic_make_request(), it's not safe to
+ * allocate more than one bio from the same bio set. Therefore, if it is running
+ * under generic_make_request() it masks out __GFP_WAIT when doing the
+ * allocation. The caller must check for failure if there's any possibility of
+ * it being called from under generic_make_request(); it is then the caller's
+ * responsibility to retry from a safe context (by e.g. punting to workqueue).
+ */
+struct bio *bch_bio_split(struct bio *bio, int sectors,
+ gfp_t gfp, struct bio_set *bs)
+{
+ unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9;
+ struct bio_vec *bv;
+ struct bio *ret = NULL;
+
+ BUG_ON(sectors <= 0);
+
+ /*
+ * If we're being called from underneath generic_make_request() and we
+ * already allocated any bios from this bio set, we risk deadlock if we
+ * use the mempool. So instead, we possibly fail and let the caller punt
+ * to workqueue or somesuch and retry in a safe context.
+ */
+ if (current->bio_list)
+ gfp &= ~__GFP_WAIT;
+
+ if (sectors >= bio_sectors(bio))
+ return bio;
+
+ if (bio->bi_rw & REQ_DISCARD) {
+ ret = bio_alloc_bioset(gfp, 1, bs);
+ idx = 0;
+ goto out;
+ }
+
+ bio_for_each_segment(bv, bio, idx) {
+ vcnt = idx - bio->bi_idx;
+
+ if (!nbytes) {
+ ret = bio_alloc_bioset(gfp, vcnt, bs);
+ if (!ret)
+ return NULL;
+
+ memcpy(ret->bi_io_vec, bio_iovec(bio),
+ sizeof(struct bio_vec) * vcnt);
+
+ break;
+ } else if (nbytes < bv->bv_len) {
+ ret = bio_alloc_bioset(gfp, ++vcnt, bs);
+ if (!ret)
+ return NULL;
+
+ memcpy(ret->bi_io_vec, bio_iovec(bio),
+ sizeof(struct bio_vec) * vcnt);
+
+ ret->bi_io_vec[vcnt - 1].bv_len = nbytes;
+ bv->bv_offset += nbytes;
+ bv->bv_len -= nbytes;
+ break;
+ }
+
+ nbytes -= bv->bv_len;
+ }
+out:
+ ret->bi_bdev = bio->bi_bdev;
+ ret->bi_sector = bio->bi_sector;
+ ret->bi_size = sectors << 9;
+ ret->bi_rw = bio->bi_rw;
+ ret->bi_vcnt = vcnt;
+ ret->bi_max_vecs = vcnt;
+
+ bio->bi_sector += sectors;
+ bio->bi_size -= sectors << 9;
+ bio->bi_idx = idx;
+
+ if (bio_integrity(bio)) {
+ if (bio_integrity_clone(ret, bio, gfp)) {
+ bio_put(ret);
+ return NULL;
+ }
+
+ bio_integrity_trim(ret, 0, bio_sectors(ret));
+ bio_integrity_trim(bio, bio_sectors(ret), bio_sectors(bio));
+ }
+
+ return ret;
+}
+
+static unsigned bch_bio_max_sectors(struct bio *bio)
+{
+ unsigned ret = bio_sectors(bio);
+ struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+ unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
+ queue_max_segments(q));
+ struct bio_vec *bv, *end = bio_iovec(bio) +
+ min_t(int, bio_segments(bio), max_segments);
+
+ if (bio->bi_rw & REQ_DISCARD)
+ return min(ret, q->limits.max_discard_sectors);
+
+ if (bio_segments(bio) > max_segments ||
+ q->merge_bvec_fn) {
+ ret = 0;
+
+ for (bv = bio_iovec(bio); bv < end; bv++) {
+ struct bvec_merge_data bvm = {
+ .bi_bdev = bio->bi_bdev,
+ .bi_sector = bio->bi_sector,
+ .bi_size = ret << 9,
+ .bi_rw = bio->bi_rw,
+ };
+
+ if (q->merge_bvec_fn &&
+ q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len)
+ break;
+
+ ret += bv->bv_len >> 9;
+ }
+ }
+
+ ret = min(ret, queue_max_sectors(q));
+
+ WARN_ON(!ret);
+ ret = max_t(int, ret, bio_iovec(bio)->bv_len >> 9);
+
+ return ret;
+}
+
+static void bch_bio_submit_split_done(struct closure *cl)
+{
+ struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);
+
+ s->bio->bi_end_io = s->bi_end_io;
+ s->bio->bi_private = s->bi_private;
+ bio_endio(s->bio, 0);
+
+ closure_debug_destroy(&s->cl);
+ mempool_free(s, s->p->bio_split_hook);
+}
+
+static void bch_bio_submit_split_endio(struct bio *bio, int error)
+{
+ struct closure *cl = bio->bi_private;
+ struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);
+
+ if (error)
+ clear_bit(BIO_UPTODATE, &s->bio->bi_flags);
+
+ bio_put(bio);
+ closure_put(cl);
+}
+
+static void __bch_bio_submit_split(struct closure *cl)
+{
+ struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);
+ struct bio *bio = s->bio, *n;
+
+ do {
+ n = bch_bio_split(bio, bch_bio_max_sectors(bio),
+ GFP_NOIO, s->p->bio_split);
+ if (!n)
+ continue_at(cl, __bch_bio_submit_split, system_wq);
+
+ n->bi_end_io = bch_bio_submit_split_endio;
+ n->bi_private = cl;
+
+ closure_get(cl);
+ bch_generic_make_request_hack(n);
+ } while (n != bio);
+
+ continue_at(cl, bch_bio_submit_split_done, NULL);
+}
+
+void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
+{
+ struct bio_split_hook *s;
+
+ if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
+ goto submit;
+
+ if (bio_sectors(bio) <= bch_bio_max_sectors(bio))
+ goto submit;
+
+ s = mempool_alloc(p->bio_split_hook, GFP_NOIO);
+
+ s->bio = bio;
+ s->p = p;
+ s->bi_end_io = bio->bi_end_io;
+ s->bi_private = bio->bi_private;
+ bio_get(bio);
+
+ closure_call(&s->cl, __bch_bio_submit_split, NULL, NULL);
+ return;
+submit:
+ bch_generic_make_request_hack(bio);
+}
+
+/* Bios with headers */
+
+void bch_bbio_free(struct bio *bio, struct cache_set *c)
+{
+ struct bbio *b = container_of(bio, struct bbio, bio);
+ mempool_free(b, c->bio_meta);
+}
+
+struct bio *bch_bbio_alloc(struct cache_set *c)
+{
+ struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
+ struct bio *bio = &b->bio;
+
+ bio_init(bio);
+ bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
+ bio->bi_max_vecs = bucket_pages(c);
+ bio->bi_io_vec = bio->bi_inline_vecs;
+
+ return bio;
+}
+
+void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
+{
+ struct bbio *b = container_of(bio, struct bbio, bio);
+
+ bio->bi_sector = PTR_OFFSET(&b->key, 0);
+ bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;
+
+ b->submit_time_us = local_clock_us();
+ closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
+}
+
+void bch_submit_bbio(struct bio *bio, struct cache_set *c,
+ struct bkey *k, unsigned ptr)
+{
+ struct bbio *b = container_of(bio, struct bbio, bio);
+ bch_bkey_copy_single_ptr(&b->key, k, ptr);
+ __bch_submit_bbio(bio, c);
+}
+
+/* IO errors */
+
+void bch_count_io_errors(struct cache *ca, int error, const char *m)
+{
+ /*
+ * The halflife of an error is:
+ * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
+ */
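+ /*
+  * (Each successful decay below scales io_errors by 127/128, so the
+  * count halves after n decays where (127/128)^n = 1/2, i.e.
+  * n = log(2) / log(128/127) ~= 88.)
+  */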
+
+ if (ca->set->error_decay) {
+ unsigned count = atomic_inc_return(&ca->io_count);
+
+ while (count > ca->set->error_decay) {
+ unsigned errors;
+ unsigned old = count;
+ unsigned new = count - ca->set->error_decay;
+
+ /*
+ * First we subtract refresh from count; each time we
+ * successfully do so, we rescale the errors once:
+ */
+
+ count = atomic_cmpxchg(&ca->io_count, old, new);
+
+ if (count == old) {
+ count = new;
+
+ errors = atomic_read(&ca->io_errors);
+ do {
+ old = errors;
+ new = ((uint64_t) errors * 127) / 128;
+ errors = atomic_cmpxchg(&ca->io_errors,
+ old, new);
+ } while (old != errors);
+ }
+ }
+ }
+
+ if (error) {
+ char buf[BDEVNAME_SIZE];
+ unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
+ &ca->io_errors);
+ errors >>= IO_ERROR_SHIFT;
+
+ if (errors < ca->set->error_limit)
+ pr_err("%s: IO error on %s, recovering",
+ bdevname(ca->bdev, buf), m);
+ else
+ bch_cache_set_error(ca->set,
+ "%s: too many IO errors %s",
+ bdevname(ca->bdev, buf), m);
+ }
+}
+
+void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
+ int error, const char *m)
+{
+ struct bbio *b = container_of(bio, struct bbio, bio);
+ struct cache *ca = PTR_CACHE(c, &b->key, 0);
+
+ unsigned threshold = bio->bi_rw & REQ_WRITE
+ ? c->congested_write_threshold_us
+ : c->congested_read_threshold_us;
+
+ if (threshold) {
+ unsigned t = local_clock_us();
+
+ int us = t - b->submit_time_us;
+ int congested = atomic_read(&c->congested);
+
+ if (us > (int) threshold) {
+ int ms = us / 1024;
+ c->congested_last_us = t;
+
+ ms = min(ms, CONGESTED_MAX + congested);
+ atomic_sub(ms, &c->congested);
+ } else if (congested < 0)
+ atomic_inc(&c->congested);
+ }
+
+ bch_count_io_errors(ca, error, m);
+}
+
+void bch_bbio_endio(struct cache_set *c, struct bio *bio,
+ int error, const char *m)
+{
+ struct closure *cl = bio->bi_private;
+
+ bch_bbio_count_io_errors(c, bio, error, m);
+ bio_put(bio);
+ closure_put(cl);
+}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
new file mode 100644
index 0000000..8c8dfdc
--- /dev/null
+++ b/drivers/md/bcache/journal.c
@@ -0,0 +1,787 @@
+/*
+ * bcache journalling code, for btree insertions
+ *
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "request.h"
+
+/*
+ * Journal replay/recovery:
+ *
+ * This code is all driven from run_cache_set(); we first read the journal
+ * entries, do some other stuff, then we mark all the keys in the journal
+ * entries (same as garbage collection would), then we replay them - reinserting
+ * them into the cache in precisely the same order as they appear in the
+ * journal.
+ *
+ * We only journal keys that go in leaf nodes, which simplifies things quite a
+ * bit.
+ */
+
+static void journal_read_endio(struct bio *bio, int error)
+{
+ struct closure *cl = bio->bi_private;
+ closure_put(cl);
+}
+
+static int journal_read_bucket(struct cache *ca, struct list_head *list,
+ struct btree_op *op, unsigned bucket_index)
+{
+ struct journal_device *ja = &ca->journal;
+ struct bio *bio = &ja->bio;
+
+ struct journal_replay *i;
+ struct jset *j, *data = ca->set->journal.w[0].data;
+ unsigned len, left, offset = 0;
+ int ret = 0;
+ sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);
+
+ pr_debug("reading %llu", (uint64_t) bucket);
+
+ while (offset < ca->sb.bucket_size) {
+reread: left = ca->sb.bucket_size - offset;
+ len = min_t(unsigned, left, PAGE_SECTORS * 8);
+
+ bio_reset(bio);
+ bio->bi_sector = bucket + offset;
+ bio->bi_bdev = ca->bdev;
+ bio->bi_rw = READ;
+ bio->bi_size = len << 9;
+
+ bio->bi_end_io = journal_read_endio;
+ bio->bi_private = &op->cl;
+ bch_bio_map(bio, data);
+
+ closure_bio_submit(bio, &op->cl, ca);
+ closure_sync(&op->cl);
+
+ /* This function could be simpler now since we no longer write
+ * journal entries that overlap bucket boundaries; this means
+ * the start of a bucket will always have a valid journal entry
+ * if it has any journal entries at all.
+ */
+
+ j = data;
+ while (len) {
+ struct list_head *where;
+ size_t blocks, bytes = set_bytes(j);
+
+ if (j->magic != jset_magic(ca->set))
+ return ret;
+
+ if (bytes > left << 9)
+ return ret;
+
+ if (bytes > len << 9)
+ goto reread;
+
+ if (j->csum != csum_set(j))
+ return ret;
+
+ blocks = set_blocks(j, ca->set);
+
+ while (!list_empty(list)) {
+ i = list_first_entry(list,
+ struct journal_replay, list);
+ if (i->j.seq >= j->last_seq)
+ break;
+ list_del(&i->list);
+ kfree(i);
+ }
+
+ list_for_each_entry_reverse(i, list, list) {
+ if (j->seq == i->j.seq)
+ goto next_set;
+
+ if (j->seq < i->j.last_seq)
+ goto next_set;
+
+ if (j->seq > i->j.seq) {
+ where = &i->list;
+ goto add;
+ }
+ }
+
+ where = list;
+add:
+ i = kmalloc(offsetof(struct journal_replay, j) +
+ bytes, GFP_KERNEL);
+ if (!i)
+ return -ENOMEM;
+ memcpy(&i->j, j, bytes);
+ list_add(&i->list, where);
+ ret = 1;
+
+ ja->seq[bucket_index] = j->seq;
+next_set:
+ offset += blocks * ca->sb.block_size;
+ len -= blocks * ca->sb.block_size;
+ j = ((void *) j) + blocks * block_bytes(ca);
+ }
+ }
+
+ return ret;
+}
+
+int bch_journal_read(struct cache_set *c, struct list_head *list,
+ struct btree_op *op)
+{
+#define read_bucket(b) \
+ ({ \
+ int ret = journal_read_bucket(ca, list, op, b); \
+ __set_bit(b, bitmap); \
+ if (ret < 0) \
+ return ret; \
+ ret; \
+ })
+
+ struct cache *ca;
+ unsigned iter;
+
+ for_each_cache(ca, c, iter) {
+ struct journal_device *ja = &ca->journal;
+ unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG];
+ unsigned i, l, r, m;
+ uint64_t seq;
+
+ bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
+ pr_debug("%u journal buckets", ca->sb.njournal_buckets);
+
+ /* Read journal buckets ordered by golden ratio hash to quickly
+ * find a sequence of buckets with valid journal entries
+ */
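+ /*
+  * (2654435769 is 2^32 / golden ratio - the usual Fibonacci hashing
+  * multiplier - so successive values of i probe bucket indexes spread
+  * roughly evenly around the journal.)
+  */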
+ for (i = 0; i < ca->sb.njournal_buckets; i++) {
+ l = (i * 2654435769U) % ca->sb.njournal_buckets;
+
+ if (test_bit(l, bitmap))
+ break;
+
+ if (read_bucket(l))
+ goto bsearch;
+ }
+
+ /* If that fails, check all the buckets we haven't checked
+ * already
+ */
+ pr_debug("falling back to linear search");
+
+ for (l = 0; l < ca->sb.njournal_buckets; l++) {
+ if (test_bit(l, bitmap))
+ continue;
+
+ if (read_bucket(l))
+ goto bsearch;
+ }
+bsearch:
+ /* Binary search */
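+ /*
+  * At this point l is a bucket known to contain journal entries and r
+  * is the next bucket we've already read; search between them for
+  * where the sequence of valid journal entries ends.
+  */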
+ m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
+ pr_debug("starting binary search, l %u r %u", l, r);
+
+ while (l + 1 < r) {
+ m = (l + r) >> 1;
+
+ if (read_bucket(m))
+ l = m;
+ else
+ r = m;
+ }
+
+ /* Read buckets in reverse order until we stop finding more
+ * journal entries
+ */
+ pr_debug("finishing up");
+ l = m;
+
+ while (1) {
+ if (!l--)
+ l = ca->sb.njournal_buckets - 1;
+
+ if (l == m)
+ break;
+
+ if (test_bit(l, bitmap))
+ continue;
+
+ if (!read_bucket(l))
+ break;
+ }
+
+ seq = 0;
+
+ for (i = 0; i < ca->sb.njournal_buckets; i++)
+ if (ja->seq[i] > seq) {
+ seq = ja->seq[i];
+ ja->cur_idx = ja->discard_idx =
+ ja->last_idx = i;
+
+ }
+ }
+
+ c->journal.seq = list_entry(list->prev,
+ struct journal_replay,
+ list)->j.seq;
+
+ return 0;
+#undef read_bucket
+}
+
+void bch_journal_mark(struct cache_set *c, struct list_head *list)
+{
+ atomic_t p = { 0 };
+ struct bkey *k;
+ struct journal_replay *i;
+ struct journal *j = &c->journal;
+ uint64_t last = j->seq;
+
+ /*
+ * journal.pin should never fill up - we never write a journal
+ * entry when it would fill up. But if for some reason it does, we
+ * iterate over the list in reverse order so that we can just skip that
+ * refcount instead of bugging.
+ */
+
+ list_for_each_entry_reverse(i, list, list) {
+ BUG_ON(last < i->j.seq);
+ i->pin = NULL;
+
+ while (last-- != i->j.seq)
+ if (fifo_free(&j->pin) > 1) {
+ fifo_push_front(&j->pin, p);
+ atomic_set(&fifo_front(&j->pin), 0);
+ }
+
+ if (fifo_free(&j->pin) > 1) {
+ fifo_push_front(&j->pin, p);
+ i->pin = &fifo_front(&j->pin);
+ atomic_set(i->pin, 1);
+ }
+
+ for (k = i->j.start;
+ k < end(&i->j);
+ k = bkey_next(k)) {
+ unsigned j;
+
+ for (j = 0; j < KEY_PTRS(k); j++) {
+ struct bucket *g = PTR_BUCKET(c, k, j);
+ atomic_inc(&g->pin);
+
+ if (g->prio == BTREE_PRIO &&
+ !ptr_stale(c, k, j))
+ g->prio = INITIAL_PRIO;
+ }
+
+ __bch_btree_mark_key(c, 0, k);
+ }
+ }
+}
+
+int bch_journal_replay(struct cache_set *s, struct list_head *list,
+ struct btree_op *op)
+{
+ int ret = 0, keys = 0, entries = 0;
+ struct bkey *k;
+ struct journal_replay *i =
+ list_entry(list->prev, struct journal_replay, list);
+
+ uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
+
+ list_for_each_entry(i, list, list) {
+ BUG_ON(i->pin && atomic_read(i->pin) != 1);
+
+ if (n != i->j.seq)
+ pr_err(
+ "journal entries %llu-%llu missing! (replaying %llu-%llu)\n",
+ n, i->j.seq - 1, start, end);
+
+ for (k = i->j.start;
+ k < end(&i->j);
+ k = bkey_next(k)) {
+ pr_debug("%s", pkey(k));
+ bkey_copy(op->keys.top, k);
+ bch_keylist_push(&op->keys);
+
+ op->journal = i->pin;
+ atomic_inc(op->journal);
+
+ ret = bch_btree_insert(op, s);
+ if (ret)
+ goto err;
+
+ BUG_ON(!bch_keylist_empty(&op->keys));
+ keys++;
+
+ cond_resched();
+ }
+
+ if (i->pin)
+ atomic_dec(i->pin);
+ n = i->j.seq + 1;
+ entries++;
+ }
+
+ pr_info("journal replay done, %i keys in %i entries, seq %llu",
+ keys, entries, end);
+
+ while (!list_empty(list)) {
+ i = list_first_entry(list, struct journal_replay, list);
+ list_del(&i->list);
+ kfree(i);
+ }
+err:
+ closure_sync(&op->cl);
+ return ret;
+}
+
+/* Journalling */
+
+static void btree_flush_write(struct cache_set *c)
+{
+ /*
+ * Try to find the btree node that references the oldest journal
+ * entry; best is our current candidate and is locked if non-NULL:
+ */
+ struct btree *b, *best = NULL;
+ unsigned iter;
+
+ for_each_cached_btree(b, c, iter) {
+ if (!down_write_trylock(&b->lock))
+ continue;
+
+ if (!btree_node_dirty(b) ||
+ !btree_current_write(b)->journal) {
+ rw_unlock(true, b);
+ continue;
+ }
+
+ if (!best)
+ best = b;
+ else if (journal_pin_cmp(c,
+ btree_current_write(best),
+ btree_current_write(b))) {
+ rw_unlock(true, best);
+ best = b;
+ } else
+ rw_unlock(true, b);
+ }
+
+ if (best)
+ goto out;
+
+ /* We can't find the best btree node, just pick the first */
+ list_for_each_entry(b, &c->btree_cache, list)
+ if (!b->level && btree_node_dirty(b)) {
+ best = b;
+ rw_lock(true, best, best->level);
+ goto found;
+ }
+
+out:
+ if (!best)
+ return;
+found:
+ if (btree_node_dirty(best))
+ bch_btree_write(best, true, NULL);
+ rw_unlock(true, best);
+}
+
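+/*
+ * The pin fifo holds one refcount per open journal entry, the newest of which
+ * is j->seq; so the oldest journal entry still open is
+ * j->seq - (fifo_used() - 1):
+ */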
+#define last_seq(j) ((j)->seq - fifo_used(&(j)->pin) + 1)
+
+static void journal_discard_endio(struct bio *bio, int error)
+{
+ struct journal_device *ja =
+ container_of(bio, struct journal_device, discard_bio);
+ struct cache *ca = container_of(ja, struct cache, journal);
+
+ atomic_set(&ja->discard_in_flight, DISCARD_DONE);
+
+ closure_wake_up(&ca->set->journal.wait);
+ closure_put(&ca->set->cl);
+}
+
+static void journal_discard_work(struct work_struct *work)
+{
+ struct journal_device *ja =
+ container_of(work, struct journal_device, discard_work);
+
+ submit_bio(0, &ja->discard_bio);
+}
+
+static void do_journal_discard(struct cache *ca)
+{
+ struct journal_device *ja = &ca->journal;
+ struct bio *bio = &ja->discard_bio;
+
+ if (!ca->discard) {
+ ja->discard_idx = ja->last_idx;
+ return;
+ }
+
+ switch (atomic_read(&ja->discard_in_flight)) {
+ case DISCARD_IN_FLIGHT:
+ return;
+
+ case DISCARD_DONE:
+ ja->discard_idx = (ja->discard_idx + 1) %
+ ca->sb.njournal_buckets;
+
+ atomic_set(&ja->discard_in_flight, DISCARD_READY);
+ /* fallthrough */
+
+ case DISCARD_READY:
+ if (ja->discard_idx == ja->last_idx)
+ return;
+
+ atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
+
+ bio_init(bio);
+ bio->bi_sector = bucket_to_sector(ca->set,
+ ca->sb.d[ja->discard_idx]);
+ bio->bi_bdev = ca->bdev;
+ bio->bi_rw = REQ_WRITE|REQ_DISCARD;
+ bio->bi_max_vecs = 1;
+ bio->bi_io_vec = bio->bi_inline_vecs;
+ bio->bi_size = bucket_bytes(ca);
+ bio->bi_end_io = journal_discard_endio;
+
+ closure_get(&ca->set->cl);
+ INIT_WORK(&ja->discard_work, journal_discard_work);
+ schedule_work(&ja->discard_work);
+ }
+}
+
+static void journal_reclaim(struct cache_set *c)
+{
+ struct bkey *k = &c->journal.key;
+ struct cache *ca;
+ uint64_t last_seq;
+ unsigned iter, n = 0;
+ atomic_t p;
+
+ while (!atomic_read(&fifo_front(&c->journal.pin)))
+ fifo_pop(&c->journal.pin, p);
+
+ last_seq = last_seq(&c->journal);
+
+ /* Update last_idx */
+
+ for_each_cache(ca, c, iter) {
+ struct journal_device *ja = &ca->journal;
+
+ while (ja->last_idx != ja->cur_idx &&
+ ja->seq[ja->last_idx] < last_seq)
+ ja->last_idx = (ja->last_idx + 1) %
+ ca->sb.njournal_buckets;
+ }
+
+ for_each_cache(ca, c, iter)
+ do_journal_discard(ca);
+
+ if (c->journal.blocks_free)
+ return;
+
+ /*
+ * Allocate:
+ * XXX: Sort by free journal space
+ */
+
+ for_each_cache(ca, c, iter) {
+ struct journal_device *ja = &ca->journal;
+ unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
+
+ /* No space available on this device */
+ if (next == ja->discard_idx)
+ continue;
+
+ ja->cur_idx = next;
+ k->ptr[n++] = PTR(0,
+ bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
+ ca->sb.nr_this_dev);
+ }
+
+ bkey_init(k);
+ SET_KEY_PTRS(k, n);
+
+ if (n)
+ c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
+
+ if (!journal_full(&c->journal))
+ __closure_wake_up(&c->journal.wait);
+}
+
+void bch_journal_next(struct journal *j)
+{
+ atomic_t p = { 1 };
+
+ j->cur = (j->cur == j->w)
+ ? &j->w[1]
+ : &j->w[0];
+
+ /*
+ * The fifo_push() needs to happen at the same time as j->seq is
+ * incremented for last_seq() to be calculated correctly
+ */
+ BUG_ON(!fifo_push(&j->pin, p));
+ atomic_set(&fifo_back(&j->pin), 1);
+
+ j->cur->data->seq = ++j->seq;
+ j->cur->need_write = false;
+ j->cur->data->keys = 0;
+
+ if (fifo_full(&j->pin))
+ pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
+}
+
+static void journal_write_endio(struct bio *bio, int error)
+{
+ struct journal_write *w = bio->bi_private;
+
+ cache_set_err_on(error, w->c, "journal io error");
+ closure_put(&w->c->journal.io.cl);
+}
+
+static void journal_write(struct closure *);
+
+static void journal_write_done(struct closure *cl)
+{
+ struct journal *j = container_of(cl, struct journal, io.cl);
+ struct cache_set *c = container_of(j, struct cache_set, journal);
+
+ struct journal_write *w = (j->cur == j->w)
+ ? &j->w[1]
+ : &j->w[0];
+
+ __closure_wake_up(&w->wait);
+
+ if (c->journal_delay_ms)
+ closure_delay(&j->io, msecs_to_jiffies(c->journal_delay_ms));
+
+ continue_at(cl, journal_write, system_wq);
+}
+
+static void journal_write_unlocked(struct closure *cl)
+ __releases(c->journal.lock)
+{
+ struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl);
+ struct cache *ca;
+ struct journal_write *w = c->journal.cur;
+ struct bkey *k = &c->journal.key;
+ unsigned i, sectors = set_blocks(w->data, c) * c->sb.block_size;
+
+ struct bio *bio;
+ struct bio_list list;
+ bio_list_init(&list);
+
+ if (!w->need_write) {
+ /*
+ * XXX: have to unlock closure before we unlock journal lock,
+ * else we race with bch_journal(). But this way we race
+ * against cache set unregister. Doh.
+ */
+ set_closure_fn(cl, NULL, NULL);
+ closure_sub(cl, CLOSURE_RUNNING + 1);
+ spin_unlock(&c->journal.lock);
+ return;
+ } else if (journal_full(&c->journal)) {
+ journal_reclaim(c);
+ spin_unlock(&c->journal.lock);
+
+ btree_flush_write(c);
+ continue_at(cl, journal_write, system_wq);
+ }
+
+ c->journal.blocks_free -= set_blocks(w->data, c);
+
+ w->data->btree_level = c->root->level;
+
+ bkey_copy(&w->data->btree_root, &c->root->key);
+ bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
+
+ for_each_cache(ca, c, i)
+ w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
+
+ w->data->magic = jset_magic(c);
+ w->data->version = BCACHE_JSET_VERSION;
+ w->data->last_seq = last_seq(&c->journal);
+ w->data->csum = csum_set(w->data);
+
+ for (i = 0; i < KEY_PTRS(k); i++) {
+ ca = PTR_CACHE(c, k, i);
+ bio = &ca->journal.bio;
+
+ atomic_long_add(sectors, &ca->meta_sectors_written);
+
+ bio_reset(bio);
+ bio->bi_sector = PTR_OFFSET(k, i);
+ bio->bi_bdev = ca->bdev;
+ bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH;
+ bio->bi_size = sectors << 9;
+
+ bio->bi_end_io = journal_write_endio;
+ bio->bi_private = w;
+ bch_bio_map(bio, w->data);
+
+ trace_bcache_journal_write(bio);
+ bio_list_add(&list, bio);
+
+ SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);
+
+ ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
+ }
+
+ atomic_dec_bug(&fifo_back(&c->journal.pin));
+ bch_journal_next(&c->journal);
+ journal_reclaim(c);
+
+ spin_unlock(&c->journal.lock);
+
+ while ((bio = bio_list_pop(&list)))
+ closure_bio_submit(bio, cl, c->cache[0]);
+
+ continue_at(cl, journal_write_done, NULL);
+}
+
+static void journal_write(struct closure *cl)
+{
+ struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl);
+
+ spin_lock(&c->journal.lock);
+ journal_write_unlocked(cl);
+}
+
+static void __journal_try_write(struct cache_set *c, bool noflush)
+ __releases(c->journal.lock)
+{
+ struct closure *cl = &c->journal.io.cl;
+
+ if (!closure_trylock(cl, &c->cl))
+ spin_unlock(&c->journal.lock);
+ else if (noflush && journal_full(&c->journal)) {
+ spin_unlock(&c->journal.lock);
+ continue_at(cl, journal_write, system_wq);
+ } else
+ journal_write_unlocked(cl);
+}
+
+#define journal_try_write(c) __journal_try_write(c, false)
+
+void bch_journal_meta(struct cache_set *c, struct closure *cl)
+{
+ struct journal_write *w;
+
+ if (CACHE_SYNC(&c->sb)) {
+ spin_lock(&c->journal.lock);
+
+ w = c->journal.cur;
+ w->need_write = true;
+
+ if (cl)
+ BUG_ON(!closure_wait(&w->wait, cl));
+
+ __journal_try_write(c, true);
+ }
+}
+
+/*
+ * Entry point to the journalling code - bch_insert_data() and bio_invalidate()
+ * pass bch_journal() a list of keys to be journalled, and then
+ * bch_journal() hands those same keys off to bch_btree_insert_async()
+ */
+
+void bch_journal(struct closure *cl)
+{
+ struct btree_op *op = container_of(cl, struct btree_op, cl);
+ struct cache_set *c = op->c;
+ struct journal_write *w;
+ size_t b, n = ((uint64_t *) op->keys.top) - op->keys.list;
+
+ if (op->type != BTREE_INSERT ||
+ !CACHE_SYNC(&c->sb))
+ goto out;
+
+ /*
+ * If we're looping because we errored, might already be waiting on
+ * another journal write:
+ */
+ while (atomic_read(&cl->parent->remaining) & CLOSURE_WAITING)
+ closure_sync(cl->parent);
+
+ spin_lock(&c->journal.lock);
+
+ if (journal_full(&c->journal)) {
+ /* XXX: tracepoint */
+ closure_wait(&c->journal.wait, cl);
+
+ journal_reclaim(c);
+ spin_unlock(&c->journal.lock);
+
+ btree_flush_write(c);
+ continue_at(cl, bch_journal, bcache_wq);
+ }
+
+ w = c->journal.cur;
+ w->need_write = true;
+ b = __set_blocks(w->data, w->data->keys + n, c);
+
+ if (b * c->sb.block_size > PAGE_SECTORS << JSET_BITS ||
+ b > c->journal.blocks_free) {
+ /* XXX: If we were inserting so many keys that they won't fit in
+ * an _empty_ journal write, we'll deadlock. For now, handle
+ * this in bch_keylist_realloc() - but something to think about.
+ */
+ BUG_ON(!w->data->keys);
+
+ /* XXX: tracepoint */
+ BUG_ON(!closure_wait(&w->wait, cl));
+
+ closure_flush(&c->journal.io);
+
+ journal_try_write(c);
+ continue_at(cl, bch_journal, bcache_wq);
+ }
+
+ memcpy(end(w->data), op->keys.list, n * sizeof(uint64_t));
+ w->data->keys += n;
+
+ op->journal = &fifo_back(&c->journal.pin);
+ atomic_inc(op->journal);
+
+ if (op->flush_journal) {
+ closure_flush(&c->journal.io);
+ closure_wait(&w->wait, cl->parent);
+ }
+
+ journal_try_write(c);
+out:
+ bch_btree_insert_async(cl);
+}
+
+void bch_journal_free(struct cache_set *c)
+{
+ free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
+ free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
+ free_fifo(&c->journal.pin);
+}
+
+int bch_journal_alloc(struct cache_set *c)
+{
+ struct journal *j = &c->journal;
+
+ closure_init_unlocked(&j->io);
+ spin_lock_init(&j->lock);
+
+ c->journal_delay_ms = 100;
+
+ j->w[0].c = c;
+ j->w[1].c = c;
+
+ if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
+ !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
+ !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
+ return -ENOMEM;
+
+ return 0;
+}
diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
new file mode 100644
index 0000000..3d78512
--- /dev/null
+++ b/drivers/md/bcache/journal.h
@@ -0,0 +1,215 @@
+#ifndef _BCACHE_JOURNAL_H
+#define _BCACHE_JOURNAL_H
+
+/*
+ * THE JOURNAL:
+ *
+ * The journal is treated as a circular buffer of buckets - a journal entry
+ * never spans two buckets. This means (not implemented yet) we can resize the
+ * journal at runtime, and will be needed for bcache on raw flash support.
+ *
+ * Journal entries contain a list of keys, ordered by the time they were
+ * inserted; thus journal replay just has to reinsert the keys.
+ *
+ * We also keep some things in the journal header that are logically part of the
+ * superblock - all the things that are frequently updated. This is for future
+ * bcache on raw flash support; the superblock (which will become another
+ * journal) can't be moved or wear leveled, so it contains just enough
+ * information to find the main journal, and the superblock only has to be
+ * rewritten when we want to move/wear level the main journal.
+ *
+ * Currently, we don't journal BTREE_REPLACE operations - this will hopefully be
+ * fixed eventually. This isn't a bug - BTREE_REPLACE is used for insertions
+ * from cache misses, which don't have to be journaled, and for writeback and
+ * moving gc we work around it by flushing the btree to disk before updating the
+ * gc information. But it is a potential issue with incremental garbage
+ * collection, and it's fragile.
+ *
+ * OPEN JOURNAL ENTRIES:
+ *
+ * Each journal entry contains, in the header, the sequence number of the last
+ * journal entry still open - i.e. that has keys that haven't been flushed to
+ * disk in the btree.
+ *
+ * We track this by maintaining a refcount for every open journal entry, in a
+ * fifo; each entry in the fifo corresponds to a particular journal
+ * entry/sequence number. When the refcount at the tail of the fifo goes to
+ * zero, we pop it off - thus, the size of the fifo tells us the number of open
+ * journal entries
+ *
+ * We take a refcount on a journal entry when we add some keys to a journal
+ * entry that we're going to insert (held by struct btree_op), and then when we
+ * insert those keys into the btree the btree write we're setting up takes a
+ * copy of that refcount (held by struct btree_write). That refcount is dropped
+ * when the btree write completes.
+ *
+ * A struct btree_write can only hold a refcount on a single journal entry, but
+ * might contain keys for many journal entries - we handle this by making sure
+ * it always has a refcount on the _oldest_ journal entry of all the journal
+ * entries it has keys for.
+ *
+ * JOURNAL RECLAIM:
+ *
+ * As mentioned previously, our fifo of refcounts tells us the number of open
+ * journal entries; from that and the current journal sequence number we compute
+ * last_seq - the oldest journal entry we still need. We write last_seq in each
+ * journal entry, and we also have to keep track of where it exists on disk so
+ * we don't overwrite it when we loop around the journal.
+ *
+ * To do that we track, for each journal bucket, the sequence number of the
+ * newest journal entry it contains - if we don't need that journal entry we
+ * don't need anything in that bucket anymore. From that we track the last
+ * journal bucket we still need; all this is tracked in struct journal_device
+ * and updated by journal_reclaim().
+ *
+ * JOURNAL FILLING UP:
+ *
+ * There are two ways the journal could fill up; either we could run out of
+ * space to write to, or we could have too many open journal entries and run out
+ * of room in the fifo of refcounts. Since those refcounts are decremented
+ * without any locking we can't safely resize that fifo, so we handle it the
+ * same way.
+ *
+ * If the journal fills up, we start flushing dirty btree nodes until we can
+ * allocate space for a journal write again - preferentially flushing btree
+ * nodes that are pinning the oldest journal entries first.
+ */
+
+#define BCACHE_JSET_VERSION_UUIDv1 1
+/* Always latest UUID format */
+#define BCACHE_JSET_VERSION_UUID 1
+#define BCACHE_JSET_VERSION 1
+
+/*
+ * On disk format for a journal entry:
+ * seq is monotonically increasing; every journal entry has its own unique
+ * sequence number.
+ *
+ * last_seq is the oldest journal entry that still has keys the btree hasn't
+ * flushed to disk yet.
+ *
+ * version is for on disk format changes.
+ */
+struct jset {
+ uint64_t csum;
+ uint64_t magic;
+ uint64_t seq;
+ uint32_t version;
+ uint32_t keys;
+
+ uint64_t last_seq;
+
+ BKEY_PADDED(uuid_bucket);
+ BKEY_PADDED(btree_root);
+ uint16_t btree_level;
+ uint16_t pad[3];
+
+ uint64_t prio_bucket[MAX_CACHES_PER_SET];
+
+ union {
+ struct bkey start[0];
+ uint64_t d[0];
+ };
+};
+
+/*
+ * Only used for holding the journal entries we read in bch_journal_read()
+ * during cache registration
+ */
+struct journal_replay {
+ struct list_head list;
+ atomic_t *pin;
+ struct jset j;
+};
+
+/*
+ * We put two of these in struct journal; we use them for writes to the
+ * journal that are being staged or in flight.
+ */
+struct journal_write {
+ struct jset *data;
+#define JSET_BITS 3
+
+ struct cache_set *c;
+ struct closure_waitlist wait;
+ bool need_write;
+};
+
+/* Embedded in struct cache_set */
+struct journal {
+ spinlock_t lock;
+ /* used when waiting because the journal was full */
+ struct closure_waitlist wait;
+ struct closure_with_timer io;
+
+ /* Number of blocks free in the bucket(s) we're currently writing to */
+ unsigned blocks_free;
+ uint64_t seq;
+ DECLARE_FIFO(atomic_t, pin);
+
+ BKEY_PADDED(key);
+
+ struct journal_write w[2], *cur;
+};
+
+/*
+ * Embedded in struct cache. First three fields refer to the array of journal
+ * buckets, in cache_sb.
+ */
+struct journal_device {
+ /*
+ * For each journal bucket, contains the max sequence number of the
+ * journal writes it contains - so we know when a bucket can be reused.
+ */
+ uint64_t seq[SB_JOURNAL_BUCKETS];
+
+ /* Journal bucket we're currently writing to */
+ unsigned cur_idx;
+
+ /* Last journal bucket that still contains an open journal entry */
+ unsigned last_idx;
+
+ /* Next journal bucket to be discarded */
+ unsigned discard_idx;
+
+#define DISCARD_READY 0
+#define DISCARD_IN_FLIGHT 1
+#define DISCARD_DONE 2
+ /* DISCARD_READY, DISCARD_IN_FLIGHT or DISCARD_DONE */
+ atomic_t discard_in_flight;
+
+ struct work_struct discard_work;
+ struct bio discard_bio;
+ struct bio_vec discard_bv;
+
+ /* Bio for journal reads/writes to this device */
+ struct bio bio;
+ struct bio_vec bv[8];
+};
+
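+/*
+ * Evaluates to true when btree write @l holds a refcount on a newer journal
+ * entry than @r (a larger index into the pin fifo means the entry was pushed
+ * more recently).
+ */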
+#define journal_pin_cmp(c, l, r) \
+ (fifo_idx(&(c)->journal.pin, (l)->journal) > \
+ fifo_idx(&(c)->journal.pin, (r)->journal))
+
+#define JOURNAL_PIN 20000
+
+#define journal_full(j) \
+ (!(j)->blocks_free || fifo_free(&(j)->pin) <= 1)
+
+struct closure;
+struct cache_set;
+struct btree_op;
+
+void bch_journal(struct closure *);
+void bch_journal_next(struct journal *);
+void bch_journal_mark(struct cache_set *, struct list_head *);
+void bch_journal_meta(struct cache_set *, struct closure *);
+int bch_journal_read(struct cache_set *, struct list_head *,
+ struct btree_op *);
+int bch_journal_replay(struct cache_set *, struct list_head *,
+ struct btree_op *);
+
+void bch_journal_free(struct cache_set *);
+int bch_journal_alloc(struct cache_set *);
+
+#endif /* _BCACHE_JOURNAL_H */
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
new file mode 100644
index 0000000..8589512
--- /dev/null
+++ b/drivers/md/bcache/movinggc.c
@@ -0,0 +1,254 @@
+/*
+ * Moving/copying garbage collector
+ *
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "request.h"
+
+struct moving_io {
+ struct keybuf_key *w;
+ struct search s;
+ struct bbio bio;
+};
+
+static bool moving_pred(struct keybuf *buf, struct bkey *k)
+{
+ struct cache_set *c = container_of(buf, struct cache_set,
+ moving_gc_keys);
+ unsigned i;
+
+ for (i = 0; i < KEY_PTRS(k); i++) {
+ struct cache *ca = PTR_CACHE(c, k, i);
+ struct bucket *g = PTR_BUCKET(c, k, i);
+
+ if (GC_SECTORS_USED(g) < ca->gc_move_threshold)
+ return true;
+ }
+
+ return false;
+}
+
+/* Moving GC - IO loop */
+
+static void moving_io_destructor(struct closure *cl)
+{
+ struct moving_io *io = container_of(cl, struct moving_io, s.cl);
+ kfree(io);
+}
+
+static void write_moving_finish(struct closure *cl)
+{
+ struct moving_io *io = container_of(cl, struct moving_io, s.cl);
+ struct bio *bio = &io->bio.bio;
+ struct bio_vec *bv = bio_iovec_idx(bio, bio->bi_vcnt);
+
+ while (bv-- != bio->bi_io_vec)
+ __free_page(bv->bv_page);
+
+ pr_debug("%s %s", io->s.op.insert_collision
+ ? "collision moving" : "moved",
+ pkey(&io->w->key));
+
+ bch_keybuf_del(&io->s.op.c->moving_gc_keys, io->w);
+
+ atomic_dec_bug(&io->s.op.c->in_flight);
+ closure_wake_up(&io->s.op.c->moving_gc_wait);
+
+ closure_return_with_destructor(cl, moving_io_destructor);
+}
+
+static void read_moving_endio(struct bio *bio, int error)
+{
+ struct moving_io *io = container_of(bio->bi_private,
+ struct moving_io, s.cl);
+
+ if (error)
+ io->s.error = error;
+
+ bch_bbio_endio(io->s.op.c, bio, error, "reading data to move");
+}
+
+static void moving_init(struct moving_io *io)
+{
+ struct bio *bio = &io->bio.bio;
+
+ bio_init(bio);
+ bio_get(bio);
+ bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
+
+ bio->bi_size = KEY_SIZE(&io->w->key) << 9;
+ bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
+ PAGE_SECTORS);
+ bio->bi_private = &io->s.cl;
+ bio->bi_io_vec = bio->bi_inline_vecs;
+ bch_bio_map(bio, NULL);
+}
+
+static void write_moving(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, cl);
+ struct moving_io *io = container_of(s, struct moving_io, s);
+
+ if (!s->error) {
+ trace_bcache_write_moving(&io->bio.bio);
+
+ moving_init(io);
+
+ io->bio.bio.bi_sector = KEY_START(&io->w->key);
+ s->op.lock = -1;
+ s->op.write_prio = 1;
+ s->op.cache_bio = &io->bio.bio;
+
+ s->writeback = KEY_DIRTY(&io->w->key);
+ s->op.csum = KEY_CSUM(&io->w->key);
+
+ s->op.type = BTREE_REPLACE;
+ bkey_copy(&s->op.replace, &io->w->key);
+
+ closure_init(&s->op.cl, cl);
+ bch_insert_data(&s->op.cl);
+ }
+
+ continue_at(cl, write_moving_finish, NULL);
+}
+
+static void read_moving_submit(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, cl);
+ struct moving_io *io = container_of(s, struct moving_io, s);
+ struct bio *bio = &io->bio.bio;
+
+ trace_bcache_read_moving(bio);
+ bch_submit_bbio(bio, s->op.c, &io->w->key, 0);
+
+ continue_at(cl, write_moving, bch_gc_wq);
+}
+
+static void read_moving(struct closure *cl)
+{
+ struct cache_set *c = container_of(cl, struct cache_set, moving_gc);
+ struct keybuf_key *w;
+ struct moving_io *io;
+ struct bio *bio;
+
+ /* XXX: if we error, background writeback could stall indefinitely */
+
+ while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
+ w = bch_keybuf_next_rescan(c, &c->moving_gc_keys, &MAX_KEY);
+ if (!w)
+ break;
+
+ io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
+ * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
+ GFP_KERNEL);
+ if (!io)
+ goto err;
+
+ w->private = io;
+ io->w = w;
+ io->s.op.inode = KEY_INODE(&w->key);
+ io->s.op.c = c;
+
+ moving_init(io);
+ bio = &io->bio.bio;
+
+ bio->bi_rw = READ;
+ bio->bi_end_io = read_moving_endio;
+
+ if (bch_bio_alloc_pages(bio, GFP_KERNEL))
+ goto err;
+
+ pr_debug("%s", pkey(&w->key));
+
+ closure_call(&io->s.cl, read_moving_submit, NULL, &c->gc.cl);
+
+ if (atomic_inc_return(&c->in_flight) >= 64) {
+ closure_wait_event(&c->moving_gc_wait, cl,
+ atomic_read(&c->in_flight) < 64);
+ continue_at(cl, read_moving, bch_gc_wq);
+ }
+ }
+
+ if (0) {
+err: if (!IS_ERR_OR_NULL(w->private))
+ kfree(w->private);
+
+ bch_keybuf_del(&c->moving_gc_keys, w);
+ }
+
+ closure_return(cl);
+}
+
+static bool bucket_cmp(struct bucket *l, struct bucket *r)
+{
+ return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
+}
+
+static unsigned bucket_heap_top(struct cache *ca)
+{
+ return GC_SECTORS_USED(heap_peek(&ca->heap));
+}
+
+void bch_moving_gc(struct closure *cl)
+{
+ struct cache_set *c = container_of(cl, struct cache_set, gc.cl);
+ struct cache *ca;
+ struct bucket *b;
+ unsigned i;
+
+ if (!c->copy_gc_enabled)
+ closure_return(cl);
+
+ mutex_lock(&c->bucket_lock);
+
+ for_each_cache(ca, c, i) {
+ unsigned sectors_to_move = 0;
+ unsigned reserve_sectors = ca->sb.bucket_size *
+ min(fifo_used(&ca->free), ca->free.size / 2);
+
+ ca->heap.used = 0;
+
+ for_each_bucket(b, ca) {
+ if (!GC_SECTORS_USED(b))
+ continue;
+
+ if (!heap_full(&ca->heap)) {
+ sectors_to_move += GC_SECTORS_USED(b);
+ heap_add(&ca->heap, b, bucket_cmp);
+ } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
+ sectors_to_move -= bucket_heap_top(ca);
+ sectors_to_move += GC_SECTORS_USED(b);
+
+ ca->heap.data[0] = b;
+ heap_sift(&ca->heap, 0, bucket_cmp);
+ }
+ }
+
+ while (sectors_to_move > reserve_sectors) {
+ heap_pop(&ca->heap, b, bucket_cmp);
+ sectors_to_move -= GC_SECTORS_USED(b);
+ }
+
+ ca->gc_move_threshold = bucket_heap_top(ca);
+
+ pr_debug("threshold %u", ca->gc_move_threshold);
+ }
+
+ mutex_unlock(&c->bucket_lock);
+
+ c->moving_gc_keys.last_scanned = ZERO_KEY;
+
+ closure_init(&c->moving_gc, cl);
+ read_moving(&c->moving_gc);
+
+ closure_return(cl);
+}
+
+void bch_moving_init_cache_set(struct cache_set *c)
+{
+ bch_keybuf_init(&c->moving_gc_keys, moving_pred);
+}
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
new file mode 100644
index 0000000..e5ff12e
--- /dev/null
+++ b/drivers/md/bcache/request.c
@@ -0,0 +1,1411 @@
+/*
+ * Main bcache entry point - handle a read or a write request and decide what to
+ * do with it; the make_request functions are called by the block layer.
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "request.h"
+
+#include <linux/cgroup.h>
+#include <linux/module.h>
+#include <linux/hash.h>
+#include <linux/random.h>
+#include "blk-cgroup.h"
+
+#include <trace/events/bcache.h>
+
+#define CUTOFF_CACHE_ADD 95
+#define CUTOFF_CACHE_READA 90
+#define CUTOFF_WRITEBACK 50
+#define CUTOFF_WRITEBACK_SYNC 75
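+
+/*
+ * The cutoffs above are percentages of cache space in use: roughly, above
+ * CUTOFF_CACHE_ADD/CUTOFF_CACHE_READA we stop adding (readahead) data to the
+ * cache, and above the writeback cutoffs writes fall back to writethrough.
+ */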
+
+struct kmem_cache *bch_search_cache;
+
+static void check_should_skip(struct cached_dev *, struct search *);
+
+/* Cgroup interface */
+
+#ifdef CONFIG_CGROUP_BCACHE
+static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };
+
+static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
+{
+ struct cgroup_subsys_state *css;
+ return cgroup &&
+ (css = cgroup_subsys_state(cgroup, bcache_subsys_id))
+ ? container_of(css, struct bch_cgroup, css)
+ : &bcache_default_cgroup;
+}
+
+struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
+{
+ struct cgroup_subsys_state *css = bio->bi_css
+ ? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
+ : task_subsys_state(current, bcache_subsys_id);
+
+ return css
+ ? container_of(css, struct bch_cgroup, css)
+ : &bcache_default_cgroup;
+}
+
+static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
+ struct file *file,
+ char __user *buf, size_t nbytes, loff_t *ppos)
+{
+ char tmp[1024];
+ int len = bch_snprint_string_list(tmp, sizeof(tmp), bch_cache_modes,
+ cgroup_to_bcache(cgrp)->cache_mode + 1);
+
+ if (len < 0)
+ return len;
+
+ return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
+}
+
+static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
+ const char *buf)
+{
+ int v = bch_read_string_list(buf, bch_cache_modes);
+ if (v < 0)
+ return v;
+
+ cgroup_to_bcache(cgrp)->cache_mode = v - 1;
+ return 0;
+}
+
+static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
+{
+ return cgroup_to_bcache(cgrp)->verify;
+}
+
+static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
+{
+ cgroup_to_bcache(cgrp)->verify = val;
+ return 0;
+}
+
+static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
+{
+ struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
+ return atomic_read(&bcachecg->stats.cache_hits);
+}
+
+static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
+{
+ struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
+ return atomic_read(&bcachecg->stats.cache_misses);
+}
+
+static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
+ struct cftype *cft)
+{
+ struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
+ return atomic_read(&bcachecg->stats.cache_bypass_hits);
+}
+
+static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
+ struct cftype *cft)
+{
+ struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
+ return atomic_read(&bcachecg->stats.cache_bypass_misses);
+}
+
+static struct cftype bch_files[] = {
+ {
+ .name = "cache_mode",
+ .read = cache_mode_read,
+ .write_string = cache_mode_write,
+ },
+ {
+ .name = "verify",
+ .read_u64 = bch_verify_read,
+ .write_u64 = bch_verify_write,
+ },
+ {
+ .name = "cache_hits",
+ .read_u64 = bch_cache_hits_read,
+ },
+ {
+ .name = "cache_misses",
+ .read_u64 = bch_cache_misses_read,
+ },
+ {
+ .name = "cache_bypass_hits",
+ .read_u64 = bch_cache_bypass_hits_read,
+ },
+ {
+ .name = "cache_bypass_misses",
+ .read_u64 = bch_cache_bypass_misses_read,
+ },
+ { } /* terminate */
+};
+
+static void init_bch_cgroup(struct bch_cgroup *cg)
+{
+ cg->cache_mode = -1;
+}
+
+static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
+{
+ struct bch_cgroup *cg;
+
+ cg = kzalloc(sizeof(*cg), GFP_KERNEL);
+ if (!cg)
+ return ERR_PTR(-ENOMEM);
+ init_bch_cgroup(cg);
+ return &cg->css;
+}
+
+static void bcachecg_destroy(struct cgroup *cgroup)
+{
+ struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
+ free_css_id(&bcache_subsys, &cg->css);
+ kfree(cg);
+}
+
+struct cgroup_subsys bcache_subsys = {
+ .create = bcachecg_create,
+ .destroy = bcachecg_destroy,
+ .subsys_id = bcache_subsys_id,
+ .name = "bcache",
+ .module = THIS_MODULE,
+};
+EXPORT_SYMBOL_GPL(bcache_subsys);
+#endif
+
+static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
+{
+#ifdef CONFIG_CGROUP_BCACHE
+ int r = bch_bio_to_cgroup(bio)->cache_mode;
+ if (r >= 0)
+ return r;
+#endif
+ return BDEV_CACHE_MODE(&dc->sb);
+}
+
+static bool verify(struct cached_dev *dc, struct bio *bio)
+{
+#ifdef CONFIG_CGROUP_BCACHE
+ if (bch_bio_to_cgroup(bio)->verify)
+ return true;
+#endif
+ return dc->verify;
+}
+
+static void bio_csum(struct bio *bio, struct bkey *k)
+{
+ struct bio_vec *bv;
+ uint64_t csum = 0;
+ int i;
+
+ bio_for_each_segment(bv, bio, i) {
+ void *d = kmap(bv->bv_page) + bv->bv_offset;
+ csum = bch_crc64_update(csum, d, bv->bv_len);
+ kunmap(bv->bv_page);
+ }
+
+ k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
+}
+
+/* Insert data into cache */
+
+static void bio_invalidate(struct closure *cl)
+{
+ struct btree_op *op = container_of(cl, struct btree_op, cl);
+ struct bio *bio = op->cache_bio;
+
+ pr_debug("invalidating %i sectors from %llu",
+ bio_sectors(bio), (uint64_t) bio->bi_sector);
+
+ while (bio_sectors(bio)) {
+ unsigned len = min(bio_sectors(bio), 1U << 14);
+
+ if (bch_keylist_realloc(&op->keys, 0, op->c))
+ goto out;
+
+ bio->bi_sector += len;
+ bio->bi_size -= len << 9;
+
+ bch_keylist_add(&op->keys,
+ &KEY(op->inode, bio->bi_sector, len));
+ }
+
+ op->insert_data_done = true;
+ bio_put(bio);
+out:
+ continue_at(cl, bch_journal, bcache_wq);
+}
+
+struct open_bucket {
+ struct list_head list;
+ struct task_struct *last;
+ unsigned sectors_free;
+ BKEY_PADDED(key);
+};
+
+void bch_open_buckets_free(struct cache_set *c)
+{
+ struct open_bucket *b;
+
+ while (!list_empty(&c->data_buckets)) {
+ b = list_first_entry(&c->data_buckets,
+ struct open_bucket, list);
+ list_del(&b->list);
+ kfree(b);
+ }
+}
+
+int bch_open_buckets_alloc(struct cache_set *c)
+{
+ int i;
+
+ spin_lock_init(&c->data_bucket_lock);
+
+ for (i = 0; i < 6; i++) {
+ struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
+ list_add(&b->list, &c->data_buckets);
+ }
+
+ return 0;
+}
+
+/*
+ * We keep multiple buckets open for writes, and try to segregate different
+ * write streams for better cache utilization: first we look for a bucket where
+ * the last write to it was sequential with the current write, and failing that
+ * we look for a bucket that was last used by the same task.
+ *
+ * The idea is that if you've got multiple tasks pulling data into the cache
+ * at the same time, you'll get better cache utilization if you try to
+ * segregate their data and preserve locality.
+ *
+ * For example, say you're starting Firefox at the same time you're copying a
+ * bunch of files. Firefox will likely end up being fairly hot and stay in the
+ * cache awhile, but the data you copied might not be; if you wrote all that
+ * data to the same buckets it'd get invalidated at the same time.
+ *
+ * Both of those tasks will be doing fairly random IO so we can't rely on
+ * detecting sequential IO to segregate their data, but going off of the task
+ * should be a sane heuristic.
+ */
+static struct open_bucket *pick_data_bucket(struct cache_set *c,
+ const struct bkey *search,
+ struct task_struct *task,
+ struct bkey *alloc)
+{
+ struct open_bucket *ret, *ret_task = NULL;
+
+ list_for_each_entry_reverse(ret, &c->data_buckets, list)
+ if (!bkey_cmp(&ret->key, search))
+ goto found;
+ else if (ret->last == task)
+ ret_task = ret;
+
+ ret = ret_task ?: list_first_entry(&c->data_buckets,
+ struct open_bucket, list);
+found:
+ if (!ret->sectors_free && KEY_PTRS(alloc)) {
+ ret->sectors_free = c->sb.bucket_size;
+ bkey_copy(&ret->key, alloc);
+ bkey_init(alloc);
+ }
+
+ if (!ret->sectors_free)
+ ret = NULL;
+
+ return ret;
+}
+
+/*
+ * Allocates some space in the cache to write to, sets k to point to the newly
+ * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
+ * end of the newly allocated space).
+ *
+ * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
+ * sectors were actually allocated.
+ *
+ * If s->writeback is true, will not fail.
+ */
+static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
+ struct search *s)
+{
+ struct cache_set *c = s->op.c;
+ struct open_bucket *b;
+ BKEY_PADDED(key) alloc;
+ struct closure cl, *w = NULL;
+ unsigned i;
+
+ if (s->writeback) {
+ closure_init_stack(&cl);
+ w = &cl;
+ }
+
+ /*
+ * We might have to allocate a new bucket, which we can't do with a
+ * spinlock held. So if we have to allocate, we drop the lock, allocate
+ * and then retry. KEY_PTRS() indicates whether alloc points to
+ * allocated bucket(s).
+ */
+
+ bkey_init(&alloc.key);
+ spin_lock(&c->data_bucket_lock);
+
+ while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) {
+ unsigned watermark = s->op.write_prio
+ ? WATERMARK_MOVINGGC
+ : WATERMARK_NONE;
+
+ spin_unlock(&c->data_bucket_lock);
+
+ if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, w))
+ return false;
+
+ spin_lock(&c->data_bucket_lock);
+ }
+
+ /*
+ * If we had to allocate, we might race and not need to allocate the
+ * second time we call pick_data_bucket(). If we allocated a bucket but
+ * didn't use it, drop the refcount bch_bucket_alloc_set() took:
+ */
+ if (KEY_PTRS(&alloc.key))
+ __bkey_put(c, &alloc.key);
+
+ for (i = 0; i < KEY_PTRS(&b->key); i++)
+ EBUG_ON(ptr_stale(c, &b->key, i));
+
+ /* Set up the pointer to the space we're allocating: */
+
+ for (i = 0; i < KEY_PTRS(&b->key); i++)
+ k->ptr[i] = b->key.ptr[i];
+
+ sectors = min(sectors, b->sectors_free);
+
+ SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
+ SET_KEY_SIZE(k, sectors);
+ SET_KEY_PTRS(k, KEY_PTRS(&b->key));
+
+ /*
+ * Move b to the end of the lru, and keep track of what this bucket was
+ * last used for:
+ */
+ list_move_tail(&b->list, &c->data_buckets);
+ bkey_copy_key(&b->key, k);
+ b->last = s->task;
+
+ b->sectors_free -= sectors;
+
+ for (i = 0; i < KEY_PTRS(&b->key); i++) {
+ SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
+
+ atomic_long_add(sectors,
+ &PTR_CACHE(c, &b->key, i)->sectors_written);
+ }
+
+ if (b->sectors_free < c->sb.block_size)
+ b->sectors_free = 0;
+
+ /*
+ * k takes refcounts on the buckets it points to until it's inserted
+ * into the btree, but if we're done with this bucket we just transfer
+ * get_data_bucket()'s refcount.
+ */
+ if (b->sectors_free)
+ for (i = 0; i < KEY_PTRS(&b->key); i++)
+ atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);
+
+ spin_unlock(&c->data_bucket_lock);
+ return true;
+}
+
+static void bch_insert_data_error(struct closure *cl)
+{
+ struct btree_op *op = container_of(cl, struct btree_op, cl);
+
+ /*
+ * Our data write just errored, which means we've got a bunch of keys to
+ * insert that point to data that wasn't successfully written.
+ *
+ * We don't have to insert those keys but we still have to invalidate
+ * that region of the cache - so, if we just strip off all the pointers
+ * from the keys we'll accomplish just that.
+ */
+
+ struct bkey *src = op->keys.bottom, *dst = op->keys.bottom;
+
+ while (src != op->keys.top) {
+ struct bkey *n = bkey_next(src);
+
+ SET_KEY_PTRS(src, 0);
+ bkey_copy(dst, src);
+
+ dst = bkey_next(dst);
+ src = n;
+ }
+
+ op->keys.top = dst;
+
+ bch_journal(cl);
+}
+
+static void bch_insert_data_endio(struct bio *bio, int error)
+{
+ struct closure *cl = bio->bi_private;
+ struct btree_op *op = container_of(cl, struct btree_op, cl);
+ struct search *s = container_of(op, struct search, op);
+
+ if (error) {
+ /* TODO: We could try to recover from this. */
+ if (s->writeback)
+ s->error = error;
+ else if (s->write)
+ set_closure_fn(cl, bch_insert_data_error, bcache_wq);
+ else
+ set_closure_fn(cl, NULL, NULL);
+ }
+
+ bch_bbio_endio(op->c, bio, error, "writing data to cache");
+}
+
+static void bch_insert_data_loop(struct closure *cl)
+{
+ struct btree_op *op = container_of(cl, struct btree_op, cl);
+ struct search *s = container_of(op, struct search, op);
+ struct bio *bio = op->cache_bio, *n;
+
+ if (op->skip)
+ return bio_invalidate(cl);
+
+ if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
+ set_gc_sectors(op->c);
+ bch_queue_gc(op->c);
+ }
+
+ do {
+ unsigned i;
+ struct bkey *k;
+ struct bio_set *split = s->d
+ ? s->d->bio_split : op->c->bio_split;
+
+ /* 1 for the device pointer and 1 for the chksum */
+ if (bch_keylist_realloc(&op->keys,
+ 1 + (op->csum ? 1 : 0),
+ op->c))
+ continue_at(cl, bch_journal, bcache_wq);
+
+ k = op->keys.top;
+ bkey_init(k);
+ SET_KEY_INODE(k, op->inode);
+ SET_KEY_OFFSET(k, bio->bi_sector);
+
+ if (!bch_alloc_sectors(k, bio_sectors(bio), s))
+ goto err;
+
+ n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
+ if (!n) {
+ __bkey_put(op->c, k);
+ continue_at(cl, bch_insert_data_loop, bcache_wq);
+ }
+
+ n->bi_end_io = bch_insert_data_endio;
+ n->bi_private = cl;
+
+ if (s->writeback) {
+ SET_KEY_DIRTY(k, true);
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ SET_GC_MARK(PTR_BUCKET(op->c, k, i),
+ GC_MARK_DIRTY);
+ }
+
+ SET_KEY_CSUM(k, op->csum);
+ if (KEY_CSUM(k))
+ bio_csum(n, k);
+
+ pr_debug("%s", pkey(k));
+ bch_keylist_push(&op->keys);
+
+ trace_bcache_cache_insert(n, n->bi_sector, n->bi_bdev);
+ n->bi_rw |= REQ_WRITE;
+ bch_submit_bbio(n, op->c, k, 0);
+ } while (n != bio);
+
+ op->insert_data_done = true;
+ continue_at(cl, bch_journal, bcache_wq);
+err:
+ /* bch_alloc_sectors() blocks if s->writeback = true */
+ BUG_ON(s->writeback);
+
+ /*
+ * But if it's not a writeback write we'd rather just bail out if
+	 * there aren't any buckets ready to write to - it might take a while and
+ * we might be starving btree writes for gc or something.
+ */
+
+ if (s->write) {
+ /*
+ * Writethrough write: We can't complete the write until we've
+ * updated the index. But we don't want to delay the write while
+ * we wait for buckets to be freed up, so just invalidate the
+ * rest of the write.
+ */
+ op->skip = true;
+ return bio_invalidate(cl);
+ } else {
+ /*
+ * From a cache miss, we can just insert the keys for the data
+ * we have written or bail out if we didn't do anything.
+ */
+ op->insert_data_done = true;
+ bio_put(bio);
+
+ if (!bch_keylist_empty(&op->keys))
+ continue_at(cl, bch_journal, bcache_wq);
+ else
+ closure_return(cl);
+ }
+}
+
+/**
+ * bch_insert_data - stick some data in the cache
+ *
+ * This is the starting point for any data to end up in a cache device; it could
+ * be from a normal write, or a writeback write, or a write to a flash only
+ * volume - it's also used by the moving garbage collector to compact data in
+ * mostly empty buckets.
+ *
+ * It first writes the data to the cache, creating a list of keys to be inserted
+ * (if the data had to be fragmented there will be multiple keys); after the
+ * data is written it calls bch_journal, and after the keys have been added to
+ * the next journal write they're inserted into the btree.
+ *
+ * It inserts the data in op->cache_bio; bi_sector is used for the key offset,
+ * and op->inode is used for the key inode.
+ *
+ * If op->skip is true, instead of inserting the data it invalidates the region
+ * of the cache represented by op->cache_bio and op->inode.
+ */
+void bch_insert_data(struct closure *cl)
+{
+ struct btree_op *op = container_of(cl, struct btree_op, cl);
+
+ bch_keylist_init(&op->keys);
+ bio_get(op->cache_bio);
+ bch_insert_data_loop(cl);
+}
+
+void bch_btree_insert_async(struct closure *cl)
+{
+ struct btree_op *op = container_of(cl, struct btree_op, cl);
+ struct search *s = container_of(op, struct search, op);
+
+ if (bch_btree_insert(op, op->c)) {
+ s->error = -ENOMEM;
+ op->insert_data_done = true;
+ }
+
+ if (op->insert_data_done) {
+ bch_keylist_free(&op->keys);
+ closure_return(cl);
+ } else
+ continue_at(cl, bch_insert_data_loop, bcache_wq);
+}
+
+/* Common code for the make_request functions */
+
+static void request_endio(struct bio *bio, int error)
+{
+ struct closure *cl = bio->bi_private;
+
+ if (error) {
+ struct search *s = container_of(cl, struct search, cl);
+ s->error = error;
+ /* Only cache read errors are recoverable */
+ s->recoverable = false;
+ }
+
+ bio_put(bio);
+ closure_put(cl);
+}
+
+void bch_cache_read_endio(struct bio *bio, int error)
+{
+ struct bbio *b = container_of(bio, struct bbio, bio);
+ struct closure *cl = bio->bi_private;
+ struct search *s = container_of(cl, struct search, cl);
+
+ /*
+ * If the bucket was reused while our bio was in flight, we might have
+	 * read the wrong data. Set s->error but leave the bio's error alone, so
+	 * that it doesn't get counted against the cache device; we'll still
+	 * reread the data from the backing device.
+ */
+
+ if (error)
+ s->error = error;
+ else if (ptr_stale(s->op.c, &b->key, 0)) {
+ atomic_long_inc(&s->op.c->cache_read_races);
+ s->error = -EINTR;
+ }
+
+ bch_bbio_endio(s->op.c, bio, error, "reading from cache");
+}
+
+static void bio_complete(struct search *s)
+{
+ if (s->orig_bio) {
+ int cpu, rw = bio_data_dir(s->orig_bio);
+ unsigned long duration = jiffies - s->start_time;
+
+ cpu = part_stat_lock();
+ part_round_stats(cpu, &s->d->disk->part0);
+ part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
+ part_stat_unlock();
+
+ trace_bcache_request_end(s, s->orig_bio);
+ bio_endio(s->orig_bio, s->error);
+ s->orig_bio = NULL;
+ }
+}
+
+static void do_bio_hook(struct search *s)
+{
+ struct bio *bio = &s->bio.bio;
+ memcpy(bio, s->orig_bio, sizeof(struct bio));
+
+ bio->bi_end_io = request_endio;
+ bio->bi_private = &s->cl;
+ atomic_set(&bio->bi_cnt, 3);
+}
+
+static void search_free(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, cl);
+ bio_complete(s);
+
+ if (s->op.cache_bio)
+ bio_put(s->op.cache_bio);
+
+ if (s->unaligned_bvec)
+ mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
+
+ closure_debug_destroy(cl);
+ mempool_free(s, s->d->c->search);
+}
+
+static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
+{
+ struct bio_vec *bv;
+ struct search *s = mempool_alloc(d->c->search, GFP_NOIO);
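+	/*
+	 * Zero everything up to op.keys; the keylist itself is initialized
+	 * later by bch_keylist_init() in bch_insert_data().
+	 */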
+ memset(s, 0, offsetof(struct search, op.keys));
+
+ __closure_init(&s->cl, NULL);
+
+ s->op.inode = d->id;
+ s->op.c = d->c;
+ s->d = d;
+ s->op.lock = -1;
+ s->task = current;
+ s->orig_bio = bio;
+ s->write = (bio->bi_rw & REQ_WRITE) != 0;
+ s->op.flush_journal = (bio->bi_rw & REQ_FLUSH) != 0;
+ s->op.skip = (bio->bi_rw & REQ_DISCARD) != 0;
+ s->recoverable = 1;
+ s->start_time = jiffies;
+ do_bio_hook(s);
+
+ if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
+ bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
+ memcpy(bv, bio_iovec(bio),
+ sizeof(struct bio_vec) * bio_segments(bio));
+
+ s->bio.bio.bi_io_vec = bv;
+ s->unaligned_bvec = 1;
+ }
+
+ return s;
+}
+
+static void btree_read_async(struct closure *cl)
+{
+ struct btree_op *op = container_of(cl, struct btree_op, cl);
+
+ int ret = btree_root(search_recurse, op->c, op);
+
+ if (ret == -EAGAIN)
+ continue_at(cl, btree_read_async, bcache_wq);
+
+ closure_return(cl);
+}
+
+/* Cached devices */
+
+static void cached_dev_bio_complete(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, cl);
+ struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+
+ search_free(cl);
+ cached_dev_put(dc);
+}
+
+/* Process reads */
+
+static void cached_dev_read_complete(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, cl);
+
+ if (s->op.insert_collision)
+ bch_mark_cache_miss_collision(s);
+
+ if (s->op.cache_bio) {
+ int i;
+ struct bio_vec *bv;
+
+ __bio_for_each_segment(bv, s->op.cache_bio, i, 0)
+ __free_page(bv->bv_page);
+ }
+
+ cached_dev_bio_complete(cl);
+}
+
+static void request_read_error(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, cl);
+ struct bio_vec *bv;
+ int i;
+
+ if (s->recoverable) {
+ /* The cache read failed, but we can retry from the backing
+ * device.
+ */
+ pr_debug("recovering at sector %llu",
+ (uint64_t) s->orig_bio->bi_sector);
+
+ s->error = 0;
+ bv = s->bio.bio.bi_io_vec;
+ do_bio_hook(s);
+ s->bio.bio.bi_io_vec = bv;
+
+ if (!s->unaligned_bvec)
+ bio_for_each_segment(bv, s->orig_bio, i)
+ bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
+ else
+ memcpy(s->bio.bio.bi_io_vec,
+ bio_iovec(s->orig_bio),
+ sizeof(struct bio_vec) *
+ bio_segments(s->orig_bio));
+
+ /* XXX: invalidate cache */
+
+ trace_bcache_read_retry(&s->bio.bio);
+ closure_bio_submit(&s->bio.bio, &s->cl, s->d);
+ }
+
+ continue_at(cl, cached_dev_read_complete, NULL);
+}
+
+static void request_read_done(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, cl);
+ struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+
+ /*
+	 * s->op.cache_bio != NULL implies that we had a cache miss; cache_bio now
+ * contains data ready to be inserted into the cache.
+ *
+ * First, we copy the data we just read from cache_bio's bounce buffers
+ * to the buffers the original bio pointed to:
+ */
+
+ if (s->op.cache_bio) {
+ struct bio_vec *src, *dst;
+ unsigned src_offset, dst_offset, bytes;
+ void *dst_ptr;
+
+ bio_reset(s->op.cache_bio);
+ s->op.cache_bio->bi_sector = s->cache_miss->bi_sector;
+ s->op.cache_bio->bi_bdev = s->cache_miss->bi_bdev;
+ s->op.cache_bio->bi_size = s->cache_bio_sectors << 9;
+ bch_bio_map(s->op.cache_bio, NULL);
+
+ src = bio_iovec(s->op.cache_bio);
+ dst = bio_iovec(s->cache_miss);
+ src_offset = src->bv_offset;
+ dst_offset = dst->bv_offset;
+ dst_ptr = kmap(dst->bv_page);
+
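+		/*
+		 * Walk the source and destination bio_vecs in lockstep: copy
+		 * the largest run that fits in both current vectors, then
+		 * advance whichever of the two has been exhausted.
+		 */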
+ while (1) {
+ if (dst_offset == dst->bv_offset + dst->bv_len) {
+ kunmap(dst->bv_page);
+ dst++;
+ if (dst == bio_iovec_idx(s->cache_miss,
+ s->cache_miss->bi_vcnt))
+ break;
+
+ dst_offset = dst->bv_offset;
+ dst_ptr = kmap(dst->bv_page);
+ }
+
+ if (src_offset == src->bv_offset + src->bv_len) {
+ src++;
+ if (src == bio_iovec_idx(s->op.cache_bio,
+ s->op.cache_bio->bi_vcnt))
+ BUG();
+
+ src_offset = src->bv_offset;
+ }
+
+ bytes = min(dst->bv_offset + dst->bv_len - dst_offset,
+ src->bv_offset + src->bv_len - src_offset);
+
+ memcpy(dst_ptr + dst_offset,
+ page_address(src->bv_page) + src_offset,
+ bytes);
+
+ src_offset += bytes;
+ dst_offset += bytes;
+ }
+
+ bio_put(s->cache_miss);
+ s->cache_miss = NULL;
+ }
+
+ if (verify(dc, &s->bio.bio) && s->recoverable)
+ bch_data_verify(s);
+
+ bio_complete(s);
+
+ if (s->op.cache_bio &&
+ !test_bit(CACHE_SET_STOPPING, &s->op.c->flags)) {
+ s->op.type = BTREE_REPLACE;
+ closure_call(&s->op.cl, bch_insert_data, NULL, cl);
+ }
+
+ continue_at(cl, cached_dev_read_complete, NULL);
+}
+
+static void request_read_done_bh(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, cl);
+ struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+
+ bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip);
+
+ if (s->error)
+ continue_at_nobarrier(cl, request_read_error, bcache_wq);
+ else if (s->op.cache_bio || verify(dc, &s->bio.bio))
+ continue_at_nobarrier(cl, request_read_done, bcache_wq);
+ else
+ continue_at_nobarrier(cl, cached_dev_read_complete, NULL);
+}
+
+static int cached_dev_cache_miss(struct btree *b, struct search *s,
+ struct bio *bio, unsigned sectors)
+{
+ int ret = 0;
+ unsigned reada;
+ struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+ struct bio *miss;
+
+ miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+ if (!miss)
+ return -EAGAIN;
+
+ if (miss == bio)
+ s->op.lookup_done = true;
+
+ miss->bi_end_io = request_endio;
+ miss->bi_private = &s->cl;
+
+ if (s->cache_miss || s->op.skip)
+ goto out_submit;
+
+ if (miss != bio ||
+ (bio->bi_rw & REQ_RAHEAD) ||
+ (bio->bi_rw & REQ_META) ||
+ s->op.c->gc_stats.in_use >= CUTOFF_CACHE_READA)
+ reada = 0;
+ else {
+ reada = min(dc->readahead >> 9,
+ sectors - bio_sectors(miss));
+
+ if (bio_end(miss) + reada > bdev_sectors(miss->bi_bdev))
+ reada = bdev_sectors(miss->bi_bdev) - bio_end(miss);
+ }
+
+ s->cache_bio_sectors = bio_sectors(miss) + reada;
+ s->op.cache_bio = bio_alloc_bioset(GFP_NOWAIT,
+ DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
+ dc->disk.bio_split);
+
+ if (!s->op.cache_bio)
+ goto out_submit;
+
+ s->op.cache_bio->bi_sector = miss->bi_sector;
+ s->op.cache_bio->bi_bdev = miss->bi_bdev;
+ s->op.cache_bio->bi_size = s->cache_bio_sectors << 9;
+
+ s->op.cache_bio->bi_end_io = request_endio;
+ s->op.cache_bio->bi_private = &s->cl;
+
+ /* btree_search_recurse()'s btree iterator is no good anymore */
+ ret = -EINTR;
+ if (!bch_btree_insert_check_key(b, &s->op, s->op.cache_bio))
+ goto out_put;
+
+ bch_bio_map(s->op.cache_bio, NULL);
+ if (bch_bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
+ goto out_put;
+
+ s->cache_miss = miss;
+ bio_get(s->op.cache_bio);
+
+ trace_bcache_cache_miss(s->orig_bio);
+ closure_bio_submit(s->op.cache_bio, &s->cl, s->d);
+
+ return ret;
+out_put:
+ bio_put(s->op.cache_bio);
+ s->op.cache_bio = NULL;
+out_submit:
+ closure_bio_submit(miss, &s->cl, s->d);
+ return ret;
+}
+
+static void request_read(struct cached_dev *dc, struct search *s)
+{
+ struct closure *cl = &s->cl;
+
+ check_should_skip(dc, s);
+ closure_call(&s->op.cl, btree_read_async, NULL, cl);
+
+ continue_at(cl, request_read_done_bh, NULL);
+}
+
+/* Process writes */
+
+static void cached_dev_write_complete(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, cl);
+ struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+
+ up_read_non_owner(&dc->writeback_lock);
+ cached_dev_bio_complete(cl);
+}
+
+static bool should_writeback(struct cached_dev *dc, struct bio *bio)
+{
+ unsigned threshold = (bio->bi_rw & REQ_SYNC)
+ ? CUTOFF_WRITEBACK_SYNC
+ : CUTOFF_WRITEBACK;
+
+ return !atomic_read(&dc->disk.detaching) &&
+ cache_mode(dc, bio) == CACHE_MODE_WRITEBACK &&
+ dc->disk.c->gc_stats.in_use < threshold;
+}
+
+static void request_write(struct cached_dev *dc, struct search *s)
+{
+ struct closure *cl = &s->cl;
+ struct bio *bio = &s->bio.bio;
+ struct bkey start, end;
+ start = KEY(dc->disk.id, bio->bi_sector, 0);
+ end = KEY(dc->disk.id, bio_end(bio), 0);
+
+ bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);
+
+ check_should_skip(dc, s);
+ down_read_non_owner(&dc->writeback_lock);
+
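+	/*
+	 * Writes that overlap keys currently queued for writeback are forced
+	 * into writeback mode and never skipped - the cached copy has to stay
+	 * authoritative while the overlapping dirty data is still in flight.
+	 */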
+ if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
+ s->op.skip = false;
+ s->writeback = true;
+ }
+
+ if (bio->bi_rw & REQ_DISCARD)
+ goto skip;
+
+ if (s->op.skip)
+ goto skip;
+
+ if (should_writeback(dc, s->orig_bio))
+ s->writeback = true;
+
+ if (!s->writeback) {
+ s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
+ dc->disk.bio_split);
+
+ trace_bcache_writethrough(s->orig_bio);
+ closure_bio_submit(bio, cl, s->d);
+ } else {
+ s->op.cache_bio = bio;
+ trace_bcache_writeback(s->orig_bio);
+ bch_writeback_add(dc, bio_sectors(bio));
+ }
+out:
+ closure_call(&s->op.cl, bch_insert_data, NULL, cl);
+ continue_at(cl, cached_dev_write_complete, NULL);
+skip:
+ s->op.skip = true;
+ s->op.cache_bio = s->orig_bio;
+ bio_get(s->op.cache_bio);
+ trace_bcache_write_skip(s->orig_bio);
+
+ if ((bio->bi_rw & REQ_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(dc->bdev)))
+ goto out;
+
+ closure_bio_submit(bio, cl, s->d);
+ goto out;
+}
+
+static void request_nodata(struct cached_dev *dc, struct search *s)
+{
+ struct closure *cl = &s->cl;
+ struct bio *bio = &s->bio.bio;
+
+ if (bio->bi_rw & REQ_DISCARD) {
+ request_write(dc, s);
+ return;
+ }
+
+ if (s->op.flush_journal)
+ bch_journal_meta(s->op.c, cl);
+
+ closure_bio_submit(bio, cl, s->d);
+
+ continue_at(cl, cached_dev_bio_complete, NULL);
+}
+
+/* Cached devices - read & write stuff */
+
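+/*
+ * Rough sketch of how this is used (see check_should_skip() below): a return
+ * value of 0 means "not congested"; a nonzero return replaces the per-device
+ * sequential cutoff, and the heavier the congestion the smaller the value, so
+ * more IO ends up bypassing the cache.
+ */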
+int bch_get_congested(struct cache_set *c)
+{
+ int i;
+
+ if (!c->congested_read_threshold_us &&
+ !c->congested_write_threshold_us)
+ return 0;
+
+ i = (local_clock_us() - c->congested_last_us) / 1024;
+ if (i < 0)
+ return 0;
+
+ i += atomic_read(&c->congested);
+ if (i >= 0)
+ return 0;
+
+ i += CONGESTED_MAX;
+
+ return i <= 0 ? 1 : fract_exp_two(i, 6);
+}
+
+static void add_sequential(struct task_struct *t)
+{
+ ewma_add(t->sequential_io_avg,
+ t->sequential_io, 8, 0);
+
+ t->sequential_io = 0;
+}
+
+static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
+{
+ return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
+}
+
+static void check_should_skip(struct cached_dev *dc, struct search *s)
+{
+ struct cache_set *c = s->op.c;
+ struct bio *bio = &s->bio.bio;
+
+ long rand;
+ int cutoff = bch_get_congested(c);
+ unsigned mode = cache_mode(dc, bio);
+
+ if (atomic_read(&dc->disk.detaching) ||
+ c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
+ (bio->bi_rw & REQ_DISCARD))
+ goto skip;
+
+ if (mode == CACHE_MODE_NONE ||
+ (mode == CACHE_MODE_WRITEAROUND &&
+ (bio->bi_rw & REQ_WRITE)))
+ goto skip;
+
+ if (bio->bi_sector & (c->sb.block_size - 1) ||
+ bio_sectors(bio) & (c->sb.block_size - 1)) {
+ pr_debug("skipping unaligned io");
+ goto skip;
+ }
+
+ if (!cutoff) {
+ cutoff = dc->sequential_cutoff >> 9;
+
+ if (!cutoff)
+ goto rescale;
+
+ if (mode == CACHE_MODE_WRITEBACK &&
+ (bio->bi_rw & REQ_WRITE) &&
+ (bio->bi_rw & REQ_SYNC))
+ goto rescale;
+ }
+
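+	/*
+	 * Sequential IO detection: recently seen IOs are kept in a small hash
+	 * table keyed by the sector where they ended. If this bio starts where
+	 * a tracked IO left off (and that entry hasn't expired), it's treated
+	 * as a continuation of the same stream and added to the stream's
+	 * running total; otherwise the least recently used slot is recycled
+	 * for a new stream.
+	 */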
+ if (dc->sequential_merge) {
+ struct io *i;
+
+ spin_lock(&dc->io_lock);
+
+ hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
+ if (i->last == bio->bi_sector &&
+ time_before(jiffies, i->jiffies))
+ goto found;
+
+ i = list_first_entry(&dc->io_lru, struct io, lru);
+
+ add_sequential(s->task);
+ i->sequential = 0;
+found:
+ if (i->sequential + bio->bi_size > i->sequential)
+ i->sequential += bio->bi_size;
+
+ i->last = bio_end(bio);
+ i->jiffies = jiffies + msecs_to_jiffies(5000);
+ s->task->sequential_io = i->sequential;
+
+ hlist_del(&i->hash);
+ hlist_add_head(&i->hash, iohash(dc, i->last));
+ list_move_tail(&i->lru, &dc->io_lru);
+
+ spin_unlock(&dc->io_lock);
+ } else {
+ s->task->sequential_io = bio->bi_size;
+
+ add_sequential(s->task);
+ }
+
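+	/*
+	 * Fuzz the threshold: subtract the popcount of a random value
+	 * (0..BITS_PER_LONG set bits, typically around 16 for a 32 bit random
+	 * int) so the bypass decision isn't a hard edge right at the cutoff.
+	 */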
+ rand = get_random_int();
+ cutoff -= bitmap_weight(&rand, BITS_PER_LONG);
+
+ if (cutoff <= (int) (max(s->task->sequential_io,
+ s->task->sequential_io_avg) >> 9))
+ goto skip;
+
+rescale:
+ bch_rescale_priorities(c, bio_sectors(bio));
+ return;
+skip:
+ bch_mark_sectors_bypassed(s, bio_sectors(bio));
+ s->op.skip = true;
+}
+
+static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct search *s;
+ struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
+ struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+ int cpu, rw = bio_data_dir(bio);
+
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &d->disk->part0, ios[rw]);
+ part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
+ part_stat_unlock();
+
+ bio->bi_bdev = dc->bdev;
+ bio->bi_sector += dc->sb.data_offset;
+
+ if (cached_dev_get(dc)) {
+ s = search_alloc(bio, d);
+ trace_bcache_request_start(s, bio);
+
+ if (!bio_has_data(bio))
+ request_nodata(dc, s);
+ else if (rw)
+ request_write(dc, s);
+ else
+ request_read(dc, s);
+ } else {
+ if ((bio->bi_rw & REQ_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(dc->bdev)))
+ bio_endio(bio, 0);
+ else
+ bch_generic_make_request(bio, &d->bio_split_hook);
+ }
+}
+
+static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+ return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
+}
+
+static int cached_dev_congested(void *data, int bits)
+{
+ struct bcache_device *d = data;
+ struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+ struct request_queue *q = bdev_get_queue(dc->bdev);
+ int ret = 0;
+
+ if (bdi_congested(&q->backing_dev_info, bits))
+ return 1;
+
+ if (cached_dev_get(dc)) {
+ unsigned i;
+ struct cache *ca;
+
+ for_each_cache(ca, d->c, i) {
+ q = bdev_get_queue(ca->bdev);
+ ret |= bdi_congested(&q->backing_dev_info, bits);
+ }
+
+ cached_dev_put(dc);
+ }
+
+ return ret;
+}
+
+void bch_cached_dev_request_init(struct cached_dev *dc)
+{
+ struct gendisk *g = dc->disk.disk;
+
+ g->queue->make_request_fn = cached_dev_make_request;
+ g->queue->backing_dev_info.congested_fn = cached_dev_congested;
+ dc->disk.cache_miss = cached_dev_cache_miss;
+ dc->disk.ioctl = cached_dev_ioctl;
+}
+
+/* Flash backed devices */
+
+static int flash_dev_cache_miss(struct btree *b, struct search *s,
+ struct bio *bio, unsigned sectors)
+{
+ /* Zero fill bio */
+
+ while (bio->bi_idx != bio->bi_vcnt) {
+ struct bio_vec *bv = bio_iovec(bio);
+ unsigned j = min(bv->bv_len >> 9, sectors);
+
+ void *p = kmap(bv->bv_page);
+ memset(p + bv->bv_offset, 0, j << 9);
+ kunmap(bv->bv_page);
+
+ bv->bv_len -= j << 9;
+ bv->bv_offset += j << 9;
+
+ if (bv->bv_len)
+ return 0;
+
+ bio->bi_sector += j;
+ bio->bi_size -= j << 9;
+
+ bio->bi_idx++;
+ sectors -= j;
+ }
+
+ s->op.lookup_done = true;
+
+ return 0;
+}
+
+static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct search *s;
+ struct closure *cl;
+ struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
+ int cpu, rw = bio_data_dir(bio);
+
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &d->disk->part0, ios[rw]);
+ part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
+ part_stat_unlock();
+
+ s = search_alloc(bio, d);
+ cl = &s->cl;
+ bio = &s->bio.bio;
+
+ trace_bcache_request_start(s, bio);
+
+ if (bio_has_data(bio) && !rw) {
+ closure_call(&s->op.cl, btree_read_async, NULL, cl);
+ } else if (bio_has_data(bio) || s->op.skip) {
+ bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
+ &KEY(d->id, bio->bi_sector, 0),
+ &KEY(d->id, bio_end(bio), 0));
+
+ s->writeback = true;
+ s->op.cache_bio = bio;
+
+ closure_call(&s->op.cl, bch_insert_data, NULL, cl);
+ } else {
+ /* No data - probably a cache flush */
+ if (s->op.flush_journal)
+ bch_journal_meta(s->op.c, cl);
+ }
+
+ continue_at(cl, search_free, NULL);
+}
+
+static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ return -ENOTTY;
+}
+
+static int flash_dev_congested(void *data, int bits)
+{
+ struct bcache_device *d = data;
+ struct request_queue *q;
+ struct cache *ca;
+ unsigned i;
+ int ret = 0;
+
+ for_each_cache(ca, d->c, i) {
+ q = bdev_get_queue(ca->bdev);
+ ret |= bdi_congested(&q->backing_dev_info, bits);
+ }
+
+ return ret;
+}
+
+void bch_flash_dev_request_init(struct bcache_device *d)
+{
+ struct gendisk *g = d->disk;
+
+ g->queue->make_request_fn = flash_dev_make_request;
+ g->queue->backing_dev_info.congested_fn = flash_dev_congested;
+ d->cache_miss = flash_dev_cache_miss;
+ d->ioctl = flash_dev_ioctl;
+}
+
+void bch_request_exit(void)
+{
+#ifdef CONFIG_CGROUP_BCACHE
+ cgroup_unload_subsys(&bcache_subsys);
+#endif
+ if (bch_search_cache)
+ kmem_cache_destroy(bch_search_cache);
+}
+
+int __init bch_request_init(void)
+{
+ bch_search_cache = KMEM_CACHE(search, 0);
+ if (!bch_search_cache)
+ return -ENOMEM;
+
+#ifdef CONFIG_CGROUP_BCACHE
+ cgroup_load_subsys(&bcache_subsys);
+ init_bch_cgroup(&bcache_default_cgroup);
+
+ cgroup_add_cftypes(&bcache_subsys, bch_files);
+#endif
+ return 0;
+}
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
new file mode 100644
index 0000000..254d9ab
--- /dev/null
+++ b/drivers/md/bcache/request.h
@@ -0,0 +1,62 @@
+#ifndef _BCACHE_REQUEST_H_
+#define _BCACHE_REQUEST_H_
+
+#include <linux/cgroup.h>
+
+struct search {
+ /* Stack frame for bio_complete */
+ struct closure cl;
+
+ struct bcache_device *d;
+ struct task_struct *task;
+
+ struct bbio bio;
+ struct bio *orig_bio;
+ struct bio *cache_miss;
+ unsigned cache_bio_sectors;
+
+ unsigned recoverable:1;
+ unsigned unaligned_bvec:1;
+
+ unsigned write:1;
+ unsigned writeback:1;
+
+	/* IO error returned to s->orig_bio */
+ short error;
+ unsigned long start_time;
+
+	/* Anything past op->keys won't get zeroed in search_alloc() */
+ struct btree_op op;
+};
+
+void bch_cache_read_endio(struct bio *, int);
+int bch_get_congested(struct cache_set *);
+void bch_insert_data(struct closure *cl);
+void bch_btree_insert_async(struct closure *);
+
+void bch_open_buckets_free(struct cache_set *);
+int bch_open_buckets_alloc(struct cache_set *);
+
+void bch_cached_dev_request_init(struct cached_dev *dc);
+void bch_flash_dev_request_init(struct bcache_device *d);
+
+extern struct kmem_cache *bch_search_cache, *bch_passthrough_cache;
+
+struct bch_cgroup {
+#ifdef CONFIG_CGROUP_BCACHE
+ struct cgroup_subsys_state css;
+#endif
+ /*
+ * We subtract one from the index into bch_cache_modes[], so that
+ * default == -1; this makes it so the rest match up with d->cache_mode,
+ * and we use d->cache_mode if cgrp->cache_mode < 0
+ */
+ short cache_mode;
+ bool verify;
+ struct cache_stat_collector stats;
+};
+
+struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio);
+
+#endif /* _BCACHE_REQUEST_H_ */
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
new file mode 100644
index 0000000..64e6794
--- /dev/null
+++ b/drivers/md/bcache/stats.c
@@ -0,0 +1,246 @@
+/*
+ * bcache stats code
+ *
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "stats.h"
+#include "btree.h"
+#include "request.h"
+#include "sysfs.h"
+
+/*
+ * We keep absolute totals of various statistics, and additionally a set of three
+ * rolling averages.
+ *
+ * Every so often, a timer goes off and rescales the rolling averages.
+ * the *_RESCALE constants below are how many timer ticks have to elapse before
+ * we rescale each set of numbers; that gets us half-lives of 5 minutes, one hour,
+ * and one day.
+ *
+ * accounting_delay is how often the timer goes off - 22 times in 5 minutes,
+ * and accounting_weight is what we use to rescale:
+ *
+ * pow(31 / 32, 22) ~= 1/2
+ *
+ * So that we don't have to increment each set of numbers every time we (say)
+ * get a cache hit, we increment a single atomic_t in acc->collector, and when
+ * the rescale function runs it resets the atomic counter to 0 and adds its
+ * old value to each of the exported numbers.
+ *
+ * To reduce rounding error, the numbers in struct cache_stats are all
+ * stored left shifted by 16, and scaled back in the sysfs show() function.
+ */
+
+static const unsigned DAY_RESCALE = 288;
+static const unsigned HOUR_RESCALE = 12;
+static const unsigned FIVE_MINUTE_RESCALE = 1;
+static const unsigned accounting_delay = (HZ * 300) / 22;
+static const unsigned accounting_weight = 32;
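+
+/*
+ * Worked example of the numbers above: 5 minutes / 22 timer ticks is roughly
+ * one tick every 13.6 seconds. The five minute stats rescale on every tick,
+ * so after 22 rescales (~5 minutes) they've been multiplied by
+ * (31/32)^22 ~= 1/2. The hour stats rescale every 12 ticks and the day stats
+ * every 288 ticks, which works out to 22 rescales per hour and per day
+ * respectively - the same factor of two decay, just over longer half-lives.
+ */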
+
+/* sysfs reading/writing */
+
+read_attribute(cache_hits);
+read_attribute(cache_misses);
+read_attribute(cache_bypass_hits);
+read_attribute(cache_bypass_misses);
+read_attribute(cache_hit_ratio);
+read_attribute(cache_readaheads);
+read_attribute(cache_miss_collisions);
+read_attribute(bypassed);
+
+SHOW(bch_stats)
+{
+ struct cache_stats *s =
+ container_of(kobj, struct cache_stats, kobj);
+#define var(stat) (s->stat >> 16)
+ var_print(cache_hits);
+ var_print(cache_misses);
+ var_print(cache_bypass_hits);
+ var_print(cache_bypass_misses);
+
+ sysfs_print(cache_hit_ratio,
+ DIV_SAFE(var(cache_hits) * 100,
+ var(cache_hits) + var(cache_misses)));
+
+ var_print(cache_readaheads);
+ var_print(cache_miss_collisions);
+ sysfs_hprint(bypassed, var(sectors_bypassed) << 9);
+#undef var
+ return 0;
+}
+
+STORE(bch_stats)
+{
+ return size;
+}
+
+static void bch_stats_release(struct kobject *k)
+{
+}
+
+static struct attribute *bch_stats_files[] = {
+ &sysfs_cache_hits,
+ &sysfs_cache_misses,
+ &sysfs_cache_bypass_hits,
+ &sysfs_cache_bypass_misses,
+ &sysfs_cache_hit_ratio,
+ &sysfs_cache_readaheads,
+ &sysfs_cache_miss_collisions,
+ &sysfs_bypassed,
+ NULL
+};
+static KTYPE(bch_stats);
+
+static void scale_accounting(unsigned long data);
+
+void bch_cache_accounting_init(struct cache_accounting *acc,
+ struct closure *parent)
+{
+ kobject_init(&acc->total.kobj, &bch_stats_ktype);
+ kobject_init(&acc->five_minute.kobj, &bch_stats_ktype);
+ kobject_init(&acc->hour.kobj, &bch_stats_ktype);
+ kobject_init(&acc->day.kobj, &bch_stats_ktype);
+
+ closure_init(&acc->cl, parent);
+ init_timer(&acc->timer);
+ acc->timer.expires = jiffies + accounting_delay;
+ acc->timer.data = (unsigned long) acc;
+ acc->timer.function = scale_accounting;
+ add_timer(&acc->timer);
+}
+
+int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
+ struct kobject *parent)
+{
+ int ret = kobject_add(&acc->total.kobj, parent,
+ "stats_total");
+ ret = ret ?: kobject_add(&acc->five_minute.kobj, parent,
+ "stats_five_minute");
+ ret = ret ?: kobject_add(&acc->hour.kobj, parent,
+ "stats_hour");
+ ret = ret ?: kobject_add(&acc->day.kobj, parent,
+ "stats_day");
+ return ret;
+}
+
+void bch_cache_accounting_clear(struct cache_accounting *acc)
+{
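+	/*
+	 * Zero the seven unsigned long counters in acc->total, cache_hits
+	 * through sectors_bypassed (see struct cache_stats in stats.h).
+	 */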
+ memset(&acc->total.cache_hits,
+ 0,
+ sizeof(unsigned long) * 7);
+}
+
+void bch_cache_accounting_destroy(struct cache_accounting *acc)
+{
+ kobject_put(&acc->total.kobj);
+ kobject_put(&acc->five_minute.kobj);
+ kobject_put(&acc->hour.kobj);
+ kobject_put(&acc->day.kobj);
+
+ atomic_set(&acc->closing, 1);
+ if (del_timer_sync(&acc->timer))
+ closure_return(&acc->cl);
+}
+
+/* EWMA scaling */
+
+static void scale_stat(unsigned long *stat)
+{
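+	/*
+	 * Assuming ewma_add(ewma, val, weight, factor) folds in a new sample
+	 * of val, passing 0 here decays *stat by a factor of
+	 * (accounting_weight - 1) / accounting_weight = 31/32 per call.
+	 */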
+ *stat = ewma_add(*stat, 0, accounting_weight, 0);
+}
+
+static void scale_stats(struct cache_stats *stats, unsigned long rescale_at)
+{
+ if (++stats->rescale == rescale_at) {
+ stats->rescale = 0;
+ scale_stat(&stats->cache_hits);
+ scale_stat(&stats->cache_misses);
+ scale_stat(&stats->cache_bypass_hits);
+ scale_stat(&stats->cache_bypass_misses);
+ scale_stat(&stats->cache_readaheads);
+ scale_stat(&stats->cache_miss_collisions);
+ scale_stat(&stats->sectors_bypassed);
+ }
+}
+
+static void scale_accounting(unsigned long data)
+{
+ struct cache_accounting *acc = (struct cache_accounting *) data;
+
+#define move_stat(name) do { \
+ unsigned t = atomic_xchg(&acc->collector.name, 0); \
+ t <<= 16; \
+ acc->five_minute.name += t; \
+ acc->hour.name += t; \
+ acc->day.name += t; \
+ acc->total.name += t; \
+} while (0)
+
+ move_stat(cache_hits);
+ move_stat(cache_misses);
+ move_stat(cache_bypass_hits);
+ move_stat(cache_bypass_misses);
+ move_stat(cache_readaheads);
+ move_stat(cache_miss_collisions);
+ move_stat(sectors_bypassed);
+
+ scale_stats(&acc->total, 0);
+ scale_stats(&acc->day, DAY_RESCALE);
+ scale_stats(&acc->hour, HOUR_RESCALE);
+ scale_stats(&acc->five_minute, FIVE_MINUTE_RESCALE);
+
+ acc->timer.expires += accounting_delay;
+
+ if (!atomic_read(&acc->closing))
+ add_timer(&acc->timer);
+ else
+ closure_return(&acc->cl);
+}
+
+static void mark_cache_stats(struct cache_stat_collector *stats,
+ bool hit, bool bypass)
+{
+ if (!bypass)
+ if (hit)
+ atomic_inc(&stats->cache_hits);
+ else
+ atomic_inc(&stats->cache_misses);
+ else
+ if (hit)
+ atomic_inc(&stats->cache_bypass_hits);
+ else
+ atomic_inc(&stats->cache_bypass_misses);
+}
+
+void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass)
+{
+ struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+ mark_cache_stats(&dc->accounting.collector, hit, bypass);
+ mark_cache_stats(&s->op.c->accounting.collector, hit, bypass);
+#ifdef CONFIG_CGROUP_BCACHE
+ mark_cache_stats(&(bch_bio_to_cgroup(s->orig_bio)->stats), hit, bypass);
+#endif
+}
+
+void bch_mark_cache_readahead(struct search *s)
+{
+ struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+ atomic_inc(&dc->accounting.collector.cache_readaheads);
+ atomic_inc(&s->op.c->accounting.collector.cache_readaheads);
+}
+
+void bch_mark_cache_miss_collision(struct search *s)
+{
+ struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+ atomic_inc(&dc->accounting.collector.cache_miss_collisions);
+ atomic_inc(&s->op.c->accounting.collector.cache_miss_collisions);
+}
+
+void bch_mark_sectors_bypassed(struct search *s, int sectors)
+{
+ struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+ atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
+ atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed);
+}
diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h
new file mode 100644
index 0000000..c7c7a8f
--- /dev/null
+++ b/drivers/md/bcache/stats.h
@@ -0,0 +1,58 @@
+#ifndef _BCACHE_STATS_H_
+#define _BCACHE_STATS_H_
+
+struct cache_stat_collector {
+ atomic_t cache_hits;
+ atomic_t cache_misses;
+ atomic_t cache_bypass_hits;
+ atomic_t cache_bypass_misses;
+ atomic_t cache_readaheads;
+ atomic_t cache_miss_collisions;
+ atomic_t sectors_bypassed;
+};
+
+struct cache_stats {
+ struct kobject kobj;
+
+ unsigned long cache_hits;
+ unsigned long cache_misses;
+ unsigned long cache_bypass_hits;
+ unsigned long cache_bypass_misses;
+ unsigned long cache_readaheads;
+ unsigned long cache_miss_collisions;
+ unsigned long sectors_bypassed;
+
+ unsigned rescale;
+};
+
+struct cache_accounting {
+ struct closure cl;
+ struct timer_list timer;
+ atomic_t closing;
+
+ struct cache_stat_collector collector;
+
+ struct cache_stats total;
+ struct cache_stats five_minute;
+ struct cache_stats hour;
+ struct cache_stats day;
+};
+
+struct search;
+
+void bch_cache_accounting_init(struct cache_accounting *acc,
+ struct closure *parent);
+
+int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
+ struct kobject *parent);
+
+void bch_cache_accounting_clear(struct cache_accounting *acc);
+
+void bch_cache_accounting_destroy(struct cache_accounting *acc);
+
+void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass);
+void bch_mark_cache_readahead(struct search *s);
+void bch_mark_cache_miss_collision(struct search *s);
+void bch_mark_sectors_bypassed(struct search *s, int sectors);
+
+#endif /* _BCACHE_STATS_H_ */
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
new file mode 100644
index 0000000..c8046bc
--- /dev/null
+++ b/drivers/md/bcache/super.c
@@ -0,0 +1,1987 @@
+/*
+ * bcache setup/teardown code, and some metadata io - read a superblock and
+ * figure out what to do with it.
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "request.h"
+
+#include <linux/buffer_head.h>
+#include <linux/debugfs.h>
+#include <linux/genhd.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/reboot.h>
+#include <linux/sysfs.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
+
+static const char bcache_magic[] = {
+ 0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
+ 0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
+};
+
+static const char invalid_uuid[] = {
+ 0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
+ 0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
+};
+
+/* Default is -1; we skip past it for struct cached_dev's cache mode */
+const char * const bch_cache_modes[] = {
+ "default",
+ "writethrough",
+ "writeback",
+ "writearound",
+ "none",
+ NULL
+};
+
+struct uuid_entry_v0 {
+ uint8_t uuid[16];
+ uint8_t label[32];
+ uint32_t first_reg;
+ uint32_t last_reg;
+ uint32_t invalidated;
+ uint32_t pad;
+};
+
+static struct kobject *bcache_kobj;
+struct mutex bch_register_lock;
+LIST_HEAD(bch_cache_sets);
+static LIST_HEAD(uncached_devices);
+
+static int bcache_major, bcache_minor;
+static wait_queue_head_t unregister_wait;
+struct workqueue_struct *bcache_wq;
+
+#define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
+
+static void bio_split_pool_free(struct bio_split_pool *p)
+{
+ if (p->bio_split_hook)
+ mempool_destroy(p->bio_split_hook);
+
+ if (p->bio_split)
+ bioset_free(p->bio_split);
+}
+
+static int bio_split_pool_init(struct bio_split_pool *p)
+{
+ p->bio_split = bioset_create(4, 0);
+ if (!p->bio_split)
+ return -ENOMEM;
+
+ p->bio_split_hook = mempool_create_kmalloc_pool(4,
+ sizeof(struct bio_split_hook));
+ if (!p->bio_split_hook)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/* Superblock */
+
+static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
+ struct page **res)
+{
+ const char *err;
+ struct cache_sb *s;
+ struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
+ unsigned i;
+
+ if (!bh)
+ return "IO error";
+
+ s = (struct cache_sb *) bh->b_data;
+
+ sb->offset = le64_to_cpu(s->offset);
+ sb->version = le64_to_cpu(s->version);
+
+ memcpy(sb->magic, s->magic, 16);
+ memcpy(sb->uuid, s->uuid, 16);
+ memcpy(sb->set_uuid, s->set_uuid, 16);
+ memcpy(sb->label, s->label, SB_LABEL_SIZE);
+
+ sb->flags = le64_to_cpu(s->flags);
+ sb->seq = le64_to_cpu(s->seq);
+ sb->last_mount = le32_to_cpu(s->last_mount);
+ sb->first_bucket = le16_to_cpu(s->first_bucket);
+ sb->keys = le16_to_cpu(s->keys);
+
+ for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
+ sb->d[i] = le64_to_cpu(s->d[i]);
+
+ pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
+ sb->version, sb->flags, sb->seq, sb->keys);
+
+ err = "Not a bcache superblock";
+ if (sb->offset != SB_SECTOR)
+ goto err;
+
+ if (memcmp(sb->magic, bcache_magic, 16))
+ goto err;
+
+ err = "Too many journal buckets";
+ if (sb->keys > SB_JOURNAL_BUCKETS)
+ goto err;
+
+ err = "Bad checksum";
+ if (s->csum != csum_set(s))
+ goto err;
+
+ err = "Bad UUID";
+ if (bch_is_zero(sb->uuid, 16))
+ goto err;
+
+ sb->block_size = le16_to_cpu(s->block_size);
+
+ err = "Superblock block size smaller than device block size";
+ if (sb->block_size << 9 < bdev_logical_block_size(bdev))
+ goto err;
+
+ switch (sb->version) {
+ case BCACHE_SB_VERSION_BDEV:
+ sb->data_offset = BDEV_DATA_START_DEFAULT;
+ break;
+ case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
+ sb->data_offset = le64_to_cpu(s->data_offset);
+
+ err = "Bad data offset";
+ if (sb->data_offset < BDEV_DATA_START_DEFAULT)
+ goto err;
+
+ break;
+ case BCACHE_SB_VERSION_CDEV:
+ case BCACHE_SB_VERSION_CDEV_WITH_UUID:
+ sb->nbuckets = le64_to_cpu(s->nbuckets);
+ sb->block_size = le16_to_cpu(s->block_size);
+ sb->bucket_size = le16_to_cpu(s->bucket_size);
+
+ sb->nr_in_set = le16_to_cpu(s->nr_in_set);
+ sb->nr_this_dev = le16_to_cpu(s->nr_this_dev);
+
+ err = "Too many buckets";
+ if (sb->nbuckets > LONG_MAX)
+ goto err;
+
+ err = "Not enough buckets";
+ if (sb->nbuckets < 1 << 7)
+ goto err;
+
+ err = "Bad block/bucket size";
+ if (!is_power_of_2(sb->block_size) ||
+ sb->block_size > PAGE_SECTORS ||
+ !is_power_of_2(sb->bucket_size) ||
+ sb->bucket_size < PAGE_SECTORS)
+ goto err;
+
+ err = "Invalid superblock: device too small";
+ if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)
+ goto err;
+
+ err = "Bad UUID";
+ if (bch_is_zero(sb->set_uuid, 16))
+ goto err;
+
+ err = "Bad cache device number in set";
+ if (!sb->nr_in_set ||
+ sb->nr_in_set <= sb->nr_this_dev ||
+ sb->nr_in_set > MAX_CACHES_PER_SET)
+ goto err;
+
+ err = "Journal buckets not sequential";
+ for (i = 0; i < sb->keys; i++)
+ if (sb->d[i] != sb->first_bucket + i)
+ goto err;
+
+ err = "Too many journal buckets";
+ if (sb->first_bucket + sb->keys > sb->nbuckets)
+ goto err;
+
+ err = "Invalid superblock: first bucket comes before end of super";
+ if (sb->first_bucket * sb->bucket_size < 16)
+ goto err;
+
+ break;
+ default:
+ err = "Unsupported superblock version";
+ goto err;
+ }
+
+ sb->last_mount = get_seconds();
+ err = NULL;
+
+ get_page(bh->b_page);
+ *res = bh->b_page;
+err:
+ put_bh(bh);
+ return err;
+}
+
+static void write_bdev_super_endio(struct bio *bio, int error)
+{
+ struct cached_dev *dc = bio->bi_private;
+ /* XXX: error checking */
+
+ closure_put(&dc->sb_write.cl);
+}
+
+static void __write_super(struct cache_sb *sb, struct bio *bio)
+{
+ struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
+ unsigned i;
+
+ bio->bi_sector = SB_SECTOR;
+ bio->bi_rw = REQ_SYNC|REQ_META;
+ bio->bi_size = SB_SIZE;
+ bch_bio_map(bio, NULL);
+
+ out->offset = cpu_to_le64(sb->offset);
+ out->version = cpu_to_le64(sb->version);
+
+ memcpy(out->uuid, sb->uuid, 16);
+ memcpy(out->set_uuid, sb->set_uuid, 16);
+ memcpy(out->label, sb->label, SB_LABEL_SIZE);
+
+ out->flags = cpu_to_le64(sb->flags);
+ out->seq = cpu_to_le64(sb->seq);
+
+ out->last_mount = cpu_to_le32(sb->last_mount);
+ out->first_bucket = cpu_to_le16(sb->first_bucket);
+ out->keys = cpu_to_le16(sb->keys);
+
+ for (i = 0; i < sb->keys; i++)
+ out->d[i] = cpu_to_le64(sb->d[i]);
+
+ out->csum = csum_set(out);
+
+ pr_debug("ver %llu, flags %llu, seq %llu",
+ sb->version, sb->flags, sb->seq);
+
+ submit_bio(REQ_WRITE, bio);
+}
+
+void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
+{
+ struct closure *cl = &dc->sb_write.cl;
+ struct bio *bio = &dc->sb_bio;
+
+ closure_lock(&dc->sb_write, parent);
+
+ bio_reset(bio);
+ bio->bi_bdev = dc->bdev;
+ bio->bi_end_io = write_bdev_super_endio;
+ bio->bi_private = dc;
+
+ closure_get(cl);
+ __write_super(&dc->sb, bio);
+
+ closure_return(cl);
+}
+
+static void write_super_endio(struct bio *bio, int error)
+{
+ struct cache *ca = bio->bi_private;
+
+ bch_count_io_errors(ca, error, "writing superblock");
+ closure_put(&ca->set->sb_write.cl);
+}
+
+void bcache_write_super(struct cache_set *c)
+{
+ struct closure *cl = &c->sb_write.cl;
+ struct cache *ca;
+ unsigned i;
+
+ closure_lock(&c->sb_write, &c->cl);
+
+ c->sb.seq++;
+
+ for_each_cache(ca, c, i) {
+ struct bio *bio = &ca->sb_bio;
+
+ ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
+ ca->sb.seq = c->sb.seq;
+ ca->sb.last_mount = c->sb.last_mount;
+
+ SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
+
+ bio_reset(bio);
+ bio->bi_bdev = ca->bdev;
+ bio->bi_end_io = write_super_endio;
+ bio->bi_private = ca;
+
+ closure_get(cl);
+ __write_super(&ca->sb, bio);
+ }
+
+ closure_return(cl);
+}
+
+/* UUID io */
+
+static void uuid_endio(struct bio *bio, int error)
+{
+ struct closure *cl = bio->bi_private;
+ struct cache_set *c = container_of(cl, struct cache_set, uuid_write.cl);
+
+ cache_set_err_on(error, c, "accessing uuids");
+ bch_bbio_free(bio, c);
+ closure_put(cl);
+}
+
+static void uuid_io(struct cache_set *c, unsigned long rw,
+ struct bkey *k, struct closure *parent)
+{
+ struct closure *cl = &c->uuid_write.cl;
+ struct uuid_entry *u;
+ unsigned i;
+
+ BUG_ON(!parent);
+ closure_lock(&c->uuid_write, parent);
+
+ for (i = 0; i < KEY_PTRS(k); i++) {
+ struct bio *bio = bch_bbio_alloc(c);
+
+ bio->bi_rw = REQ_SYNC|REQ_META|rw;
+ bio->bi_size = KEY_SIZE(k) << 9;
+
+ bio->bi_end_io = uuid_endio;
+ bio->bi_private = cl;
+ bch_bio_map(bio, c->uuids);
+
+ bch_submit_bbio(bio, c, k, i);
+
+ if (!(rw & WRITE))
+ break;
+ }
+
+ pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read",
+ pkey(&c->uuid_bucket));
+
+ for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
+ if (!bch_is_zero(u->uuid, 16))
+ pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
+ u - c->uuids, u->uuid, u->label,
+ u->first_reg, u->last_reg, u->invalidated);
+
+ closure_return(cl);
+}
+
+static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
+{
+ struct bkey *k = &j->uuid_bucket;
+
+ if (__bch_ptr_invalid(c, 1, k))
+ return "bad uuid pointer";
+
+ bkey_copy(&c->uuid_bucket, k);
+ uuid_io(c, READ_SYNC, k, cl);
+
+ if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
+ struct uuid_entry_v0 *u0 = (void *) c->uuids;
+ struct uuid_entry *u1 = (void *) c->uuids;
+ int i;
+
+ closure_sync(cl);
+
+ /*
+ * Since the new uuid entry is bigger than the old, we have to
+ * convert starting at the highest memory address and work down
+ * in order to do it in place
+ */
+
+ for (i = c->nr_uuids - 1;
+ i >= 0;
+ --i) {
+ memcpy(u1[i].uuid, u0[i].uuid, 16);
+ memcpy(u1[i].label, u0[i].label, 32);
+
+ u1[i].first_reg = u0[i].first_reg;
+ u1[i].last_reg = u0[i].last_reg;
+ u1[i].invalidated = u0[i].invalidated;
+
+ u1[i].flags = 0;
+ u1[i].sectors = 0;
+ }
+ }
+
+ return NULL;
+}
+
+static int __uuid_write(struct cache_set *c)
+{
+ BKEY_PADDED(key) k;
+ struct closure cl;
+ closure_init_stack(&cl);
+
+ lockdep_assert_held(&bch_register_lock);
+
+ if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, &cl))
+ return 1;
+
+ SET_KEY_SIZE(&k.key, c->sb.bucket_size);
+ uuid_io(c, REQ_WRITE, &k.key, &cl);
+ closure_sync(&cl);
+
+ bkey_copy(&c->uuid_bucket, &k.key);
+ __bkey_put(c, &k.key);
+ return 0;
+}
+
+int bch_uuid_write(struct cache_set *c)
+{
+ int ret = __uuid_write(c);
+
+ if (!ret)
+ bch_journal_meta(c, NULL);
+
+ return ret;
+}
+
+static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
+{
+ struct uuid_entry *u;
+
+ for (u = c->uuids;
+ u < c->uuids + c->nr_uuids; u++)
+ if (!memcmp(u->uuid, uuid, 16))
+ return u;
+
+ return NULL;
+}
+
+static struct uuid_entry *uuid_find_empty(struct cache_set *c)
+{
+ static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
+ return uuid_find(c, zero_uuid);
+}
+
+/*
+ * Bucket priorities/gens:
+ *
+ * For each bucket, we store on disk its
+ * 8 bit gen
+ * 16 bit priority
+ *
+ * See alloc.c for an explanation of the gen. The priority is used to implement
+ * lru (and in the future other) cache replacement policies; for most purposes
+ * it's just an opaque integer.
+ *
+ * The gens and the priorities don't have a whole lot to do with each other, and
+ * it's actually the gens that must be written out at specific times - it's no
+ * big deal if the priorities don't get written, if we lose them we just reuse
+ * buckets in suboptimal order.
+ *
+ * On disk they're stored in a packed array, in as many buckets as are required
+ * to fit them all. The buckets we use to store them form a list; the journal
+ * header points to the first bucket, the first bucket points to the second
+ * bucket, et cetera.
+ *
+ * This code is used by the allocation code; periodically (whenever it runs out
+ * of buckets to allocate from) the allocation code will invalidate some
+ * buckets, but it can't use those buckets until their new gens are safely on
+ * disk.
+ */
+
+static void prio_endio(struct bio *bio, int error)
+{
+ struct cache *ca = bio->bi_private;
+
+ cache_set_err_on(error, ca->set, "accessing priorities");
+ bch_bbio_free(bio, ca->set);
+ closure_put(&ca->prio);
+}
+
+static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
+{
+ struct closure *cl = &ca->prio;
+ struct bio *bio = bch_bbio_alloc(ca->set);
+
+ closure_init_stack(cl);
+
+ bio->bi_sector = bucket * ca->sb.bucket_size;
+ bio->bi_bdev = ca->bdev;
+ bio->bi_rw = REQ_SYNC|REQ_META|rw;
+ bio->bi_size = bucket_bytes(ca);
+
+ bio->bi_end_io = prio_endio;
+ bio->bi_private = ca;
+ bch_bio_map(bio, ca->disk_buckets);
+
+ closure_bio_submit(bio, &ca->prio, ca);
+ closure_sync(cl);
+}
+
+#define buckets_free(c) "free %zu, free_inc %zu, unused %zu", \
+ fifo_used(&c->free), fifo_used(&c->free_inc), fifo_used(&c->unused)
+
+void bch_prio_write(struct cache *ca)
+{
+ int i;
+ struct bucket *b;
+ struct closure cl;
+
+ closure_init_stack(&cl);
+
+ lockdep_assert_held(&ca->set->bucket_lock);
+
+ for (b = ca->buckets;
+ b < ca->buckets + ca->sb.nbuckets; b++)
+ b->disk_gen = b->gen;
+
+ ca->disk_buckets->seq++;
+
+ atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
+ &ca->meta_sectors_written);
+
+ pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
+ fifo_used(&ca->free_inc), fifo_used(&ca->unused));
+ blktrace_msg(ca, "Starting priorities: " buckets_free(ca));
+
+ for (i = prio_buckets(ca) - 1; i >= 0; --i) {
+ long bucket;
+ struct prio_set *p = ca->disk_buckets;
+ struct bucket_disk *d = p->data;
+ struct bucket_disk *end = d + prios_per_bucket(ca);
+
+ for (b = ca->buckets + i * prios_per_bucket(ca);
+ b < ca->buckets + ca->sb.nbuckets && d < end;
+ b++, d++) {
+ d->prio = cpu_to_le16(b->prio);
+ d->gen = b->gen;
+ }
+
+ p->next_bucket = ca->prio_buckets[i + 1];
+ p->magic = pset_magic(ca);
+ p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);
+
+ bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, &cl);
+ BUG_ON(bucket == -1);
+
+ mutex_unlock(&ca->set->bucket_lock);
+ prio_io(ca, bucket, REQ_WRITE);
+ mutex_lock(&ca->set->bucket_lock);
+
+ ca->prio_buckets[i] = bucket;
+ atomic_dec_bug(&ca->buckets[bucket].pin);
+ }
+
+ mutex_unlock(&ca->set->bucket_lock);
+
+ bch_journal_meta(ca->set, &cl);
+ closure_sync(&cl);
+
+ mutex_lock(&ca->set->bucket_lock);
+
+ ca->need_save_prio = 0;
+
+ /*
+	 * We don't want the old priorities to be garbage collected until after
+	 * we've finished writing the new ones and they've been journalled
+ */
+ for (i = 0; i < prio_buckets(ca); i++)
+ ca->prio_last_buckets[i] = ca->prio_buckets[i];
+}
+
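+/*
+ * Counterpart to bch_prio_write(): walk the chain of priority buckets (the
+ * journal points at the first one, each bucket points at the next) and unpack
+ * the saved prio/gen for every bucket on this cache device.
+ */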
+static void prio_read(struct cache *ca, uint64_t bucket)
+{
+ struct prio_set *p = ca->disk_buckets;
+ struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
+ struct bucket *b;
+ unsigned bucket_nr = 0;
+
+ for (b = ca->buckets;
+ b < ca->buckets + ca->sb.nbuckets;
+ b++, d++) {
+ if (d == end) {
+ ca->prio_buckets[bucket_nr] = bucket;
+ ca->prio_last_buckets[bucket_nr] = bucket;
+ bucket_nr++;
+
+ prio_io(ca, bucket, READ_SYNC);
+
+ if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
+ pr_warn("bad csum reading priorities");
+
+ if (p->magic != pset_magic(ca))
+ pr_warn("bad magic reading priorities");
+
+ bucket = p->next_bucket;
+ d = p->data;
+ }
+
+ b->prio = le16_to_cpu(d->prio);
+ b->gen = b->disk_gen = b->last_gc = b->gc_gen = d->gen;
+ }
+}
+
+/* Bcache device */
+
+static int open_dev(struct block_device *b, fmode_t mode)
+{
+ struct bcache_device *d = b->bd_disk->private_data;
+ if (atomic_read(&d->closing))
+ return -ENXIO;
+
+ closure_get(&d->cl);
+ return 0;
+}
+
+static int release_dev(struct gendisk *b, fmode_t mode)
+{
+ struct bcache_device *d = b->private_data;
+ closure_put(&d->cl);
+ return 0;
+}
+
+static int ioctl_dev(struct block_device *b, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct bcache_device *d = b->bd_disk->private_data;
+ return d->ioctl(d, mode, cmd, arg);
+}
+
+static const struct block_device_operations bcache_ops = {
+ .open = open_dev,
+ .release = release_dev,
+ .ioctl = ioctl_dev,
+ .owner = THIS_MODULE,
+};
+
+void bcache_device_stop(struct bcache_device *d)
+{
+ if (!atomic_xchg(&d->closing, 1))
+ closure_queue(&d->cl);
+}
+
+static void bcache_device_unlink(struct bcache_device *d)
+{
+ unsigned i;
+ struct cache *ca;
+
+ sysfs_remove_link(&d->c->kobj, d->name);
+ sysfs_remove_link(&d->kobj, "cache");
+
+ for_each_cache(ca, d->c, i)
+ bd_unlink_disk_holder(ca->bdev, d->disk);
+}
+
+static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
+ const char *name)
+{
+ unsigned i;
+ struct cache *ca;
+
+ for_each_cache(ca, d->c, i)
+ bd_link_disk_holder(ca->bdev, d->disk);
+
+ snprintf(d->name, BCACHEDEVNAME_SIZE,
+ "%s%u", name, d->id);
+
+ WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
+ sysfs_create_link(&c->kobj, &d->kobj, d->name),
+ "Couldn't create device <-> cache set symlinks");
+}
+
+static void bcache_device_detach(struct bcache_device *d)
+{
+ lockdep_assert_held(&bch_register_lock);
+
+ if (atomic_read(&d->detaching)) {
+ struct uuid_entry *u = d->c->uuids + d->id;
+
+ SET_UUID_FLASH_ONLY(u, 0);
+ memcpy(u->uuid, invalid_uuid, 16);
+ u->invalidated = cpu_to_le32(get_seconds());
+ bch_uuid_write(d->c);
+
+ atomic_set(&d->detaching, 0);
+ }
+
+ bcache_device_unlink(d);
+
+ d->c->devices[d->id] = NULL;
+ closure_put(&d->c->caching);
+ d->c = NULL;
+}
+
+static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
+ unsigned id)
+{
+ BUG_ON(test_bit(CACHE_SET_STOPPING, &c->flags));
+
+ d->id = id;
+ d->c = c;
+ c->devices[id] = d;
+
+ closure_get(&c->caching);
+}
+
+static void bcache_device_free(struct bcache_device *d)
+{
+ lockdep_assert_held(&bch_register_lock);
+
+ pr_info("%s stopped", d->disk->disk_name);
+
+ if (d->c)
+ bcache_device_detach(d);
+
+ if (d->disk)
+ del_gendisk(d->disk);
+ if (d->disk && d->disk->queue)
+ blk_cleanup_queue(d->disk->queue);
+ if (d->disk)
+ put_disk(d->disk);
+
+ bio_split_pool_free(&d->bio_split_hook);
+ if (d->unaligned_bvec)
+ mempool_destroy(d->unaligned_bvec);
+ if (d->bio_split)
+ bioset_free(d->bio_split);
+
+ closure_debug_destroy(&d->cl);
+}
+
+static int bcache_device_init(struct bcache_device *d, unsigned block_size)
+{
+ struct request_queue *q;
+
+ if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
+ !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
+ sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
+ bio_split_pool_init(&d->bio_split_hook))
+
+ return -ENOMEM;
+
+ d->disk = alloc_disk(1);
+ if (!d->disk)
+ return -ENOMEM;
+
+ snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);
+
+ d->disk->major = bcache_major;
+ d->disk->first_minor = bcache_minor++;
+ d->disk->fops = &bcache_ops;
+ d->disk->private_data = d;
+
+ q = blk_alloc_queue(GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+
+ blk_queue_make_request(q, NULL);
+ d->disk->queue = q;
+ q->queuedata = d;
+ q->backing_dev_info.congested_data = d;
+ q->limits.max_hw_sectors = UINT_MAX;
+ q->limits.max_sectors = UINT_MAX;
+ q->limits.max_segment_size = UINT_MAX;
+ q->limits.max_segments = BIO_MAX_PAGES;
+ q->limits.max_discard_sectors = UINT_MAX;
+ q->limits.io_min = block_size;
+ q->limits.logical_block_size = block_size;
+ q->limits.physical_block_size = block_size;
+ set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags);
+ set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags);
+
+ return 0;
+}
+
+/* Cached device */
+
+static void calc_cached_dev_sectors(struct cache_set *c)
+{
+ uint64_t sectors = 0;
+ struct cached_dev *dc;
+
+ list_for_each_entry(dc, &c->cached_devs, list)
+ sectors += bdev_sectors(dc->bdev);
+
+ c->cached_dev_sectors = sectors;
+}
+
+void bch_cached_dev_run(struct cached_dev *dc)
+{
+ struct bcache_device *d = &dc->disk;
+
+ if (atomic_xchg(&dc->running, 1))
+ return;
+
+ if (!d->c &&
+ BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
+ struct closure cl;
+ closure_init_stack(&cl);
+
+ SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
+ bch_write_bdev_super(dc, &cl);
+ closure_sync(&cl);
+ }
+
+ add_disk(d->disk);
+ bd_link_disk_holder(dc->bdev, dc->disk.disk);
+#if 0
+ char *env[] = { "SYMLINK=label" , NULL };
+ kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
+#endif
+ if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
+ sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
+ pr_debug("error creating sysfs link");
+}
+
+static void cached_dev_detach_finish(struct work_struct *w)
+{
+ struct cached_dev *dc = container_of(w, struct cached_dev, detach);
+ char buf[BDEVNAME_SIZE];
+ struct closure cl;
+ closure_init_stack(&cl);
+
+ BUG_ON(!atomic_read(&dc->disk.detaching));
+ BUG_ON(atomic_read(&dc->count));
+
+ mutex_lock(&bch_register_lock);
+
+ memset(&dc->sb.set_uuid, 0, 16);
+ SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
+
+ bch_write_bdev_super(dc, &cl);
+ closure_sync(&cl);
+
+ bcache_device_detach(&dc->disk);
+ list_move(&dc->list, &uncached_devices);
+
+ mutex_unlock(&bch_register_lock);
+
+ pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));
+
+ /* Drop ref we took in cached_dev_detach() */
+ closure_put(&dc->disk.cl);
+}
+
+void bch_cached_dev_detach(struct cached_dev *dc)
+{
+ lockdep_assert_held(&bch_register_lock);
+
+ if (atomic_read(&dc->disk.closing))
+ return;
+
+ if (atomic_xchg(&dc->disk.detaching, 1))
+ return;
+
+ /*
+ * Block the device from being closed and freed until we're finished
+ * detaching
+ */
+ closure_get(&dc->disk.cl);
+
+ bch_writeback_queue(dc);
+ cached_dev_put(dc);
+}
+
+int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
+{
+ uint32_t rtime = cpu_to_le32(get_seconds());
+ struct uuid_entry *u;
+ char buf[BDEVNAME_SIZE];
+
+ bdevname(dc->bdev, buf);
+
+ if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))
+ return -ENOENT;
+
+ if (dc->disk.c) {
+ pr_err("Can't attach %s: already attached", buf);
+ return -EINVAL;
+ }
+
+ if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
+ pr_err("Can't attach %s: shutting down", buf);
+ return -EINVAL;
+ }
+
+ if (dc->sb.block_size < c->sb.block_size) {
+ /* Will die */
+ pr_err("Couldn't attach %s: block size less than set's block size",
+ buf);
+ return -EINVAL;
+ }
+
+ u = uuid_find(c, dc->sb.uuid);
+
+ if (u &&
+ (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
+ BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
+ memcpy(u->uuid, invalid_uuid, 16);
+ u->invalidated = cpu_to_le32(get_seconds());
+ u = NULL;
+ }
+
+ if (!u) {
+ if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
+ pr_err("Couldn't find uuid for %s in set", buf);
+ return -ENOENT;
+ }
+
+ u = uuid_find_empty(c);
+ if (!u) {
+ pr_err("Not caching %s, no room for UUID", buf);
+ return -EINVAL;
+ }
+ }
+
+ /* Deadlocks since we're called via sysfs...
+ sysfs_remove_file(&dc->kobj, &sysfs_attach);
+ */
+
+ if (bch_is_zero(u->uuid, 16)) {
+ struct closure cl;
+ closure_init_stack(&cl);
+
+ memcpy(u->uuid, dc->sb.uuid, 16);
+ memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
+ u->first_reg = u->last_reg = rtime;
+ bch_uuid_write(c);
+
+ memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
+ SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
+
+ bch_write_bdev_super(dc, &cl);
+ closure_sync(&cl);
+ } else {
+ u->last_reg = rtime;
+ bch_uuid_write(c);
+ }
+
+ bcache_device_attach(&dc->disk, c, u - c->uuids);
+ list_move(&dc->list, &c->cached_devs);
+ calc_cached_dev_sectors(c);
+
+ smp_wmb();
+ /*
+ * dc->c must be set before dc->count != 0 - paired with the mb in
+ * cached_dev_get()
+ */
+ atomic_set(&dc->count, 1);
+
+ if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
+ atomic_set(&dc->has_dirty, 1);
+ atomic_inc(&dc->count);
+ bch_writeback_queue(dc);
+ }
+
+ bch_cached_dev_run(dc);
+ bcache_device_link(&dc->disk, c, "bdev");
+
+ pr_info("Caching %s as %s on set %pU",
+ bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
+ dc->disk.c->sb.set_uuid);
+ return 0;
+}
+
+void bch_cached_dev_release(struct kobject *kobj)
+{
+ struct cached_dev *dc = container_of(kobj, struct cached_dev,
+ disk.kobj);
+ kfree(dc);
+ module_put(THIS_MODULE);
+}
+
+static void cached_dev_free(struct closure *cl)
+{
+ struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
+
+ cancel_delayed_work_sync(&dc->writeback_rate_update);
+
+ mutex_lock(&bch_register_lock);
+
+ bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
+ bcache_device_free(&dc->disk);
+ list_del(&dc->list);
+
+ mutex_unlock(&bch_register_lock);
+
+ if (!IS_ERR_OR_NULL(dc->bdev)) {
+ blk_sync_queue(bdev_get_queue(dc->bdev));
+ blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+ }
+
+ wake_up(&unregister_wait);
+
+ kobject_put(&dc->disk.kobj);
+}
+
+static void cached_dev_flush(struct closure *cl)
+{
+ struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
+ struct bcache_device *d = &dc->disk;
+
+ bch_cache_accounting_destroy(&dc->accounting);
+ kobject_del(&d->kobj);
+
+ continue_at(cl, cached_dev_free, system_wq);
+}
+
+static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
+{
+ int err;
+ struct io *io;
+
+ closure_init(&dc->disk.cl, NULL);
+ set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
+
+ __module_get(THIS_MODULE);
+ INIT_LIST_HEAD(&dc->list);
+ kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
+
+ bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
+
+ err = bcache_device_init(&dc->disk, block_size);
+ if (err)
+ goto err;
+
+ spin_lock_init(&dc->io_lock);
+ closure_init_unlocked(&dc->sb_write);
+ INIT_WORK(&dc->detach, cached_dev_detach_finish);
+
+ dc->sequential_merge = true;
+ dc->sequential_cutoff = 4 << 20;
+
+ INIT_LIST_HEAD(&dc->io_lru);
+ dc->sb_bio.bi_max_vecs = 1;
+ dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
+
+ for (io = dc->io; io < dc->io + RECENT_IO; io++) {
+ list_add(&io->lru, &dc->io_lru);
+ hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
+ }
+
+ bch_writeback_init_cached_dev(dc);
+ return 0;
+err:
+ bcache_device_stop(&dc->disk);
+ return err;
+}
+
+/* Cached device - bcache superblock */
+
+static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
+ struct block_device *bdev,
+ struct cached_dev *dc)
+{
+ char name[BDEVNAME_SIZE];
+ const char *err = "cannot allocate memory";
+ struct gendisk *g;
+ struct cache_set *c;
+
+ if (!dc || cached_dev_init(dc, sb->block_size << 9) != 0)
+ return err;
+
+ memcpy(&dc->sb, sb, sizeof(struct cache_sb));
+ dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
+ dc->bdev = bdev;
+ dc->bdev->bd_holder = dc;
+
+ g = dc->disk.disk;
+
+ set_capacity(g, dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
+
+ g->queue->backing_dev_info.ra_pages =
+ max(g->queue->backing_dev_info.ra_pages,
+ bdev->bd_queue->backing_dev_info.ra_pages);
+
+ bch_cached_dev_request_init(dc);
+
+ err = "error creating kobject";
+ if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
+ "bcache"))
+ goto err;
+ if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
+ goto err;
+
+ list_add(&dc->list, &uncached_devices);
+ list_for_each_entry(c, &bch_cache_sets, list)
+ bch_cached_dev_attach(dc, c);
+
+ if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
+ BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
+ bch_cached_dev_run(dc);
+
+ return NULL;
+err:
+ kobject_put(&dc->disk.kobj);
+ pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+ /*
+ * Return NULL instead of an error because kobject_put() cleans
+ * everything up
+ */
+ return NULL;
+}
+
+/* Flash only volumes */
+
+void bch_flash_dev_release(struct kobject *kobj)
+{
+ struct bcache_device *d = container_of(kobj, struct bcache_device,
+ kobj);
+ kfree(d);
+}
+
+static void flash_dev_free(struct closure *cl)
+{
+ struct bcache_device *d = container_of(cl, struct bcache_device, cl);
+ bcache_device_free(d);
+ kobject_put(&d->kobj);
+}
+
+static void flash_dev_flush(struct closure *cl)
+{
+ struct bcache_device *d = container_of(cl, struct bcache_device, cl);
+
+ bcache_device_unlink(d);
+ kobject_del(&d->kobj);
+ continue_at(cl, flash_dev_free, system_wq);
+}
+
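+/*
+ * Create the bcache_device for the flash only volume described by uuid
+ * entry u and add its gendisk.
+ */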
+static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
+{
+ struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
+ GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ closure_init(&d->cl, NULL);
+ set_closure_fn(&d->cl, flash_dev_flush, system_wq);
+
+ kobject_init(&d->kobj, &bch_flash_dev_ktype);
+
+ if (bcache_device_init(d, block_bytes(c)))
+ goto err;
+
+ bcache_device_attach(d, c, u - c->uuids);
+ set_capacity(d->disk, u->sectors);
+ bch_flash_dev_request_init(d);
+ add_disk(d->disk);
+
+ if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
+ goto err;
+
+ bcache_device_link(d, c, "volume");
+
+ return 0;
+err:
+ kobject_put(&d->kobj);
+ return -ENOMEM;
+}
+
+static int flash_devs_run(struct cache_set *c)
+{
+ int ret = 0;
+ struct uuid_entry *u;
+
+ for (u = c->uuids;
+ u < c->uuids + c->nr_uuids && !ret;
+ u++)
+ if (UUID_FLASH_ONLY(u))
+ ret = flash_dev_run(c, u);
+
+ return ret;
+}
+
+int bch_flash_dev_create(struct cache_set *c, uint64_t size)
+{
+ struct uuid_entry *u;
+
+ if (test_bit(CACHE_SET_STOPPING, &c->flags))
+ return -EINTR;
+
+ u = uuid_find_empty(c);
+ if (!u) {
+ pr_err("Can't create volume, no room for UUID");
+ return -EINVAL;
+ }
+
+ get_random_bytes(u->uuid, 16);
+ memset(u->label, 0, 32);
+ u->first_reg = u->last_reg = cpu_to_le32(get_seconds());
+
+ SET_UUID_FLASH_ONLY(u, 1);
+ u->sectors = size >> 9;
+
+ bch_uuid_write(c);
+
+ return flash_dev_run(c, u);
+}
+
+/* Cache set */
+
+__printf(2, 3)
+bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
+{
+ va_list args;
+
+ if (test_bit(CACHE_SET_STOPPING, &c->flags))
+ return false;
+
+ /* XXX: we can be called from atomic context
+ acquire_console_sem();
+ */
+
+ printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid);
+
+ va_start(args, fmt);
+ vprintk(fmt, args);
+ va_end(args);
+
+ printk(", disabling caching\n");
+
+ bch_cache_set_unregister(c);
+ return true;
+}
+
+void bch_cache_set_release(struct kobject *kobj)
+{
+ struct cache_set *c = container_of(kobj, struct cache_set, kobj);
+ kfree(c);
+ module_put(THIS_MODULE);
+}
+
+static void cache_set_free(struct closure *cl)
+{
+ struct cache_set *c = container_of(cl, struct cache_set, cl);
+ struct cache *ca;
+ unsigned i;
+
+ if (!IS_ERR_OR_NULL(c->debug))
+ debugfs_remove(c->debug);
+
+ bch_open_buckets_free(c);
+ bch_btree_cache_free(c);
+ bch_journal_free(c);
+
+ for_each_cache(ca, c, i)
+ if (ca)
+ kobject_put(&ca->kobj);
+
+ free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
+ free_pages((unsigned long) c->sort, ilog2(bucket_pages(c)));
+
+ kfree(c->fill_iter);
+ if (c->bio_split)
+ bioset_free(c->bio_split);
+ if (c->bio_meta)
+ mempool_destroy(c->bio_meta);
+ if (c->search)
+ mempool_destroy(c->search);
+ kfree(c->devices);
+
+ mutex_lock(&bch_register_lock);
+ list_del(&c->list);
+ mutex_unlock(&bch_register_lock);
+
+ pr_info("Cache set %pU unregistered", c->sb.set_uuid);
+ wake_up(&unregister_wait);
+
+ closure_debug_destroy(&c->cl);
+ kobject_put(&c->kobj);
+}
+
+static void cache_set_flush(struct closure *cl)
+{
+ struct cache_set *c = container_of(cl, struct cache_set, caching);
+ struct btree *b;
+
+ /* Shut down allocator threads */
+ set_bit(CACHE_SET_STOPPING_2, &c->flags);
+ wake_up(&c->alloc_wait);
+
+ bch_cache_accounting_destroy(&c->accounting);
+
+ kobject_put(&c->internal);
+ kobject_del(&c->kobj);
+
+ if (!IS_ERR_OR_NULL(c->root))
+ list_add(&c->root->list, &c->btree_cache);
+
+ /* Should skip this if we're unregistering because of an error */
+ list_for_each_entry(b, &c->btree_cache, list)
+ if (btree_node_dirty(b))
+ bch_btree_write(b, true, NULL);
+
+ closure_return(cl);
+}
+
+static void __cache_set_unregister(struct closure *cl)
+{
+ struct cache_set *c = container_of(cl, struct cache_set, caching);
+ struct cached_dev *dc, *t;
+ size_t i;
+
+ mutex_lock(&bch_register_lock);
+
+ if (test_bit(CACHE_SET_UNREGISTERING, &c->flags))
+ list_for_each_entry_safe(dc, t, &c->cached_devs, list)
+ bch_cached_dev_detach(dc);
+
+ for (i = 0; i < c->nr_uuids; i++)
+ if (c->devices[i] && UUID_FLASH_ONLY(&c->uuids[i]))
+ bcache_device_stop(c->devices[i]);
+
+ mutex_unlock(&bch_register_lock);
+
+ continue_at(cl, cache_set_flush, system_wq);
+}
+
+void bch_cache_set_stop(struct cache_set *c)
+{
+ if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
+ closure_queue(&c->caching);
+}
+
+void bch_cache_set_unregister(struct cache_set *c)
+{
+ set_bit(CACHE_SET_UNREGISTERING, &c->flags);
+ bch_cache_set_stop(c);
+}
+
+#define alloc_bucket_pages(gfp, c) \
+ ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))
+
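+/*
+ * Allocate a cache_set and initialize everything that doesn't depend on
+ * the member devices; called from register_cache_set() when a cache with
+ * an unknown set uuid shows up.
+ */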
+struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+{
+ int iter_size;
+ struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
+ if (!c)
+ return NULL;
+
+ __module_get(THIS_MODULE);
+ closure_init(&c->cl, NULL);
+ set_closure_fn(&c->cl, cache_set_free, system_wq);
+
+ closure_init(&c->caching, &c->cl);
+ set_closure_fn(&c->caching, __cache_set_unregister, system_wq);
+
+ /* Maybe create continue_at_noreturn() and use it here? */
+ closure_set_stopped(&c->cl);
+ closure_put(&c->cl);
+
+ kobject_init(&c->kobj, &bch_cache_set_ktype);
+ kobject_init(&c->internal, &bch_cache_set_internal_ktype);
+
+ bch_cache_accounting_init(&c->accounting, &c->cl);
+
+ memcpy(c->sb.set_uuid, sb->set_uuid, 16);
+ c->sb.block_size = sb->block_size;
+ c->sb.bucket_size = sb->bucket_size;
+ c->sb.nr_in_set = sb->nr_in_set;
+ c->sb.last_mount = sb->last_mount;
+ c->bucket_bits = ilog2(sb->bucket_size);
+ c->block_bits = ilog2(sb->block_size);
+ c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);
+
+ c->btree_pages = c->sb.bucket_size / PAGE_SECTORS;
+ if (c->btree_pages > BTREE_MAX_PAGES)
+ c->btree_pages = max_t(int, c->btree_pages / 4,
+ BTREE_MAX_PAGES);
+
+ init_waitqueue_head(&c->alloc_wait);
+ mutex_init(&c->bucket_lock);
+ mutex_init(&c->fill_lock);
+ mutex_init(&c->sort_lock);
+ spin_lock_init(&c->sort_time_lock);
+ closure_init_unlocked(&c->sb_write);
+ closure_init_unlocked(&c->uuid_write);
+ spin_lock_init(&c->btree_read_time_lock);
+ bch_moving_init_cache_set(c);
+
+ INIT_LIST_HEAD(&c->list);
+ INIT_LIST_HEAD(&c->cached_devs);
+ INIT_LIST_HEAD(&c->btree_cache);
+ INIT_LIST_HEAD(&c->btree_cache_freeable);
+ INIT_LIST_HEAD(&c->btree_cache_freed);
+ INIT_LIST_HEAD(&c->data_buckets);
+
+ c->search = mempool_create_slab_pool(32, bch_search_cache);
+ if (!c->search)
+ goto err;
+
+ iter_size = (sb->bucket_size / sb->block_size + 1) *
+ sizeof(struct btree_iter_set);
+
+ if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) ||
+ !(c->bio_meta = mempool_create_kmalloc_pool(2,
+ sizeof(struct bbio) + sizeof(struct bio_vec) *
+ bucket_pages(c))) ||
+ !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
+ !(c->fill_iter = kmalloc(iter_size, GFP_KERNEL)) ||
+ !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) ||
+ !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
+ bch_journal_alloc(c) ||
+ bch_btree_cache_alloc(c) ||
+ bch_open_buckets_alloc(c))
+ goto err;
+
+ c->fill_iter->size = sb->bucket_size / sb->block_size;
+
+ c->congested_read_threshold_us = 2000;
+ c->congested_write_threshold_us = 20000;
+ c->error_limit = 8 << IO_ERROR_SHIFT;
+
+ return c;
+err:
+ bch_cache_set_unregister(c);
+ return NULL;
+}
+
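+/*
+ * Called from register_cache_set() once every cache in the set has
+ * registered.  If the set was previously in use (CACHE_SYNC) we read the
+ * priorities and btree root and replay the journal; otherwise we write out
+ * a fresh btree root and journal.  Either way, any waiting backing devices
+ * are then attached and flash only volumes started.
+ */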
+static void run_cache_set(struct cache_set *c)
+{
+ const char *err = "cannot allocate memory";
+ struct cached_dev *dc, *t;
+ struct cache *ca;
+ unsigned i;
+
+ struct btree_op op;
+ bch_btree_op_init_stack(&op);
+ op.lock = SHRT_MAX;
+
+ for_each_cache(ca, c, i)
+ c->nbuckets += ca->sb.nbuckets;
+
+ if (CACHE_SYNC(&c->sb)) {
+ LIST_HEAD(journal);
+ struct bkey *k;
+ struct jset *j;
+
+ err = "cannot allocate memory for journal";
+ if (bch_journal_read(c, &journal, &op))
+ goto err;
+
+ pr_debug("btree_journal_read() done");
+
+ err = "no journal entries found";
+ if (list_empty(&journal))
+ goto err;
+
+ j = &list_entry(journal.prev, struct journal_replay, list)->j;
+
+ err = "IO error reading priorities";
+ for_each_cache(ca, c, i)
+ prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);
+
+ /*
+ * If prio_read() fails it'll call bch_cache_set_error() and we'll
+ * tear everything down right away; if we checked for the error
+ * sooner we could avoid the journal replay.
+ */
+
+ k = &j->btree_root;
+
+ err = "bad btree root";
+ if (__bch_ptr_invalid(c, j->btree_level + 1, k))
+ goto err;
+
+ err = "error reading btree root";
+ c->root = bch_btree_node_get(c, k, j->btree_level, &op);
+ if (IS_ERR_OR_NULL(c->root))
+ goto err;
+
+ list_del_init(&c->root->list);
+ rw_unlock(true, c->root);
+
+ err = uuid_read(c, j, &op.cl);
+ if (err)
+ goto err;
+
+ err = "error in recovery";
+ if (bch_btree_check(c, &op))
+ goto err;
+
+ bch_journal_mark(c, &journal);
+ bch_btree_gc_finish(c);
+ pr_debug("btree_check() done");
+
+ /*
+ * bcache_journal_next() can't happen sooner, or
+ * btree_gc_finish() will give spurious errors about last_gc >
+ * gc_gen - this is a hack but oh well.
+ */
+ bch_journal_next(&c->journal);
+
+ for_each_cache(ca, c, i)
+ closure_call(&ca->alloc, bch_allocator_thread,
+ system_wq, &c->cl);
+
+ /*
+ * First place it's safe to allocate: btree_check() and
+ * btree_gc_finish() have to run before we have buckets to
+ * allocate, and bch_bucket_alloc_set() might cause a journal
+ * entry to be written so bcache_journal_next() has to be called
+ * first.
+ *
+ * If the uuids were in the old format we have to rewrite them
+ * before the next journal entry is written:
+ */
+ if (j->version < BCACHE_JSET_VERSION_UUID)
+ __uuid_write(c);
+
+ bch_journal_replay(c, &journal, &op);
+ } else {
+ pr_notice("invalidating existing data");
+ /* Don't want invalidate_buckets() to queue a gc yet */
+ closure_lock(&c->gc, NULL);
+
+ for_each_cache(ca, c, i) {
+ unsigned j;
+
+ ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
+ 2, SB_JOURNAL_BUCKETS);
+
+ for (j = 0; j < ca->sb.keys; j++)
+ ca->sb.d[j] = ca->sb.first_bucket + j;
+ }
+
+ bch_btree_gc_finish(c);
+
+ for_each_cache(ca, c, i)
+ closure_call(&ca->alloc, bch_allocator_thread,
+ ca->alloc_workqueue, &c->cl);
+
+ mutex_lock(&c->bucket_lock);
+ for_each_cache(ca, c, i)
+ bch_prio_write(ca);
+ mutex_unlock(&c->bucket_lock);
+
+ wake_up(&c->alloc_wait);
+
+ err = "cannot allocate new UUID bucket";
+ if (__uuid_write(c))
+ goto err_unlock_gc;
+
+ err = "cannot allocate new btree root";
+ c->root = bch_btree_node_alloc(c, 0, &op.cl);
+ if (IS_ERR_OR_NULL(c->root))
+ goto err_unlock_gc;
+
+ bkey_copy_key(&c->root->key, &MAX_KEY);
+ bch_btree_write(c->root, true, &op);
+
+ bch_btree_set_root(c->root);
+ rw_unlock(true, c->root);
+
+ /*
+ * We don't want to write the first journal entry until
+ * everything is set up - fortunately journal entries won't be
+ * written until the SET_CACHE_SYNC() here:
+ */
+ SET_CACHE_SYNC(&c->sb, true);
+
+ bch_journal_next(&c->journal);
+ bch_journal_meta(c, &op.cl);
+
+ /* Unlock */
+ closure_set_stopped(&c->gc.cl);
+ closure_put(&c->gc.cl);
+ }
+
+ closure_sync(&op.cl);
+ c->sb.last_mount = get_seconds();
+ bcache_write_super(c);
+
+ list_for_each_entry_safe(dc, t, &uncached_devices, list)
+ bch_cached_dev_attach(dc, c);
+
+ flash_devs_run(c);
+
+ return;
+err_unlock_gc:
+ closure_set_stopped(&c->gc.cl);
+ closure_put(&c->gc.cl);
+err:
+ closure_sync(&op.cl);
+ /* XXX: test this, it's broken */
+ bch_cache_set_error(c, err);
+}
+
+static bool can_attach_cache(struct cache *ca, struct cache_set *c)
+{
+ return ca->sb.block_size == c->sb.block_size &&
+ ca->sb.bucket_size == c->sb.bucket_size &&
+ ca->sb.nr_in_set == c->sb.nr_in_set;
+}
+
+static const char *register_cache_set(struct cache *ca)
+{
+ char buf[12];
+ const char *err = "cannot allocate memory";
+ struct cache_set *c;
+
+ list_for_each_entry(c, &bch_cache_sets, list)
+ if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
+ if (c->cache[ca->sb.nr_this_dev])
+ return "duplicate cache set member";
+
+ if (!can_attach_cache(ca, c))
+ return "cache sb does not match set";
+
+ if (!CACHE_SYNC(&ca->sb))
+ SET_CACHE_SYNC(&c->sb, false);
+
+ goto found;
+ }
+
+ c = bch_cache_set_alloc(&ca->sb);
+ if (!c)
+ return err;
+
+ err = "error creating kobject";
+ if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
+ kobject_add(&c->internal, &c->kobj, "internal"))
+ goto err;
+
+ if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
+ goto err;
+
+ bch_debug_init_cache_set(c);
+
+ list_add(&c->list, &bch_cache_sets);
+found:
+ sprintf(buf, "cache%i", ca->sb.nr_this_dev);
+ if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
+ sysfs_create_link(&c->kobj, &ca->kobj, buf))
+ goto err;
+
+ if (ca->sb.seq > c->sb.seq) {
+ c->sb.version = ca->sb.version;
+ memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
+ c->sb.flags = ca->sb.flags;
+ c->sb.seq = ca->sb.seq;
+ pr_debug("set version = %llu", c->sb.version);
+ }
+
+ ca->set = c;
+ ca->set->cache[ca->sb.nr_this_dev] = ca;
+ c->cache_by_alloc[c->caches_loaded++] = ca;
+
+ if (c->caches_loaded == c->sb.nr_in_set)
+ run_cache_set(c);
+
+ return NULL;
+err:
+ bch_cache_set_unregister(c);
+ return err;
+}
+
+/* Cache device */
+
+void bch_cache_release(struct kobject *kobj)
+{
+ struct cache *ca = container_of(kobj, struct cache, kobj);
+
+ if (ca->set)
+ ca->set->cache[ca->sb.nr_this_dev] = NULL;
+
+ bch_cache_allocator_exit(ca);
+
+ bio_split_pool_free(&ca->bio_split_hook);
+
+ if (ca->alloc_workqueue)
+ destroy_workqueue(ca->alloc_workqueue);
+
+ free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
+ kfree(ca->prio_buckets);
+ vfree(ca->buckets);
+
+ free_heap(&ca->heap);
+ free_fifo(&ca->unused);
+ free_fifo(&ca->free_inc);
+ free_fifo(&ca->free);
+
+ if (ca->sb_bio.bi_inline_vecs[0].bv_page)
+ put_page(ca->sb_bio.bi_io_vec[0].bv_page);
+
+ if (!IS_ERR_OR_NULL(ca->bdev)) {
+ blk_sync_queue(bdev_get_queue(ca->bdev));
+ blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+ }
+
+ kfree(ca);
+ module_put(THIS_MODULE);
+}
+
+static int cache_alloc(struct cache_sb *sb, struct cache *ca)
+{
+ size_t free;
+ struct bucket *b;
+
+ if (!ca)
+ return -ENOMEM;
+
+ __module_get(THIS_MODULE);
+ kobject_init(&ca->kobj, &bch_cache_ktype);
+
+ memcpy(&ca->sb, sb, sizeof(struct cache_sb));
+
+ INIT_LIST_HEAD(&ca->discards);
+
+ bio_init(&ca->sb_bio);
+ ca->sb_bio.bi_max_vecs = 1;
+ ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
+
+ bio_init(&ca->journal.bio);
+ ca->journal.bio.bi_max_vecs = 8;
+ ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
+
+ free = roundup_pow_of_two(ca->sb.nbuckets) >> 9;
+ free = max_t(size_t, free, (prio_buckets(ca) + 8) * 2);
+
+ if (!init_fifo(&ca->free, free, GFP_KERNEL) ||
+ !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
+ !init_fifo(&ca->unused, free << 2, GFP_KERNEL) ||
+ !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
+ !(ca->buckets = vmalloc(sizeof(struct bucket) *
+ ca->sb.nbuckets)) ||
+ !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
+ 2, GFP_KERNEL)) ||
+ !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) ||
+ !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) ||
+ bio_split_pool_init(&ca->bio_split_hook))
+ goto err;
+
+ ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
+
+ memset(ca->buckets, 0, ca->sb.nbuckets * sizeof(struct bucket));
+ for_each_bucket(b, ca)
+ atomic_set(&b->pin, 0);
+
+ if (bch_cache_allocator_init(ca))
+ goto err;
+
+ return 0;
+err:
+ kobject_put(&ca->kobj);
+ return -ENOMEM;
+}
+
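+/*
+ * Finish setting up a cache device from a just-read superblock: take
+ * ownership of the block device, add the kobject under the partition's
+ * sysfs directory and join (or create) the device's cache set.  As with
+ * register_bdev(), errors are cleaned up via the kobject release method.
+ */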
+static const char *register_cache(struct cache_sb *sb, struct page *sb_page,
+ struct block_device *bdev, struct cache *ca)
+{
+ char name[BDEVNAME_SIZE];
+ const char *err = "cannot allocate memory";
+
+ if (cache_alloc(sb, ca) != 0)
+ return err;
+
+ ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
+ ca->bdev = bdev;
+ ca->bdev->bd_holder = ca;
+
+ if (blk_queue_discard(bdev_get_queue(ca->bdev)))
+ ca->discard = CACHE_DISCARD(&ca->sb);
+
+ err = "error creating kobject";
+ if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
+ goto err;
+
+ err = register_cache_set(ca);
+ if (err)
+ goto err;
+
+ pr_info("registered cache device %s", bdevname(bdev, name));
+
+ return NULL;
+err:
+ kobject_put(&ca->kobj);
+ pr_info("error opening %s: %s", bdevname(bdev, name), err);
+ /*
+ * Return NULL instead of an error because kobject_put() cleans
+ * everything up
+ */
+ return NULL;
+}
+
+/* Global interfaces/init */
+
+static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
+ const char *, size_t);
+
+kobj_attribute_write(register, register_bcache);
+kobj_attribute_write(register_quiet, register_bcache);
+
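+/*
+ * Handles writes to the write only register/register_quiet files that
+ * bcache_init() creates under /sys/fs/bcache/.  The buffer is the path of
+ * a block device to register, e.g.
+ *
+ *	echo /dev/sdc > /sys/fs/bcache/register
+ *
+ * and the superblock we read off that device decides whether it's
+ * registered as a cache device or a backing device.
+ */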
+static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
+ const char *buffer, size_t size)
+{
+ ssize_t ret = size;
+ const char *err = "cannot allocate memory";
+ char *path = NULL;
+ struct cache_sb *sb = NULL;
+ struct block_device *bdev = NULL;
+ struct page *sb_page = NULL;
+
+ if (!try_module_get(THIS_MODULE))
+ return -EBUSY;
+
+ mutex_lock(&bch_register_lock);
+
+ if (!(path = kstrndup(buffer, size, GFP_KERNEL)) ||
+ !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL)))
+ goto err;
+
+ err = "failed to open device";
+ bdev = blkdev_get_by_path(strim(path),
+ FMODE_READ|FMODE_WRITE|FMODE_EXCL,
+ sb);
+ if (bdev == ERR_PTR(-EBUSY))
+ err = "device busy";
+
+ if (IS_ERR(bdev) ||
+ set_blocksize(bdev, 4096))
+ goto err;
+
+ err = read_super(sb, bdev, &sb_page);
+ if (err)
+ goto err_close;
+
+ if (SB_IS_BDEV(sb)) {
+ struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
+
+ err = register_bdev(sb, sb_page, bdev, dc);
+ } else {
+ struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+
+ err = register_cache(sb, sb_page, bdev, ca);
+ }
+
+ if (err) {
+ /* register_(bdev|cache) will only return an error if they
+ * didn't get far enough to create the kobject - if they did,
+ * the kobject destructor will do this cleanup.
+ */
+ put_page(sb_page);
+err_close:
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+err:
+ if (attr != &ksysfs_register_quiet)
+ pr_info("error opening %s: %s", path, err);
+ ret = -EINVAL;
+ }
+
+ kfree(sb);
+ kfree(path);
+ mutex_unlock(&bch_register_lock);
+ module_put(THIS_MODULE);
+ return ret;
+}
+
+static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
+{
+ if (code == SYS_DOWN ||
+ code == SYS_HALT ||
+ code == SYS_POWER_OFF) {
+ DEFINE_WAIT(wait);
+ unsigned long start = jiffies;
+ bool stopped = false;
+
+ struct cache_set *c, *tc;
+ struct cached_dev *dc, *tdc;
+
+ mutex_lock(&bch_register_lock);
+
+ if (list_empty(&bch_cache_sets) &&
+ list_empty(&uncached_devices))
+ goto out;
+
+ pr_info("Stopping all devices:");
+
+ list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
+ bch_cache_set_stop(c);
+
+ list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
+ bcache_device_stop(&dc->disk);
+
+ /* What's a condition variable? */
+ while (1) {
+ long timeout = start + 2 * HZ - jiffies;
+
+ stopped = list_empty(&bch_cache_sets) &&
+ list_empty(&uncached_devices);
+
+ if (timeout < 0 || stopped)
+ break;
+
+ prepare_to_wait(&unregister_wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+
+ mutex_unlock(&bch_register_lock);
+ schedule_timeout(timeout);
+ mutex_lock(&bch_register_lock);
+ }
+
+ finish_wait(&unregister_wait, &wait);
+
+ if (stopped)
+ pr_info("All devices stopped");
+ else
+ pr_notice("Timeout waiting for devices to be closed");
+out:
+ mutex_unlock(&bch_register_lock);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block reboot = {
+ .notifier_call = bcache_reboot,
+ .priority = INT_MAX, /* before any real devices */
+};
+
+static void bcache_exit(void)
+{
+ bch_debug_exit();
+ bch_writeback_exit();
+ bch_request_exit();
+ bch_btree_exit();
+ if (bcache_kobj)
+ kobject_put(bcache_kobj);
+ if (bcache_wq)
+ destroy_workqueue(bcache_wq);
+ unregister_blkdev(bcache_major, "bcache");
+ unregister_reboot_notifier(&reboot);
+}
+
+static int __init bcache_init(void)
+{
+ static const struct attribute *files[] = {
+ &ksysfs_register.attr,
+ &ksysfs_register_quiet.attr,
+ NULL
+ };
+
+ mutex_init(&bch_register_lock);
+ init_waitqueue_head(&unregister_wait);
+ register_reboot_notifier(&reboot);
+ closure_debug_init();
+
+ bcache_major = register_blkdev(0, "bcache");
+ if (bcache_major < 0)
+ return bcache_major;
+
+ if (!(bcache_wq = create_workqueue("bcache")) ||
+ !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
+ sysfs_create_files(bcache_kobj, files) ||
+ bch_btree_init() ||
+ bch_request_init() ||
+ bch_writeback_init() ||
+ bch_debug_init(bcache_kobj))
+ goto err;
+
+ return 0;
+err:
+ bcache_exit();
+ return -ENOMEM;
+}
+
+module_exit(bcache_exit);
+module_init(bcache_init);
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
new file mode 100644
index 0000000..4d9cca4
--- /dev/null
+++ b/drivers/md/bcache/sysfs.c
@@ -0,0 +1,817 @@
+/*
+ * bcache sysfs interfaces
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "sysfs.h"
+#include "btree.h"
+#include "request.h"
+
+#include <linux/sort.h>
+
+static const char * const cache_replacement_policies[] = {
+ "lru",
+ "fifo",
+ "random",
+ NULL
+};
+
+write_attribute(attach);
+write_attribute(detach);
+write_attribute(unregister);
+write_attribute(stop);
+write_attribute(clear_stats);
+write_attribute(trigger_gc);
+write_attribute(prune_cache);
+write_attribute(flash_vol_create);
+
+read_attribute(bucket_size);
+read_attribute(block_size);
+read_attribute(nbuckets);
+read_attribute(tree_depth);
+read_attribute(root_usage_percent);
+read_attribute(priority_stats);
+read_attribute(btree_cache_size);
+read_attribute(btree_cache_max_chain);
+read_attribute(cache_available_percent);
+read_attribute(written);
+read_attribute(btree_written);
+read_attribute(metadata_written);
+read_attribute(active_journal_entries);
+
+sysfs_time_stats_attribute(btree_gc, sec, ms);
+sysfs_time_stats_attribute(btree_split, sec, us);
+sysfs_time_stats_attribute(btree_sort, ms, us);
+sysfs_time_stats_attribute(btree_read, ms, us);
+sysfs_time_stats_attribute(try_harder, ms, us);
+
+read_attribute(btree_nodes);
+read_attribute(btree_used_percent);
+read_attribute(average_key_size);
+read_attribute(dirty_data);
+read_attribute(bset_tree_stats);
+
+read_attribute(state);
+read_attribute(cache_read_races);
+read_attribute(writeback_keys_done);
+read_attribute(writeback_keys_failed);
+read_attribute(io_errors);
+read_attribute(congested);
+rw_attribute(congested_read_threshold_us);
+rw_attribute(congested_write_threshold_us);
+
+rw_attribute(sequential_cutoff);
+rw_attribute(sequential_merge);
+rw_attribute(data_csum);
+rw_attribute(cache_mode);
+rw_attribute(writeback_metadata);
+rw_attribute(writeback_running);
+rw_attribute(writeback_percent);
+rw_attribute(writeback_delay);
+rw_attribute(writeback_rate);
+
+rw_attribute(writeback_rate_update_seconds);
+rw_attribute(writeback_rate_d_term);
+rw_attribute(writeback_rate_p_term_inverse);
+rw_attribute(writeback_rate_d_smooth);
+read_attribute(writeback_rate_debug);
+
+rw_attribute(synchronous);
+rw_attribute(journal_delay_ms);
+rw_attribute(discard);
+rw_attribute(running);
+rw_attribute(label);
+rw_attribute(readahead);
+rw_attribute(io_error_limit);
+rw_attribute(io_error_halflife);
+rw_attribute(verify);
+rw_attribute(key_merging_disabled);
+rw_attribute(gc_always_rewrite);
+rw_attribute(freelist_percent);
+rw_attribute(cache_replacement_policy);
+rw_attribute(btree_shrinker_disabled);
+rw_attribute(copy_gc_enabled);
+rw_attribute(size);
+
+SHOW(__bch_cached_dev)
+{
+ struct cached_dev *dc = container_of(kobj, struct cached_dev,
+ disk.kobj);
+ const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };
+
+#define var(stat) (dc->stat)
+
+ if (attr == &sysfs_cache_mode)
+ return bch_snprint_string_list(buf, PAGE_SIZE,
+ bch_cache_modes + 1,
+ BDEV_CACHE_MODE(&dc->sb));
+
+ sysfs_printf(data_csum, "%i", dc->disk.data_csum);
+ var_printf(verify, "%i");
+ var_printf(writeback_metadata, "%i");
+ var_printf(writeback_running, "%i");
+ var_print(writeback_delay);
+ var_print(writeback_percent);
+ sysfs_print(writeback_rate, dc->writeback_rate.rate);
+
+ var_print(writeback_rate_update_seconds);
+ var_print(writeback_rate_d_term);
+ var_print(writeback_rate_p_term_inverse);
+ var_print(writeback_rate_d_smooth);
+
+ if (attr == &sysfs_writeback_rate_debug) {
+ char dirty[20];
+ char derivative[20];
+ char target[20];
+ bch_hprint(dirty,
+ atomic_long_read(&dc->disk.sectors_dirty) << 9);
+ bch_hprint(derivative, dc->writeback_rate_derivative << 9);
+ bch_hprint(target, dc->writeback_rate_target << 9);
+
+ return sprintf(buf,
+ "rate:\t\t%u\n"
+ "change:\t\t%i\n"
+ "dirty:\t\t%s\n"
+ "derivative:\t%s\n"
+ "target:\t\t%s\n",
+ dc->writeback_rate.rate,
+ dc->writeback_rate_change,
+ dirty, derivative, target);
+ }
+
+ sysfs_hprint(dirty_data,
+ atomic_long_read(&dc->disk.sectors_dirty) << 9);
+
+ var_printf(sequential_merge, "%i");
+ var_hprint(sequential_cutoff);
+ var_hprint(readahead);
+
+ sysfs_print(running, atomic_read(&dc->running));
+ sysfs_print(state, states[BDEV_STATE(&dc->sb)]);
+
+ if (attr == &sysfs_label) {
+ memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
+ buf[SB_LABEL_SIZE] = '\0';
+ strcat(buf, "\n");
+ return strlen(buf);
+ }
+
+#undef var
+ return 0;
+}
+SHOW_LOCKED(bch_cached_dev)
+
+STORE(__cached_dev)
+{
+ struct cached_dev *dc = container_of(kobj, struct cached_dev,
+ disk.kobj);
+ ssize_t v = size;
+ struct cache_set *c;
+
+#define d_strtoul(var) sysfs_strtoul(var, dc->var)
+#define d_strtoi_h(var) sysfs_hatoi(var, dc->var)
+
+ sysfs_strtoul(data_csum, dc->disk.data_csum);
+ d_strtoul(verify);
+ d_strtoul(writeback_metadata);
+ d_strtoul(writeback_running);
+ d_strtoul(writeback_delay);
+ sysfs_strtoul_clamp(writeback_rate,
+ dc->writeback_rate.rate, 1, 1000000);
+ sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);
+
+ d_strtoul(writeback_rate_update_seconds);
+ d_strtoul(writeback_rate_d_term);
+ d_strtoul(writeback_rate_p_term_inverse);
+ sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
+ dc->writeback_rate_p_term_inverse, 1, INT_MAX);
+ d_strtoul(writeback_rate_d_smooth);
+
+ d_strtoul(sequential_merge);
+ d_strtoi_h(sequential_cutoff);
+ d_strtoi_h(readahead);
+
+ if (attr == &sysfs_clear_stats)
+ bch_cache_accounting_clear(&dc->accounting);
+
+ if (attr == &sysfs_running &&
+ strtoul_or_return(buf))
+ bch_cached_dev_run(dc);
+
+ if (attr == &sysfs_cache_mode) {
+ ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);
+
+ if (v < 0)
+ return v;
+
+ if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) {
+ SET_BDEV_CACHE_MODE(&dc->sb, v);
+ bch_write_bdev_super(dc, NULL);
+ }
+ }
+
+ if (attr == &sysfs_label) {
+ memcpy(dc->sb.label, buf, SB_LABEL_SIZE);
+ bch_write_bdev_super(dc, NULL);
+ if (dc->disk.c) {
+ memcpy(dc->disk.c->uuids[dc->disk.id].label,
+ buf, SB_LABEL_SIZE);
+ bch_uuid_write(dc->disk.c);
+ }
+ }
+
+ if (attr == &sysfs_attach) {
+ if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16)
+ return -EINVAL;
+
+ list_for_each_entry(c, &bch_cache_sets, list) {
+ v = bch_cached_dev_attach(dc, c);
+ if (!v)
+ return size;
+ }
+
+ pr_err("Can't attach %s: cache set not found", buf);
+ size = v;
+ }
+
+ if (attr == &sysfs_detach && dc->disk.c)
+ bch_cached_dev_detach(dc);
+
+ if (attr == &sysfs_stop)
+ bcache_device_stop(&dc->disk);
+
+ return size;
+}
+
+STORE(bch_cached_dev)
+{
+ struct cached_dev *dc = container_of(kobj, struct cached_dev,
+ disk.kobj);
+
+ mutex_lock(&bch_register_lock);
+ size = __cached_dev_store(kobj, attr, buf, size);
+
+ if (attr == &sysfs_writeback_running)
+ bch_writeback_queue(dc);
+
+ if (attr == &sysfs_writeback_percent)
+ schedule_delayed_work(&dc->writeback_rate_update,
+ dc->writeback_rate_update_seconds * HZ);
+
+ mutex_unlock(&bch_register_lock);
+ return size;
+}
+
+static struct attribute *bch_cached_dev_files[] = {
+ &sysfs_attach,
+ &sysfs_detach,
+ &sysfs_stop,
+#if 0
+ &sysfs_data_csum,
+#endif
+ &sysfs_cache_mode,
+ &sysfs_writeback_metadata,
+ &sysfs_writeback_running,
+ &sysfs_writeback_delay,
+ &sysfs_writeback_percent,
+ &sysfs_writeback_rate,
+ &sysfs_writeback_rate_update_seconds,
+ &sysfs_writeback_rate_d_term,
+ &sysfs_writeback_rate_p_term_inverse,
+ &sysfs_writeback_rate_d_smooth,
+ &sysfs_writeback_rate_debug,
+ &sysfs_dirty_data,
+ &sysfs_sequential_cutoff,
+ &sysfs_sequential_merge,
+ &sysfs_clear_stats,
+ &sysfs_running,
+ &sysfs_state,
+ &sysfs_label,
+ &sysfs_readahead,
+#ifdef CONFIG_BCACHE_DEBUG
+ &sysfs_verify,
+#endif
+ NULL
+};
+KTYPE(bch_cached_dev);
+
+SHOW(bch_flash_dev)
+{
+ struct bcache_device *d = container_of(kobj, struct bcache_device,
+ kobj);
+ struct uuid_entry *u = &d->c->uuids[d->id];
+
+ sysfs_printf(data_csum, "%i", d->data_csum);
+ sysfs_hprint(size, u->sectors << 9);
+
+ if (attr == &sysfs_label) {
+ memcpy(buf, u->label, SB_LABEL_SIZE);
+ buf[SB_LABEL_SIZE] = '\0';
+ strcat(buf, "\n");
+ return strlen(buf);
+ }
+
+ return 0;
+}
+
+STORE(__bch_flash_dev)
+{
+ struct bcache_device *d = container_of(kobj, struct bcache_device,
+ kobj);
+ struct uuid_entry *u = &d->c->uuids[d->id];
+
+ sysfs_strtoul(data_csum, d->data_csum);
+
+ if (attr == &sysfs_size) {
+ uint64_t v;
+ strtoi_h_or_return(buf, v);
+
+ u->sectors = v >> 9;
+ bch_uuid_write(d->c);
+ set_capacity(d->disk, u->sectors);
+ }
+
+ if (attr == &sysfs_label) {
+ memcpy(u->label, buf, SB_LABEL_SIZE);
+ bch_uuid_write(d->c);
+ }
+
+ if (attr == &sysfs_unregister) {
+ atomic_set(&d->detaching, 1);
+ bcache_device_stop(d);
+ }
+
+ return size;
+}
+STORE_LOCKED(bch_flash_dev)
+
+static struct attribute *bch_flash_dev_files[] = {
+ &sysfs_unregister,
+#if 0
+ &sysfs_data_csum,
+#endif
+ &sysfs_label,
+ &sysfs_size,
+ NULL
+};
+KTYPE(bch_flash_dev);
+
+SHOW(__bch_cache_set)
+{
+ unsigned root_usage(struct cache_set *c)
+ {
+ unsigned bytes = 0;
+ struct bkey *k;
+ struct btree *b;
+ struct btree_iter iter;
+
+ goto lock_root;
+
+ do {
+ rw_unlock(false, b);
+lock_root:
+ b = c->root;
+ rw_lock(false, b, b->level);
+ } while (b != c->root);
+
+ for_each_key_filter(b, k, &iter, bch_ptr_bad)
+ bytes += bkey_bytes(k);
+
+ rw_unlock(false, b);
+
+ return (bytes * 100) / btree_bytes(c);
+ }
+
+ size_t cache_size(struct cache_set *c)
+ {
+ size_t ret = 0;
+ struct btree *b;
+
+ mutex_lock(&c->bucket_lock);
+ list_for_each_entry(b, &c->btree_cache, list)
+ ret += 1 << (b->page_order + PAGE_SHIFT);
+
+ mutex_unlock(&c->bucket_lock);
+ return ret;
+ }
+
+ unsigned cache_max_chain(struct cache_set *c)
+ {
+ unsigned ret = 0;
+ struct hlist_head *h;
+
+ mutex_lock(&c->bucket_lock);
+
+ for (h = c->bucket_hash;
+ h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
+ h++) {
+ unsigned i = 0;
+ struct hlist_node *p;
+
+ hlist_for_each(p, h)
+ i++;
+
+ ret = max(ret, i);
+ }
+
+ mutex_unlock(&c->bucket_lock);
+ return ret;
+ }
+
+ unsigned btree_used(struct cache_set *c)
+ {
+ return div64_u64(c->gc_stats.key_bytes * 100,
+ (c->gc_stats.nodes ?: 1) * btree_bytes(c));
+ }
+
+ unsigned average_key_size(struct cache_set *c)
+ {
+ return c->gc_stats.nkeys
+ ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
+ : 0;
+ }
+
+ struct cache_set *c = container_of(kobj, struct cache_set, kobj);
+
+ sysfs_print(synchronous, CACHE_SYNC(&c->sb));
+ sysfs_print(journal_delay_ms, c->journal_delay_ms);
+ sysfs_hprint(bucket_size, bucket_bytes(c));
+ sysfs_hprint(block_size, block_bytes(c));
+ sysfs_print(tree_depth, c->root->level);
+ sysfs_print(root_usage_percent, root_usage(c));
+
+ sysfs_hprint(btree_cache_size, cache_size(c));
+ sysfs_print(btree_cache_max_chain, cache_max_chain(c));
+ sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use);
+
+ sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms);
+ sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us);
+ sysfs_print_time_stats(&c->sort_time, btree_sort, ms, us);
+ sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us);
+ sysfs_print_time_stats(&c->try_harder_time, try_harder, ms, us);
+
+ sysfs_print(btree_used_percent, btree_used(c));
+ sysfs_print(btree_nodes, c->gc_stats.nodes);
+ sysfs_hprint(dirty_data, c->gc_stats.dirty);
+ sysfs_hprint(average_key_size, average_key_size(c));
+
+ sysfs_print(cache_read_races,
+ atomic_long_read(&c->cache_read_races));
+
+ sysfs_print(writeback_keys_done,
+ atomic_long_read(&c->writeback_keys_done));
+ sysfs_print(writeback_keys_failed,
+ atomic_long_read(&c->writeback_keys_failed));
+
+ /* See count_io_errors for why 88 */
+ sysfs_print(io_error_halflife, c->error_decay * 88);
+ sysfs_print(io_error_limit, c->error_limit >> IO_ERROR_SHIFT);
+
+ sysfs_hprint(congested,
+ ((uint64_t) bch_get_congested(c)) << 9);
+ sysfs_print(congested_read_threshold_us,
+ c->congested_read_threshold_us);
+ sysfs_print(congested_write_threshold_us,
+ c->congested_write_threshold_us);
+
+ sysfs_print(active_journal_entries, fifo_used(&c->journal.pin));
+ sysfs_printf(verify, "%i", c->verify);
+ sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled);
+ sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
+ sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
+ sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
+
+ if (attr == &sysfs_bset_tree_stats)
+ return bch_bset_print_stats(c, buf);
+
+ return 0;
+}
+SHOW_LOCKED(bch_cache_set)
+
+STORE(__bch_cache_set)
+{
+ struct cache_set *c = container_of(kobj, struct cache_set, kobj);
+
+ if (attr == &sysfs_unregister)
+ bch_cache_set_unregister(c);
+
+ if (attr == &sysfs_stop)
+ bch_cache_set_stop(c);
+
+ if (attr == &sysfs_synchronous) {
+ bool sync = strtoul_or_return(buf);
+
+ if (sync != CACHE_SYNC(&c->sb)) {
+ SET_CACHE_SYNC(&c->sb, sync);
+ bcache_write_super(c);
+ }
+ }
+
+ if (attr == &sysfs_flash_vol_create) {
+ int r;
+ uint64_t v;
+ strtoi_h_or_return(buf, v);
+
+ r = bch_flash_dev_create(c, v);
+ if (r)
+ return r;
+ }
+
+ if (attr == &sysfs_clear_stats) {
+ atomic_long_set(&c->writeback_keys_done, 0);
+ atomic_long_set(&c->writeback_keys_failed, 0);
+
+ memset(&c->gc_stats, 0, sizeof(struct gc_stat));
+ bch_cache_accounting_clear(&c->accounting);
+ }
+
+ if (attr == &sysfs_trigger_gc)
+ bch_queue_gc(c);
+
+ if (attr == &sysfs_prune_cache) {
+ struct shrink_control sc;
+ sc.gfp_mask = GFP_KERNEL;
+ sc.nr_to_scan = strtoul_or_return(buf);
+ c->shrink.shrink(&c->shrink, &sc);
+ }
+
+ sysfs_strtoul(congested_read_threshold_us,
+ c->congested_read_threshold_us);
+ sysfs_strtoul(congested_write_threshold_us,
+ c->congested_write_threshold_us);
+
+ if (attr == &sysfs_io_error_limit)
+ c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;
+
+ /* See count_io_errors() for why 88 */
+ if (attr == &sysfs_io_error_halflife)
+ c->error_decay = strtoul_or_return(buf) / 88;
+
+ sysfs_strtoul(journal_delay_ms, c->journal_delay_ms);
+ sysfs_strtoul(verify, c->verify);
+ sysfs_strtoul(key_merging_disabled, c->key_merging_disabled);
+ sysfs_strtoul(gc_always_rewrite, c->gc_always_rewrite);
+ sysfs_strtoul(btree_shrinker_disabled, c->shrinker_disabled);
+ sysfs_strtoul(copy_gc_enabled, c->copy_gc_enabled);
+
+ return size;
+}
+STORE_LOCKED(bch_cache_set)
+
+SHOW(bch_cache_set_internal)
+{
+ struct cache_set *c = container_of(kobj, struct cache_set, internal);
+ return bch_cache_set_show(&c->kobj, attr, buf);
+}
+
+STORE(bch_cache_set_internal)
+{
+ struct cache_set *c = container_of(kobj, struct cache_set, internal);
+ return bch_cache_set_store(&c->kobj, attr, buf, size);
+}
+
+static void bch_cache_set_internal_release(struct kobject *k)
+{
+}
+
+static struct attribute *bch_cache_set_files[] = {
+ &sysfs_unregister,
+ &sysfs_stop,
+ &sysfs_synchronous,
+ &sysfs_journal_delay_ms,
+ &sysfs_flash_vol_create,
+
+ &sysfs_bucket_size,
+ &sysfs_block_size,
+ &sysfs_tree_depth,
+ &sysfs_root_usage_percent,
+ &sysfs_btree_cache_size,
+ &sysfs_cache_available_percent,
+
+ &sysfs_average_key_size,
+ &sysfs_dirty_data,
+
+ &sysfs_io_error_limit,
+ &sysfs_io_error_halflife,
+ &sysfs_congested,
+ &sysfs_congested_read_threshold_us,
+ &sysfs_congested_write_threshold_us,
+ &sysfs_clear_stats,
+ NULL
+};
+KTYPE(bch_cache_set);
+
+static struct attribute *bch_cache_set_internal_files[] = {
+ &sysfs_active_journal_entries,
+
+ sysfs_time_stats_attribute_list(btree_gc, sec, ms)
+ sysfs_time_stats_attribute_list(btree_split, sec, us)
+ sysfs_time_stats_attribute_list(btree_sort, ms, us)
+ sysfs_time_stats_attribute_list(btree_read, ms, us)
+ sysfs_time_stats_attribute_list(try_harder, ms, us)
+
+ &sysfs_btree_nodes,
+ &sysfs_btree_used_percent,
+ &sysfs_btree_cache_max_chain,
+
+ &sysfs_bset_tree_stats,
+ &sysfs_cache_read_races,
+ &sysfs_writeback_keys_done,
+ &sysfs_writeback_keys_failed,
+
+ &sysfs_trigger_gc,
+ &sysfs_prune_cache,
+#ifdef CONFIG_BCACHE_DEBUG
+ &sysfs_verify,
+ &sysfs_key_merging_disabled,
+#endif
+ &sysfs_gc_always_rewrite,
+ &sysfs_btree_shrinker_disabled,
+ &sysfs_copy_gc_enabled,
+ NULL
+};
+KTYPE(bch_cache_set_internal);
+
+SHOW(__bch_cache)
+{
+ struct cache *ca = container_of(kobj, struct cache, kobj);
+
+ sysfs_hprint(bucket_size, bucket_bytes(ca));
+ sysfs_hprint(block_size, block_bytes(ca));
+ sysfs_print(nbuckets, ca->sb.nbuckets);
+ sysfs_print(discard, ca->discard);
+ sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
+ sysfs_hprint(btree_written,
+ atomic_long_read(&ca->btree_sectors_written) << 9);
+ sysfs_hprint(metadata_written,
+ (atomic_long_read(&ca->meta_sectors_written) +
+ atomic_long_read(&ca->btree_sectors_written)) << 9);
+
+ sysfs_print(io_errors,
+ atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);
+
+ sysfs_print(freelist_percent, ca->free.size * 100 /
+ ((size_t) ca->sb.nbuckets));
+
+ if (attr == &sysfs_cache_replacement_policy)
+ return bch_snprint_string_list(buf, PAGE_SIZE,
+ cache_replacement_policies,
+ CACHE_REPLACEMENT(&ca->sb));
+
+ if (attr == &sysfs_priority_stats) {
+ int cmp(const void *l, const void *r)
+ { return *((uint16_t *) r) - *((uint16_t *) l); }
+
+ /* Number of quantiles we compute */
+ const unsigned nq = 31;
+
+ size_t n = ca->sb.nbuckets, i, unused, btree;
+ uint64_t sum = 0;
+ uint16_t q[nq], *p, *cached;
+ ssize_t ret;
+
+ cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
+ if (!p)
+ return -ENOMEM;
+
+ mutex_lock(&ca->set->bucket_lock);
+ for (i = ca->sb.first_bucket; i < n; i++)
+ p[i] = ca->buckets[i].prio;
+ mutex_unlock(&ca->set->bucket_lock);
+
+ sort(p, n, sizeof(uint16_t), cmp, NULL);
+
+ while (n &&
+ !cached[n - 1])
+ --n;
+
+ unused = ca->sb.nbuckets - n;
+
+ while (cached < p + n &&
+ *cached == BTREE_PRIO)
+ cached++;
+
+ btree = cached - p;
+ n -= btree;
+
+ for (i = 0; i < n; i++)
+ sum += INITIAL_PRIO - cached[i];
+
+ if (n)
+ do_div(sum, n);
+
+ for (i = 0; i < nq; i++)
+ q[i] = INITIAL_PRIO - cached[n * (i + 1) / (nq + 1)];
+
+ vfree(p);
+
+ ret = snprintf(buf, PAGE_SIZE,
+ "Unused: %zu%%\n"
+ "Metadata: %zu%%\n"
+ "Average: %llu\n"
+ "Sectors per Q: %zu\n"
+ "Quantiles: [",
+ unused * 100 / (size_t) ca->sb.nbuckets,
+ btree * 100 / (size_t) ca->sb.nbuckets, sum,
+ n * ca->sb.bucket_size / (nq + 1));
+
+ for (i = 0; i < nq && ret < (ssize_t) PAGE_SIZE; i++)
+ ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ i < nq - 1 ? "%u " : "%u]\n", q[i]);
+
+ buf[PAGE_SIZE - 1] = '\0';
+ return ret;
+ }
+
+ return 0;
+}
+SHOW_LOCKED(bch_cache)
+
+STORE(__bch_cache)
+{
+ struct cache *ca = container_of(kobj, struct cache, kobj);
+
+ if (attr == &sysfs_discard) {
+ bool v = strtoul_or_return(buf);
+
+ if (blk_queue_discard(bdev_get_queue(ca->bdev)))
+ ca->discard = v;
+
+ if (v != CACHE_DISCARD(&ca->sb)) {
+ SET_CACHE_DISCARD(&ca->sb, v);
+ bcache_write_super(ca->set);
+ }
+ }
+
+ if (attr == &sysfs_cache_replacement_policy) {
+ ssize_t v = bch_read_string_list(buf, cache_replacement_policies);
+
+ if (v < 0)
+ return v;
+
+ if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) {
+ mutex_lock(&ca->set->bucket_lock);
+ SET_CACHE_REPLACEMENT(&ca->sb, v);
+ mutex_unlock(&ca->set->bucket_lock);
+
+ bcache_write_super(ca->set);
+ }
+ }
+
+ if (attr == &sysfs_freelist_percent) {
+ DECLARE_FIFO(long, free);
+ long i;
+ size_t p = strtoul_or_return(buf);
+
+ p = clamp_t(size_t,
+ ((size_t) ca->sb.nbuckets * p) / 100,
+ roundup_pow_of_two(ca->sb.nbuckets) >> 9,
+ ca->sb.nbuckets / 2);
+
+ if (!init_fifo_exact(&free, p, GFP_KERNEL))
+ return -ENOMEM;
+
+ mutex_lock(&ca->set->bucket_lock);
+
+ fifo_move(&free, &ca->free);
+ fifo_swap(&free, &ca->free);
+
+ mutex_unlock(&ca->set->bucket_lock);
+
+ while (fifo_pop(&free, i))
+ atomic_dec(&ca->buckets[i].pin);
+
+ free_fifo(&free);
+ }
+
+ if (attr == &sysfs_clear_stats) {
+ atomic_long_set(&ca->sectors_written, 0);
+ atomic_long_set(&ca->btree_sectors_written, 0);
+ atomic_long_set(&ca->meta_sectors_written, 0);
+ atomic_set(&ca->io_count, 0);
+ atomic_set(&ca->io_errors, 0);
+ }
+
+ return size;
+}
+STORE_LOCKED(bch_cache)
+
+static struct attribute *bch_cache_files[] = {
+ &sysfs_bucket_size,
+ &sysfs_block_size,
+ &sysfs_nbuckets,
+ &sysfs_priority_stats,
+ &sysfs_discard,
+ &sysfs_written,
+ &sysfs_btree_written,
+ &sysfs_metadata_written,
+ &sysfs_io_errors,
+ &sysfs_clear_stats,
+ &sysfs_freelist_percent,
+ &sysfs_cache_replacement_policy,
+ NULL
+};
+KTYPE(bch_cache);
diff --git a/drivers/md/bcache/sysfs.h b/drivers/md/bcache/sysfs.h
new file mode 100644
index 0000000..0526fe9
--- /dev/null
+++ b/drivers/md/bcache/sysfs.h
@@ -0,0 +1,110 @@
+#ifndef _BCACHE_SYSFS_H_
+#define _BCACHE_SYSFS_H_
+
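+/*
+ * KTYPE(type) declares the kobj_type for one of the bcache kobjects; e.g.
+ * KTYPE(bch_cache) expands to (roughly)
+ *
+ *	struct kobj_type bch_cache_ktype = {
+ *		.release	= bch_cache_release,
+ *		.sysfs_ops	= &(const struct sysfs_ops) {
+ *			.show	= bch_cache_show,
+ *			.store	= bch_cache_store,
+ *		},
+ *		.default_attrs	= bch_cache_files,
+ *	};
+ *
+ * with the show/store/release functions and the attribute list supplied by
+ * sysfs.c and super.c.
+ */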
+#define KTYPE(type) \
+struct kobj_type type ## _ktype = { \
+ .release = type ## _release, \
+ .sysfs_ops = &((const struct sysfs_ops) { \
+ .show = type ## _show, \
+ .store = type ## _store \
+ }), \
+ .default_attrs = type ## _files \
+}
+
+#define SHOW(fn) \
+static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
+ char *buf) \
+
+#define STORE(fn) \
+static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
+ const char *buf, size_t size) \
+
+#define SHOW_LOCKED(fn) \
+SHOW(fn) \
+{ \
+ ssize_t ret; \
+ mutex_lock(&bch_register_lock); \
+ ret = __ ## fn ## _show(kobj, attr, buf); \
+ mutex_unlock(&bch_register_lock); \
+ return ret; \
+}
+
+#define STORE_LOCKED(fn) \
+STORE(fn) \
+{ \
+ ssize_t ret; \
+ mutex_lock(&bch_register_lock); \
+ ret = __ ## fn ## _store(kobj, attr, buf, size); \
+ mutex_unlock(&bch_register_lock); \
+ return ret; \
+}
+
+#define __sysfs_attribute(_name, _mode) \
+ static struct attribute sysfs_##_name = \
+ { .name = #_name, .mode = _mode }
+
+#define write_attribute(n) __sysfs_attribute(n, S_IWUSR)
+#define read_attribute(n) __sysfs_attribute(n, S_IRUGO)
+#define rw_attribute(n) __sysfs_attribute(n, S_IRUGO|S_IWUSR)
+
+#define sysfs_printf(file, fmt, ...) \
+do { \
+ if (attr == &sysfs_ ## file) \
+ return snprintf(buf, PAGE_SIZE, fmt "\n", __VA_ARGS__); \
+} while (0)
+
+#define sysfs_print(file, var) \
+do { \
+ if (attr == &sysfs_ ## file) \
+ return snprint(buf, PAGE_SIZE, var); \
+} while (0)
+
+#define sysfs_hprint(file, val) \
+do { \
+ if (attr == &sysfs_ ## file) { \
+ ssize_t ret = bch_hprint(buf, val); \
+ strcat(buf, "\n"); \
+ return ret + 1; \
+ } \
+} while (0)
+
+#define var_printf(_var, fmt) sysfs_printf(_var, fmt, var(_var))
+#define var_print(_var) sysfs_print(_var, var(_var))
+#define var_hprint(_var) sysfs_hprint(_var, var(_var))
+
+#define sysfs_strtoul(file, var) \
+do { \
+ if (attr == &sysfs_ ## file) \
+ return strtoul_safe(buf, var) ?: (ssize_t) size; \
+} while (0)
+
+#define sysfs_strtoul_clamp(file, var, min, max) \
+do { \
+ if (attr == &sysfs_ ## file) \
+ return strtoul_safe_clamp(buf, var, min, max) \
+ ?: (ssize_t) size; \
+} while (0)
+
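+/*
+ * Parse cp as a decimal unsigned long, returning the error from the
+ * enclosing function if it doesn't parse.
+ */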
+#define strtoul_or_return(cp) \
+({ \
+ unsigned long _v; \
+ int _r = kstrtoul(cp, 10, &_v); \
+ if (_r) \
+ return _r; \
+ _v; \
+})
+
+#define strtoi_h_or_return(cp, v) \
+do { \
+ int _r = strtoi_h(cp, &v); \
+ if (_r) \
+ return _r; \
+} while (0)
+
+#define sysfs_hatoi(file, var) \
+do { \
+ if (attr == &sysfs_ ## file) \
+ return strtoi_h(buf, &var) ?: (ssize_t) size; \
+} while (0)
+
+#endif /* _BCACHE_SYSFS_H_ */
diff --git a/drivers/md/bcache/trace.c b/drivers/md/bcache/trace.c
new file mode 100644
index 0000000..983f9bb
--- /dev/null
+++ b/drivers/md/bcache/trace.c
@@ -0,0 +1,26 @@
+#include "bcache.h"
+#include "btree.h"
+#include "request.h"
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/bcache.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_request_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_request_end);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_passthrough);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_cache_hit);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_cache_miss);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_read_retry);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writethrough);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writeback);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_write_skip);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_read);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_write);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_write_dirty);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_read_dirty);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_write);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_cache_insert);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_end);
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
new file mode 100644
index 0000000..da3a99e
--- /dev/null
+++ b/drivers/md/bcache/util.c
@@ -0,0 +1,377 @@
+/*
+ * random utility code, for bcache but in theory not specific to bcache
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/types.h>
+
+#include "util.h"
+
+#define simple_strtoint(c, end, base) simple_strtol(c, end, base)
+#define simple_strtouint(c, end, base) simple_strtoul(c, end, base)
+
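+/*
+ * STRTO_H(name, type) generates bch_strtoint_h() and friends, which parse
+ * an integer with an optional k/m/g/t/p/e/z/y suffix; each suffix adds
+ * another factor of 1024, so e.g. "8k" parses as 8192 and "1m" as 1048576.
+ */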
+#define STRTO_H(name, type) \
+int bch_ ## name ## _h(const char *cp, type *res) \
+{ \
+ int u = 0; \
+ char *e; \
+ type i = simple_ ## name(cp, &e, 10); \
+ \
+ switch (tolower(*e)) { \
+ default: \
+ return -EINVAL; \
+ case 'y': \
+ case 'z': \
+ u++; \
+ case 'e': \
+ u++; \
+ case 'p': \
+ u++; \
+ case 't': \
+ u++; \
+ case 'g': \
+ u++; \
+ case 'm': \
+ u++; \
+ case 'k': \
+ u++; \
+ if (e++ == cp) \
+ return -EINVAL; \
+ case '\n': \
+ case '\0': \
+ if (*e == '\n') \
+ e++; \
+ } \
+ \
+ if (*e) \
+ return -EINVAL; \
+ \
+ while (u--) { \
+ if ((type) ~0 > 0 && \
+ (type) ~0 / 1024 <= i) \
+ return -EINVAL; \
+ if ((i > 0 && ANYSINT_MAX(type) / 1024 < i) || \
+ (i < 0 && -ANYSINT_MAX(type) / 1024 > i)) \
+ return -EINVAL; \
+ i *= 1024; \
+ } \
+ \
+ *res = i; \
+ return 0; \
+} \
+
+STRTO_H(strtoint, int)
+STRTO_H(strtouint, unsigned int)
+STRTO_H(strtoll, long long)
+STRTO_H(strtoull, unsigned long long)
+
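+/*
+ * Print v in human readable units (k, M, G, ... in powers of 1024), with
+ * one decimal digit when the scaled value is under 100 - e.g. 3300 prints
+ * as "3.2k".  Returns the number of characters written.
+ */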
+ssize_t bch_hprint(char *buf, int64_t v)
+{
+ static const char units[] = "?kMGTPEZY";
+ char dec[4] = "";
+ int u, t = 0;
+
+ for (u = 0; v >= 1024 || v <= -1024; u++) {
+ t = v & ~(~0 << 10);
+ v >>= 10;
+ }
+
+ if (!u)
+ return sprintf(buf, "%llu", v);
+
+ if (v < 100 && v > -100)
+ snprintf(dec, sizeof(dec), ".%i", t / 100);
+
+ return sprintf(buf, "%lli%s%c", v, dec, units[u]);
+}
+
+ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
+ size_t selected)
+{
+ char *out = buf;
+ size_t i;
+
+ for (i = 0; list[i]; i++)
+ out += snprintf(out, buf + size - out,
+ i == selected ? "[%s] " : "%s ", list[i]);
+
+ out[-1] = '\n';
+ return out - buf;
+}
+
+ssize_t bch_read_string_list(const char *buf, const char * const list[])
+{
+ size_t i;
+ char *s, *d = kstrndup(buf, PAGE_SIZE - 1, GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ s = strim(d);
+
+ for (i = 0; list[i]; i++)
+ if (!strcmp(list[i], s))
+ break;
+
+ kfree(d);
+
+ if (!list[i])
+ return -EINVAL;
+
+ return i;
+}
+
+bool bch_is_zero(const char *p, size_t n)
+{
+ size_t i;
+
+ for (i = 0; i < n; i++)
+ if (p[i])
+ return false;
+ return true;
+}
+
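+/*
+ * Parse a textual uuid (hex digits; dashes and colons are skipped) into 16
+ * binary bytes.  Returns the number of input characters consumed.
+ */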
+int bch_parse_uuid(const char *s, char *uuid)
+{
+ size_t i, j, x;
+ memset(uuid, 0, 16);
+
+ for (i = 0, j = 0;
+ i < strspn(s, "-0123456789:ABCDEFabcdef") && j < 32;
+ i++) {
+ x = s[i] | 32;
+
+ switch (x) {
+ case '0'...'9':
+ x -= '0';
+ break;
+ case 'a'...'f':
+ x -= 'a' - 10;
+ break;
+ default:
+ continue;
+ }
+
+ if (!(j & 1))
+ x <<= 4;
+ uuid[j++ >> 1] |= x;
+ }
+ return i;
+}
+
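+/*
+ * Update the running stats for an operation that started at start_time (in
+ * local_clock() units): the maximum duration, plus exponentially weighted
+ * moving averages of the duration and of the time between calls.
+ */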
+void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
+{
+ uint64_t now = local_clock();
+ uint64_t duration = time_after64(now, start_time)
+ ? now - start_time : 0;
+ uint64_t last = time_after64(now, stats->last)
+ ? now - stats->last : 0;
+
+ stats->max_duration = max(stats->max_duration, duration);
+
+ if (stats->last) {
+ ewma_add(stats->average_duration, duration, 8, 8);
+
+ if (stats->average_frequency)
+ ewma_add(stats->average_frequency, last, 8, 8);
+ else
+ stats->average_frequency = last << 8;
+ } else {
+ stats->average_duration = duration << 8;
+ }
+
+ stats->last = now ?: 1;
+}
+
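+/*
+ * Ratelimiting: given that done units of work were just issued, advance
+ * the deadline by done / rate and return how long (in jiffies) the caller
+ * should wait before issuing more work, to stay at the target rate.
+ */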
+unsigned bch_next_delay(struct ratelimit *d, uint64_t done)
+{
+ uint64_t now = local_clock();
+
+ d->next += div_u64(done, d->rate);
+
+ return time_after64(d->next, now)
+ ? div_u64(d->next - now, NSEC_PER_SEC / HZ)
+ : 0;
+}
+
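+/*
+ * Fill in bio->bi_io_vec so that the bio covers bi_size bytes of the
+ * (possibly vmalloced) buffer at base, one page per bvec; if base is NULL
+ * only the lengths and offsets are set and the caller supplies the pages.
+ */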
+void bch_bio_map(struct bio *bio, void *base)
+{
+ size_t size = bio->bi_size;
+ struct bio_vec *bv = bio->bi_io_vec;
+
+ BUG_ON(!bio->bi_size);
+ BUG_ON(bio->bi_vcnt);
+
+ bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0;
+ goto start;
+
+ for (; size; bio->bi_vcnt++, bv++) {
+ bv->bv_offset = 0;
+start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset,
+ size);
+ if (base) {
+ bv->bv_page = is_vmalloc_addr(base)
+ ? vmalloc_to_page(base)
+ : virt_to_page(base);
+
+ base += bv->bv_len;
+ }
+
+ size -= bv->bv_len;
+ }
+}
+
+int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp)
+{
+ int i;
+ struct bio_vec *bv;
+
+ bio_for_each_segment(bv, bio, i) {
+ bv->bv_page = alloc_page(gfp);
+ if (!bv->bv_page) {
+ while (bv-- != bio->bi_io_vec + bio->bi_idx)
+ __free_page(bv->bv_page);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group (Any
+ * use permitted, subject to terms of PostgreSQL license; see.)
+ *
+ * If we have a 64-bit integer type, then a 64-bit CRC looks just like the
+ * usual sort of implementation. (See Ross Williams' excellent introduction
+ * A PAINLESS GUIDE TO CRC ERROR DETECTION ALGORITHMS, available from
+ * ftp://ftp.rocksoft.com/papers/crc_v3.txt or several other net sites.)
+ * If we have no working 64-bit type, then fake it with two 32-bit registers.
+ *
+ * The present implementation is a normal (not "reflected", in Williams'
+ * terms) 64-bit CRC, using initial all-ones register contents and a final
+ * bit inversion. The chosen polynomial is borrowed from the DLT1 spec
+ * (ECMA-182, available from http://www.ecma.ch/ecma1/STAND/ECMA-182.HTM):
+ *
+ * x^64 + x^62 + x^57 + x^55 + x^54 + x^53 + x^52 + x^47 + x^46 + x^45 +
+ * x^40 + x^39 + x^38 + x^37 + x^35 + x^33 + x^32 + x^31 + x^29 + x^27 +
+ * x^24 + x^23 + x^22 + x^21 + x^19 + x^17 + x^13 + x^12 + x^10 + x^9 +
+ * x^7 + x^4 + x + 1
+ */
+
+static const uint64_t crc_table[256] = {
+ 0x0000000000000000ULL, 0x42F0E1EBA9EA3693ULL, 0x85E1C3D753D46D26ULL,
+ 0xC711223CFA3E5BB5ULL, 0x493366450E42ECDFULL, 0x0BC387AEA7A8DA4CULL,
+ 0xCCD2A5925D9681F9ULL, 0x8E224479F47CB76AULL, 0x9266CC8A1C85D9BEULL,
+ 0xD0962D61B56FEF2DULL, 0x17870F5D4F51B498ULL, 0x5577EEB6E6BB820BULL,
+ 0xDB55AACF12C73561ULL, 0x99A54B24BB2D03F2ULL, 0x5EB4691841135847ULL,
+ 0x1C4488F3E8F96ED4ULL, 0x663D78FF90E185EFULL, 0x24CD9914390BB37CULL,
+ 0xE3DCBB28C335E8C9ULL, 0xA12C5AC36ADFDE5AULL, 0x2F0E1EBA9EA36930ULL,
+ 0x6DFEFF5137495FA3ULL, 0xAAEFDD6DCD770416ULL, 0xE81F3C86649D3285ULL,
+ 0xF45BB4758C645C51ULL, 0xB6AB559E258E6AC2ULL, 0x71BA77A2DFB03177ULL,
+ 0x334A9649765A07E4ULL, 0xBD68D2308226B08EULL, 0xFF9833DB2BCC861DULL,
+ 0x388911E7D1F2DDA8ULL, 0x7A79F00C7818EB3BULL, 0xCC7AF1FF21C30BDEULL,
+ 0x8E8A101488293D4DULL, 0x499B3228721766F8ULL, 0x0B6BD3C3DBFD506BULL,
+ 0x854997BA2F81E701ULL, 0xC7B97651866BD192ULL, 0x00A8546D7C558A27ULL,
+ 0x4258B586D5BFBCB4ULL, 0x5E1C3D753D46D260ULL, 0x1CECDC9E94ACE4F3ULL,
+ 0xDBFDFEA26E92BF46ULL, 0x990D1F49C77889D5ULL, 0x172F5B3033043EBFULL,
+ 0x55DFBADB9AEE082CULL, 0x92CE98E760D05399ULL, 0xD03E790CC93A650AULL,
+ 0xAA478900B1228E31ULL, 0xE8B768EB18C8B8A2ULL, 0x2FA64AD7E2F6E317ULL,
+ 0x6D56AB3C4B1CD584ULL, 0xE374EF45BF6062EEULL, 0xA1840EAE168A547DULL,
+ 0x66952C92ECB40FC8ULL, 0x2465CD79455E395BULL, 0x3821458AADA7578FULL,
+ 0x7AD1A461044D611CULL, 0xBDC0865DFE733AA9ULL, 0xFF3067B657990C3AULL,
+ 0x711223CFA3E5BB50ULL, 0x33E2C2240A0F8DC3ULL, 0xF4F3E018F031D676ULL,
+ 0xB60301F359DBE0E5ULL, 0xDA050215EA6C212FULL, 0x98F5E3FE438617BCULL,
+ 0x5FE4C1C2B9B84C09ULL, 0x1D14202910527A9AULL, 0x93366450E42ECDF0ULL,
+ 0xD1C685BB4DC4FB63ULL, 0x16D7A787B7FAA0D6ULL, 0x5427466C1E109645ULL,
+ 0x4863CE9FF6E9F891ULL, 0x0A932F745F03CE02ULL, 0xCD820D48A53D95B7ULL,
+ 0x8F72ECA30CD7A324ULL, 0x0150A8DAF8AB144EULL, 0x43A04931514122DDULL,
+ 0x84B16B0DAB7F7968ULL, 0xC6418AE602954FFBULL, 0xBC387AEA7A8DA4C0ULL,
+ 0xFEC89B01D3679253ULL, 0x39D9B93D2959C9E6ULL, 0x7B2958D680B3FF75ULL,
+ 0xF50B1CAF74CF481FULL, 0xB7FBFD44DD257E8CULL, 0x70EADF78271B2539ULL,
+ 0x321A3E938EF113AAULL, 0x2E5EB66066087D7EULL, 0x6CAE578BCFE24BEDULL,
+ 0xABBF75B735DC1058ULL, 0xE94F945C9C3626CBULL, 0x676DD025684A91A1ULL,
+ 0x259D31CEC1A0A732ULL, 0xE28C13F23B9EFC87ULL, 0xA07CF2199274CA14ULL,
+ 0x167FF3EACBAF2AF1ULL, 0x548F120162451C62ULL, 0x939E303D987B47D7ULL,
+ 0xD16ED1D631917144ULL, 0x5F4C95AFC5EDC62EULL, 0x1DBC74446C07F0BDULL,
+ 0xDAAD56789639AB08ULL, 0x985DB7933FD39D9BULL, 0x84193F60D72AF34FULL,
+ 0xC6E9DE8B7EC0C5DCULL, 0x01F8FCB784FE9E69ULL, 0x43081D5C2D14A8FAULL,
+ 0xCD2A5925D9681F90ULL, 0x8FDAB8CE70822903ULL, 0x48CB9AF28ABC72B6ULL,
+ 0x0A3B7B1923564425ULL, 0x70428B155B4EAF1EULL, 0x32B26AFEF2A4998DULL,
+ 0xF5A348C2089AC238ULL, 0xB753A929A170F4ABULL, 0x3971ED50550C43C1ULL,
+ 0x7B810CBBFCE67552ULL, 0xBC902E8706D82EE7ULL, 0xFE60CF6CAF321874ULL,
+ 0xE224479F47CB76A0ULL, 0xA0D4A674EE214033ULL, 0x67C58448141F1B86ULL,
+ 0x253565A3BDF52D15ULL, 0xAB1721DA49899A7FULL, 0xE9E7C031E063ACECULL,
+ 0x2EF6E20D1A5DF759ULL, 0x6C0603E6B3B7C1CAULL, 0xF6FAE5C07D3274CDULL,
+ 0xB40A042BD4D8425EULL, 0x731B26172EE619EBULL, 0x31EBC7FC870C2F78ULL,
+ 0xBFC9838573709812ULL, 0xFD39626EDA9AAE81ULL, 0x3A28405220A4F534ULL,
+ 0x78D8A1B9894EC3A7ULL, 0x649C294A61B7AD73ULL, 0x266CC8A1C85D9BE0ULL,
+ 0xE17DEA9D3263C055ULL, 0xA38D0B769B89F6C6ULL, 0x2DAF4F0F6FF541ACULL,
+ 0x6F5FAEE4C61F773FULL, 0xA84E8CD83C212C8AULL, 0xEABE6D3395CB1A19ULL,
+ 0x90C79D3FEDD3F122ULL, 0xD2377CD44439C7B1ULL, 0x15265EE8BE079C04ULL,
+ 0x57D6BF0317EDAA97ULL, 0xD9F4FB7AE3911DFDULL, 0x9B041A914A7B2B6EULL,
+ 0x5C1538ADB04570DBULL, 0x1EE5D94619AF4648ULL, 0x02A151B5F156289CULL,
+ 0x4051B05E58BC1E0FULL, 0x87409262A28245BAULL, 0xC5B073890B687329ULL,
+ 0x4B9237F0FF14C443ULL, 0x0962D61B56FEF2D0ULL, 0xCE73F427ACC0A965ULL,
+ 0x8C8315CC052A9FF6ULL, 0x3A80143F5CF17F13ULL, 0x7870F5D4F51B4980ULL,
+ 0xBF61D7E80F251235ULL, 0xFD913603A6CF24A6ULL, 0x73B3727A52B393CCULL,
+ 0x31439391FB59A55FULL, 0xF652B1AD0167FEEAULL, 0xB4A25046A88DC879ULL,
+ 0xA8E6D8B54074A6ADULL, 0xEA16395EE99E903EULL, 0x2D071B6213A0CB8BULL,
+ 0x6FF7FA89BA4AFD18ULL, 0xE1D5BEF04E364A72ULL, 0xA3255F1BE7DC7CE1ULL,
+ 0x64347D271DE22754ULL, 0x26C49CCCB40811C7ULL, 0x5CBD6CC0CC10FAFCULL,
+ 0x1E4D8D2B65FACC6FULL, 0xD95CAF179FC497DAULL, 0x9BAC4EFC362EA149ULL,
+ 0x158E0A85C2521623ULL, 0x577EEB6E6BB820B0ULL, 0x906FC95291867B05ULL,
+ 0xD29F28B9386C4D96ULL, 0xCEDBA04AD0952342ULL, 0x8C2B41A1797F15D1ULL,
+ 0x4B3A639D83414E64ULL, 0x09CA82762AAB78F7ULL, 0x87E8C60FDED7CF9DULL,
+ 0xC51827E4773DF90EULL, 0x020905D88D03A2BBULL, 0x40F9E43324E99428ULL,
+ 0x2CFFE7D5975E55E2ULL, 0x6E0F063E3EB46371ULL, 0xA91E2402C48A38C4ULL,
+ 0xEBEEC5E96D600E57ULL, 0x65CC8190991CB93DULL, 0x273C607B30F68FAEULL,
+ 0xE02D4247CAC8D41BULL, 0xA2DDA3AC6322E288ULL, 0xBE992B5F8BDB8C5CULL,
+ 0xFC69CAB42231BACFULL, 0x3B78E888D80FE17AULL, 0x7988096371E5D7E9ULL,
+ 0xF7AA4D1A85996083ULL, 0xB55AACF12C735610ULL, 0x724B8ECDD64D0DA5ULL,
+ 0x30BB6F267FA73B36ULL, 0x4AC29F2A07BFD00DULL, 0x08327EC1AE55E69EULL,
+ 0xCF235CFD546BBD2BULL, 0x8DD3BD16FD818BB8ULL, 0x03F1F96F09FD3CD2ULL,
+ 0x41011884A0170A41ULL, 0x86103AB85A2951F4ULL, 0xC4E0DB53F3C36767ULL,
+ 0xD8A453A01B3A09B3ULL, 0x9A54B24BB2D03F20ULL, 0x5D45907748EE6495ULL,
+ 0x1FB5719CE1045206ULL, 0x919735E51578E56CULL, 0xD367D40EBC92D3FFULL,
+ 0x1476F63246AC884AULL, 0x568617D9EF46BED9ULL, 0xE085162AB69D5E3CULL,
+ 0xA275F7C11F7768AFULL, 0x6564D5FDE549331AULL, 0x279434164CA30589ULL,
+ 0xA9B6706FB8DFB2E3ULL, 0xEB46918411358470ULL, 0x2C57B3B8EB0BDFC5ULL,
+ 0x6EA7525342E1E956ULL, 0x72E3DAA0AA188782ULL, 0x30133B4B03F2B111ULL,
+ 0xF7021977F9CCEAA4ULL, 0xB5F2F89C5026DC37ULL, 0x3BD0BCE5A45A6B5DULL,
+ 0x79205D0E0DB05DCEULL, 0xBE317F32F78E067BULL, 0xFCC19ED95E6430E8ULL,
+ 0x86B86ED5267CDBD3ULL, 0xC4488F3E8F96ED40ULL, 0x0359AD0275A8B6F5ULL,
+ 0x41A94CE9DC428066ULL, 0xCF8B0890283E370CULL, 0x8D7BE97B81D4019FULL,
+ 0x4A6ACB477BEA5A2AULL, 0x089A2AACD2006CB9ULL, 0x14DEA25F3AF9026DULL,
+ 0x562E43B4931334FEULL, 0x913F6188692D6F4BULL, 0xD3CF8063C0C759D8ULL,
+ 0x5DEDC41A34BBEEB2ULL, 0x1F1D25F19D51D821ULL, 0xD80C07CD676F8394ULL,
+ 0x9AFCE626CE85B507ULL,
+};
+
+uint64_t bch_crc64_update(uint64_t crc, const void *_data, size_t len)
+{
+ const unsigned char *data = _data;
+
+ while (len--) {
+ int i = ((int) (crc >> 56) ^ *data++) & 0xFF;
+ crc = crc_table[i] ^ (crc << 8);
+ }
+
+ return crc;
+}
+
+uint64_t bch_crc64(const void *data, size_t len)
+{
+ uint64_t crc = 0xffffffffffffffffULL;
+
+ crc = bch_crc64_update(crc, data, len);
+
+ return crc ^ 0xffffffffffffffffULL;
+}
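A minimal sketch of how a caller might use these two helpers, either in one shot or incrementally over fragments of a buffer; the function name and the split point are invented for illustration and are not part of this patch:

/* Illustrative only: one-shot vs. incremental CRC64 over the same buffer. */
static void crc64_usage_example(const void *buf, size_t len)
{
	const char *p = buf;
	uint64_t whole, incremental;

	/* One-shot helper: seeds with all ones and inverts at the end. */
	whole = bch_crc64(buf, len);

	/*
	 * Incremental form: the caller owns the seed and the final inversion,
	 * so the data can be fed in arbitrary pieces.
	 */
	incremental = 0xffffffffffffffffULL;
	incremental = bch_crc64_update(incremental, p, len / 2);
	incremental = bch_crc64_update(incremental, p + len / 2, len - len / 2);
	incremental ^= 0xffffffffffffffffULL;

	/* Both paths produce the same checksum for the same bytes. */
	WARN_ON(whole != incremental);
}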
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
new file mode 100644
index 0000000..577393e
--- /dev/null
+++ b/drivers/md/bcache/util.h
@@ -0,0 +1,589 @@
+
+#ifndef _BCACHE_UTIL_H
+#define _BCACHE_UTIL_H
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/llist.h>
+#include <linux/ratelimit.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+
+#include "closure.h"
+
+#define PAGE_SECTORS (PAGE_SIZE / 512)
+
+struct closure;
+
+#include <trace/events/bcache.h>
+
+#ifdef CONFIG_BCACHE_EDEBUG
+
+#define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0)
+#define atomic_inc_bug(v, i) BUG_ON(atomic_inc_return(v) <= i)
+
+#else /* EDEBUG */
+
+#define atomic_dec_bug(v) atomic_dec(v)
+#define atomic_inc_bug(v, i) atomic_inc(v)
+
+#endif
+
+#define BITMASK(name, type, field, offset, size) \
+static inline uint64_t name(const type *k) \
+{ return (k->field >> offset) & ~(((uint64_t) ~0) << size); } \
+ \
+static inline void SET_##name(type *k, uint64_t v) \
+{ \
+ k->field &= ~(~((uint64_t) ~0 << size) << offset); \
+ k->field |= v << offset; \
+}
+
+#define DECLARE_HEAP(type, name) \
+ struct { \
+ size_t size, used; \
+ type *data; \
+ } name
+
+#define init_heap(heap, _size, gfp) \
+({ \
+ size_t _bytes; \
+ (heap)->used = 0; \
+ (heap)->size = (_size); \
+ _bytes = (heap)->size * sizeof(*(heap)->data); \
+ (heap)->data = NULL; \
+ if (_bytes < KMALLOC_MAX_SIZE) \
+ (heap)->data = kmalloc(_bytes, (gfp)); \
+ if ((!(heap)->data) && ((gfp) & GFP_KERNEL)) \
+ (heap)->data = vmalloc(_bytes); \
+ (heap)->data; \
+})
+
+#define free_heap(heap) \
+do { \
+ if (is_vmalloc_addr((heap)->data)) \
+ vfree((heap)->data); \
+ else \
+ kfree((heap)->data); \
+ (heap)->data = NULL; \
+} while (0)
+
+#define heap_swap(h, i, j) swap((h)->data[i], (h)->data[j])
+
+#define heap_sift(h, i, cmp) \
+do { \
+ size_t _r, _j = i; \
+ \
+ for (; _j * 2 + 1 < (h)->used; _j = _r) { \
+ _r = _j * 2 + 1; \
+ if (_r + 1 < (h)->used && \
+ cmp((h)->data[_r], (h)->data[_r + 1])) \
+ _r++; \
+ \
+ if (cmp((h)->data[_r], (h)->data[_j])) \
+ break; \
+ heap_swap(h, _r, _j); \
+ } \
+} while (0)
+
+#define heap_sift_down(h, i, cmp) \
+do { \
+ while (i) { \
+ size_t p = (i - 1) / 2; \
+ if (cmp((h)->data[i], (h)->data[p])) \
+ break; \
+ heap_swap(h, i, p); \
+ i = p; \
+ } \
+} while (0)
+
+#define heap_add(h, d, cmp) \
+({ \
+ bool _r = !heap_full(h); \
+ if (_r) { \
+ size_t _i = (h)->used++; \
+ (h)->data[_i] = d; \
+ \
+ heap_sift_down(h, _i, cmp); \
+ heap_sift(h, _i, cmp); \
+ } \
+ _r; \
+})
+
+#define heap_pop(h, d, cmp) \
+({ \
+ bool _r = (h)->used; \
+ if (_r) { \
+ (d) = (h)->data[0]; \
+ (h)->used--; \
+ heap_swap(h, 0, (h)->used); \
+ heap_sift(h, 0, cmp); \
+ } \
+ _r; \
+})
+
+#define heap_peek(h) ((h)->size ? (h)->data[0] : NULL)
+
+#define heap_full(h) ((h)->used == (h)->size)
+
+#define DECLARE_FIFO(type, name) \
+ struct { \
+ size_t front, back, size, mask; \
+ type *data; \
+ } name
+
+#define fifo_for_each(c, fifo, iter) \
+ for (iter = (fifo)->front; \
+ c = (fifo)->data[iter], iter != (fifo)->back; \
+ iter = (iter + 1) & (fifo)->mask)
+
+#define __init_fifo(fifo, gfp) \
+({ \
+ size_t _allocated_size, _bytes; \
+ BUG_ON(!(fifo)->size); \
+ \
+ _allocated_size = roundup_pow_of_two((fifo)->size + 1); \
+ _bytes = _allocated_size * sizeof(*(fifo)->data); \
+ \
+ (fifo)->mask = _allocated_size - 1; \
+ (fifo)->front = (fifo)->back = 0; \
+ (fifo)->data = NULL; \
+ \
+ if (_bytes < KMALLOC_MAX_SIZE) \
+ (fifo)->data = kmalloc(_bytes, (gfp)); \
+ if ((!(fifo)->data) && ((gfp) & GFP_KERNEL)) \
+ (fifo)->data = vmalloc(_bytes); \
+ (fifo)->data; \
+})
+
+#define init_fifo_exact(fifo, _size, gfp) \
+({ \
+ (fifo)->size = (_size); \
+ __init_fifo(fifo, gfp); \
+})
+
+#define init_fifo(fifo, _size, gfp) \
+({ \
+ (fifo)->size = (_size); \
+ if ((fifo)->size > 4) \
+ (fifo)->size = roundup_pow_of_two((fifo)->size) - 1; \
+ __init_fifo(fifo, gfp); \
+})
+
+#define free_fifo(fifo) \
+do { \
+ if (is_vmalloc_addr((fifo)->data)) \
+ vfree((fifo)->data); \
+ else \
+ kfree((fifo)->data); \
+ (fifo)->data = NULL; \
+} while (0)
+
+#define fifo_used(fifo) (((fifo)->back - (fifo)->front) & (fifo)->mask)
+#define fifo_free(fifo) ((fifo)->size - fifo_used(fifo))
+
+#define fifo_empty(fifo) (!fifo_used(fifo))
+#define fifo_full(fifo) (!fifo_free(fifo))
+
+#define fifo_front(fifo) ((fifo)->data[(fifo)->front])
+#define fifo_back(fifo) \
+ ((fifo)->data[((fifo)->back - 1) & (fifo)->mask])
+
+#define fifo_idx(fifo, p) (((p) - &fifo_front(fifo)) & (fifo)->mask)
+
+#define fifo_push_back(fifo, i) \
+({ \
+ bool _r = !fifo_full((fifo)); \
+ if (_r) { \
+ (fifo)->data[(fifo)->back++] = (i); \
+ (fifo)->back &= (fifo)->mask; \
+ } \
+ _r; \
+})
+
+#define fifo_pop_front(fifo, i) \
+({ \
+ bool _r = !fifo_empty((fifo)); \
+ if (_r) { \
+ (i) = (fifo)->data[(fifo)->front++]; \
+ (fifo)->front &= (fifo)->mask; \
+ } \
+ _r; \
+})
+
+#define fifo_push_front(fifo, i) \
+({ \
+ bool _r = !fifo_full((fifo)); \
+ if (_r) { \
+ --(fifo)->front; \
+ (fifo)->front &= (fifo)->mask; \
+ (fifo)->data[(fifo)->front] = (i); \
+ } \
+ _r; \
+})
+
+#define fifo_pop_back(fifo, i) \
+({ \
+ bool _r = !fifo_empty((fifo)); \
+ if (_r) { \
+ --(fifo)->back; \
+ (fifo)->back &= (fifo)->mask; \
+ (i) = (fifo)->data[(fifo)->back]; \
+ } \
+ _r; \
+})
+
+#define fifo_push(fifo, i) fifo_push_back(fifo, (i))
+#define fifo_pop(fifo, i) fifo_pop_front(fifo, (i))
+
+#define fifo_swap(l, r) \
+do { \
+ swap((l)->front, (r)->front); \
+ swap((l)->back, (r)->back); \
+ swap((l)->size, (r)->size); \
+ swap((l)->mask, (r)->mask); \
+ swap((l)->data, (r)->data); \
+} while (0)
+
+#define fifo_move(dest, src) \
+do { \
+ typeof(*((dest)->data)) _t; \
+ while (!fifo_full(dest) && \
+ fifo_pop(src, _t)) \
+ fifo_push(dest, _t); \
+} while (0)
+
+/*
+ * Simple array-based allocator - preallocates a fixed number of elements, and
+ * you can never allocate more than that; it also does no locking.
+ *
+ * Handy because if you know you only need a fixed number of elements you don't
+ * have to worry about memory allocation failure, and sometimes a mempool isn't
+ * what you want.
+ *
+ * We treat the free elements as entries in a singly linked list, and the
+ * freelist as a stack - allocating and freeing push and pop off the freelist.
+ */
+
+#define DECLARE_ARRAY_ALLOCATOR(type, name, size) \
+ struct { \
+ type *freelist; \
+ type data[size]; \
+ } name
+
+#define array_alloc(array) \
+({ \
+ typeof((array)->freelist) _ret = (array)->freelist; \
+ \
+ if (_ret) \
+ (array)->freelist = *((typeof((array)->freelist) *) _ret);\
+ \
+ _ret; \
+})
+
+#define array_free(array, ptr) \
+do { \
+ typeof((array)->freelist) _ptr = ptr; \
+ \
+ *((typeof((array)->freelist) *) _ptr) = (array)->freelist; \
+ (array)->freelist = _ptr; \
+} while (0)
+
+#define array_allocator_init(array) \
+do { \
+ typeof((array)->freelist) _i; \
+ \
+ BUILD_BUG_ON(sizeof((array)->data[0]) < sizeof(void *)); \
+ (array)->freelist = NULL; \
+ \
+ for (_i = (array)->data; \
+ _i < (array)->data + ARRAY_SIZE((array)->data); \
+ _i++) \
+ array_free(array, _i); \
+} while (0)
+
+#define array_freelist_empty(array) ((array)->freelist == NULL)
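A hedged sketch of how these allocator macros are meant to be used, based only on the definitions above; the `foo`, `foo_cache` and `foo_cache_demo` names are invented for illustration:

/* Illustrative only: a fixed pool of 16 'struct foo' objects. */
struct foo {
	uint64_t	key;	/* must be at least pointer-sized overall */
	int		value;
};

struct foo_cache {
	DECLARE_ARRAY_ALLOCATOR(struct foo, pool, 16);
};

static void foo_cache_demo(struct foo_cache *c)
{
	struct foo *f;

	/* Threads every element onto the freelist; call before any array_alloc(). */
	array_allocator_init(&c->pool);

	f = array_alloc(&c->pool);	/* NULL once all 16 are in use */
	if (!f)
		return;

	f->value = 42;
	/* ... use f; nothing here is locked, the caller must serialize ... */

	array_free(&c->pool, f);	/* push f back onto the freelist */
}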
+
+#define ANYSINT_MAX(t) \
+ ((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)
+
+int bch_strtoint_h(const char *, int *);
+int bch_strtouint_h(const char *, unsigned int *);
+int bch_strtoll_h(const char *, long long *);
+int bch_strtoull_h(const char *, unsigned long long *);
+
+static inline int bch_strtol_h(const char *cp, long *res)
+{
+#if BITS_PER_LONG == 32
+ return bch_strtoint_h(cp, (int *) res);
+#else
+ return bch_strtoll_h(cp, (long long *) res);
+#endif
+}
+
+static inline int bch_strtoul_h(const char *cp, long *res)
+{
+#if BITS_PER_LONG == 32
+ return bch_strtouint_h(cp, (unsigned int *) res);
+#else
+ return bch_strtoull_h(cp, (unsigned long long *) res);
+#endif
+}
+
+#define strtoi_h(cp, res) \
+ (__builtin_types_compatible_p(typeof(*res), int) \
+ ? bch_strtoint_h(cp, (void *) res) \
+ : __builtin_types_compatible_p(typeof(*res), long) \
+ ? bch_strtol_h(cp, (void *) res) \
+ : __builtin_types_compatible_p(typeof(*res), long long) \
+ ? bch_strtoll_h(cp, (void *) res) \
+ : __builtin_types_compatible_p(typeof(*res), unsigned int) \
+ ? bch_strtouint_h(cp, (void *) res) \
+ : __builtin_types_compatible_p(typeof(*res), unsigned long) \
+ ? bch_strtoul_h(cp, (void *) res) \
+ : __builtin_types_compatible_p(typeof(*res), unsigned long long)\
+ ? bch_strtoull_h(cp, (void *) res) : -EINVAL)
+
+#define strtoul_safe(cp, var) \
+({ \
+ unsigned long _v; \
+ int _r = kstrtoul(cp, 10, &_v); \
+ if (!_r) \
+ var = _v; \
+ _r; \
+})
+
+#define strtoul_safe_clamp(cp, var, min, max) \
+({ \
+ unsigned long _v; \
+ int _r = kstrtoul(cp, 10, &_v); \
+ if (!_r) \
+ var = clamp_t(typeof(var), _v, min, max); \
+ _r; \
+})
+
+#define snprint(buf, size, var) \
+ snprintf(buf, size, \
+ __builtin_types_compatible_p(typeof(var), int) \
+ ? "%i\n" : \
+ __builtin_types_compatible_p(typeof(var), unsigned) \
+ ? "%u\n" : \
+ __builtin_types_compatible_p(typeof(var), long) \
+ ? "%li\n" : \
+ __builtin_types_compatible_p(typeof(var), unsigned long)\
+ ? "%lu\n" : \
+ __builtin_types_compatible_p(typeof(var), int64_t) \
+ ? "%lli\n" : \
+ __builtin_types_compatible_p(typeof(var), uint64_t) \
+ ? "%llu\n" : \
+ __builtin_types_compatible_p(typeof(var), const char *) \
+ ? "%s\n" : "%i\n", var)
+
+ssize_t bch_hprint(char *buf, int64_t v);
+
+bool bch_is_zero(const char *p, size_t n);
+int bch_parse_uuid(const char *s, char *uuid);
+
+ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
+ size_t selected);
+
+ssize_t bch_read_string_list(const char *buf, const char * const list[]);
+
+struct time_stats {
+ /*
+ * All fields are in nanoseconds; the averages are EWMAs stored left-shifted
+ * by 8 bits.
+ */
+ uint64_t max_duration;
+ uint64_t average_duration;
+ uint64_t average_frequency;
+ uint64_t last;
+};
+
+void bch_time_stats_update(struct time_stats *stats, uint64_t time);
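As a rough illustration of the convention above (nanoseconds, EWMAs kept left-shifted by 8), a consumer would drop the fixed-point bits before converting to human units; the helper name below is hypothetical:

/* Illustrative only: report the smoothed duration in microseconds. */
static inline uint64_t example_avg_duration_us(const struct time_stats *stats)
{
	/* Undo the 8 fixed-point bits, then scale ns -> us. */
	return div_u64(stats->average_duration >> 8, NSEC_PER_USEC);
}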
+
+#define NSEC_PER_ns 1L
+#define NSEC_PER_us NSEC_PER_USEC
+#define NSEC_PER_ms NSEC_PER_MSEC
+#define NSEC_PER_sec NSEC_PER_SEC
+
+#define __print_time_stat(stats, name, stat, units) \
+ sysfs_print(name ## _ ## stat ## _ ## units, \
+ div_u64((stats)->stat >> 8, NSEC_PER_ ## units))
+
+#define sysfs_print_time_stats(stats, name, \
+ frequency_units, \
+ duration_units) \
+do { \
+ __print_time_stat(stats, name, \
+ average_frequency, frequency_units); \
+ __print_time_stat(stats, name, \
+ average_duration, duration_units); \
+ __print_time_stat(stats, name, \
+ max_duration, duration_units); \
+ \
+ sysfs_print(name ## _last_ ## frequency_units, (stats)->last \
+ ? div_s64(local_clock() - (stats)->last, \
+ NSEC_PER_ ## frequency_units) \
+ : -1LL); \
+} while (0)
+
+#define sysfs_time_stats_attribute(name, \
+ frequency_units, \
+ duration_units) \
+read_attribute(name ## _average_frequency_ ## frequency_units); \
+read_attribute(name ## _average_duration_ ## duration_units); \
+read_attribute(name ## _max_duration_ ## duration_units); \
+read_attribute(name ## _last_ ## frequency_units)
+
+#define sysfs_time_stats_attribute_list(name, \
+ frequency_units, \
+ duration_units) \
+&sysfs_ ## name ## _average_frequency_ ## frequency_units, \
+&sysfs_ ## name ## _average_duration_ ## duration_units, \
+&sysfs_ ## name ## _max_duration_ ## duration_units, \
+&sysfs_ ## name ## _last_ ## frequency_units,
+
+#define ewma_add(ewma, val, weight, factor) \
+({ \
+ (ewma) *= (weight) - 1; \
+ (ewma) += (val) << factor; \
+ (ewma) /= (weight); \
+ (ewma) >> factor; \
+})
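A small worked example of the fixed-point EWMA update above, under the assumption (taken from the macro body) that the stored value is kept left-shifted by `factor` and the macro evaluates to the unshifted average; the wrapper and its numbers are purely illustrative:

/*
 * Illustrative only: smoothing a latency sample stream with weight 8 and
 * 3 fractional bits.  '*smoothed' must persist across calls and start at 0.
 */
static uint64_t example_smooth_latency(uint64_t *smoothed, uint64_t sample_ns)
{
	/*
	 * *smoothed holds the average left-shifted by 3; the macro's value is
	 * the unshifted average.  Starting from 0 with sample_ns == 800:
	 *   *smoothed = (0 * 7 + (800 << 3)) / 8 = 800, result = 800 >> 3 = 100,
	 * i.e. one sample moves the average 1/8th of the way towards it.
	 */
	return ewma_add(*smoothed, sample_ns, 8, 3);
}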
+
+struct ratelimit {
+ uint64_t next;
+ unsigned rate;
+};
+
+static inline void ratelimit_reset(struct ratelimit *d)
+{
+ d->next = local_clock();
+}
+
+unsigned bch_next_delay(struct ratelimit *d, uint64_t done);
+
+#define __DIV_SAFE(n, d, zero) \
+({ \
+ typeof(n) _n = (n); \
+ typeof(d) _d = (d); \
+ _d ? _n / _d : zero; \
+})
+
+#define DIV_SAFE(n, d) __DIV_SAFE(n, d, 0)
+
+#define container_of_or_null(ptr, type, member) \
+({ \
+ typeof(ptr) _ptr = ptr; \
+ _ptr ? container_of(_ptr, type, member) : NULL; \
+})
+
+#define RB_INSERT(root, new, member, cmp) \
+({ \
+ __label__ dup; \
+ struct rb_node **n = &(root)->rb_node, *parent = NULL; \
+ typeof(new) this; \
+ int res, ret = -1; \
+ \
+ while (*n) { \
+ parent = *n; \
+ this = container_of(*n, typeof(*(new)), member); \
+ res = cmp(new, this); \
+ if (!res) \
+ goto dup; \
+ n = res < 0 \
+ ? &(*n)->rb_left \
+ : &(*n)->rb_right; \
+ } \
+ \
+ rb_link_node(&(new)->member, parent, n); \
+ rb_insert_color(&(new)->member, root); \
+ ret = 0; \
+dup: \
+ ret; \
+})
+
+#define RB_SEARCH(root, search, member, cmp) \
+({ \
+ struct rb_node *n = (root)->rb_node; \
+ typeof(&(search)) this, ret = NULL; \
+ int res; \
+ \
+ while (n) { \
+ this = container_of(n, typeof(search), member); \
+ res = cmp(&(search), this); \
+ if (!res) { \
+ ret = this; \
+ break; \
+ } \
+ n = res < 0 \
+ ? n->rb_left \
+ : n->rb_right; \
+ } \
+ ret; \
+})
+
+#define RB_GREATER(root, search, member, cmp) \
+({ \
+ struct rb_node *n = (root)->rb_node; \
+ typeof(&(search)) this, ret = NULL; \
+ int res; \
+ \
+ while (n) { \
+ this = container_of(n, typeof(search), member); \
+ res = cmp(&(search), this); \
+ if (res < 0) { \
+ ret = this; \
+ n = n->rb_left; \
+ } else \
+ n = n->rb_right; \
+ } \
+ ret; \
+})
+
+#define RB_FIRST(root, type, member) \
+ container_of_or_null(rb_first(root), type, member)
+
+#define RB_LAST(root, type, member) \
+ container_of_or_null(rb_last(root), type, member)
+
+#define RB_NEXT(ptr, member) \
+ container_of_or_null(rb_next(&(ptr)->member), typeof(*ptr), member)
+
+#define RB_PREV(ptr, member) \
+ container_of_or_null(rb_prev(&(ptr)->member), typeof(*ptr), member)
+
+/* Does linear interpolation between powers of two */
+static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
+{
+ unsigned fract = x & ~(~0 << fract_bits);
+
+ x >>= fract_bits;
+ x = 1 << x;
+ x += (x * fract) >> fract_bits;
+
+ return x;
+}
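A quick worked example of the interpolation above, with hypothetical inputs:

/*
 * Illustrative only: fract_exp_two(x, 3) treats the low 3 bits of x as a
 * fractional exponent.  For x = 20 (binary 10.100, i.e. exponent 2.5):
 *
 *   fract = 20 & 7        = 4
 *   x     = 20 >> 3       = 2
 *   x     = 1 << 2        = 4       (2^2, the lower power of two)
 *   x    += (4 * 4) >> 3  = 2       (halfway towards 2^3 = 8)
 *
 * so the result is 6, a linear interpolation between 4 and 8 rather than
 * the exact 2^2.5 ~= 5.66.
 */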
+
+#define bio_end(bio) ((bio)->bi_sector + bio_sectors(bio))
+
+void bch_bio_map(struct bio *bio, void *base);
+
+int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp);
+
+static inline sector_t bdev_sectors(struct block_device *bdev)
+{
+ return bdev->bd_inode->i_size >> 9;
+}
+
+#define closure_bio_submit(bio, cl, dev) \
+do { \
+ closure_get(cl); \
+ bch_generic_make_request(bio, &(dev)->bio_split_hook); \
+} while (0)
+
+uint64_t bch_crc64_update(uint64_t, const void *, size_t);
+uint64_t bch_crc64(const void *, size_t);
+
+#endif /* _BCACHE_UTIL_H */
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
new file mode 100644
index 0000000..93e7e31
--- /dev/null
+++ b/drivers/md/bcache/writeback.c
@@ -0,0 +1,414 @@
+/*
+ * background writeback - scan btree for dirty data and write it to the backing
+ * device
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+
+static struct workqueue_struct *dirty_wq;
+
+static void read_dirty(struct closure *);
+
+struct dirty_io {
+ struct closure cl;
+ struct cached_dev *dc;
+ struct bio bio;
+};
+
+/* Rate limiting */
+
+static void __update_writeback_rate(struct cached_dev *dc)
+{
+ struct cache_set *c = dc->disk.c;
+ uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
+ uint64_t cache_dirty_target =
+ div_u64(cache_sectors * dc->writeback_percent, 100);
+
+ int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
+ c->cached_dev_sectors);
+
+ /* PD controller */
+
+ int change = 0;
+ int64_t error;
+ int64_t dirty = atomic_long_read(&dc->disk.sectors_dirty);
+ int64_t derivative = dirty - dc->disk.sectors_dirty_last;
+
+ dc->disk.sectors_dirty_last = dirty;
+
+ derivative *= dc->writeback_rate_d_term;
+ derivative = clamp(derivative, -dirty, dirty);
+
+ derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
+ dc->writeback_rate_d_smooth, 0);
+
+ /* Avoid divide by zero */
+ if (!target)
+ goto out;
+
+ error = div64_s64((dirty + derivative - target) << 8, target);
+
+ change = div_s64((dc->writeback_rate.rate * error) >> 8,
+ dc->writeback_rate_p_term_inverse);
+
+ /* Don't increase writeback rate if the device isn't keeping up */
+ if (change > 0 &&
+ time_after64(local_clock(),
+ dc->writeback_rate.next + 10 * NSEC_PER_MSEC))
+ change = 0;
+
+ dc->writeback_rate.rate =
+ clamp_t(int64_t, dc->writeback_rate.rate + change,
+ 1, NSEC_PER_MSEC);
+out:
+ dc->writeback_rate_derivative = derivative;
+ dc->writeback_rate_change = change;
+ dc->writeback_rate_target = target;
+
+ schedule_delayed_work(&dc->writeback_rate_update,
+ dc->writeback_rate_update_seconds * HZ);
+}
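A rough summary of the arithmetic above, restating only what the code does (the grouping into "proportional" and "derivative" terms follows the PD controller comment in the function):

/*
 * Illustrative summary of __update_writeback_rate(), in 8-bit fixed point:
 *
 *   target     = cache_sectors * writeback_percent / 100, scaled by this
 *                backing device's share of cached_dev_sectors
 *   derivative = ewma of (dirty - dirty_last) * writeback_rate_d_term,
 *                clamped to +/- dirty
 *   error      = ((dirty + derivative - target) << 8) / target
 *   change     = (rate * error >> 8) / writeback_rate_p_term_inverse
 *   rate       = clamp(rate + change, 1, NSEC_PER_MSEC)
 *
 * with the extra rule that 'change' is dropped when positive but the device
 * has not kept up with the previous rate, so writeback speeds up as dirty
 * data (plus its trend) exceeds the target and slows down below it.
 */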
+
+static void update_writeback_rate(struct work_struct *work)
+{
+ struct cached_dev *dc = container_of(to_delayed_work(work),
+ struct cached_dev,
+ writeback_rate_update);
+
+ down_read(&dc->writeback_lock);
+
+ if (atomic_read(&dc->has_dirty) &&
+ dc->writeback_percent)
+ __update_writeback_rate(dc);
+
+ up_read(&dc->writeback_lock);
+}
+
+static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
+{
+ if (atomic_read(&dc->disk.detaching) ||
+ !dc->writeback_percent)
+ return 0;
+
+ return bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
+}
+
+/* Background writeback */
+
+static bool dirty_pred(struct keybuf *buf, struct bkey *k)
+{
+ return KEY_DIRTY(k);
+}
+
+static void dirty_init(struct keybuf_key *w)
+{
+ struct dirty_io *io = w->private;
+ struct bio *bio = &io->bio;
+
+ bio_init(bio);
+ if (!io->dc->writeback_percent)
+ bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
+
+ bio->bi_size = KEY_SIZE(&w->key) << 9;
+ bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
+ bio->bi_private = w;
+ bio->bi_io_vec = bio->bi_inline_vecs;
+ bch_bio_map(bio, NULL);
+}
+
+static void refill_dirty(struct closure *cl)
+{
+ struct cached_dev *dc = container_of(cl, struct cached_dev,
+ writeback.cl);
+ struct keybuf *buf = &dc->writeback_keys;
+ bool searched_from_start = false;
+ struct bkey end = MAX_KEY;
+ SET_KEY_INODE(&end, dc->disk.id);
+
+ if (!atomic_read(&dc->disk.detaching) &&
+ !dc->writeback_running)
+ closure_return(cl);
+
+ down_write(&dc->writeback_lock);
+
+ if (!atomic_read(&dc->has_dirty)) {
+ SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
+ bch_write_bdev_super(dc, NULL);
+
+ up_write(&dc->writeback_lock);
+ closure_return(cl);
+ }
+
+ if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
+ buf->last_scanned = KEY(dc->disk.id, 0, 0);
+ searched_from_start = true;
+ }
+
+ bch_refill_keybuf(dc->disk.c, buf, &end);
+
+ if (bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start) {
+ /* Searched the entire btree - delay awhile */
+
+ if (RB_EMPTY_ROOT(&buf->keys)) {
+ atomic_set(&dc->has_dirty, 0);
+ cached_dev_put(dc);
+ }
+
+ if (!atomic_read(&dc->disk.detaching))
+ closure_delay(&dc->writeback, dc->writeback_delay * HZ);
+ }
+
+ up_write(&dc->writeback_lock);
+
+ ratelimit_reset(&dc->writeback_rate);
+
+ /* Punt to workqueue only so we don't recurse and blow the stack */
+ continue_at(cl, read_dirty, dirty_wq);
+}
+
+void bch_writeback_queue(struct cached_dev *dc)
+{
+ if (closure_trylock(&dc->writeback.cl, &dc->disk.cl)) {
+ if (!atomic_read(&dc->disk.detaching))
+ closure_delay(&dc->writeback, dc->writeback_delay * HZ);
+
+ continue_at(&dc->writeback.cl, refill_dirty, dirty_wq);
+ }
+}
+
+void bch_writeback_add(struct cached_dev *dc, unsigned sectors)
+{
+ atomic_long_add(sectors, &dc->disk.sectors_dirty);
+
+ if (!atomic_read(&dc->has_dirty) &&
+ !atomic_xchg(&dc->has_dirty, 1)) {
+ atomic_inc(&dc->count);
+
+ if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
+ SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
+ /* XXX: should do this synchronously */
+ bch_write_bdev_super(dc, NULL);
+ }
+
+ bch_writeback_queue(dc);
+
+ if (dc->writeback_percent)
+ schedule_delayed_work(&dc->writeback_rate_update,
+ dc->writeback_rate_update_seconds * HZ);
+ }
+}
+
+/* Background writeback - IO loop */
+
+static void dirty_io_destructor(struct closure *cl)
+{
+ struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+ kfree(io);
+}
+
+static void write_dirty_finish(struct closure *cl)
+{
+ struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+ struct keybuf_key *w = io->bio.bi_private;
+ struct cached_dev *dc = io->dc;
+ struct bio_vec *bv = bio_iovec_idx(&io->bio, io->bio.bi_vcnt);
+
+ while (bv-- != io->bio.bi_io_vec)
+ __free_page(bv->bv_page);
+
+ /* This is kind of a dumb way of signalling errors. */
+ if (KEY_DIRTY(&w->key)) {
+ unsigned i;
+ struct btree_op op;
+ bch_btree_op_init_stack(&op);
+
+ op.type = BTREE_REPLACE;
+ bkey_copy(&op.replace, &w->key);
+
+ SET_KEY_DIRTY(&w->key, false);
+ bch_keylist_add(&op.keys, &w->key);
+
+ for (i = 0; i < KEY_PTRS(&w->key); i++)
+ atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
+
+ pr_debug("clearing %s", pkey(&w->key));
+ bch_btree_insert(&op, dc->disk.c);
+ closure_sync(&op.cl);
+
+ atomic_long_inc(op.insert_collision
+ ? &dc->disk.c->writeback_keys_failed
+ : &dc->disk.c->writeback_keys_done);
+ }
+
+ bch_keybuf_del(&dc->writeback_keys, w);
+ atomic_dec_bug(&dc->in_flight);
+
+ closure_wake_up(&dc->writeback_wait);
+
+ closure_return_with_destructor(cl, dirty_io_destructor);
+}
+
+static void dirty_endio(struct bio *bio, int error)
+{
+ struct keybuf_key *w = bio->bi_private;
+ struct dirty_io *io = w->private;
+
+ if (error)
+ SET_KEY_DIRTY(&w->key, false);
+
+ closure_put(&io->cl);
+}
+
+static void write_dirty(struct closure *cl)
+{
+ struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+ struct keybuf_key *w = io->bio.bi_private;
+
+ dirty_init(w);
+ io->bio.bi_rw = WRITE;
+ io->bio.bi_sector = KEY_START(&w->key);
+ io->bio.bi_bdev = io->dc->bdev;
+ io->bio.bi_end_io = dirty_endio;
+
+ trace_bcache_write_dirty(&io->bio);
+ closure_bio_submit(&io->bio, cl, &io->dc->disk);
+
+ continue_at(cl, write_dirty_finish, dirty_wq);
+}
+
+static void read_dirty_endio(struct bio *bio, int error)
+{
+ struct keybuf_key *w = bio->bi_private;
+ struct dirty_io *io = w->private;
+
+ bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
+ error, "reading dirty data from cache");
+
+ dirty_endio(bio, error);
+}
+
+static void read_dirty_submit(struct closure *cl)
+{
+ struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+
+ trace_bcache_read_dirty(&io->bio);
+ closure_bio_submit(&io->bio, cl, &io->dc->disk);
+
+ continue_at(cl, write_dirty, dirty_wq);
+}
+
+static void read_dirty(struct closure *cl)
+{
+ struct cached_dev *dc = container_of(cl, struct cached_dev,
+ writeback.cl);
+ unsigned delay = writeback_delay(dc, 0);
+ struct keybuf_key *w;
+ struct dirty_io *io;
+
+ /*
+ * XXX: if we error, background writeback just spins. Should use some
+ * mempools.
+ */
+
+ while (1) {
+ w = bch_keybuf_next(&dc->writeback_keys);
+ if (!w)
+ break;
+
+ BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));
+
+ if (delay > 0 &&
+ (KEY_START(&w->key) != dc->last_read ||
+ jiffies_to_msecs(delay) > 50)) {
+ w->private = NULL;
+
+ closure_delay(&dc->writeback, delay);
+ continue_at(cl, read_dirty, dirty_wq);
+ }
+
+ dc->last_read = KEY_OFFSET(&w->key);
+
+ io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
+ * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
+ GFP_KERNEL);
+ if (!io)
+ goto err;
+
+ w->private = io;
+ io->dc = dc;
+
+ dirty_init(w);
+ io->bio.bi_sector = PTR_OFFSET(&w->key, 0);
+ io->bio.bi_bdev = PTR_CACHE(dc->disk.c,
+ &w->key, 0)->bdev;
+ io->bio.bi_rw = READ;
+ io->bio.bi_end_io = read_dirty_endio;
+
+ if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
+ goto err_free;
+
+ pr_debug("%s", pkey(&w->key));
+
+ closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl);
+
+ delay = writeback_delay(dc, KEY_SIZE(&w->key));
+
+ atomic_inc(&dc->in_flight);
+
+ if (!closure_wait_event(&dc->writeback_wait, cl,
+ atomic_read(&dc->in_flight) < 64))
+ continue_at(cl, read_dirty, dirty_wq);
+ }
+
+ if (0) {
+err_free:
+ kfree(w->private);
+err:
+ bch_keybuf_del(&dc->writeback_keys, w);
+ }
+
+ refill_dirty(cl);
+}
+
+void bch_writeback_init_cached_dev(struct cached_dev *dc)
+{
+ closure_init_unlocked(&dc->writeback);
+ init_rwsem(&dc->writeback_lock);
+
+ bch_keybuf_init(&dc->writeback_keys, dirty_pred);
+
+ dc->writeback_metadata = true;
+ dc->writeback_running = true;
+ dc->writeback_percent = 10;
+ dc->writeback_delay = 30;
+ dc->writeback_rate.rate = 1024;
+
+ dc->writeback_rate_update_seconds = 30;
+ dc->writeback_rate_d_term = 16;
+ dc->writeback_rate_p_term_inverse = 64;
+ dc->writeback_rate_d_smooth = 8;
+
+ INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
+ schedule_delayed_work(&dc->writeback_rate_update,
+ dc->writeback_rate_update_seconds * HZ);
+}
+
+void bch_writeback_exit(void)
+{
+ if (dirty_wq)
+ destroy_workqueue(dirty_wq);
+}
+
+int __init bch_writeback_init(void)
+{
+ dirty_wq = create_singlethread_workqueue("bcache_writeback");
+ if (!dirty_wq)
+ return -ENOMEM;
+
+ return 0;
+}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 13c1548..6d2d41a 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -858,8 +858,7 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
unsigned int i;
struct bio_vec *bv;
- for (i = 0; i < clone->bi_vcnt; i++) {
- bv = bio_iovec_idx(clone, i);
+ bio_for_each_segment_all(bv, clone, i) {
BUG_ON(!bv->bv_page);
mempool_free(bv->bv_page, cc->page_pool);
bv->bv_page = NULL;
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index d053098..699b5be 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -458,7 +458,7 @@ static void map_region(struct dm_io_region *io, struct mirror *m,
{
io->bdev = m->dev->bdev;
io->sector = map_sector(m, bio);
- io->count = bio->bi_size >> 9;
+ io->count = bio_sectors(bio);
}
static void hold_bio(struct mirror_set *ms, struct bio *bio)
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index d8837d3..ea5e878 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -258,7 +258,7 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
sector_t begin, end;
stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin);
- stripe_map_range_sector(sc, bio->bi_sector + bio_sectors(bio),
+ stripe_map_range_sector(sc, bio_end_sector(bio),
target_stripe, &end);
if (begin < end) {
bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index a746f1d..b948fd8 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -501,7 +501,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
return -EIO;
}
- if ((bio->bi_sector + bio_sectors(bio)) >>
+ if (bio_end_sector(bio) >>
(v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
DMERR_LIMIT("io out of range");
return -EIO;
@@ -519,7 +519,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
bio->bi_end_io = verity_end_io;
bio->bi_private = io;
- io->io_vec_size = bio->bi_vcnt - bio->bi_idx;
+ io->io_vec_size = bio_segments(bio);
if (io->io_vec_size < DM_VERITY_IO_VEC_INLINE)
io->io_vec = io->io_vec_inline;
else
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 9a0bdad..d5370a9 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -339,7 +339,7 @@ out:
return md ? 0 : -ENXIO;
}
-static int dm_blk_close(struct gendisk *disk, fmode_t mode)
+static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
struct mapped_device *md = disk->private_data;
@@ -349,8 +349,6 @@ static int dm_blk_close(struct gendisk *disk, fmode_t mode)
dm_put(md);
spin_unlock(&_minor_lock);
-
- return 0;
}
int dm_open_count(struct mapped_device *md)
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 5e7dc77..3193aef 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -185,8 +185,7 @@ static void make_request(struct mddev *mddev, struct bio *bio)
return;
}
- if (check_sector(conf, bio->bi_sector, bio->bi_sector+(bio->bi_size>>9),
- WRITE))
+ if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), WRITE))
failit = 1;
if (check_mode(conf, WritePersistent)) {
add_sector(conf, bio->bi_sector, WritePersistent);
@@ -196,8 +195,7 @@ static void make_request(struct mddev *mddev, struct bio *bio)
failit = 1;
} else {
/* read request */
- if (check_sector(conf, bio->bi_sector, bio->bi_sector + (bio->bi_size>>9),
- READ))
+ if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), READ))
failit = 1;
if (check_mode(conf, ReadTransient))
failit = 1;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 2101483..f03fabd 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -317,8 +317,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
bio_io_error(bio);
return;
}
- if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
- tmp_dev->end_sector)) {
+ if (unlikely(bio_end_sector(bio) > tmp_dev->end_sector)) {
/* This bio crosses a device boundary, so we have to
* split it.
*/
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4c74424..681d109 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -197,21 +197,12 @@ void md_trim_bio(struct bio *bio, int offset, int size)
if (offset == 0 && size == bio->bi_size)
return;
- bio->bi_sector += offset;
- bio->bi_size = size;
- offset <<= 9;
clear_bit(BIO_SEG_VALID, &bio->bi_flags);
- while (bio->bi_idx < bio->bi_vcnt &&
- bio->bi_io_vec[bio->bi_idx].bv_len <= offset) {
- /* remove this whole bio_vec */
- offset -= bio->bi_io_vec[bio->bi_idx].bv_len;
- bio->bi_idx++;
- }
- if (bio->bi_idx < bio->bi_vcnt) {
- bio->bi_io_vec[bio->bi_idx].bv_offset += offset;
- bio->bi_io_vec[bio->bi_idx].bv_len -= offset;
- }
+ bio_advance(bio, offset << 9);
+
+ bio->bi_size = size;
+
/* avoid any complications with bi_idx being non-zero*/
if (bio->bi_idx) {
memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
@@ -6674,15 +6665,13 @@ static int md_open(struct block_device *bdev, fmode_t mode)
return err;
}
-static int md_release(struct gendisk *disk, fmode_t mode)
+static void md_release(struct gendisk *disk, fmode_t mode)
{
struct mddev *mddev = disk->private_data;
BUG_ON(!mddev);
atomic_dec(&mddev->openers);
mddev_put(mddev);
-
- return 0;
}
static int md_media_changed(struct gendisk *disk)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 0505452..fcf65e5 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -502,11 +502,11 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
{
if (likely(is_power_of_2(chunk_sects))) {
return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
- + (bio->bi_size >> 9));
+ + bio_sectors(bio));
} else{
sector_t sector = bio->bi_sector;
return chunk_sects >= (sector_div(sector, chunk_sects)
- + (bio->bi_size >> 9));
+ + bio_sectors(bio));
}
}
@@ -527,8 +527,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
sector_t sector = bio->bi_sector;
struct bio_pair *bp;
/* Sanity check -- queue functions should prevent this happening */
- if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
- bio->bi_idx != 0)
+ if (bio_segments(bio) > 1)
goto bad_map;
/* This is a one page bio that upper layers
* refuse to split for us, so we need to split it.
@@ -567,7 +566,7 @@ bad_map:
printk("md/raid0:%s: make_request bug: can't convert block across chunks"
" or bigger than %dk %llu %d\n",
mdname(mddev), chunk_sects / 2,
- (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
+ (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
bio_io_error(bio);
return;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 851023e..5595118 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -92,7 +92,6 @@ static void r1bio_pool_free(void *r1_bio, void *data)
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
struct pool_info *pi = data;
- struct page *page;
struct r1bio *r1_bio;
struct bio *bio;
int i, j;
@@ -122,14 +121,10 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
j = 1;
while(j--) {
bio = r1_bio->bios[j];
- for (i = 0; i < RESYNC_PAGES; i++) {
- page = alloc_page(gfp_flags);
- if (unlikely(!page))
- goto out_free_pages;
+ bio->bi_vcnt = RESYNC_PAGES;
- bio->bi_io_vec[i].bv_page = page;
- bio->bi_vcnt = i+1;
- }
+ if (bio_alloc_pages(bio, gfp_flags))
+ goto out_free_bio;
}
/* If not user-requests, copy the page pointers to all bios */
if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
@@ -143,11 +138,6 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
return r1_bio;
-out_free_pages:
- for (j=0 ; j < pi->raid_disks; j++)
- for (i=0; i < r1_bio->bios[j]->bi_vcnt ; i++)
- put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
- j = -1;
out_free_bio:
while (++j < pi->raid_disks)
bio_put(r1_bio->bios[j]);
@@ -267,7 +257,7 @@ static void raid_end_bio_io(struct r1bio *r1_bio)
(bio_data_dir(bio) == WRITE) ? "write" : "read",
(unsigned long long) bio->bi_sector,
(unsigned long long) bio->bi_sector +
- (bio->bi_size >> 9) - 1);
+ bio_sectors(bio) - 1);
call_bio_endio(r1_bio);
}
@@ -458,7 +448,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
" %llu-%llu\n",
(unsigned long long) mbio->bi_sector,
(unsigned long long) mbio->bi_sector +
- (mbio->bi_size >> 9) - 1);
+ bio_sectors(mbio) - 1);
call_bio_endio(r1_bio);
}
}
@@ -925,7 +915,7 @@ static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
if (unlikely(!bvecs))
return;
- bio_for_each_segment(bvec, bio, i) {
+ bio_for_each_segment_all(bvec, bio, i) {
bvecs[i] = *bvec;
bvecs[i].bv_page = alloc_page(GFP_NOIO);
if (unlikely(!bvecs[i].bv_page))
@@ -1023,7 +1013,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
md_write_start(mddev, bio); /* wait on superblock update early */
if (bio_data_dir(bio) == WRITE &&
- bio->bi_sector + bio->bi_size/512 > mddev->suspend_lo &&
+ bio_end_sector(bio) > mddev->suspend_lo &&
bio->bi_sector < mddev->suspend_hi) {
/* As the suspend_* range is controlled by
* userspace, we want an interruptible
@@ -1034,7 +1024,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
flush_signals(current);
prepare_to_wait(&conf->wait_barrier,
&w, TASK_INTERRUPTIBLE);
- if (bio->bi_sector + bio->bi_size/512 <= mddev->suspend_lo ||
+ if (bio_end_sector(bio) <= mddev->suspend_lo ||
bio->bi_sector >= mddev->suspend_hi)
break;
schedule();
@@ -1054,7 +1044,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
r1_bio->master_bio = bio;
- r1_bio->sectors = bio->bi_size >> 9;
+ r1_bio->sectors = bio_sectors(bio);
r1_bio->state = 0;
r1_bio->mddev = mddev;
r1_bio->sector = bio->bi_sector;
@@ -1132,7 +1122,7 @@ read_again:
r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
r1_bio->master_bio = bio;
- r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+ r1_bio->sectors = bio_sectors(bio) - sectors_handled;
r1_bio->state = 0;
r1_bio->mddev = mddev;
r1_bio->sector = bio->bi_sector + sectors_handled;
@@ -1289,14 +1279,10 @@ read_again:
struct bio_vec *bvec;
int j;
- /* Yes, I really want the '__' version so that
- * we clear any unused pointer in the io_vec, rather
- * than leave them unchanged. This is important
- * because when we come to free the pages, we won't
- * know the original bi_idx, so we just free
- * them all
+ /*
+ * We trimmed the bio, so _all is legit
*/
- __bio_for_each_segment(bvec, mbio, j, 0)
+ bio_for_each_segment_all(bvec, mbio, j)
bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
atomic_inc(&r1_bio->behind_remaining);
@@ -1334,14 +1320,14 @@ read_again:
/* Mustn't call r1_bio_write_done before this next test,
* as it could result in the bio being freed.
*/
- if (sectors_handled < (bio->bi_size >> 9)) {
+ if (sectors_handled < bio_sectors(bio)) {
r1_bio_write_done(r1_bio);
/* We need another r1_bio. It has already been counted
* in bio->bi_phys_segments
*/
r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
r1_bio->master_bio = bio;
- r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+ r1_bio->sectors = bio_sectors(bio) - sectors_handled;
r1_bio->state = 0;
r1_bio->mddev = mddev;
r1_bio->sector = bio->bi_sector + sectors_handled;
@@ -1867,7 +1853,7 @@ static int process_checks(struct r1bio *r1_bio)
struct bio *sbio = r1_bio->bios[i];
int size;
- if (r1_bio->bios[i]->bi_end_io != end_sync_read)
+ if (sbio->bi_end_io != end_sync_read)
continue;
if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
@@ -1892,16 +1878,15 @@ static int process_checks(struct r1bio *r1_bio)
continue;
}
/* fixup the bio for reuse */
+ bio_reset(sbio);
sbio->bi_vcnt = vcnt;
sbio->bi_size = r1_bio->sectors << 9;
- sbio->bi_idx = 0;
- sbio->bi_phys_segments = 0;
- sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
- sbio->bi_flags |= 1 << BIO_UPTODATE;
- sbio->bi_next = NULL;
sbio->bi_sector = r1_bio->sector +
conf->mirrors[i].rdev->data_offset;
sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
+ sbio->bi_end_io = end_sync_read;
+ sbio->bi_private = r1_bio;
+
size = sbio->bi_size;
for (j = 0; j < vcnt ; j++) {
struct bio_vec *bi;
@@ -1912,10 +1897,9 @@ static int process_checks(struct r1bio *r1_bio)
else
bi->bv_len = size;
size -= PAGE_SIZE;
- memcpy(page_address(bi->bv_page),
- page_address(pbio->bi_io_vec[j].bv_page),
- PAGE_SIZE);
}
+
+ bio_copy_data(sbio, pbio);
}
return 0;
}
@@ -1952,7 +1936,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
wbio->bi_rw = WRITE;
wbio->bi_end_io = end_sync_write;
atomic_inc(&r1_bio->remaining);
- md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);
+ md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
generic_make_request(wbio);
}
@@ -2064,32 +2048,11 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
}
}
-static void bi_complete(struct bio *bio, int error)
-{
- complete((struct completion *)bio->bi_private);
-}
-
-static int submit_bio_wait(int rw, struct bio *bio)
-{
- struct completion event;
- rw |= REQ_SYNC;
-
- init_completion(&event);
- bio->bi_private = &event;
- bio->bi_end_io = bi_complete;
- submit_bio(rw, bio);
- wait_for_completion(&event);
-
- return test_bit(BIO_UPTODATE, &bio->bi_flags);
-}
-
static int narrow_write_error(struct r1bio *r1_bio, int i)
{
struct mddev *mddev = r1_bio->mddev;
struct r1conf *conf = mddev->private;
struct md_rdev *rdev = conf->mirrors[i].rdev;
- int vcnt, idx;
- struct bio_vec *vec;
/* bio has the data to be written to device 'i' where
* we just recently had a write error.
@@ -2117,30 +2080,32 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
& ~(sector_t)(block_sectors - 1))
- sector;
- if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
- vcnt = r1_bio->behind_page_count;
- vec = r1_bio->behind_bvecs;
- idx = 0;
- while (vec[idx].bv_page == NULL)
- idx++;
- } else {
- vcnt = r1_bio->master_bio->bi_vcnt;
- vec = r1_bio->master_bio->bi_io_vec;
- idx = r1_bio->master_bio->bi_idx;
- }
while (sect_to_write) {
struct bio *wbio;
if (sectors > sect_to_write)
sectors = sect_to_write;
/* Write at 'sector' for 'sectors'*/
- wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
- memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
- wbio->bi_sector = r1_bio->sector;
+ if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
+ unsigned vcnt = r1_bio->behind_page_count;
+ struct bio_vec *vec = r1_bio->behind_bvecs;
+
+ while (!vec->bv_page) {
+ vec++;
+ vcnt--;
+ }
+
+ wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
+ memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
+
+ wbio->bi_vcnt = vcnt;
+ } else {
+ wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
+ }
+
wbio->bi_rw = WRITE;
- wbio->bi_vcnt = vcnt;
+ wbio->bi_sector = r1_bio->sector;
wbio->bi_size = r1_bio->sectors << 9;
- wbio->bi_idx = idx;
md_trim_bio(wbio, sector - r1_bio->sector, sectors);
wbio->bi_sector += rdev->data_offset;
@@ -2289,8 +2254,7 @@ read_more:
r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
r1_bio->master_bio = mbio;
- r1_bio->sectors = (mbio->bi_size >> 9)
- - sectors_handled;
+ r1_bio->sectors = bio_sectors(mbio) - sectors_handled;
r1_bio->state = 0;
set_bit(R1BIO_ReadError, &r1_bio->state);
r1_bio->mddev = mddev;
@@ -2464,18 +2428,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
for (i = 0; i < conf->raid_disks * 2; i++) {
struct md_rdev *rdev;
bio = r1_bio->bios[i];
-
- /* take from bio_init */
- bio->bi_next = NULL;
- bio->bi_flags &= ~(BIO_POOL_MASK-1);
- bio->bi_flags |= 1 << BIO_UPTODATE;
- bio->bi_rw = READ;
- bio->bi_vcnt = 0;
- bio->bi_idx = 0;
- bio->bi_phys_segments = 0;
- bio->bi_size = 0;
- bio->bi_end_io = NULL;
- bio->bi_private = NULL;
+ bio_reset(bio);
rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev == NULL ||
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 018741b..59d4daa 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1174,14 +1174,13 @@ static void make_request(struct mddev *mddev, struct bio * bio)
/* If this request crosses a chunk boundary, we need to
* split it. This will only happen for 1 PAGE (or less) requests.
*/
- if (unlikely((bio->bi_sector & chunk_mask) + (bio->bi_size >> 9)
+ if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
> chunk_sects
&& (conf->geo.near_copies < conf->geo.raid_disks
|| conf->prev.near_copies < conf->prev.raid_disks))) {
struct bio_pair *bp;
/* Sanity check -- queue functions should prevent this happening */
- if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
- bio->bi_idx != 0)
+ if (bio_segments(bio) > 1)
goto bad_map;
/* This is a one page bio that upper layers
* refuse to split for us, so we need to split it.
@@ -1214,7 +1213,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
bad_map:
printk("md/raid10:%s: make_request bug: can't convert block across chunks"
" or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
- (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
+ (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
bio_io_error(bio);
return;
@@ -1229,7 +1228,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
*/
wait_barrier(conf);
- sectors = bio->bi_size >> 9;
+ sectors = bio_sectors(bio);
while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
bio->bi_sector < conf->reshape_progress &&
bio->bi_sector + sectors > conf->reshape_progress) {
@@ -1331,8 +1330,7 @@ read_again:
r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
r10_bio->master_bio = bio;
- r10_bio->sectors = ((bio->bi_size >> 9)
- - sectors_handled);
+ r10_bio->sectors = bio_sectors(bio) - sectors_handled;
r10_bio->state = 0;
r10_bio->mddev = mddev;
r10_bio->sector = bio->bi_sector + sectors_handled;
@@ -1574,7 +1572,7 @@ retry_write:
* after checking if we need to go around again.
*/
- if (sectors_handled < (bio->bi_size >> 9)) {
+ if (sectors_handled < bio_sectors(bio)) {
one_write_done(r10_bio);
/* We need another r10_bio. It has already been counted
* in bio->bi_phys_segments.
@@ -1582,7 +1580,7 @@ retry_write:
r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
r10_bio->master_bio = bio;
- r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+ r10_bio->sectors = bio_sectors(bio) - sectors_handled;
r10_bio->mddev = mddev;
r10_bio->sector = bio->bi_sector + sectors_handled;
@@ -2084,13 +2082,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
* First we need to fixup bv_offset, bv_len and
* bi_vecs, as the read request might have corrupted these
*/
+ bio_reset(tbio);
+
tbio->bi_vcnt = vcnt;
tbio->bi_size = r10_bio->sectors << 9;
- tbio->bi_idx = 0;
- tbio->bi_phys_segments = 0;
- tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
- tbio->bi_flags |= 1 << BIO_UPTODATE;
- tbio->bi_next = NULL;
tbio->bi_rw = WRITE;
tbio->bi_private = r10_bio;
tbio->bi_sector = r10_bio->devs[i].addr;
@@ -2108,7 +2103,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
d = r10_bio->devs[i].devnum;
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
atomic_inc(&r10_bio->remaining);
- md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
+ md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
@@ -2133,7 +2128,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
d = r10_bio->devs[i].devnum;
atomic_inc(&r10_bio->remaining);
md_sync_acct(conf->mirrors[d].replacement->bdev,
- tbio->bi_size >> 9);
+ bio_sectors(tbio));
generic_make_request(tbio);
}
@@ -2259,13 +2254,13 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
wbio2 = r10_bio->devs[1].repl_bio;
if (wbio->bi_end_io) {
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
- md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
+ md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
generic_make_request(wbio);
}
if (wbio2 && wbio2->bi_end_io) {
atomic_inc(&conf->mirrors[d].replacement->nr_pending);
md_sync_acct(conf->mirrors[d].replacement->bdev,
- wbio2->bi_size >> 9);
+ bio_sectors(wbio2));
generic_make_request(wbio2);
}
}
@@ -2536,25 +2531,6 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
}
}
-static void bi_complete(struct bio *bio, int error)
-{
- complete((struct completion *)bio->bi_private);
-}
-
-static int submit_bio_wait(int rw, struct bio *bio)
-{
- struct completion event;
- rw |= REQ_SYNC;
-
- init_completion(&event);
- bio->bi_private = &event;
- bio->bi_end_io = bi_complete;
- submit_bio(rw, bio);
- wait_for_completion(&event);
-
- return test_bit(BIO_UPTODATE, &bio->bi_flags);
-}
-
static int narrow_write_error(struct r10bio *r10_bio, int i)
{
struct bio *bio = r10_bio->master_bio;
@@ -2695,8 +2671,7 @@ read_more:
r10_bio = mempool_alloc(conf->r10bio_pool,
GFP_NOIO);
r10_bio->master_bio = mbio;
- r10_bio->sectors = (mbio->bi_size >> 9)
- - sectors_handled;
+ r10_bio->sectors = bio_sectors(mbio) - sectors_handled;
r10_bio->state = 0;
set_bit(R10BIO_ReadError,
&r10_bio->state);
@@ -3133,6 +3108,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
}
}
bio = r10_bio->devs[0].bio;
+ bio_reset(bio);
bio->bi_next = biolist;
biolist = bio;
bio->bi_private = r10_bio;
@@ -3157,6 +3133,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
rdev = mirror->rdev;
if (!test_bit(In_sync, &rdev->flags)) {
bio = r10_bio->devs[1].bio;
+ bio_reset(bio);
bio->bi_next = biolist;
biolist = bio;
bio->bi_private = r10_bio;
@@ -3185,6 +3162,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
if (rdev == NULL || bio == NULL ||
test_bit(Faulty, &rdev->flags))
break;
+ bio_reset(bio);
bio->bi_next = biolist;
biolist = bio;
bio->bi_private = r10_bio;
@@ -3283,7 +3261,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
r10_bio->devs[i].repl_bio->bi_end_io = NULL;
bio = r10_bio->devs[i].bio;
- bio->bi_end_io = NULL;
+ bio_reset(bio);
clear_bit(BIO_UPTODATE, &bio->bi_flags);
if (conf->mirrors[d].rdev == NULL ||
test_bit(Faulty, &conf->mirrors[d].rdev->flags))
@@ -3320,6 +3298,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
/* Need to set up for writing to the replacement */
bio = r10_bio->devs[i].repl_bio;
+ bio_reset(bio);
clear_bit(BIO_UPTODATE, &bio->bi_flags);
sector = r10_bio->devs[i].addr;
@@ -3353,17 +3332,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
}
}
- for (bio = biolist; bio ; bio=bio->bi_next) {
-
- bio->bi_flags &= ~(BIO_POOL_MASK - 1);
- if (bio->bi_end_io)
- bio->bi_flags |= 1 << BIO_UPTODATE;
- bio->bi_vcnt = 0;
- bio->bi_idx = 0;
- bio->bi_phys_segments = 0;
- bio->bi_size = 0;
- }
-
nr_sectors = 0;
if (sector_nr + max_sync < max_sector)
max_sector = sector_nr + max_sync;
@@ -4411,7 +4379,6 @@ read_more:
read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
read_bio->bi_flags |= 1 << BIO_UPTODATE;
read_bio->bi_vcnt = 0;
- read_bio->bi_idx = 0;
read_bio->bi_size = 0;
r10_bio->master_bio = read_bio;
r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
@@ -4435,17 +4402,14 @@ read_more:
}
if (!rdev2 || test_bit(Faulty, &rdev2->flags))
continue;
+
+ bio_reset(b);
b->bi_bdev = rdev2->bdev;
b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset;
b->bi_private = r10_bio;
b->bi_end_io = end_reshape_write;
b->bi_rw = WRITE;
- b->bi_flags &= ~(BIO_POOL_MASK - 1);
- b->bi_flags |= 1 << BIO_UPTODATE;
b->bi_next = blist;
- b->bi_vcnt = 0;
- b->bi_idx = 0;
- b->bi_size = 0;
blist = b;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4a7be45..9359828 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -90,7 +90,7 @@ static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
*/
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
- int sectors = bio->bi_size >> 9;
+ int sectors = bio_sectors(bio);
if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
return bio->bi_next;
else
@@ -569,14 +569,6 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi = &sh->dev[i].req;
rbi = &sh->dev[i].rreq; /* For writing to replacement */
- bi->bi_rw = rw;
- rbi->bi_rw = rw;
- if (rw & WRITE) {
- bi->bi_end_io = raid5_end_write_request;
- rbi->bi_end_io = raid5_end_write_request;
- } else
- bi->bi_end_io = raid5_end_read_request;
-
rcu_read_lock();
rrdev = rcu_dereference(conf->disks[i].replacement);
smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
@@ -651,7 +643,14 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
set_bit(STRIPE_IO_STARTED, &sh->state);
+ bio_reset(bi);
bi->bi_bdev = rdev->bdev;
+ bi->bi_rw = rw;
+ bi->bi_end_io = (rw & WRITE)
+ ? raid5_end_write_request
+ : raid5_end_read_request;
+ bi->bi_private = sh;
+
pr_debug("%s: for %llu schedule op %ld on disc %d\n",
__func__, (unsigned long long)sh->sector,
bi->bi_rw, i);
@@ -665,12 +664,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
bi->bi_rw |= REQ_FLUSH;
- bi->bi_flags = 1 << BIO_UPTODATE;
- bi->bi_idx = 0;
bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
bi->bi_io_vec[0].bv_offset = 0;
bi->bi_size = STRIPE_SIZE;
- bi->bi_next = NULL;
if (rrdev)
set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
@@ -687,7 +683,13 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
set_bit(STRIPE_IO_STARTED, &sh->state);
+ bio_reset(rbi);
rbi->bi_bdev = rrdev->bdev;
+ rbi->bi_rw = rw;
+ BUG_ON(!(rw & WRITE));
+ rbi->bi_end_io = raid5_end_write_request;
+ rbi->bi_private = sh;
+
pr_debug("%s: for %llu schedule op %ld on "
"replacement disc %d\n",
__func__, (unsigned long long)sh->sector,
@@ -699,12 +701,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
else
rbi->bi_sector = (sh->sector
+ rrdev->data_offset);
- rbi->bi_flags = 1 << BIO_UPTODATE;
- rbi->bi_idx = 0;
rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
rbi->bi_io_vec[0].bv_offset = 0;
rbi->bi_size = STRIPE_SIZE;
- rbi->bi_next = NULL;
if (conf->mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
rbi, disk_devt(conf->mddev->gendisk),
@@ -2402,11 +2401,11 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
} else
bip = &sh->dev[dd_idx].toread;
while (*bip && (*bip)->bi_sector < bi->bi_sector) {
- if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
+ if (bio_end_sector(*bip) > bi->bi_sector)
goto overlap;
bip = & (*bip)->bi_next;
}
- if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
+ if (*bip && (*bip)->bi_sector < bio_end_sector(bi))
goto overlap;
BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
@@ -2422,8 +2421,8 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
bi && bi->bi_sector <= sector;
bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
- if (bi->bi_sector + (bi->bi_size>>9) >= sector)
- sector = bi->bi_sector + (bi->bi_size>>9);
+ if (bio_end_sector(bi) >= sector)
+ sector = bio_end_sector(bi);
}
if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
@@ -3849,7 +3848,7 @@ static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
{
sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
unsigned int chunk_sectors = mddev->chunk_sectors;
- unsigned int bio_sectors = bio->bi_size >> 9;
+ unsigned int bio_sectors = bio_sectors(bio);
if (mddev->new_chunk_sectors < mddev->chunk_sectors)
chunk_sectors = mddev->new_chunk_sectors;
@@ -3941,7 +3940,7 @@ static int bio_fits_rdev(struct bio *bi)
{
struct request_queue *q = bdev_get_queue(bi->bi_bdev);
- if ((bi->bi_size>>9) > queue_max_sectors(q))
+ if (bio_sectors(bi) > queue_max_sectors(q))
return 0;
blk_recount_segments(q, bi);
if (bi->bi_phys_segments > queue_max_segments(q))
@@ -3988,7 +3987,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
0,
&dd_idx, NULL);
- end_sector = align_bi->bi_sector + (align_bi->bi_size >> 9);
+ end_sector = bio_end_sector(align_bi);
rcu_read_lock();
rdev = rcu_dereference(conf->disks[dd_idx].replacement);
if (!rdev || test_bit(Faulty, &rdev->flags) ||
@@ -4011,7 +4010,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
if (!bio_fits_rdev(align_bi) ||
- is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
+ is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi),
&first_bad, &bad_sectors)) {
/* too big in some way, or has a known bad block */
bio_put(align_bi);
@@ -4273,7 +4272,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
}
logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
- last_sector = bi->bi_sector + (bi->bi_size>>9);
+ last_sector = bio_end_sector(bi);
bi->bi_next = NULL;
bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
@@ -4739,7 +4738,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
sector = raid5_compute_sector(conf, logical_sector,
0, &dd_idx, NULL);
- last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
+ last_sector = bio_end_sector(raid_bio);
for (; logical_sector < last_sector;
logical_sector += STRIPE_SECTORS,
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 57601c0..1f925e8 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -2527,11 +2527,8 @@ static int dvb_frontend_release(struct inode *inode, struct file *file)
if (dvbdev->users == -1) {
wake_up(&fepriv->wait_queue);
- if (fepriv->exit != DVB_FE_NO_EXIT) {
- fops_put(file->f_op);
- file->f_op = NULL;
+ if (fepriv->exit != DVB_FE_NO_EXIT)
wake_up(&dvbdev->wait_queue);
- }
if (fe->ops.ts_bus_ctrl)
fe->ops.ts_bus_ctrl(fe, 0);
}
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index c3cc3b5..f91c80c 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -1479,11 +1479,8 @@ static int dvb_net_close(struct inode *inode, struct file *file)
dvb_generic_release(inode, file);
- if(dvbdev->users == 1 && dvbnet->exit == 1) {
- fops_put(file->f_op);
- file->f_op = NULL;
+ if(dvbdev->users == 1 && dvbnet->exit == 1)
wake_up(&dvbdev->wait_queue);
- }
return 0;
}
diff --git a/drivers/media/pci/bt8xx/bttv-i2c.c b/drivers/media/pci/bt8xx/bttv-i2c.c
index b7c52dc..d43911d 100644
--- a/drivers/media/pci/bt8xx/bttv-i2c.c
+++ b/drivers/media/pci/bt8xx/bttv-i2c.c
@@ -397,8 +397,8 @@ int init_bttv_i2c(struct bttv *btv)
int fini_bttv_i2c(struct bttv *btv)
{
- if (0 != btv->i2c_rc)
- return 0;
+ if (btv->i2c_rc == 0)
+ i2c_del_adapter(&btv->c.i2c_adap);
- return i2c_del_adapter(&btv->c.i2c_adap);
+ return 0;
}
diff --git a/drivers/media/pci/cx25821/cx25821-audio-upstream.c b/drivers/media/pci/cx25821/cx25821-audio-upstream.c
index b9be535..68dbc2d 100644
--- a/drivers/media/pci/cx25821/cx25821-audio-upstream.c
+++ b/drivers/media/pci/cx25821/cx25821-audio-upstream.c
@@ -259,79 +259,46 @@ void cx25821_free_mem_upstream_audio(struct cx25821_dev *dev)
static int cx25821_get_audio_data(struct cx25821_dev *dev,
const struct sram_channel *sram_ch)
{
- struct file *myfile;
+ struct file *file;
int frame_index_temp = dev->_audioframe_index;
int i = 0;
- int line_size = AUDIO_LINE_SIZE;
int frame_size = AUDIO_DATA_BUF_SZ;
int frame_offset = frame_size * frame_index_temp;
- ssize_t vfs_read_retval = 0;
- char mybuf[line_size];
+ char mybuf[AUDIO_LINE_SIZE];
loff_t file_offset = dev->_audioframe_count * frame_size;
- loff_t pos;
- mm_segment_t old_fs;
+ char *p = NULL;
if (dev->_audiofile_status == END_OF_FILE)
return 0;
- myfile = filp_open(dev->_audiofilename, O_RDONLY | O_LARGEFILE, 0);
+ file = filp_open(dev->_audiofilename, O_RDONLY | O_LARGEFILE, 0);
+ if (IS_ERR(file)) {
+ pr_err("%s(): ERROR opening file(%s) with errno = %ld!\n",
+ __func__, dev->_audiofilename, -PTR_ERR(file));
+ return PTR_ERR(file);
+ }
- if (IS_ERR(myfile)) {
- const int open_errno = -PTR_ERR(myfile);
- pr_err("%s(): ERROR opening file(%s) with errno = %d!\n",
- __func__, dev->_audiofilename, open_errno);
- return PTR_ERR(myfile);
- } else {
- if (!(myfile->f_op)) {
- pr_err("%s(): File has no file operations registered!\n",
- __func__);
- filp_close(myfile, NULL);
- return -EIO;
- }
+ if (dev->_audiodata_buf_virt_addr)
+ p = (char *)dev->_audiodata_buf_virt_addr + frame_offset;
- if (!myfile->f_op->read) {
- pr_err("%s(): File has no READ operations registered!\n",
+ for (i = 0; i < dev->_audio_lines_count; i++) {
+ int n = kernel_read(file, file_offset, mybuf, AUDIO_LINE_SIZE);
+ if (n < AUDIO_LINE_SIZE) {
+ pr_info("Done: exit %s() since no more bytes to read from Audio file\n",
__func__);
- filp_close(myfile, NULL);
- return -EIO;
+ dev->_audiofile_status = END_OF_FILE;
+ fput(file);
+ return 0;
}
-
- pos = myfile->f_pos;
- old_fs = get_fs();
- set_fs(KERNEL_DS);
-
- for (i = 0; i < dev->_audio_lines_count; i++) {
- pos = file_offset;
-
- vfs_read_retval = vfs_read(myfile, mybuf, line_size,
- &pos);
-
- if (vfs_read_retval > 0 && vfs_read_retval == line_size
- && dev->_audiodata_buf_virt_addr != NULL) {
- memcpy((void *)(dev->_audiodata_buf_virt_addr +
- frame_offset / 4), mybuf,
- vfs_read_retval);
- }
-
- file_offset += vfs_read_retval;
- frame_offset += vfs_read_retval;
-
- if (vfs_read_retval < line_size) {
- pr_info("Done: exit %s() since no more bytes to read from Audio file\n",
- __func__);
- break;
- }
+ dev->_audiofile_status = IN_PROGRESS;
+ if (p) {
+ memcpy(p, mybuf, n);
+ p += n;
}
-
- if (i > 0)
- dev->_audioframe_count++;
-
- dev->_audiofile_status = (vfs_read_retval == line_size) ?
- IN_PROGRESS : END_OF_FILE;
-
- set_fs(old_fs);
- filp_close(myfile, NULL);
+ file_offset += n;
}
+ dev->_audioframe_count++;
+ fput(file);
return 0;
}
@@ -354,81 +321,41 @@ static void cx25821_audioups_handler(struct work_struct *work)
static int cx25821_openfile_audio(struct cx25821_dev *dev,
const struct sram_channel *sram_ch)
{
- struct file *myfile;
- int i = 0, j = 0;
- int line_size = AUDIO_LINE_SIZE;
- ssize_t vfs_read_retval = 0;
- char mybuf[line_size];
- loff_t pos;
- loff_t offset = (unsigned long)0;
- mm_segment_t old_fs;
-
- myfile = filp_open(dev->_audiofilename, O_RDONLY | O_LARGEFILE, 0);
-
- if (IS_ERR(myfile)) {
- const int open_errno = -PTR_ERR(myfile);
- pr_err("%s(): ERROR opening file(%s) with errno = %d!\n",
- __func__, dev->_audiofilename, open_errno);
- return PTR_ERR(myfile);
- } else {
- if (!(myfile->f_op)) {
- pr_err("%s(): File has no file operations registered!\n",
- __func__);
- filp_close(myfile, NULL);
- return -EIO;
- }
-
- if (!myfile->f_op->read) {
- pr_err("%s(): File has no READ operations registered!\n",
- __func__);
- filp_close(myfile, NULL);
- return -EIO;
- }
-
- pos = myfile->f_pos;
- old_fs = get_fs();
- set_fs(KERNEL_DS);
-
- for (j = 0; j < NUM_AUDIO_FRAMES; j++) {
- for (i = 0; i < dev->_audio_lines_count; i++) {
- pos = offset;
-
- vfs_read_retval = vfs_read(myfile, mybuf,
- line_size, &pos);
-
- if (vfs_read_retval > 0 &&
- vfs_read_retval == line_size &&
- dev->_audiodata_buf_virt_addr != NULL) {
- memcpy((void *)(dev->
- _audiodata_buf_virt_addr
- + offset / 4), mybuf,
- vfs_read_retval);
- }
+ char *p = (void *)dev->_audiodata_buf_virt_addr;
+ struct file *file;
+ loff_t offset;
+ int i, j;
+
+ file = filp_open(dev->_audiofilename, O_RDONLY | O_LARGEFILE, 0);
+ if (IS_ERR(file)) {
+ pr_err("%s(): ERROR opening file(%s) with errno = %ld!\n",
+ __func__, dev->_audiofilename, PTR_ERR(file));
+ return PTR_ERR(file);
+ }
- offset += vfs_read_retval;
+ for (j = 0, offset = 0; j < NUM_AUDIO_FRAMES; j++) {
+ for (i = 0; i < dev->_audio_lines_count; i++) {
+ char buf[AUDIO_LINE_SIZE];
+ int n = kernel_read(file, offset, buf,
+ AUDIO_LINE_SIZE);
- if (vfs_read_retval < line_size) {
- pr_info("Done: exit %s() since no more bytes to read from Audio file\n",
- __func__);
- break;
- }
+ if (n < AUDIO_LINE_SIZE) {
+ pr_info("Done: exit %s() since no more bytes to read from Audio file\n",
+ __func__);
+ dev->_audiofile_status = END_OF_FILE;
+ fput(file);
+ return 0;
}
- if (i > 0)
- dev->_audioframe_count++;
+ if (p)
+ memcpy(p + offset, buf, n);
- if (vfs_read_retval < line_size)
- break;
+ offset += n;
}
-
- dev->_audiofile_status = (vfs_read_retval == line_size) ?
- IN_PROGRESS : END_OF_FILE;
-
- set_fs(old_fs);
- myfile->f_pos = 0;
- filp_close(myfile, NULL);
+ dev->_audioframe_count++;
}
-
+ dev->_audiofile_status = IN_PROGRESS;
+ fput(file);
return 0;
}
diff --git a/drivers/media/pci/mantis/mantis_i2c.c b/drivers/media/pci/mantis/mantis_i2c.c
index 937fb9d..895ddba 100644
--- a/drivers/media/pci/mantis/mantis_i2c.c
+++ b/drivers/media/pci/mantis/mantis_i2c.c
@@ -261,6 +261,8 @@ int mantis_i2c_exit(struct mantis_pci *mantis)
mmwrite((intmask & ~MANTIS_INT_I2CDONE), MANTIS_INT_MASK);
dprintk(MANTIS_DEBUG, 1, "Removing I2C adapter");
- return i2c_del_adapter(&mantis->adapter);
+ i2c_del_adapter(&mantis->adapter);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(mantis_i2c_exit);
diff --git a/drivers/media/pci/ttpci/av7110_ir.c b/drivers/media/pci/ttpci/av7110_ir.c
index eb82286..0e763a7 100644
--- a/drivers/media/pci/ttpci/av7110_ir.c
+++ b/drivers/media/pci/ttpci/av7110_ir.c
@@ -375,7 +375,7 @@ int av7110_ir_init(struct av7110 *av7110)
if (av_cnt == 1) {
e = proc_create("av7110_ir", S_IWUSR, NULL, &av7110_ir_proc_fops);
if (e)
- e->size = 4 + 256 * sizeof(u16);
+ proc_set_size(e, 4 + 256 * sizeof(u16));
}
tasklet_init(&av7110->ir.ir_tasklet, av7110_emit_key, (unsigned long) &av7110->ir);
diff --git a/drivers/media/pci/zoran/zoran_procfs.c b/drivers/media/pci/zoran/zoran_procfs.c
index 1512b5d..f7ceee0 100644
--- a/drivers/media/pci/zoran/zoran_procfs.c
+++ b/drivers/media/pci/zoran/zoran_procfs.c
@@ -130,14 +130,14 @@ static int zoran_show(struct seq_file *p, void *v)
static int zoran_open(struct inode *inode, struct file *file)
{
- struct zoran *data = PDE(inode)->data;
+ struct zoran *data = PDE_DATA(inode);
return single_open(file, zoran_show, data);
}
static ssize_t zoran_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
- struct zoran *zr = PDE(file_inode(file))->data;
+ struct zoran *zr = PDE_DATA(file_inode(file));
char *string, *sp;
char *line, *ldelim, *varname, *svar, *tdelim;
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index ff4d93d..e456126 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -307,7 +307,7 @@ static void ir_lirc_close(void *data)
return;
}
-static struct file_operations lirc_fops = {
+static const struct file_operations lirc_fops = {
.owner = THIS_MODULE,
.write = ir_lirc_transmit_ir,
.unlocked_ioctl = ir_lirc_ioctl,
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 5247d94..8dc057b 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -152,7 +152,7 @@ static int lirc_thread(void *irctl)
}
-static struct file_operations lirc_dev_fops = {
+static const struct file_operations lirc_dev_fops = {
.owner = THIS_MODULE,
.read = lirc_dev_fop_read,
.write = lirc_dev_fop_write,
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index f12b78d..f4176ca 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -204,7 +204,7 @@ static int mspro_block_bd_open(struct block_device *bdev, fmode_t mode)
}
-static int mspro_block_disk_release(struct gendisk *disk)
+static void mspro_block_disk_release(struct gendisk *disk)
{
struct mspro_block_data *msb = disk->private_data;
int disk_id = MINOR(disk_devt(disk)) >> MSPRO_BLOCK_PART_SHIFT;
@@ -224,13 +224,11 @@ static int mspro_block_disk_release(struct gendisk *disk)
}
mutex_unlock(&mspro_block_disk_lock);
-
- return 0;
}
-static int mspro_block_bd_release(struct gendisk *disk, fmode_t mode)
+static void mspro_block_bd_release(struct gendisk *disk, fmode_t mode)
{
- return mspro_block_disk_release(disk);
+ mspro_block_disk_release(disk);
}
static int mspro_block_bd_getgeo(struct block_device *bdev,
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index fb69baa..767ff4d 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -6656,7 +6656,7 @@ static int mpt_summary_proc_show(struct seq_file *m, void *v)
static int mpt_summary_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, mpt_summary_proc_show, PDE(inode)->data);
+ return single_open(file, mpt_summary_proc_show, PDE_DATA(inode));
}
static const struct file_operations mpt_summary_proc_fops = {
@@ -6805,7 +6805,7 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
static int mpt_iocinfo_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, mpt_iocinfo_proc_show, PDE(inode)->data);
+ return single_open(file, mpt_iocinfo_proc_show, PDE_DATA(inode));
}
static const struct file_operations mpt_iocinfo_proc_fops = {
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index b383b69..dcc8385 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -597,13 +597,6 @@ mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
}
static int
-mptctl_release(struct inode *inode, struct file *filep)
-{
- fasync_helper(-1, filep, 0, &async_queue);
- return 0;
-}
-
-static int
mptctl_fasync(int fd, struct file *filep, int mode)
{
MPT_ADAPTER *ioc;
@@ -2822,7 +2815,6 @@ static const struct file_operations mptctl_fops = {
.llseek = no_llseek,
.fasync = mptctl_fasync,
.unlocked_ioctl = mptctl_ioctl,
- .release = mptctl_release,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_mpctl_ioctl,
#endif
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index c13cd9b..fd75108 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -109,7 +109,7 @@ static int mptfc_host_reset(struct scsi_cmnd *SCpnt);
static struct scsi_host_template mptfc_driver_template = {
.module = THIS_MODULE,
.proc_name = "mptfc",
- .proc_info = mptscsih_proc_info,
+ .show_info = mptscsih_show_info,
.name = "MPT FC Host",
.info = mptscsih_info,
.queuecommand = mptfc_qcmd,
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index fa43c39..dd239bd 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1977,7 +1977,7 @@ done:
static struct scsi_host_template mptsas_driver_template = {
.module = THIS_MODULE,
.proc_name = "mptsas",
- .proc_info = mptscsih_proc_info,
+ .show_info = mptscsih_show_info,
.name = "MPT SAS Host",
.info = mptscsih_info,
.queuecommand = mptsas_qcmd,
@@ -2235,10 +2235,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
}
/* do we need to support multiple segments? */
- if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
+ if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) {
printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
- ioc->name, __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
- rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
+ ioc->name, __func__, bio_segments(req->bio), blk_rq_bytes(req),
+ bio_segments(rsp->bio), blk_rq_bytes(rsp));
return -EINVAL;
}
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 164afa7..727819c 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1284,101 +1284,17 @@ mptscsih_info(struct Scsi_Host *SChost)
return h->info_kbuf;
}
-struct info_str {
- char *buffer;
- int length;
- int offset;
- int pos;
-};
-
-static void
-mptscsih_copy_mem_info(struct info_str *info, char *data, int len)
-{
- if (info->pos + len > info->length)
- len = info->length - info->pos;
-
- if (info->pos + len < info->offset) {
- info->pos += len;
- return;
- }
-
- if (info->pos < info->offset) {
- data += (info->offset - info->pos);
- len -= (info->offset - info->pos);
- }
-
- if (len > 0) {
- memcpy(info->buffer + info->pos, data, len);
- info->pos += len;
- }
-}
-
-static int
-mptscsih_copy_info(struct info_str *info, char *fmt, ...)
-{
- va_list args;
- char buf[81];
- int len;
-
- va_start(args, fmt);
- len = vsprintf(buf, fmt, args);
- va_end(args);
-
- mptscsih_copy_mem_info(info, buf, len);
- return len;
-}
-
-static int
-mptscsih_host_info(MPT_ADAPTER *ioc, char *pbuf, off_t offset, int len)
-{
- struct info_str info;
-
- info.buffer = pbuf;
- info.length = len;
- info.offset = offset;
- info.pos = 0;
-
- mptscsih_copy_info(&info, "%s: %s, ", ioc->name, ioc->prod_name);
- mptscsih_copy_info(&info, "%s%08xh, ", MPT_FW_REV_MAGIC_ID_STRING, ioc->facts.FWVersion.Word);
- mptscsih_copy_info(&info, "Ports=%d, ", ioc->facts.NumberOfPorts);
- mptscsih_copy_info(&info, "MaxQ=%d\n", ioc->req_depth);
-
- return ((info.pos > info.offset) ? info.pos - info.offset : 0);
-}
-
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/**
- * mptscsih_proc_info - Return information about MPT adapter
- * @host: scsi host struct
- * @buffer: if write, user data; if read, buffer for user
- * @start: returns the buffer address
- * @offset: if write, 0; if read, the current offset into the buffer from
- * the previous read.
- * @length: if write, return length;
- * @func: write = 1; read = 0
- *
- * (linux scsi_host_template.info routine)
- */
-int
-mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
- int length, int func)
+int mptscsih_show_info(struct seq_file *m, struct Scsi_Host *host)
{
MPT_SCSI_HOST *hd = shost_priv(host);
MPT_ADAPTER *ioc = hd->ioc;
- int size = 0;
- if (func) {
- /*
- * write is not supported
- */
- } else {
- if (start)
- *start = buffer;
-
- size = mptscsih_host_info(ioc, buffer, offset, length);
- }
+ seq_printf(m, "%s: %s, ", ioc->name, ioc->prod_name);
+ seq_printf(m, "%s%08xh, ", MPT_FW_REV_MAGIC_ID_STRING, ioc->facts.FWVersion.Word);
+ seq_printf(m, "Ports=%d, ", ioc->facts.NumberOfPorts);
+ seq_printf(m, "MaxQ=%d\n", ioc->req_depth);
- return size;
+ return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -3348,7 +3264,7 @@ EXPORT_SYMBOL(mptscsih_shutdown);
EXPORT_SYMBOL(mptscsih_suspend);
EXPORT_SYMBOL(mptscsih_resume);
#endif
-EXPORT_SYMBOL(mptscsih_proc_info);
+EXPORT_SYMBOL(mptscsih_show_info);
EXPORT_SYMBOL(mptscsih_info);
EXPORT_SYMBOL(mptscsih_qcmd);
EXPORT_SYMBOL(mptscsih_slave_destroy);
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 43e75ff..83f5031 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -111,7 +111,7 @@ extern void mptscsih_shutdown(struct pci_dev *);
extern int mptscsih_suspend(struct pci_dev *pdev, pm_message_t state);
extern int mptscsih_resume(struct pci_dev *pdev);
#endif
-extern int mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int func);
+extern int mptscsih_show_info(struct seq_file *, struct Scsi_Host *);
extern const char * mptscsih_info(struct Scsi_Host *SChost);
extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *));
extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel,
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index c3aabde..5653e50 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -831,7 +831,7 @@ static void mptspi_slave_destroy(struct scsi_device *sdev)
static struct scsi_host_template mptspi_driver_template = {
.module = THIS_MODULE,
.proc_name = "mptspi",
- .proc_info = mptscsih_proc_info,
+ .show_info = mptscsih_show_info,
.name = "MPT SPI Host",
.info = mptscsih_info,
.queuecommand = mptspi_qcmd,
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 49e86ae..6fc3866 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -600,10 +600,8 @@ static int i2o_block_open(struct block_device *bdev, fmode_t mode)
*
* Unlock and unmount the media, and power down the device. Gets called if
* the block device is closed.
- *
- * Returns 0 on success or negative error code on failure.
*/
-static int i2o_block_release(struct gendisk *disk, fmode_t mode)
+static void i2o_block_release(struct gendisk *disk, fmode_t mode)
{
struct i2o_block_device *dev = disk->private_data;
u8 operation;
@@ -617,7 +615,7 @@ static int i2o_block_release(struct gendisk *disk, fmode_t mode)
* the TID no longer exists.
*/
if (!dev->i2o_dev)
- return 0;
+ return;
mutex_lock(&i2o_block_mutex);
i2o_block_device_flush(dev->i2o_dev);
@@ -631,8 +629,6 @@ static int i2o_block_release(struct gendisk *disk, fmode_t mode)
i2o_block_device_power(dev, operation);
mutex_unlock(&i2o_block_mutex);
-
- return 0;
}
static int i2o_block_getgeo(struct block_device *bdev, struct hd_geometry *geo)
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index 8001aa6..b7d87cd 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -1599,98 +1599,98 @@ static int i2o_seq_show_sensors(struct seq_file *seq, void *v)
static int i2o_seq_open_hrt(struct inode *inode, struct file *file)
{
- return single_open(file, i2o_seq_show_hrt, PDE(inode)->data);
+ return single_open(file, i2o_seq_show_hrt, PDE_DATA(inode));
};
static int i2o_seq_open_lct(struct inode *inode, struct file *file)
{
- return single_open(file, i2o_seq_show_lct, PDE(inode)->data);
+ return single_open(file, i2o_seq_show_lct, PDE_DATA(inode));
};
static int i2o_seq_open_status(struct inode *inode, struct file *file)
{
- return single_open(file, i2o_seq_show_status, PDE(inode)->data);
+ return single_open(file, i2o_seq_show_status, PDE_DATA(inode));
};
static int i2o_seq_open_hw(struct inode *inode, struct file *file)
{
- return single_open(file, i2o_seq_show_hw, PDE(inode)->data);
+ return single_open(file, i2o_seq_show_hw, PDE_DATA(inode));
};
static int i2o_seq_open_ddm_table(struct inode *inode, struct file *file)
{
- return single_open(file, i2o_seq_show_ddm_table, PDE(inode)->data);
+ return single_open(file, i2o_seq_show_ddm_table, PDE_DATA(inode));
};
static int i2o_seq_open_driver_store(struct inode *inode, struct file *file)
{
- return single_open(file, i2o_seq_show_driver_store, PDE(inode)->data);
+ return single_open(file, i2o_seq_show_driver_store, PDE_DATA(inode));
};
static int i2o_seq_open_drivers_stored(struct inode *inode, struct file *file)
{
- return single_open(file, i2o_seq_show_drivers_stored, PDE(inode)->data);
+ return single_open(file, i2o_seq_show_drivers_stored, PDE_DATA(inode));
};
static int i2o_seq_open_groups(struct inode *inode, struct file *file)
{
- return single_open(file, i2o_seq_show_groups, PDE(inode)->data);
+ return single_open(file, i2o_seq_show_groups, PDE_DATA(inode));
};
static int i2o_seq_open_phys_device(struct inode *inode, struct file *file)
{
- return single_open(file, i2o_seq_show_phys_device, PDE(inode)->data);
+ return single_open(file, i2o_seq_show_phys_device, PDE_DATA(inode));
};
static int i2o_seq_open_claimed(struct inode *inode, struct file *file)
{
- return single_open(file, i2o_seq_show_claimed, PDE(inode)->data);
+ return single_open(file, i2o_seq_show_claimed, PDE_DATA(inode));
};
static int i2o_seq_open_users(struct inode *inode, struct file *file)
{
- return single_open(file, i2o_seq_show_users, PDE(inode)->data);
+ return single_open(file, i2o_seq_show_users, PDE_DATA(inode));
};
static int i2o_seq_open_priv_msgs(struct inode *inode, struct file *file)
{
- return single_open(file, i2o_seq_show_priv_msgs, PDE(inode)->data);
+ return single_open(file, i2o_seq_show_priv_msgs, PDE_DATA(inode));
};
static int i2o_seq_open_authorized_users(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_authorized_users,
- PDE(inode)->data);
+ PDE_DATA(inode));
};
static int i2o_seq_open_dev_identity(struct inode *inode, struct file *file)
{
- return single_open(file, i2o_seq_show_dev_identity, PDE(inode)->data);
+ return single_open(file, i2o_seq_show_dev_identity, PDE_DATA(inode));
};
static int i2o_seq_open_ddm_identity(struct inode *inode, struct file *file)
{
- return single_open(file, i2o_seq_show_ddm_identity, PDE(inode)->data);
+ return single_open(file, i2o_seq_show_ddm_identity, PDE_DATA(inode));
};
static int i2o_seq_open_uinfo(struct inode *inode, struct file *file)
{
- return single_open(file, i2o_seq_show_uinfo, PDE(inode)->data);
+ return single_open(file, i2o_seq_show_uinfo, PDE_DATA(inode));
};
static int i2o_seq_open_sgl_limits(struct inode *inode, struct file *file)
{
- return single_open(file, i2o_seq_show_sgl_limits, PDE(inode)->data);
+ return single_open(file, i2o_seq_show_sgl_limits, PDE_DATA(inode));
};
static int i2o_seq_open_sensors(struct inode *inode, struct file *file)
{
- return single_open(file, i2o_seq_show_sensors, PDE(inode)->data);
+ return single_open(file, i2o_seq_show_sensors, PDE_DATA(inode));
};
static int i2o_seq_open_dev_name(struct inode *inode, struct file *file)
{
- return single_open(file, i2o_seq_show_dev_name, PDE(inode)->data);
+ return single_open(file, i2o_seq_show_dev_name, PDE_DATA(inode));
};
static const struct file_operations i2o_seq_fops_lct = {
@@ -1895,25 +1895,6 @@ static int i2o_proc_create_entries(struct proc_dir_entry *dir,
}
/**
- * i2o_proc_subdir_remove - Remove child entries from a proc entry
- * @dir: proc dir entry from which the childs should be removed
- *
- * Iterate over each i2o proc entry under dir and remove it. If the child
- * also has entries, remove them too.
- */
-static void i2o_proc_subdir_remove(struct proc_dir_entry *dir)
-{
- struct proc_dir_entry *pe, *tmp;
- pe = dir->subdir;
- while (pe) {
- tmp = pe->next;
- i2o_proc_subdir_remove(pe);
- remove_proc_entry(pe->name, dir);
- pe = tmp;
- }
-};
-
-/**
* i2o_proc_device_add - Add an I2O device to the proc dir
* @dir: proc dir entry to which the device should be added
* @dev: I2O device which should be added
@@ -1932,14 +1913,12 @@ static void i2o_proc_device_add(struct proc_dir_entry *dir,
osm_debug("adding device /proc/i2o/%s/%s\n", dev->iop->name, buff);
- devdir = proc_mkdir(buff, dir);
+ devdir = proc_mkdir_data(buff, 0, dir, dev);
if (!devdir) {
osm_warn("Could not allocate procdir!\n");
return;
}
- devdir->data = dev;
-
i2o_proc_create_entries(devdir, generic_dev_entries, dev);
/* Inform core that we want updates about this device's status */
@@ -1973,12 +1952,10 @@ static int i2o_proc_iop_add(struct proc_dir_entry *dir,
osm_debug("adding IOP /proc/i2o/%s\n", c->name);
- iopdir = proc_mkdir(c->name, dir);
+ iopdir = proc_mkdir_data(c->name, 0, dir, c);
if (!iopdir)
return -1;
- iopdir->data = c;
-
i2o_proc_create_entries(iopdir, i2o_proc_generic_iop_entries, c);
list_for_each_entry(dev, &c->devices, list)
@@ -1988,31 +1965,6 @@ static int i2o_proc_iop_add(struct proc_dir_entry *dir,
}
/**
- * i2o_proc_iop_remove - Removes an I2O controller from the i2o proc tree
- * @dir: parent proc dir entry
- * @c: I2O controller which should be removed
- *
- * Iterate over each i2o proc entry and search controller c. If it is found
- * remove it from the tree.
- */
-static void i2o_proc_iop_remove(struct proc_dir_entry *dir,
- struct i2o_controller *c)
-{
- struct proc_dir_entry *pe, *tmp;
-
- pe = dir->subdir;
- while (pe) {
- tmp = pe->next;
- if (pe->data == c) {
- i2o_proc_subdir_remove(pe);
- remove_proc_entry(pe->name, dir);
- }
- osm_debug("removing IOP /proc/i2o/%s\n", c->name);
- pe = tmp;
- }
-}
-
-/**
* i2o_proc_fs_create - Create the i2o proc fs.
*
* Iterate over each I2O controller and create the entries for it.
@@ -2042,12 +1994,7 @@ static int __init i2o_proc_fs_create(void)
*/
static int __exit i2o_proc_fs_destroy(void)
{
- struct i2o_controller *c;
-
- list_for_each_entry(c, &i2o_controllers, list)
- i2o_proc_iop_remove(i2o_proc_dir_root, c);
-
- remove_proc_entry("i2o", NULL);
+ remove_proc_subtree("i2o", NULL);
return 0;
};
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 893fc1b..31ca555 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -1144,17 +1144,15 @@ static int pm860x_probe(struct i2c_client *client,
return -ENOMEM;
ret = pm860x_dt_init(node, &client->dev, pdata);
if (ret)
- goto err;
+ return ret;
} else if (!pdata) {
pr_info("No platform data in %s!\n", __func__);
return -EINVAL;
}
chip = kzalloc(sizeof(struct pm860x_chip), GFP_KERNEL);
- if (chip == NULL) {
- ret = -ENOMEM;
- goto err;
- }
+ if (chip == NULL)
+ return -ENOMEM;
chip->id = verify_addr(client);
chip->regmap = regmap_init_i2c(client, &pm860x_regmap_config);
@@ -1194,10 +1192,6 @@ static int pm860x_probe(struct i2c_client *client,
pm860x_device_init(chip, pdata);
return 0;
-err:
- if (node)
- devm_kfree(&client->dev, pdata);
- return ret;
}
static int pm860x_remove(struct i2c_client *client)
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index ca86581..d9aed15 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -10,19 +10,240 @@ config MFD_CORE
select IRQ_DOMAIN
default n
-config MFD_88PM860X
- bool "Support Marvell 88PM8606/88PM8607"
+config MFD_CS5535
+ tristate "AMD CS5535 and CS5536 southbridge core functions"
+ select MFD_CORE
+ depends on PCI && X86
+ ---help---
+ This is the core driver for CS5535/CS5536 MFD functions. This is
+ necessary for using the board's GPIO and MFGPT functionality.
+
+config MFD_AS3711
+ bool "AMS AS3711"
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
depends on I2C=y && GENERIC_HARDIRQS
+ help
+ Support for the AS3711 PMIC from AMS
+
+config PMIC_ADP5520
+ bool "Analog Devices ADP5520/01 MFD PMIC Core Support"
+ depends on I2C=y
+ help
+ Say yes here to add support for Analog Devices ADP5520 and ADP5501,
+ Multifunction Power Management IC. This includes
+ the I2C driver and the core APIs _only_, you have to select
+ individual components like LCD backlight, LEDs, GPIOs and Keypad
+ under the corresponding menus.
+
+config MFD_AAT2870_CORE
+ bool "AnalogicTech AAT2870"
+ select MFD_CORE
+ depends on I2C=y && GPIOLIB && GENERIC_HARDIRQS
+ help
+ If you say yes here you get support for the AAT2870.
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the
+ functionality of the device.
+
+config MFD_CROS_EC
+ tristate "ChromeOS Embedded Controller"
+ select MFD_CORE
+ help
+ If you say Y here you get support for the ChromeOS Embedded
+ Controller (EC) providing keyboard, battery and power services.
+ You also need to enable the driver for the bus you are using. The
+ protocol for talking to the EC is defined by the bus driver.
+
+config MFD_CROS_EC_I2C
+ tristate "ChromeOS Embedded Controller (I2C)"
+ depends on MFD_CROS_EC && I2C
+
+ help
+ If you say Y here, you get support for talking to the ChromeOS
+ EC through an I2C bus. This uses a simple byte-level protocol with
+ a checksum. Failing accesses will be retried three times to
+ improve reliability.
+
+config MFD_CROS_EC_SPI
+ tristate "ChromeOS Embedded Controller (SPI)"
+ depends on MFD_CROS_EC && SPI
+
+ ---help---
+ If you say Y here, you get support for talking to the ChromeOS EC
+ through a SPI bus, using a byte-level protocol. Since the EC's
+ response time cannot be guaranteed, we support ignoring
+ 'pre-amble' bytes before the response actually starts.
+
+config MFD_ASIC3
+ bool "Compaq ASIC3"
+ depends on GENERIC_HARDIRQS && GPIOLIB && ARM
+ select MFD_CORE
+ ---help---
+ This driver supports the ASIC3 multifunction chip found on many
+ PDAs (mainly iPAQ and HTC based ones)
+
+config PMIC_DA903X
+ bool "Dialog Semiconductor DA9030/DA9034 PMIC Support"
+ depends on I2C=y
+ help
+ Say yes here to add support for Dialog Semiconductor DA9030 (a.k.a.
+ ARAVA) and DA9034 (a.k.a. MICCO); these are Power Management ICs
+ usually found on PXA processor-based platforms. This includes
+ the I2C driver and the core APIs _only_, you have to select
+ individual components like LCD backlight, voltage regulators,
+ LEDs and battery-charger under the corresponding menus.
+
+config PMIC_DA9052
+ bool
+ select MFD_CORE
+
+config MFD_DA9052_SPI
+ bool "Dialog Semiconductor DA9052/53 PMIC variants with SPI"
+ select REGMAP_SPI
+ select REGMAP_IRQ
+ select PMIC_DA9052
+ depends on SPI_MASTER=y && GENERIC_HARDIRQS
+ help
+ Support for the Dialog Semiconductor DA9052 PMIC
+ when controlled using SPI. This driver provides common support
+ for accessing the device, additional drivers must be enabled in
+ order to use the functionality of the device.
+
+config MFD_DA9052_I2C
+ bool "Dialog Semiconductor DA9052/53 PMIC variants with I2C"
select REGMAP_I2C
+ select REGMAP_IRQ
+ select PMIC_DA9052
+ depends on I2C=y && GENERIC_HARDIRQS
+ help
+ Support for the Dialog Semiconductor DA9052 PMIC
+ when controlled using I2C. This driver provides common support
+ for accessing the device, additional drivers must be enabled in
+ order to use the functionality of the device.
+
+config MFD_DA9055
+ bool "Dialog Semiconductor DA9055 PMIC Support"
+ select REGMAP_I2C
+ select REGMAP_IRQ
select MFD_CORE
+ depends on I2C=y && GENERIC_HARDIRQS
help
- This supports for Marvell 88PM8606/88PM8607 Power Management IC.
- This includes the I2C driver and the core APIs _only_, you have to
- select individual components like voltage regulators, RTC and
- battery-charger under the corresponding menus.
+ Say yes here for support of Dialog Semiconductor DA9055. This is
+ a Power Management IC. This driver provides common support for
+ accessing the device as well as the I2C interface to the chip itself.
+ Additional drivers must be enabled in order to use the functionality
+ of the device.
+
+ This driver can be built as a module. If built as a module it will be
+ called "da9055"
+
+config MFD_MC13783
+ tristate
+
+config MFD_MC13XXX
+ tristate
+ depends on (SPI_MASTER || I2C) && GENERIC_HARDIRQS
+ select MFD_CORE
+ select MFD_MC13783
+ help
+ Enable support for the Freescale MC13783 and MC13892 PMICs.
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the
+ functionality of the device.
+
+config MFD_MC13XXX_SPI
+ tristate "Freescale MC13783 and MC13892 SPI interface"
+ depends on SPI_MASTER && GENERIC_HARDIRQS
+ select REGMAP_SPI
+ select MFD_MC13XXX
+ help
+ Select this if your MC13xxx is connected via an SPI bus.
+
+config MFD_MC13XXX_I2C
+ tristate "Freescale MC13892 I2C interface"
+ depends on I2C && GENERIC_HARDIRQS
+ select REGMAP_I2C
+ select MFD_MC13XXX
+ help
+ Select this if your MC13xxx is connected via an I2C bus.
+
+config HTC_EGPIO
+ bool "HTC EGPIO support"
+ depends on GENERIC_HARDIRQS && GPIOLIB && ARM
+ help
+ This driver supports the CPLD egpio chip present on
+ several HTC phones. It provides basic support for input
+ pins, output pins, and irqs.
+
+config HTC_PASIC3
+ tristate "HTC PASIC3 LED/DS1WM chip support"
+ select MFD_CORE
+ depends on GENERIC_HARDIRQS
+ help
+ This core driver provides register access for the LED/DS1WM
+ chips labeled "AIC2" and "AIC3", found on HTC Blueangel and
+ HTC Magician devices, respectively. Actual functionality is
+ handled by the leds-pasic3 and ds1wm drivers.
+
+config HTC_I2CPLD
+ bool "HTC I2C PLD chip support"
+ depends on I2C=y && GPIOLIB
+ help
+ If you say yes here you get support for the supposed CPLD
+ found on omap850 HTC devices like the HTC Wizard and HTC Herald.
+ This device provides input and output GPIOs through an I2C
+ interface to one or more sub-chips.
+
+config LPC_ICH
+ tristate "Intel ICH LPC"
+ depends on PCI && GENERIC_HARDIRQS
+ select MFD_CORE
+ help
+ The LPC bridge function of the Intel ICH provides support for
+ many functional units. This driver provides needed support for
+ other drivers to control these functions, currently GPIO and
+ watchdog.
+
+config LPC_SCH
+ tristate "Intel SCH LPC"
+ depends on PCI && GENERIC_HARDIRQS
+ select MFD_CORE
+ help
+ The LPC bridge function of the Intel SCH provides support for
+ System Management Bus and General Purpose I/O.
+
+config MFD_INTEL_MSIC
+ bool "Intel MSIC"
+ depends on INTEL_SCU_IPC
+ select MFD_CORE
+ help
+ Select this option to enable access to Intel MSIC (Avatele
+ Passage) chip. This chip embeds audio, battery, GPIO, etc.
+ devices used in Intel Medfield platforms.
+
+config MFD_JANZ_CMODIO
+ tristate "Janz CMOD-IO PCI MODULbus Carrier Board"
+ select MFD_CORE
+ depends on PCI && GENERIC_HARDIRQS
+ help
+ This is the core driver for the Janz CMOD-IO PCI MODULbus
+ carrier board. This device is a PCI to MODULbus bridge which may
+ host many different types of MODULbus daughterboards, including
+ CAN and GPIO controllers.
+
+config MFD_JZ4740_ADC
+ bool "Janz JZ4740 ADC core"
+ select MFD_CORE
+ select GENERIC_IRQ_CHIP
+ depends on MACH_JZ4740
+ help
+ Say yes here if you want support for the ADC unit in the JZ4740 SoC.
+ This driver is necessary for jz4740-battery and jz4740-hwmon driver.
config MFD_88PM800
- tristate "Support Marvell 88PM800"
+ tristate "Marvell 88PM800"
depends on I2C=y && GENERIC_HARDIRQS
select REGMAP_I2C
select REGMAP_IRQ
@@ -34,7 +255,7 @@ config MFD_88PM800
battery-charger under the corresponding menus.
config MFD_88PM805
- tristate "Support Marvell 88PM805"
+ tristate "Marvell 88PM805"
depends on I2C=y && GENERIC_HARDIRQS
select REGMAP_I2C
select REGMAP_IRQ
@@ -45,8 +266,242 @@ config MFD_88PM805
components like codec device, headset/Mic device under the
corresponding menus.
+config MFD_88PM860X
+ bool "Marvell 88PM8606/88PM8607"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select REGMAP_I2C
+ select MFD_CORE
+ help
+ This adds support for the Marvell 88PM8606/88PM8607 Power Management IC.
+ This includes the I2C driver and the core APIs _only_, you have to
+ select individual components like voltage regulators, RTC and
+ battery-charger under the corresponding menus.
+
+config MFD_MAX77686
+ bool "Maxim Semiconductor MAX77686 PMIC Support"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ select REGMAP_I2C
+ select IRQ_DOMAIN
+ help
+ Say yes here to add support for Maxim Semiconductor MAX77686.
+ This is a Power Management IC with RTC on chip.
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
+config MFD_MAX77693
+ bool "Maxim Semiconductor MAX77693 PMIC Support"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ Say yes here to add support for Maxim Semiconductor MAX77693.
+ This is a companion Power Management IC with Flash, Haptic, Charger,
+ and MUIC(Micro USB Interface Controller) controls on chip.
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
+config MFD_MAX8907
+ tristate "Maxim Semiconductor MAX8907 PMIC Support"
+ select MFD_CORE
+ depends on I2C=y && GENERIC_HARDIRQS
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ help
+ Say yes here to add support for Maxim Semiconductor MAX8907. This is
+ a Power Management IC. This driver provides common support for
+ accessing the device; additional drivers must be enabled in order
+ to use the functionality of the device.
+
+config MFD_MAX8925
+ bool "Maxim Semiconductor MAX8925 PMIC Support"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ help
+ Say yes here to add support for Maxim Semiconductor MAX8925. This is
+ a Power Management IC. This driver provides common support for
+ accessing the device, additional drivers must be enabled in order
+ to use the functionality of the device.
+
+config MFD_MAX8997
+ bool "Maxim Semiconductor MAX8997/8966 PMIC Support"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ select IRQ_DOMAIN
+ help
+ Say yes here to add support for Maxim Semiconductor MAX8997/8966.
+ This is a Power Management IC with RTC, Flash, Fuel Gauge, Haptic,
+ MUIC controls on chip.
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
+config MFD_MAX8998
+ bool "Maxim Semiconductor MAX8998/National LP3974 PMIC Support"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ help
+ Say yes here to add support for Maxim Semiconductor MAX8998 and
+ National Semiconductor LP3974. This is a Power Management IC.
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
+config EZX_PCAP
+ bool "Motorola EZXPCAP Support"
+ depends on GENERIC_HARDIRQS && SPI_MASTER
+ help
+ This enables the PCAP ASIC present on EZX Phones. This is
+ needed for MMC, TouchScreen, Sound, USB, etc.
+
+config MFD_VIPERBOARD
+ tristate "Nano River Technologies Viperboard"
+ select MFD_CORE
+ depends on USB && GENERIC_HARDIRQS
+ default n
+ help
+ Say yes here if you want support for Nano River Technologies
+ Viperboard.
+ There are mfd cell drivers available for i2c master, adc and
+ both gpios found on the board. The spi part does not yet
+ have a driver.
+ You need to select the mfd cell drivers separately.
+ The drivers do not support all features the board exposes.
+
+config MFD_RETU
+ tristate "Nokia Retu and Tahvo multi-function device"
+ select MFD_CORE
+ depends on I2C && GENERIC_HARDIRQS
+ select REGMAP_IRQ
+ help
+ Retu and Tahvo are multi-function devices found on Nokia
+ Internet Tablets (770, N800 and N810).
+
+config MFD_PCF50633
+ tristate "NXP PCF50633"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say yes here if you have NXP PCF50633 chip on your board.
+ This core driver provides register access and IRQ handling
+ facilities, and registers devices for the various functions
+ so that function-specific drivers can bind to them.
+
+config PCF50633_ADC
+ tristate "NXP PCF50633 ADC"
+ depends on MFD_PCF50633
+ help
+ Say yes here if you want to include support for ADC in the
+ NXP PCF50633 chip.
+
+config PCF50633_GPIO
+ tristate "NXP PCF50633 GPIO"
+ depends on MFD_PCF50633
+ help
+ Say yes here if you want to include support for GPIO pins on
+ the PCF50633 chip.
+
+config UCB1400_CORE
+ tristate "Philips UCB1400 Core driver"
+ depends on AC97_BUS
+ depends on GPIOLIB
+ help
+ This enables support for the Philips UCB1400 core functions.
+ The UCB1400 is an AC97 audio codec.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ucb1400_core.
+
+config MFD_PM8XXX
+ tristate
+
+config MFD_PM8921_CORE
+ tristate "Qualcomm PM8921 PMIC chip"
+ depends on SSBI && BROKEN
+ select MFD_CORE
+ select MFD_PM8XXX
+ help
+ If you say yes to this option, support will be included for the
+ built-in PM8921 PMIC chip.
+
+ This is required if your board has a PM8921 and uses its features,
+ such as: MPPs, GPIOs, regulators, interrupts, and PWM.
+
+ Say M here if you want to include support for PM8921 chip as a module.
+ This will build a module called "pm8921-core".
+
+config MFD_PM8XXX_IRQ
+ bool "Qualcomm PM8xxx IRQ features"
+ depends on MFD_PM8XXX
+ default y if MFD_PM8XXX
+ help
+ This is the IRQ driver for Qualcomm PM 8xxx PMIC chips.
+
+ This is required to use certain other PM 8xxx features, such as GPIO
+ and MPP.
+
+config MFD_RDC321X
+ tristate "RDC R-321x southbridge"
+ select MFD_CORE
+ depends on PCI && GENERIC_HARDIRQS
+ help
+ Say yes here if you want to have support for the RDC R-321x SoC
+ southbridge which provides access to GPIOs and Watchdog using the
+ southbridge PCI device configuration space.
+
+config MFD_RTSX_PCI
+ tristate "Realtek PCI-E card reader"
+ depends on PCI && GENERIC_HARDIRQS
+ select MFD_CORE
+ help
+ This adds support for the Realtek PCI-Express card reader including rts5209,
+ rts5229, rtl8411, etc. Realtek card reader supports access to many
+ types of memory cards, such as Memory Stick, Memory Stick Pro,
+ Secure Digital and MultiMediaCard.
+
+config MFD_RC5T583
+ bool "Ricoh RC5T583 Power Management system device"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ Select this option to get support for the RICOH583 Power
+ Management system device.
+ This driver provides common support for accessing the device
+ through i2c interface. The device supports multiple sub-devices
+ like GPIO, interrupts, RTC, LDO and DCDC regulators, onkey.
+ Additional drivers must be enabled in order to use the
+ different functionality of the device.
+
+config MFD_SEC_CORE
+ bool "SAMSUNG Electronics PMIC Series Support"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ help
+ Support for the Samsung Electronics MFD series.
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the functionality
+ of the device
+
+config MFD_SI476X_CORE
+ tristate "Silicon Laboratories 4761/64/68 AM/FM radio."
+ depends on I2C
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ This is the core driver for the SI476x series of AM/FM
+ radio. This MFD driver connects the radio-si476x V4L2 module
+ and the si476x audio codec.
+
+ To compile this driver as a module, choose M here: the
+ module will be called si476x-core.
+
config MFD_SM501
- tristate "Support for Silicon Motion SM501"
+ tristate "Silicon Motion SM501"
---help---
This is the core driver for the Silicon Motion SM501 multimedia
companion chip. This device is a multifunction device which may
@@ -63,46 +518,147 @@ config MFD_SM501_GPIO
lines on the SM501. The platform data is used to supply the
base number for the first GPIO line to register.
-config MFD_RTSX_PCI
- tristate "Support for Realtek PCI-E card reader"
- depends on PCI && GENERIC_HARDIRQS
+config MFD_SMSC
+ bool "SMSC ECE1099 series chips"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the
+ ece1099 chips from SMSC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called smsc.
+
+config ABX500_CORE
+ bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions"
+ default y if ARCH_U300 || ARCH_U8500
+ help
+ Say yes here if you have the ABX500 Mixed Signal IC family
+ chips. This core driver exposes register access functions.
+ Functionality specific drivers using these functions can
+ remain unchanged when IC changes. Binding of the functions to
+ actual register access is done by the IC core driver.
+
+config AB3100_CORE
+ bool "ST-Ericsson AB3100 Mixed Signal Circuit core functions"
+ depends on I2C=y && ABX500_CORE && GENERIC_HARDIRQS
select MFD_CORE
+ default y if ARCH_U300
help
- This supports for Realtek PCI-Express card reader including rts5209,
- rts5229, rtl8411, etc. Realtek card reader supports access to many
- types of memory cards, such as Memory Stick, Memory Stick Pro,
- Secure Digital and MultiMediaCard.
+ Select this to enable the AB3100 Mixed Signal IC core
+ functionality. This connects to an AB3100 on the I2C bus
+ and exposes a number of symbols needed for dependent devices
+ to read and write registers and subscribe to events from
+ this multi-functional IC. This is needed to use other features
+ of the AB3100 such as battery-backed RTC, charging control,
+ LEDs, vibrator, system power and temperature, power management
+ and ALSA sound.
-config MFD_ASIC3
- bool "Support for Compaq ASIC3"
- depends on GENERIC_HARDIRQS && GPIOLIB && ARM
+config AB3100_OTP
+ tristate "ST-Ericsson AB3100 OTP functions"
+ depends on AB3100_CORE
+ default y if AB3100_CORE
+ help
+ Select this to enable the AB3100 Mixed Signal IC OTP (one-time
+ programmable memory) support. This exposes a sysfs file to read
+ out OTP values.
+
+config AB8500_CORE
+ bool "ST-Ericsson AB8500 Mixed Signal Power Management chip"
+ depends on GENERIC_HARDIRQS && ABX500_CORE && MFD_DB8500_PRCMU
+ select POWER_SUPPLY
select MFD_CORE
- ---help---
- This driver supports the ASIC3 multifunction chip found on many
- PDAs (mainly iPAQ and HTC based ones)
+ select IRQ_DOMAIN
+ help
+ Select this option to enable access to AB8500 power management
+ chip. This connects to U8500 either on the SSP/SPI bus (deprecated
+ since hardware version v1.0) or the I2C bus via PRCMU. It also adds
+ the irq_chip parts for handling the Mixed Signal chip events.
+ This chip embeds various other multimedia functionalities as well.
-config MFD_DAVINCI_VOICECODEC
- tristate
+config AB8500_DEBUG
+ bool "Enable debug info via debugfs"
+ depends on AB8500_CORE && DEBUG_FS
+ default y if DEBUG_FS
+ help
+ Select this option if you want debug information using the debug
+ filesystem, debugfs.
+
+config AB8500_GPADC
+ bool "ST-Ericsson AB8500 GPADC driver"
+ depends on AB8500_CORE && REGULATOR_AB8500
+ default y
+ help
+ The AB8500 GPADC driver is used to convert Acc and battery/ac/usb voltages.
+
+config MFD_DB8500_PRCMU
+ bool "ST-Ericsson DB8500 Power Reset Control Management Unit"
+ depends on UX500_SOC_DB8500
select MFD_CORE
+ help
+ Select this option to enable support for the DB8500 Power Reset
+ and Control Management Unit. This is basically an autonomous
+ system controller running an XP70 microprocessor, which is accessed
+ through a register map.
-config MFD_DM355EVM_MSP
- bool "DaVinci DM355 EVM microcontroller"
- depends on I2C=y && MACH_DAVINCI_DM355_EVM
+config MFD_STMPE
+ bool "STMicroelectronics STMPE"
+ depends on (I2C=y || SPI_MASTER=y) && GENERIC_HARDIRQS
+ select MFD_CORE
help
- This driver supports the MSP430 microcontroller used on these
- boards. MSP430 firmware manages resets and power sequencing,
- inputs from buttons and the IR remote, LEDs, an RTC, and more.
+ Support for the STMPE family of I/O Expanders from
+ STMicroelectronics.
-config MFD_TI_SSP
- tristate "TI Sequencer Serial Port support"
- depends on ARCH_DAVINCI_TNETV107X && GENERIC_HARDIRQS
+ Currently supported devices are:
+
+ STMPE811: GPIO, Touchscreen
+ STMPE1601: GPIO, Keypad
+ STMPE1801: GPIO, Keypad
+ STMPE2401: GPIO, Keypad
+ STMPE2403: GPIO, Keypad
+
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the functionality
+ of the device. Currently available sub drivers are:
+
+ GPIO: stmpe-gpio
+ Keypad: stmpe-keypad
+ Touchscreen: stmpe-ts
+
+menu "STMicroelectronics STMPE Interface Drivers"
+depends on MFD_STMPE
+
+config STMPE_I2C
+ bool "STMicroelectronics STMPE I2C Inteface"
+ depends on I2C=y
+ default y
+ help
+ This is used to enable the I2C interface of STMPE
+
+config STMPE_SPI
+ bool "STMicroelectronics STMPE SPI Inteface"
+ depends on SPI_MASTER
+ help
+ This is used to enable the SPI interface of STMPE
+endmenu
+
+config MFD_STA2X11
+ bool "STMicroelectronics STA2X11"
+ depends on STA2X11 && GENERIC_HARDIRQS
select MFD_CORE
- ---help---
- Say Y here if you want support for the Sequencer Serial Port
- in a Texas Instruments TNETV107X SoC.
+ select REGMAP_MMIO
- To compile this driver as a module, choose M here: the
- module will be called ti-ssp.
+config MFD_SYSCON
+ bool "System Controller Register R/W Based on Regmap"
+ select REGMAP_MMIO
+ help
+ Select this option to enable accessing system control registers
+ via regmap.
+
+config MFD_DAVINCI_VOICECODEC
+ tristate
+ select MFD_CORE
config MFD_TI_AM335X_TSCADC
tristate "TI ADC / Touch Screen chip support"
@@ -116,60 +672,56 @@ config MFD_TI_AM335X_TSCADC
To compile this driver as a module, choose M here: the
module will be called ti_am335x_tscadc.
-config HTC_EGPIO
- bool "HTC EGPIO support"
- depends on GENERIC_HARDIRQS && GPIOLIB && ARM
+config MFD_DM355EVM_MSP
+ bool "TI DaVinci DM355 EVM microcontroller"
+ depends on I2C=y && MACH_DAVINCI_DM355_EVM
help
- This driver supports the CPLD egpio chip present on
- several HTC phones. It provides basic support for input
- pins, output pins, and irqs.
+ This driver supports the MSP430 microcontroller used on these
+ boards. MSP430 firmware manages resets and power sequencing,
+ inputs from buttons and the IR remote, LEDs, an RTC, and more.
-config HTC_PASIC3
- tristate "HTC PASIC3 LED/DS1WM chip support"
+config MFD_LP8788
+ bool "TI LP8788 Power Management Unit Driver"
+ depends on I2C=y && GENERIC_HARDIRQS
select MFD_CORE
- depends on GENERIC_HARDIRQS
- help
- This core driver provides register access for the LED/DS1WM
- chips labeled "AIC2" and "AIC3", found on HTC Blueangel and
- HTC Magician devices, respectively. Actual functionality is
- handled by the leds-pasic3 and ds1wm drivers.
-
-config HTC_I2CPLD
- bool "HTC I2C PLD chip support"
- depends on I2C=y && GPIOLIB
+ select REGMAP_I2C
+ select IRQ_DOMAIN
help
- If you say yes here you get support for the supposed CPLD
- found on omap850 HTC devices like the HTC Wizard and HTC Herald.
- This device provides input and output GPIOs through an I2C
- interface to one or more sub-chips.
+ TI LP8788 PMU supports regulators, battery charger, RTC,
+ ADC, backlight driver and current sinks.
-config UCB1400_CORE
- tristate "Philips UCB1400 Core driver"
- depends on AC97_BUS
- depends on GPIOLIB
+config MFD_OMAP_USB_HOST
+ bool "TI OMAP USBHS core and TLL driver"
+ depends on USB_EHCI_HCD_OMAP || USB_OHCI_HCD_OMAP3
+ default y
help
- This enables support for the Philips UCB1400 core functions.
- The UCB1400 is an AC97 audio codec.
-
- To compile this driver as a module, choose M here: the
- module will be called ucb1400_core.
+ This is the core driver for the OMAP EHCI and OHCI drivers.
+ This MFD driver does the required setup functionalities for
+ OMAP USB Host drivers.
-config MFD_LM3533
- tristate "LM3533 Lighting Power chip"
- depends on I2C
+config MFD_PALMAS
+ bool "TI Palmas series chips"
select MFD_CORE
select REGMAP_I2C
- depends on GENERIC_HARDIRQS
+ select REGMAP_IRQ
+ depends on I2C=y && GENERIC_HARDIRQS
help
- Say yes here to enable support for National Semiconductor / TI
- LM3533 Lighting Power chips.
+ If you say yes here you get support for the Palmas
+ series of PMIC chips from Texas Instruments.
- This driver provides common support for accessing the device;
- additional drivers must be enabled in order to use the LED,
- backlight or ambient-light-sensor functionality of the device.
+config MFD_TI_SSP
+ tristate "TI Sequencer Serial Port support"
+ depends on ARCH_DAVINCI_TNETV107X && GENERIC_HARDIRQS
+ select MFD_CORE
+ ---help---
+ Say Y here if you want support for the Sequencer Serial Port
+ in a Texas Instruments TNETV107X SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ti-ssp.
config TPS6105X
- tristate "TPS61050/61052 Boost Converters"
+ tristate "TI TPS61050/61052 Boost Converters"
depends on I2C
select REGULATOR
select MFD_CORE
@@ -182,7 +734,7 @@ config TPS6105X
also contains a GPIO pin.
config TPS65010
- tristate "TPS6501x Power Management chips"
+ tristate "TI TPS6501x Power Management chips"
depends on I2C && GPIOLIB
default y if MACH_OMAP_H2 || MACH_OMAP_H3 || MACH_OMAP_OSK
help
@@ -195,7 +747,7 @@ config TPS65010
will be called tps65010.
config TPS6507X
- tristate "TPS6507x Power Management / Touch Screen chips"
+ tristate "TI TPS6507x Power Management / Touch Screen chips"
select MFD_CORE
depends on I2C && GENERIC_HARDIRQS
help
@@ -206,8 +758,24 @@ config TPS6507X
This driver can also be built as a module. If so, the module
will be called tps6507x.
+config TPS65911_COMPARATOR
+ tristate
+
+config MFD_TPS65090
+ bool "TI TPS65090 Power Management chips"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ help
+ If you say yes here you get support for the TPS65090 series of
+ Power Management chips.
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the
+ functionality of the device.
+
config MFD_TPS65217
- tristate "TPS65217 Power Management / White LED chips"
+ tristate "TI TPS65217 Power Management / White LED chips"
depends on I2C && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_I2C
@@ -222,7 +790,7 @@ config MFD_TPS65217
will be called tps65217.
config MFD_TPS6586X
- bool "TPS6586x Power Management chips"
+ bool "TI TPS6586x Power Management chips"
depends on I2C=y && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_I2C
@@ -237,7 +805,7 @@ config MFD_TPS6586X
will be called tps6586x.
config MFD_TPS65910
- bool "TPS65910 Power Management chip"
+ bool "TI TPS65910 Power Management chip"
depends on I2C=y && GPIOLIB && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_I2C
@@ -248,11 +816,14 @@ config MFD_TPS65910
Power Management chips.
config MFD_TPS65912
- bool
+ bool "TI TPS65912 Power Management chip"
depends on GPIOLIB
+ help
+ If you say yes here you get support for the TPS65912 series of
+ PM chips.
config MFD_TPS65912_I2C
- bool "TPS65912 Power Management chip with I2C"
+ bool "TI TPS65912 Power Management chip with I2C"
select MFD_CORE
select MFD_TPS65912
depends on I2C=y && GPIOLIB && GENERIC_HARDIRQS
@@ -261,7 +832,7 @@ config MFD_TPS65912_I2C
PM chips with I2C interface.
config MFD_TPS65912_SPI
- bool "TPS65912 Power Management chip with SPI"
+ bool "TI TPS65912 Power Management chip with SPI"
select MFD_CORE
select MFD_TPS65912
depends on SPI_MASTER && GPIOLIB && GENERIC_HARDIRQS
@@ -283,18 +854,8 @@ config MFD_TPS80031
ADC, RTC, 2 PWM, System Voltage Regulator/Battery Charger with
Power Path from USB, 32K clock generator.
-config MENELAUS
- bool "Texas Instruments TWL92330/Menelaus PM chip"
- depends on I2C=y && ARCH_OMAP2
- help
- If you say yes here you get support for the Texas Instruments
- TWL92330/Menelaus Power Management chip. This include voltage
- regulators, Dual slot memory card transceivers, real-time clock
- and other features that are often used in portable devices like
- cell phones and PDAs.
-
config TWL4030_CORE
- bool "Texas Instruments TWL4030/TWL5030/TWL6030/TPS659x0 Support"
+ bool "TI TWL4030/TWL5030/TWL6030/TPS659x0 Support"
depends on I2C=y && GENERIC_HARDIRQS
select IRQ_DOMAIN
select REGMAP_I2C
@@ -310,7 +871,7 @@ config TWL4030_CORE
versions) and many other features.
config TWL4030_MADC
- tristate "Texas Instruments TWL4030 MADC"
+ tristate "TI TWL4030 MADC"
depends on TWL4030_CORE
help
This driver provides support for triton TWL4030-MADC. The
@@ -320,7 +881,7 @@ config TWL4030_MADC
named twl4030-madc
config TWL4030_POWER
- bool "Support power resources on TWL4030 family chips"
+ bool "TI TWL4030 power resources"
depends on TWL4030_CORE && ARM
help
Say yes here if you want to use the power resources on the
@@ -333,13 +894,13 @@ config TWL4030_POWER
or reset when a sleep, wakeup or warm reset event occurs.
config MFD_TWL4030_AUDIO
- bool
+ bool "TI TWL4030 Audio"
depends on TWL4030_CORE && GENERIC_HARDIRQS
select MFD_CORE
default n
config TWL6040_CORE
- bool "Support for TWL6040 audio codec"
+ bool "TI TWL6040 audio codec"
depends on I2C=y && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_I2C
@@ -352,48 +913,53 @@ config TWL6040_CORE
additional drivers must be enabled in order to use the
functionality of the device (audio, vibra).
-config MFD_STMPE
- bool "Support STMicroelectronics STMPE"
- depends on (I2C=y || SPI_MASTER=y) && GENERIC_HARDIRQS
- select MFD_CORE
+config MENELAUS
+ bool "TI TWL92330/Menelaus PM chip"
+ depends on I2C=y && ARCH_OMAP2
help
- Support for the STMPE family of I/O Expanders from
- STMicroelectronics.
-
- Currently supported devices are:
-
- STMPE811: GPIO, Touchscreen
- STMPE1601: GPIO, Keypad
- STMPE2401: GPIO, Keypad
- STMPE2403: GPIO, Keypad
+ If you say yes here you get support for the Texas Instruments
+ TWL92330/Menelaus Power Management chip. This includes voltage
+ regulators, Dual slot memory card transceivers, real-time clock
+ and other features that are often used in portable devices like
+ cell phones and PDAs.
- This driver provides common support for accessing the device,
- additional drivers must be enabled in order to use the functionality
- of the device. Currently available sub drivers are:
+config MFD_WL1273_CORE
+ tristate "TI WL1273 FM radio"
+ depends on I2C && GENERIC_HARDIRQS
+ select MFD_CORE
+ default n
+ help
+ This is the core driver for the TI WL1273 FM radio. This MFD
+ driver connects the radio-wl1273 V4L2 module and the wl1273
+ audio codec.
- GPIO: stmpe-gpio
- Keypad: stmpe-keypad
- Touchscreen: stmpe-ts
+config MFD_LM3533
+ tristate "TI/National Semiconductor LM3533 Lighting Power chip"
+ depends on I2C
+ select MFD_CORE
+ select REGMAP_I2C
+ depends on GENERIC_HARDIRQS
+ help
+ Say yes here to enable support for National Semiconductor / TI
+ LM3533 Lighting Power chips.
-menu "STMPE Interface Drivers"
-depends on MFD_STMPE
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the LED,
+ backlight or ambient-light-sensor functionality of the device.
-config STMPE_I2C
- bool "STMPE I2C Inteface"
- depends on I2C=y
- default y
- help
- This is used to enable I2C interface of STMPE
+config MFD_TIMBERDALE
+ tristate "Timberdale FPGA"
+ select MFD_CORE
+ depends on PCI && GPIOLIB
+ ---help---
+ This is the core driver for the timberdale FPGA. This device is a
+ multifunction device which exposes numerous platform devices.
-config STMPE_SPI
- bool "STMPE SPI Inteface"
- depends on SPI_MASTER
- help
- This is used to enable SPI interface of STMPE
-endmenu
+ The timberdale FPGA can be found on the Intel Atom development board
+ for in-vehicle infotainment, called Russellville.
config MFD_TC3589X
- bool "Support Toshiba TC35892 and variants"
+ bool "Toshiba TC35892 and variants"
depends on I2C=y && GENERIC_HARDIRQS
select MFD_CORE
help
@@ -408,27 +974,15 @@ config MFD_TMIO
default n
config MFD_T7L66XB
- bool "Support Toshiba T7L66XB"
+ bool "Toshiba T7L66XB"
depends on ARM && HAVE_CLK && GENERIC_HARDIRQS
select MFD_CORE
select MFD_TMIO
help
Support for Toshiba Mobile IO Controller T7L66XB
-config MFD_SMSC
- bool "Support for the SMSC ECE1099 series chips"
- depends on I2C=y && GENERIC_HARDIRQS
- select MFD_CORE
- select REGMAP_I2C
- help
- If you say yes here you get support for the
- ece1099 chips from SMSC.
-
- To compile this driver as a module, choose M here: the
- module will be called smsc.
-
config MFD_TC6387XB
- bool "Support Toshiba TC6387XB"
+ bool "Toshiba TC6387XB"
depends on ARM && HAVE_CLK
select MFD_CORE
select MFD_TMIO
@@ -436,7 +990,7 @@ config MFD_TC6387XB
Support for Toshiba Mobile IO Controller TC6387XB
config MFD_TC6393XB
- bool "Support Toshiba TC6393XB"
+ bool "Toshiba TC6393XB"
depends on ARM && HAVE_CLK
select GPIOLIB
select MFD_CORE
@@ -444,165 +998,14 @@ config MFD_TC6393XB
help
Support for Toshiba Mobile IO Controller TC6393XB
-config PMIC_DA903X
- bool "Dialog Semiconductor DA9030/DA9034 PMIC Support"
- depends on I2C=y
- help
- Say yes here to support for Dialog Semiconductor DA9030 (a.k.a
- ARAVA) and DA9034 (a.k.a MICCO), these are Power Management IC
- usually found on PXA processors-based platforms. This includes
- the I2C driver and the core APIs _only_, you have to select
- individual components like LCD backlight, voltage regulators,
- LEDs and battery-charger under the corresponding menus.
-
-config PMIC_DA9052
- bool
- select MFD_CORE
-
-config MFD_DA9052_SPI
- bool "Support Dialog Semiconductor DA9052/53 PMIC variants with SPI"
- select REGMAP_SPI
- select REGMAP_IRQ
- select PMIC_DA9052
- depends on SPI_MASTER=y && GENERIC_HARDIRQS
- help
- Support for the Dialog Semiconductor DA9052 PMIC
- when controlled using SPI. This driver provides common support
- for accessing the device, additional drivers must be enabled in
- order to use the functionality of the device.
-
-config MFD_DA9052_I2C
- bool "Support Dialog Semiconductor DA9052/53 PMIC variants with I2C"
- select REGMAP_I2C
- select REGMAP_IRQ
- select PMIC_DA9052
- depends on I2C=y && GENERIC_HARDIRQS
- help
- Support for the Dialog Semiconductor DA9052 PMIC
- when controlled using I2C. This driver provides common support
- for accessing the device, additional drivers must be enabled in
- order to use the functionality of the device.
-
-config MFD_DA9055
- bool "Dialog Semiconductor DA9055 PMIC Support"
- select REGMAP_I2C
- select REGMAP_IRQ
- select PMIC_DA9055
- select MFD_CORE
- depends on I2C=y && GENERIC_HARDIRQS
- help
- Say yes here for support of Dialog Semiconductor DA9055. This is
- a Power Management IC. This driver provides common support for
- accessing the device as well as the I2C interface to the chip itself.
- Additional drivers must be enabled in order to use the functionality
- of the device.
-
- This driver can be built as a module. If built as a module it will be
- called "da9055"
-
-config PMIC_ADP5520
- bool "Analog Devices ADP5520/01 MFD PMIC Core Support"
- depends on I2C=y
- help
- Say yes here to add support for Analog Devices AD5520 and ADP5501,
- Multifunction Power Management IC. This includes
- the I2C driver and the core APIs _only_, you have to select
- individual components like LCD backlight, LEDs, GPIOs and Kepad
- under the corresponding menus.
-
-config MFD_LP8788
- bool "Texas Instruments LP8788 Power Management Unit Driver"
- depends on I2C=y && GENERIC_HARDIRQS
- select MFD_CORE
- select REGMAP_I2C
- select IRQ_DOMAIN
- help
- TI LP8788 PMU supports regulators, battery charger, RTC,
- ADC, backlight driver and current sinks.
-
-config MFD_MAX77686
- bool "Maxim Semiconductor MAX77686 PMIC Support"
- depends on I2C=y && GENERIC_HARDIRQS
- select MFD_CORE
- select REGMAP_I2C
- select IRQ_DOMAIN
- help
- Say yes here to support for Maxim Semiconductor MAX77686.
- This is a Power Management IC with RTC on chip.
- This driver provides common support for accessing the device;
- additional drivers must be enabled in order to use the functionality
- of the device.
-
-config MFD_MAX77693
- bool "Maxim Semiconductor MAX77693 PMIC Support"
- depends on I2C=y && GENERIC_HARDIRQS
- select MFD_CORE
- select REGMAP_I2C
- help
- Say yes here to support for Maxim Semiconductor MAX77693.
- This is a companion Power Management IC with Flash, Haptic, Charger,
- and MUIC(Micro USB Interface Controller) controls on chip.
- This driver provides common support for accessing the device;
- additional drivers must be enabled in order to use the functionality
- of the device.
-
-config MFD_MAX8907
- tristate "Maxim Semiconductor MAX8907 PMIC Support"
- select MFD_CORE
- depends on I2C=y && GENERIC_HARDIRQS
- select REGMAP_I2C
- select REGMAP_IRQ
- help
- Say yes here to support for Maxim Semiconductor MAX8907. This is
- a Power Management IC. This driver provides common support for
- accessing the device; additional drivers must be enabled in order
- to use the functionality of the device.
-
-config MFD_MAX8925
- bool "Maxim Semiconductor MAX8925 PMIC Support"
- depends on I2C=y && GENERIC_HARDIRQS
- select MFD_CORE
- help
- Say yes here to support for Maxim Semiconductor MAX8925. This is
- a Power Management IC. This driver provides common support for
- accessing the device, additional drivers must be enabled in order
- to use the functionality of the device.
-
-config MFD_MAX8997
- bool "Maxim Semiconductor MAX8997/8966 PMIC Support"
- depends on I2C=y && GENERIC_HARDIRQS
- select MFD_CORE
- select IRQ_DOMAIN
- help
- Say yes here to support for Maxim Semiconductor MAX8997/8966.
- This is a Power Management IC with RTC, Flash, Fuel Gauge, Haptic,
- MUIC controls on chip.
- This driver provides common support for accessing the device;
- additional drivers must be enabled in order to use the functionality
- of the device.
-
-config MFD_MAX8998
- bool "Maxim Semiconductor MAX8998/National LP3974 PMIC Support"
- depends on I2C=y && GENERIC_HARDIRQS
- select MFD_CORE
- help
- Say yes here to support for Maxim Semiconductor MAX8998 and
- National Semiconductor LP3974. This is a Power Management IC.
- This driver provides common support for accessing the device,
- additional drivers must be enabled in order to use the functionality
- of the device.
-
-config MFD_SEC_CORE
- bool "SAMSUNG Electronics PMIC Series Support"
- depends on I2C=y && GENERIC_HARDIRQS
+config MFD_VX855
+ tristate "VIA VX855/VX875 integrated south bridge"
+ depends on PCI && GENERIC_HARDIRQS
select MFD_CORE
- select REGMAP_I2C
- select REGMAP_IRQ
help
- Support for the Samsung Electronics MFD series.
- This driver provides common support for accessing the device,
- additional drivers must be enabled in order to use the functionality
- of the device
+ Say yes here to enable support for various functions of the
+ VIA VX855/VX875 south bridge. You will need to enable the vx855_spi
+ and/or vx855_gpio drivers for this to do anything useful.
config MFD_ARIZONA
select REGMAP
@@ -611,7 +1014,7 @@ config MFD_ARIZONA
bool
config MFD_ARIZONA_I2C
- tristate "Support Wolfson Microelectronics Arizona platform with I2C"
+ tristate "Wolfson Microelectronics Arizona platform with I2C"
select MFD_ARIZONA
select MFD_CORE
select REGMAP_I2C
@@ -621,7 +1024,7 @@ config MFD_ARIZONA_I2C
core functionality controlled via I2C.
config MFD_ARIZONA_SPI
- tristate "Support Wolfson Microelectronics Arizona platform with SPI"
+ tristate "Wolfson Microelectronics Arizona platform with SPI"
select MFD_ARIZONA
select MFD_CORE
select REGMAP_SPI
@@ -631,19 +1034,19 @@ config MFD_ARIZONA_SPI
core functionality controlled via SPI.
config MFD_WM5102
- bool "Support Wolfson Microelectronics WM5102"
+ bool "Wolfson Microelectronics WM5102"
depends on MFD_ARIZONA
help
Support for Wolfson Microelectronics WM5102 low power audio SoC
config MFD_WM5110
- bool "Support Wolfson Microelectronics WM5110"
+ bool "Wolfson Microelectronics WM5110"
depends on MFD_ARIZONA
help
Support for Wolfson Microelectronics WM5110 low power audio SoC
config MFD_WM8400
- bool "Support Wolfson Microelectronics WM8400"
+ bool "Wolfson Microelectronics WM8400"
select MFD_CORE
depends on I2C=y && GENERIC_HARDIRQS
select REGMAP_I2C
@@ -658,7 +1061,7 @@ config MFD_WM831X
depends on GENERIC_HARDIRQS
config MFD_WM831X_I2C
- bool "Support Wolfson Microelectronics WM831x/2x PMICs with I2C"
+ bool "Wolfson Microelectronics WM831x/2x PMICs with I2C"
select MFD_CORE
select MFD_WM831X
select REGMAP_I2C
@@ -671,7 +1074,7 @@ config MFD_WM831X_I2C
order to use the functionality of the device.
config MFD_WM831X_SPI
- bool "Support Wolfson Microelectronics WM831x/2x PMICs with SPI"
+ bool "Wolfson Microelectronics WM831x/2x PMICs with SPI"
select MFD_CORE
select MFD_WM831X
select REGMAP_SPI
@@ -687,56 +1090,8 @@ config MFD_WM8350
bool
depends on GENERIC_HARDIRQS
-config MFD_WM8350_CONFIG_MODE_0
- bool
- depends on MFD_WM8350
-
-config MFD_WM8350_CONFIG_MODE_1
- bool
- depends on MFD_WM8350
-
-config MFD_WM8350_CONFIG_MODE_2
- bool
- depends on MFD_WM8350
-
-config MFD_WM8350_CONFIG_MODE_3
- bool
- depends on MFD_WM8350
-
-config MFD_WM8351_CONFIG_MODE_0
- bool
- depends on MFD_WM8350
-
-config MFD_WM8351_CONFIG_MODE_1
- bool
- depends on MFD_WM8350
-
-config MFD_WM8351_CONFIG_MODE_2
- bool
- depends on MFD_WM8350
-
-config MFD_WM8351_CONFIG_MODE_3
- bool
- depends on MFD_WM8350
-
-config MFD_WM8352_CONFIG_MODE_0
- bool
- depends on MFD_WM8350
-
-config MFD_WM8352_CONFIG_MODE_1
- bool
- depends on MFD_WM8350
-
-config MFD_WM8352_CONFIG_MODE_2
- bool
- depends on MFD_WM8350
-
-config MFD_WM8352_CONFIG_MODE_3
- bool
- depends on MFD_WM8350
-
config MFD_WM8350_I2C
- bool "Support Wolfson Microelectronics WM8350 with I2C"
+ bool "Wolfson Microelectronics WM8350 with I2C"
select MFD_WM8350
depends on I2C=y && GENERIC_HARDIRQS
help
@@ -747,7 +1102,7 @@ config MFD_WM8350_I2C
selected to enable support for the functionality of the chip.
config MFD_WM8994
- bool "Support Wolfson Microelectronics WM8994"
+ bool "Wolfson Microelectronics WM8994"
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -760,365 +1115,6 @@ config MFD_WM8994
core support for the WM8994, in order to use the actual
functionality of the device other drivers must be enabled.
-config MFD_PCF50633
- tristate "Support for NXP PCF50633"
- depends on I2C
- select REGMAP_I2C
- help
- Say yes here if you have NXP PCF50633 chip on your board.
- This core driver provides register access and IRQ handling
- facilities, and registers devices for the various functions
- so that function-specific drivers can bind to them.
-
-config PCF50633_ADC
- tristate "Support for NXP PCF50633 ADC"
- depends on MFD_PCF50633
- help
- Say yes here if you want to include support for ADC in the
- NXP PCF50633 chip.
-
-config PCF50633_GPIO
- tristate "Support for NXP PCF50633 GPIO"
- depends on MFD_PCF50633
- help
- Say yes here if you want to include support GPIO for pins on
- the PCF50633 chip.
-
-config MFD_MC13783
- tristate
-
-config MFD_MC13XXX
- tristate
- depends on (SPI_MASTER || I2C) && GENERIC_HARDIRQS
- select MFD_CORE
- select MFD_MC13783
- help
- Enable support for the Freescale MC13783 and MC13892 PMICs.
- This driver provides common support for accessing the device,
- additional drivers must be enabled in order to use the
- functionality of the device.
-
-config MFD_MC13XXX_SPI
- tristate "Freescale MC13783 and MC13892 SPI interface"
- depends on SPI_MASTER && GENERIC_HARDIRQS
- select REGMAP_SPI
- select MFD_MC13XXX
- help
- Select this if your MC13xxx is connected via an SPI bus.
-
-config MFD_MC13XXX_I2C
- tristate "Freescale MC13892 I2C interface"
- depends on I2C && GENERIC_HARDIRQS
- select REGMAP_I2C
- select MFD_MC13XXX
- help
- Select this if your MC13xxx is connected via an I2C bus.
-
-config ABX500_CORE
- bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions"
- default y if ARCH_U300 || ARCH_U8500
- help
- Say yes here if you have the ABX500 Mixed Signal IC family
- chips. This core driver expose register access functions.
- Functionality specific drivers using these functions can
- remain unchanged when IC changes. Binding of the functions to
- actual register access is done by the IC core driver.
-
-config AB3100_CORE
- bool "ST-Ericsson AB3100 Mixed Signal Circuit core functions"
- depends on I2C=y && ABX500_CORE && GENERIC_HARDIRQS
- select MFD_CORE
- default y if ARCH_U300
- help
- Select this to enable the AB3100 Mixed Signal IC core
- functionality. This connects to a AB3100 on the I2C bus
- and expose a number of symbols needed for dependent devices
- to read and write registers and subscribe to events from
- this multi-functional IC. This is needed to use other features
- of the AB3100 such as battery-backed RTC, charging control,
- LEDs, vibrator, system power and temperature, power management
- and ALSA sound.
-
-config AB3100_OTP
- tristate "ST-Ericsson AB3100 OTP functions"
- depends on AB3100_CORE
- default y if AB3100_CORE
- help
- Select this to enable the AB3100 Mixed Signal IC OTP (one-time
- programmable memory) support. This exposes a sysfs file to read
- out OTP values.
-
-config EZX_PCAP
- bool "PCAP Support"
- depends on GENERIC_HARDIRQS && SPI_MASTER
- help
- This enables the PCAP ASIC present on EZX Phones. This is
- needed for MMC, TouchScreen, Sound, USB, etc..
-
-config AB8500_CORE
- bool "ST-Ericsson AB8500 Mixed Signal Power Management chip"
- depends on GENERIC_HARDIRQS && ABX500_CORE && MFD_DB8500_PRCMU
- select POWER_SUPPLY
- select MFD_CORE
- select IRQ_DOMAIN
- help
- Select this option to enable access to AB8500 power management
- chip. This connects to U8500 either on the SSP/SPI bus (deprecated
- since hardware version v1.0) or the I2C bus via PRCMU. It also adds
- the irq_chip parts for handling the Mixed Signal chip events.
- This chip embeds various other multimedia funtionalities as well.
-
-config AB8500_DEBUG
- bool "Enable debug info via debugfs"
- depends on AB8500_CORE && DEBUG_FS
- default y if DEBUG_FS
- help
- Select this option if you want debug information using the debug
- filesystem, debugfs.
-
-config AB8500_GPADC
- bool "AB8500 GPADC driver"
- depends on AB8500_CORE && REGULATOR_AB8500
- default y
- help
- AB8500 GPADC driver used to convert Acc and battery/ac/usb voltage
-
-config MFD_DB8500_PRCMU
- bool "ST-Ericsson DB8500 Power Reset Control Management Unit"
- depends on UX500_SOC_DB8500
- select MFD_CORE
- help
- Select this option to enable support for the DB8500 Power Reset
- and Control Management Unit. This is basically an autonomous
- system controller running an XP70 microprocessor, which is accessed
- through a register map.
-
-config MFD_CS5535
- tristate "Support for CS5535 and CS5536 southbridge core functions"
- select MFD_CORE
- depends on PCI && X86
- ---help---
- This is the core driver for CS5535/CS5536 MFD functions. This is
- necessary for using the board's GPIO and MFGPT functionality.
-
-config MFD_TIMBERDALE
- tristate "Support for the Timberdale FPGA"
- select MFD_CORE
- depends on PCI && GPIOLIB
- ---help---
- This is the core driver for the timberdale FPGA. This device is a
- multifunction device which exposes numerous platform devices.
-
- The timberdale FPGA can be found on the Intel Atom development board
- for in-vehicle infontainment, called Russellville.
-
-config LPC_SCH
- tristate "Intel SCH LPC"
- depends on PCI && GENERIC_HARDIRQS
- select MFD_CORE
- help
- LPC bridge function of the Intel SCH provides support for
- System Management Bus and General Purpose I/O.
-
-config LPC_ICH
- tristate "Intel ICH LPC"
- depends on PCI && GENERIC_HARDIRQS
- select MFD_CORE
- help
- The LPC bridge function of the Intel ICH provides support for
- many functional units. This driver provides needed support for
- other drivers to control these functions, currently GPIO and
- watchdog.
-
-config MFD_RDC321X
- tristate "Support for RDC-R321x southbridge"
- select MFD_CORE
- depends on PCI && GENERIC_HARDIRQS
- help
- Say yes here if you want to have support for the RDC R-321x SoC
- southbridge which provides access to GPIOs and Watchdog using the
- southbridge PCI device configuration space.
-
-config MFD_JANZ_CMODIO
- tristate "Support for Janz CMOD-IO PCI MODULbus Carrier Board"
- select MFD_CORE
- depends on PCI && GENERIC_HARDIRQS
- help
- This is the core driver for the Janz CMOD-IO PCI MODULbus
- carrier board. This device is a PCI to MODULbus bridge which may
- host many different types of MODULbus daughterboards, including
- CAN and GPIO controllers.
-
-config MFD_JZ4740_ADC
- bool "Support for the JZ4740 SoC ADC core"
- select MFD_CORE
- select GENERIC_IRQ_CHIP
- depends on MACH_JZ4740
- help
- Say yes here if you want support for the ADC unit in the JZ4740 SoC.
- This driver is necessary for jz4740-battery and jz4740-hwmon driver.
-
-config MFD_VX855
- tristate "Support for VIA VX855/VX875 integrated south bridge"
- depends on PCI && GENERIC_HARDIRQS
- select MFD_CORE
- help
- Say yes here to enable support for various functions of the
- VIA VX855/VX875 south bridge. You will need to enable the vx855_spi
- and/or vx855_gpio drivers for this to do anything useful.
-
-config MFD_WL1273_CORE
- tristate "Support for TI WL1273 FM radio."
- depends on I2C && GENERIC_HARDIRQS
- select MFD_CORE
- default n
- help
- This is the core driver for the TI WL1273 FM radio. This MFD
- driver connects the radio-wl1273 V4L2 module and the wl1273
- audio codec.
-
-config MFD_OMAP_USB_HOST
- bool "Support OMAP USBHS core and TLL driver"
- depends on USB_EHCI_HCD_OMAP || USB_OHCI_HCD_OMAP3
- default y
- help
- This is the core driver for the OAMP EHCI and OHCI drivers.
- This MFD driver does the required setup functionalities for
- OMAP USB Host drivers.
-
-config MFD_PM8XXX
- tristate
-
-config MFD_PM8921_CORE
- tristate "Qualcomm PM8921 PMIC chip"
- depends on SSBI && BROKEN
- select MFD_CORE
- select MFD_PM8XXX
- help
- If you say yes to this option, support will be included for the
- built-in PM8921 PMIC chip.
-
- This is required if your board has a PM8921 and uses its features,
- such as: MPPs, GPIOs, regulators, interrupts, and PWM.
-
- Say M here if you want to include support for PM8921 chip as a module.
- This will build a module called "pm8921-core".
-
-config MFD_PM8XXX_IRQ
- bool "Support for Qualcomm PM8xxx IRQ features"
- depends on MFD_PM8XXX
- default y if MFD_PM8XXX
- help
- This is the IRQ driver for Qualcomm PM 8xxx PMIC chips.
-
- This is required to use certain other PM 8xxx features, such as GPIO
- and MPP.
-
-config TPS65911_COMPARATOR
- tristate
-
-config MFD_TPS65090
- bool "TPS65090 Power Management chips"
- depends on I2C=y && GENERIC_HARDIRQS
- select MFD_CORE
- select REGMAP_I2C
- select REGMAP_IRQ
- help
- If you say yes here you get support for the TPS65090 series of
- Power Management chips.
- This driver provides common support for accessing the device,
- additional drivers must be enabled in order to use the
- functionality of the device.
-
-config MFD_AAT2870_CORE
- bool "Support for the AnalogicTech AAT2870"
- select MFD_CORE
- depends on I2C=y && GPIOLIB && GENERIC_HARDIRQS
- help
- If you say yes here you get support for the AAT2870.
- This driver provides common support for accessing the device,
- additional drivers must be enabled in order to use the
- functionality of the device.
-
-config MFD_INTEL_MSIC
- bool "Support for Intel MSIC"
- depends on INTEL_SCU_IPC
- select MFD_CORE
- help
- Select this option to enable access to Intel MSIC (Avatele
- Passage) chip. This chip embeds audio, battery, GPIO, etc.
- devices used in Intel Medfield platforms.
-
-config MFD_RC5T583
- bool "Ricoh RC5T583 Power Management system device"
- depends on I2C=y && GENERIC_HARDIRQS
- select MFD_CORE
- select REGMAP_I2C
- help
- Select this option to get support for the RICOH583 Power
- Management system device.
- This driver provides common support for accessing the device
- through i2c interface. The device supports multiple sub-devices
- like GPIO, interrupts, RTC, LDO and DCDC regulators, onkey.
- Additional drivers must be enabled in order to use the
- different functionality of the device.
-
-config MFD_STA2X11
- bool "STA2X11 multi function device support"
- depends on STA2X11 && GENERIC_HARDIRQS
- select MFD_CORE
- select REGMAP_MMIO
-
-config MFD_SYSCON
- bool "System Controller Register R/W Based on Regmap"
- depends on OF
- select REGMAP_MMIO
- help
- Select this option to enable accessing system control registers
- via regmap.
-
-config MFD_PALMAS
- bool "Support for the TI Palmas series chips"
- select MFD_CORE
- select REGMAP_I2C
- select REGMAP_IRQ
- depends on I2C=y && GENERIC_HARDIRQS
- help
- If you say yes here you get support for the Palmas
- series of PMIC chips from Texas Instruments.
-
-config MFD_VIPERBOARD
- tristate "Support for Nano River Technologies Viperboard"
- select MFD_CORE
- depends on USB && GENERIC_HARDIRQS
- default n
- help
- Say yes here if you want support for Nano River Technologies
- Viperboard.
- There are mfd cell drivers available for i2c master, adc and
- both gpios found on the board. The spi part does not yet
- have a driver.
- You need to select the mfd cell drivers separately.
- The drivers do not support all features the board exposes.
-
-config MFD_RETU
- tristate "Support for Retu multi-function device"
- select MFD_CORE
- depends on I2C && GENERIC_HARDIRQS
- select REGMAP_IRQ
- help
- Retu is a multi-function device found on Nokia Internet Tablets
- (770, N800 and N810).
-
-config MFD_AS3711
- bool "Support for AS3711"
- select MFD_CORE
- select REGMAP_I2C
- select REGMAP_IRQ
- depends on I2C=y && GENERIC_HARDIRQS
- help
- Support for the AS3711 PMIC from AMS
-
endmenu
endif
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index b90409c..718e94a 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -8,8 +8,11 @@ obj-$(CONFIG_MFD_88PM800) += 88pm800.o 88pm80x.o
obj-$(CONFIG_MFD_88PM805) += 88pm805.o 88pm80x.o
obj-$(CONFIG_MFD_SM501) += sm501.o
obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o
+obj-$(CONFIG_MFD_CROS_EC) += cros_ec.o
+obj-$(CONFIG_MFD_CROS_EC_I2C) += cros_ec_i2c.o
+obj-$(CONFIG_MFD_CROS_EC_SPI) += cros_ec_spi.o
-rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o
+rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o
obj-$(CONFIG_MFD_RTSX_PCI) += rtsx_pci.o
obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o
@@ -131,6 +134,10 @@ obj-$(CONFIG_MFD_JZ4740_ADC) += jz4740-adc.o
obj-$(CONFIG_MFD_TPS6586X) += tps6586x.o
obj-$(CONFIG_MFD_VX855) += vx855.o
obj-$(CONFIG_MFD_WL1273_CORE) += wl1273-core.o
+
+si476x-core-y := si476x-cmd.o si476x-prop.o si476x-i2c.o
+obj-$(CONFIG_MFD_SI476X_CORE) += si476x-core.o
+
obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o
obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o omap-usb-tll.o
obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
index f1beb49..dfdb0a2 100644
--- a/drivers/mfd/aat2870-core.c
+++ b/drivers/mfd/aat2870-core.c
@@ -367,12 +367,12 @@ static int aat2870_i2c_probe(struct i2c_client *client,
int i, j;
int ret = 0;
- aat2870 = kzalloc(sizeof(struct aat2870_data), GFP_KERNEL);
+ aat2870 = devm_kzalloc(&client->dev, sizeof(struct aat2870_data),
+ GFP_KERNEL);
if (!aat2870) {
dev_err(&client->dev,
"Failed to allocate memory for aat2870\n");
- ret = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
aat2870->dev = &client->dev;
@@ -400,12 +400,12 @@ static int aat2870_i2c_probe(struct i2c_client *client,
aat2870->init(aat2870);
if (aat2870->en_pin >= 0) {
- ret = gpio_request_one(aat2870->en_pin, GPIOF_OUT_INIT_HIGH,
- "aat2870-en");
+ ret = devm_gpio_request_one(&client->dev, aat2870->en_pin,
+ GPIOF_OUT_INIT_HIGH, "aat2870-en");
if (ret < 0) {
dev_err(&client->dev,
"Failed to request GPIO %d\n", aat2870->en_pin);
- goto out_kfree;
+ return ret;
}
}
@@ -436,11 +436,6 @@ static int aat2870_i2c_probe(struct i2c_client *client,
out_disable:
aat2870_disable(aat2870);
- if (aat2870->en_pin >= 0)
- gpio_free(aat2870->en_pin);
-out_kfree:
- kfree(aat2870);
-out:
return ret;
}
@@ -452,11 +447,8 @@ static int aat2870_i2c_remove(struct i2c_client *client)
mfd_remove_devices(aat2870->dev);
aat2870_disable(aat2870);
- if (aat2870->en_pin >= 0)
- gpio_free(aat2870->en_pin);
if (aat2870->uninit)
aat2870->uninit(aat2870);
- kfree(aat2870);
return 0;
}
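
For reference, a minimal sketch of the managed-resource (devm_*) pattern the aat2870 conversion above switches to; the foo names and the GPIO number are illustrative assumptions, not the driver's real symbols:

#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/slab.h>

struct foo_data {
	int en_pin;
};

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct foo_data *foo;
	int ret;

	/* Freed automatically if probe fails or when the device is unbound. */
	foo = devm_kzalloc(&client->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	foo->en_pin = 5;	/* assumed GPIO number, for illustration only */

	/* Released automatically as well, so remove() needs no gpio_free(). */
	ret = devm_gpio_request_one(&client->dev, foo->en_pin,
				    GPIOF_OUT_INIT_HIGH, "foo-en");
	if (ret < 0)
		return ret;

	i2c_set_clientdata(client, foo);
	return 0;
}

Because both resources are device-managed, the error paths collapse to plain returns, which is what lets the out_kfree/out labels above be deleted.
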
diff --git a/drivers/mfd/ab3100-otp.c b/drivers/mfd/ab3100-otp.c
index 8440010..d7ce016 100644
--- a/drivers/mfd/ab3100-otp.c
+++ b/drivers/mfd/ab3100-otp.c
@@ -248,19 +248,7 @@ static struct platform_driver ab3100_otp_driver = {
.remove = __exit_p(ab3100_otp_remove),
};
-static int __init ab3100_otp_init(void)
-{
- return platform_driver_probe(&ab3100_otp_driver,
- ab3100_otp_probe);
-}
-
-static void __exit ab3100_otp_exit(void)
-{
- platform_driver_unregister(&ab3100_otp_driver);
-}
-
-module_init(ab3100_otp_init);
-module_exit(ab3100_otp_exit);
+module_platform_driver_probe(ab3100_otp_driver, ab3100_otp_probe);
MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
MODULE_DESCRIPTION("AB3100 OTP Readout Driver");
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index f276352..8e8a016 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -458,22 +458,23 @@ static void update_latch_offset(u8 *offset, int i)
static int ab8500_handle_hierarchical_line(struct ab8500 *ab8500,
int latch_offset, u8 latch_val)
{
- int int_bit = __ffs(latch_val);
- int line, i;
+ int int_bit, line, i;
- do {
- int_bit = __ffs(latch_val);
+ for (i = 0; i < ab8500->mask_size; i++)
+ if (ab8500->irq_reg_offset[i] == latch_offset)
+ break;
- for (i = 0; i < ab8500->mask_size; i++)
- if (ab8500->irq_reg_offset[i] == latch_offset)
- break;
+ if (i >= ab8500->mask_size) {
+ dev_err(ab8500->dev, "Register offset 0x%2x not declared\n",
+ latch_offset);
+ return -ENXIO;
+ }
- if (i >= ab8500->mask_size) {
- dev_err(ab8500->dev, "Register offset 0x%2x not declared\n",
- latch_offset);
- return -ENXIO;
- }
+ /* ignore masked out interrupts */
+ latch_val &= ~ab8500->mask[i];
+ while (latch_val) {
+ int_bit = __ffs(latch_val);
line = (i << 3) + int_bit;
latch_val &= ~(1 << int_bit);
@@ -491,7 +492,7 @@ static int ab8500_handle_hierarchical_line(struct ab8500 *ab8500,
line += 1;
handle_nested_irq(ab8500->irq_base + line);
- } while (latch_val);
+ }
return 0;
}
@@ -1107,6 +1108,7 @@ static struct mfd_cell ab8500_devs[] = {
},
{
.name = "ab8500-usb",
+ .of_compatible = "stericsson,ab8500-usb",
.num_resources = ARRAY_SIZE(ab8500_usb_resources),
.resources = ab8500_usb_resources,
},
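
The reworked ab8500_handle_hierarchical_line() above first looks up the mask for the latch register and then walks only the unmasked bits; a standalone sketch of that bit-walking idiom (walk_latch() and its callback are illustrative helpers, not kernel symbols):

#include <linux/bitops.h>

static void walk_latch(unsigned long latch_val, unsigned long mask,
		       void (*handle_bit)(int bit))
{
	/* Ignore masked-out interrupts before touching any of them. */
	latch_val &= ~mask;

	while (latch_val) {
		int bit = __ffs(latch_val);	/* lowest set bit first */

		latch_val &= ~(1UL << bit);
		handle_bit(bit);
	}
}

Unlike the old do/while form, the loop body is never entered when every bit in the latch is masked.
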
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index 65f7228..5e65b28 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -332,7 +332,7 @@ if (ad_value < 0) {
return voltage;
}
-EXPORT_SYMBOL(ab8500_gpadc_convert);
+EXPORT_SYMBOL(ab8500_gpadc_sw_hw_convert);
/**
* ab8500_gpadc_read_raw() - gpadc read
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index 272479c..fbca1ce 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -242,7 +242,7 @@ static int __init ab8500_sysctrl_init(void)
{
return platform_driver_register(&ab8500_sysctrl_driver);
}
-subsys_initcall(ab8500_sysctrl_init);
+arch_initcall(ab8500_sysctrl_init);
MODULE_AUTHOR("Mattias Nilsson <mattias.i.nilsson@stericsson.com");
MODULE_DESCRIPTION("AB8500 system control driver");
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
index 210dd03..0d2eba0 100644
--- a/drivers/mfd/adp5520.c
+++ b/drivers/mfd/adp5520.c
@@ -36,6 +36,7 @@ struct adp5520_chip {
struct blocking_notifier_head notifier_list;
int irq;
unsigned long id;
+ uint8_t mode;
};
static int __adp5520_read(struct i2c_client *client,
@@ -326,7 +327,10 @@ static int adp5520_suspend(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct adp5520_chip *chip = dev_get_drvdata(&client->dev);
- adp5520_clr_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY);
+ adp5520_read(chip->dev, ADP5520_MODE_STATUS, &chip->mode);
+ /* All other bits are W1C */
+ chip->mode &= ADP5520_BL_EN | ADP5520_DIM_EN | ADP5520_nSTNBY;
+ adp5520_write(chip->dev, ADP5520_MODE_STATUS, 0);
return 0;
}
@@ -335,7 +339,7 @@ static int adp5520_resume(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct adp5520_chip *chip = dev_get_drvdata(&client->dev);
- adp5520_set_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY);
+ adp5520_write(chip->dev, ADP5520_MODE_STATUS, chip->mode);
return 0;
}
#endif
@@ -360,17 +364,7 @@ static struct i2c_driver adp5520_driver = {
.id_table = adp5520_id,
};
-static int __init adp5520_init(void)
-{
- return i2c_add_driver(&adp5520_driver);
-}
-module_init(adp5520_init);
-
-static void __exit adp5520_exit(void)
-{
- i2c_del_driver(&adp5520_driver);
-}
-module_exit(adp5520_exit);
+module_i2c_driver(adp5520_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("ADP5520(01) PMIC-MFD Driver");
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index b562c7b..6ab0304 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -39,11 +39,21 @@ int arizona_clk32k_enable(struct arizona *arizona)
arizona->clk32k_ref++;
- if (arizona->clk32k_ref == 1)
+ if (arizona->clk32k_ref == 1) {
+ switch (arizona->pdata.clk32k_src) {
+ case ARIZONA_32KZ_MCLK1:
+ ret = pm_runtime_get_sync(arizona->dev);
+ if (ret != 0)
+ goto out;
+ break;
+ }
+
ret = regmap_update_bits(arizona->regmap, ARIZONA_CLOCK_32K_1,
ARIZONA_CLK_32K_ENA,
ARIZONA_CLK_32K_ENA);
+ }
+out:
if (ret != 0)
arizona->clk32k_ref--;
@@ -63,10 +73,17 @@ int arizona_clk32k_disable(struct arizona *arizona)
arizona->clk32k_ref--;
- if (arizona->clk32k_ref == 0)
+ if (arizona->clk32k_ref == 0) {
regmap_update_bits(arizona->regmap, ARIZONA_CLOCK_32K_1,
ARIZONA_CLK_32K_ENA, 0);
+ switch (arizona->pdata.clk32k_src) {
+ case ARIZONA_32KZ_MCLK1:
+ pm_runtime_put_sync(arizona->dev);
+ break;
+ }
+ }
+
mutex_unlock(&arizona->clk_lock);
return ret;
@@ -179,42 +196,134 @@ static irqreturn_t arizona_overclocked(int irq, void *data)
return IRQ_HANDLED;
}
-static int arizona_wait_for_boot(struct arizona *arizona)
+static int arizona_poll_reg(struct arizona *arizona,
+ int timeout, unsigned int reg,
+ unsigned int mask, unsigned int target)
{
- unsigned int reg;
+ unsigned int val = 0;
int ret, i;
+ for (i = 0; i < timeout; i++) {
+ ret = regmap_read(arizona->regmap, reg, &val);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to read reg %u: %d\n",
+ reg, ret);
+ continue;
+ }
+
+ if ((val & mask) == target)
+ return 0;
+
+ msleep(1);
+ }
+
+ dev_err(arizona->dev, "Polling reg %u timed out: %x\n", reg, val);
+ return -ETIMEDOUT;
+}
+
+static int arizona_wait_for_boot(struct arizona *arizona)
+{
+ int ret;
+
/*
* We can't use an interrupt as we need to runtime resume to do so,
* we won't race with the interrupt handler as it'll be blocked on
* runtime resume.
*/
- for (i = 0; i < 5; i++) {
- msleep(1);
+ ret = arizona_poll_reg(arizona, 5, ARIZONA_INTERRUPT_RAW_STATUS_5,
+ ARIZONA_BOOT_DONE_STS, ARIZONA_BOOT_DONE_STS);
- ret = regmap_read(arizona->regmap,
- ARIZONA_INTERRUPT_RAW_STATUS_5, &reg);
- if (ret != 0) {
- dev_err(arizona->dev, "Failed to read boot state: %d\n",
- ret);
- continue;
- }
+ if (!ret)
+ regmap_write(arizona->regmap, ARIZONA_INTERRUPT_STATUS_5,
+ ARIZONA_BOOT_DONE_STS);
- if (reg & ARIZONA_BOOT_DONE_STS)
- break;
+ pm_runtime_mark_last_busy(arizona->dev);
+
+ return ret;
+}
+
+static int arizona_apply_hardware_patch(struct arizona* arizona)
+{
+ unsigned int fll, sysclk;
+ int ret, err;
+
+ regcache_cache_bypass(arizona->regmap, true);
+
+ /* Cache existing FLL and SYSCLK settings */
+ ret = regmap_read(arizona->regmap, ARIZONA_FLL1_CONTROL_1, &fll);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to cache FLL settings: %d\n",
+ ret);
+ return ret;
+ }
+ ret = regmap_read(arizona->regmap, ARIZONA_SYSTEM_CLOCK_1, &sysclk);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to cache SYSCLK settings: %d\n",
+ ret);
+ return ret;
}
- if (reg & ARIZONA_BOOT_DONE_STS) {
- regmap_write(arizona->regmap, ARIZONA_INTERRUPT_STATUS_5,
- ARIZONA_BOOT_DONE_STS);
- } else {
- dev_err(arizona->dev, "Device boot timed out: %x\n", reg);
- return -ETIMEDOUT;
+ /* Start up SYSCLK using the FLL in free running mode */
+ ret = regmap_write(arizona->regmap, ARIZONA_FLL1_CONTROL_1,
+ ARIZONA_FLL1_ENA | ARIZONA_FLL1_FREERUN);
+ if (ret != 0) {
+ dev_err(arizona->dev,
+ "Failed to start FLL in freerunning mode: %d\n",
+ ret);
+ return ret;
+ }
+ ret = arizona_poll_reg(arizona, 25, ARIZONA_INTERRUPT_RAW_STATUS_5,
+ ARIZONA_FLL1_CLOCK_OK_STS,
+ ARIZONA_FLL1_CLOCK_OK_STS);
+ if (ret != 0) {
+ ret = -ETIMEDOUT;
+ goto err_fll;
}
- pm_runtime_mark_last_busy(arizona->dev);
+ ret = regmap_write(arizona->regmap, ARIZONA_SYSTEM_CLOCK_1, 0x0144);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to start SYSCLK: %d\n", ret);
+ goto err_fll;
+ }
- return 0;
+ /* Start the write sequencer and wait for it to finish */
+ ret = regmap_write(arizona->regmap, ARIZONA_WRITE_SEQUENCER_CTRL_0,
+ ARIZONA_WSEQ_ENA | ARIZONA_WSEQ_START | 160);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to start write sequencer: %d\n",
+ ret);
+ goto err_sysclk;
+ }
+ ret = arizona_poll_reg(arizona, 5, ARIZONA_WRITE_SEQUENCER_CTRL_1,
+ ARIZONA_WSEQ_BUSY, 0);
+ if (ret != 0) {
+ regmap_write(arizona->regmap, ARIZONA_WRITE_SEQUENCER_CTRL_0,
+ ARIZONA_WSEQ_ABORT);
+ ret = -ETIMEDOUT;
+ }
+
+err_sysclk:
+ err = regmap_write(arizona->regmap, ARIZONA_SYSTEM_CLOCK_1, sysclk);
+ if (err != 0) {
+ dev_err(arizona->dev,
+ "Failed to re-apply old SYSCLK settings: %d\n",
+ err);
+ }
+
+err_fll:
+ err = regmap_write(arizona->regmap, ARIZONA_FLL1_CONTROL_1, fll);
+ if (err != 0) {
+ dev_err(arizona->dev,
+ "Failed to re-apply old FLL settings: %d\n",
+ err);
+ }
+
+ regcache_cache_bypass(arizona->regmap, false);
+
+ if (ret != 0)
+ return ret;
+ else
+ return err;
}
#ifdef CONFIG_PM_RUNTIME
@@ -233,20 +342,44 @@ static int arizona_runtime_resume(struct device *dev)
regcache_cache_only(arizona->regmap, false);
- ret = arizona_wait_for_boot(arizona);
- if (ret != 0) {
- regulator_disable(arizona->dcvdd);
- return ret;
+ switch (arizona->type) {
+ case WM5102:
+ ret = wm5102_patch(arizona);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to apply patch: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = arizona_apply_hardware_patch(arizona);
+ if (ret != 0) {
+ dev_err(arizona->dev,
+ "Failed to apply hardware patch: %d\n",
+ ret);
+ goto err;
+ }
+ break;
+ default:
+ ret = arizona_wait_for_boot(arizona);
+ if (ret != 0) {
+ goto err;
+ }
+
+ break;
}
ret = regcache_sync(arizona->regmap);
if (ret != 0) {
dev_err(arizona->dev, "Failed to restore register cache\n");
- regulator_disable(arizona->dcvdd);
- return ret;
+ goto err;
}
return 0;
+
+err:
+ regcache_cache_only(arizona->regmap, true);
+ regulator_disable(arizona->dcvdd);
+ return ret;
}
static int arizona_runtime_suspend(struct device *dev)
@@ -371,6 +504,17 @@ int arizona_dev_init(struct arizona *arizona)
goto err_early;
}
+ if (arizona->pdata.reset) {
+ /* Start out with /RESET low to put the chip into reset */
+ ret = gpio_request_one(arizona->pdata.reset,
+ GPIOF_DIR_OUT | GPIOF_INIT_LOW,
+ "arizona /RESET");
+ if (ret != 0) {
+ dev_err(dev, "Failed to request /RESET: %d\n", ret);
+ goto err_early;
+ }
+ }
+
ret = regulator_bulk_enable(arizona->num_core_supplies,
arizona->core_supplies);
if (ret != 0) {
@@ -386,16 +530,8 @@ int arizona_dev_init(struct arizona *arizona)
}
if (arizona->pdata.reset) {
- /* Start out with /RESET low to put the chip into reset */
- ret = gpio_request_one(arizona->pdata.reset,
- GPIOF_DIR_OUT | GPIOF_INIT_LOW,
- "arizona /RESET");
- if (ret != 0) {
- dev_err(dev, "Failed to request /RESET: %d\n", ret);
- goto err_dcvdd;
- }
-
gpio_set_value_cansleep(arizona->pdata.reset, 1);
+ msleep(1);
}
regcache_cache_only(arizona->regmap, false);
@@ -424,6 +560,7 @@ int arizona_dev_init(struct arizona *arizona)
arizona->type = WM5102;
}
apply_patch = wm5102_patch;
+ arizona->rev &= 0x7;
break;
#endif
#ifdef CONFIG_MFD_WM5110
@@ -454,6 +591,8 @@ int arizona_dev_init(struct arizona *arizona)
goto err_reset;
}
+ msleep(1);
+
ret = regcache_sync(arizona->regmap);
if (ret != 0) {
dev_err(dev, "Failed to sync device: %d\n", ret);
@@ -461,10 +600,24 @@ int arizona_dev_init(struct arizona *arizona)
}
}
- ret = arizona_wait_for_boot(arizona);
- if (ret != 0) {
- dev_err(arizona->dev, "Device failed initial boot: %d\n", ret);
- goto err_reset;
+ switch (arizona->type) {
+ case WM5102:
+ ret = regmap_read(arizona->regmap, 0x19, &val);
+ if (ret != 0)
+ dev_err(dev,
+ "Failed to check write sequencer state: %d\n",
+ ret);
+ else if (val & 0x01)
+ break;
+ /* Fall through */
+ default:
+ ret = arizona_wait_for_boot(arizona);
+ if (ret != 0) {
+ dev_err(arizona->dev,
+ "Device failed initial boot: %d\n", ret);
+ goto err_reset;
+ }
+ break;
}
if (apply_patch) {
@@ -474,6 +627,20 @@ int arizona_dev_init(struct arizona *arizona)
ret);
goto err_reset;
}
+
+ switch (arizona->type) {
+ case WM5102:
+ ret = arizona_apply_hardware_patch(arizona);
+ if (ret != 0) {
+ dev_err(arizona->dev,
+ "Failed to apply hardware patch: %d\n",
+ ret);
+ goto err_reset;
+ }
+ break;
+ default:
+ break;
+ }
}
for (i = 0; i < ARRAY_SIZE(arizona->pdata.gpio_defaults); i++) {
@@ -498,6 +665,7 @@ int arizona_dev_init(struct arizona *arizona)
regmap_update_bits(arizona->regmap, ARIZONA_CLOCK_32K_1,
ARIZONA_CLK_32K_SRC_MASK,
arizona->pdata.clk32k_src - 1);
+ arizona_clk32k_enable(arizona);
break;
case ARIZONA_32KZ_NONE:
regmap_update_bits(arizona->regmap, ARIZONA_CLOCK_32K_1,
@@ -511,10 +679,16 @@ int arizona_dev_init(struct arizona *arizona)
}
for (i = 0; i < ARIZONA_MAX_MICBIAS; i++) {
- if (!arizona->pdata.micbias[i].mV)
+ if (!arizona->pdata.micbias[i].mV &&
+ !arizona->pdata.micbias[i].bypass)
continue;
+ /* Apply default for bypass mode */
+ if (!arizona->pdata.micbias[i].mV)
+ arizona->pdata.micbias[i].mV = 2800;
+
val = (arizona->pdata.micbias[i].mV - 1500) / 100;
+
val <<= ARIZONA_MICB1_LVL_SHIFT;
if (arizona->pdata.micbias[i].ext_cap)
@@ -526,10 +700,14 @@ int arizona_dev_init(struct arizona *arizona)
if (arizona->pdata.micbias[i].fast_start)
val |= ARIZONA_MICB1_RATE;
+ if (arizona->pdata.micbias[i].bypass)
+ val |= ARIZONA_MICB1_BYPASS;
+
regmap_update_bits(arizona->regmap,
ARIZONA_MIC_BIAS_CTRL_1 + i,
ARIZONA_MICB1_LVL_MASK |
ARIZONA_MICB1_DISCH |
+ ARIZONA_MICB1_BYPASS |
ARIZONA_MICB1_RATE, val);
}
@@ -610,10 +788,9 @@ err_irq:
arizona_irq_exit(arizona);
err_reset:
if (arizona->pdata.reset) {
- gpio_set_value_cansleep(arizona->pdata.reset, 1);
+ gpio_set_value_cansleep(arizona->pdata.reset, 0);
gpio_free(arizona->pdata.reset);
}
-err_dcvdd:
regulator_disable(arizona->dcvdd);
err_enable:
regulator_bulk_disable(arizona->num_core_supplies,
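
arizona_poll_reg() introduced above turns the open-coded boot wait into a generic bounded poll; a sketch of the same regmap polling idiom (poll_reg() is an illustrative name, not the driver's function):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/regmap.h>

/* Poll @reg until (value & mask) == target, giving up after @timeout tries. */
static int poll_reg(struct regmap *map, int timeout, unsigned int reg,
		    unsigned int mask, unsigned int target)
{
	unsigned int val = 0;
	int i, ret;

	for (i = 0; i < timeout; i++) {
		ret = regmap_read(map, reg, &val);
		if (ret == 0 && (val & mask) == target)
			return 0;

		msleep(1);	/* roughly one millisecond per attempt */
	}

	return -ETIMEDOUT;
}
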
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index 2bec5f0..64cd9b6 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -94,6 +94,7 @@ static irqreturn_t arizona_ctrlif_err(int irq, void *data)
static irqreturn_t arizona_irq_thread(int irq, void *data)
{
struct arizona *arizona = data;
+ bool poll;
unsigned int val;
int ret;
@@ -103,20 +104,39 @@ static irqreturn_t arizona_irq_thread(int irq, void *data)
return IRQ_NONE;
}
- /* Always handle the AoD domain */
- handle_nested_irq(irq_find_mapping(arizona->virq, 0));
+ do {
+ poll = false;
+
+ /* Always handle the AoD domain */
+ handle_nested_irq(irq_find_mapping(arizona->virq, 0));
+
+ /*
+ * Check if one of the main interrupts is asserted and only
+ * check that domain if it is.
+ */
+ ret = regmap_read(arizona->regmap, ARIZONA_IRQ_PIN_STATUS,
+ &val);
+ if (ret == 0 && val & ARIZONA_IRQ1_STS) {
+ handle_nested_irq(irq_find_mapping(arizona->virq, 1));
+ } else if (ret != 0) {
+ dev_err(arizona->dev,
+ "Failed to read main IRQ status: %d\n", ret);
+ }
- /*
- * Check if one of the main interrupts is asserted and only
- * check that domain if it is.
- */
- ret = regmap_read(arizona->regmap, ARIZONA_IRQ_PIN_STATUS, &val);
- if (ret == 0 && val & ARIZONA_IRQ1_STS) {
- handle_nested_irq(irq_find_mapping(arizona->virq, 1));
- } else if (ret != 0) {
- dev_err(arizona->dev, "Failed to read main IRQ status: %d\n",
- ret);
- }
+ /*
+ * Poll the IRQ pin status to see if we're really done
+ * if the interrupt controller can't do it for us.
+ */
+ if (!arizona->pdata.irq_gpio) {
+ break;
+ } else if (arizona->pdata.irq_flags & IRQF_TRIGGER_RISING &&
+ gpio_get_value_cansleep(arizona->pdata.irq_gpio)) {
+ poll = true;
+ } else if (arizona->pdata.irq_flags & IRQF_TRIGGER_FALLING &&
+ !gpio_get_value_cansleep(arizona->pdata.irq_gpio)) {
+ poll = true;
+ }
+ } while (poll);
pm_runtime_mark_last_busy(arizona->dev);
pm_runtime_put_autosuspend(arizona->dev);
@@ -169,6 +189,7 @@ int arizona_irq_init(struct arizona *arizona)
int ret, i;
const struct regmap_irq_chip *aod, *irq;
bool ctrlif_error = true;
+ struct irq_data *irq_data;
switch (arizona->type) {
#ifdef CONFIG_MFD_WM5102
@@ -192,7 +213,36 @@ int arizona_irq_init(struct arizona *arizona)
return -EINVAL;
}
- if (arizona->pdata.irq_active_high) {
+ /* Disable all wake sources by default */
+ regmap_write(arizona->regmap, ARIZONA_WAKE_CONTROL, 0);
+
+ /* Read the flags from the interrupt controller if not specified */
+ if (!arizona->pdata.irq_flags) {
+ irq_data = irq_get_irq_data(arizona->irq);
+ if (!irq_data) {
+ dev_err(arizona->dev, "Invalid IRQ: %d\n",
+ arizona->irq);
+ return -EINVAL;
+ }
+
+ arizona->pdata.irq_flags = irqd_get_trigger_type(irq_data);
+ switch (arizona->pdata.irq_flags) {
+ case IRQF_TRIGGER_LOW:
+ case IRQF_TRIGGER_HIGH:
+ case IRQF_TRIGGER_RISING:
+ case IRQF_TRIGGER_FALLING:
+ break;
+
+ case IRQ_TYPE_NONE:
+ default:
+ /* Device default */
+ arizona->pdata.irq_flags = IRQF_TRIGGER_LOW;
+ break;
+ }
+ }
+
+ if (arizona->pdata.irq_flags & (IRQF_TRIGGER_HIGH |
+ IRQF_TRIGGER_RISING)) {
ret = regmap_update_bits(arizona->regmap, ARIZONA_IRQ_CTRL_1,
ARIZONA_IRQ_POL, 0);
if (ret != 0) {
@@ -200,12 +250,10 @@ int arizona_irq_init(struct arizona *arizona)
ret);
goto err;
}
-
- flags |= IRQF_TRIGGER_HIGH;
- } else {
- flags |= IRQF_TRIGGER_LOW;
}
+ flags |= arizona->pdata.irq_flags;
+
/* Allocate a virtual IRQ domain to distribute to the regmap domains */
arizona->virq = irq_domain_add_linear(NULL, 2, &arizona_domain_ops,
arizona);
@@ -257,11 +305,31 @@ int arizona_irq_init(struct arizona *arizona)
}
}
+ /* Used to emulate edge trigger and to work around broken pinmux */
+ if (arizona->pdata.irq_gpio) {
+ if (gpio_to_irq(arizona->pdata.irq_gpio) != arizona->irq) {
+ dev_warn(arizona->dev, "IRQ %d is not GPIO %d (%d)\n",
+ arizona->irq, arizona->pdata.irq_gpio,
+ gpio_to_irq(arizona->pdata.irq_gpio));
+ arizona->irq = gpio_to_irq(arizona->pdata.irq_gpio);
+ }
+
+ ret = devm_gpio_request_one(arizona->dev,
+ arizona->pdata.irq_gpio,
+ GPIOF_IN, "arizona IRQ");
+ if (ret != 0) {
+ dev_err(arizona->dev,
+ "Failed to request IRQ GPIO %d:: %d\n",
+ arizona->pdata.irq_gpio, ret);
+ arizona->pdata.irq_gpio = 0;
+ }
+ }
+
ret = request_threaded_irq(arizona->irq, NULL, arizona_irq_thread,
flags, "arizona", arizona);
if (ret != 0) {
- dev_err(arizona->dev, "Failed to request IRQ %d: %d\n",
+ dev_err(arizona->dev, "Failed to request primary IRQ %d: %d\n",
arizona->irq, ret);
goto err_main_irq;
}
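
arizona_irq_init() above now derives the trigger type from whatever the interrupt controller already reports when platform data does not specify one; a small sketch of that fallback (foo_irq_flags() is illustrative):

#include <linux/interrupt.h>
#include <linux/irq.h>

static unsigned long foo_irq_flags(int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	unsigned long flags = data ? irqd_get_trigger_type(data) : 0;

	switch (flags) {
	case IRQF_TRIGGER_LOW:
	case IRQF_TRIGGER_HIGH:
	case IRQF_TRIGGER_RISING:
	case IRQF_TRIGGER_FALLING:
		return flags;		/* trust what the irqchip was told */
	default:
		return IRQF_TRIGGER_LOW;	/* device default, as above */
	}
}
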
diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c
index 1b9fdd6..b57e642 100644
--- a/drivers/mfd/arizona-spi.c
+++ b/drivers/mfd/arizona-spi.c
@@ -67,7 +67,7 @@ static int arizona_spi_probe(struct spi_device *spi)
static int arizona_spi_remove(struct spi_device *spi)
{
- struct arizona *arizona = dev_get_drvdata(&spi->dev);
+ struct arizona *arizona = spi_get_drvdata(spi);
arizona_dev_exit(arizona);
return 0;
}
diff --git a/drivers/mfd/as3711.c b/drivers/mfd/as3711.c
index e994c96..01e4141 100644
--- a/drivers/mfd/as3711.c
+++ b/drivers/mfd/as3711.c
@@ -112,16 +112,34 @@ static const struct regmap_config as3711_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
+#ifdef CONFIG_OF
+static struct of_device_id as3711_of_match[] = {
+ {.compatible = "ams,as3711",},
+ {}
+};
+MODULE_DEVICE_TABLE(of, as3711_of_match);
+#endif
+
static int as3711_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct as3711 *as3711;
- struct as3711_platform_data *pdata = client->dev.platform_data;
+ struct as3711_platform_data *pdata;
unsigned int id1, id2;
int ret;
- if (!pdata)
- dev_dbg(&client->dev, "Platform data not found\n");
+ if (!client->dev.of_node) {
+ pdata = client->dev.platform_data;
+ if (!pdata)
+ dev_dbg(&client->dev, "Platform data not found\n");
+ } else {
+ pdata = devm_kzalloc(&client->dev,
+ sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&client->dev, "Failed to allocate pdata\n");
+ return -ENOMEM;
+ }
+ }
as3711 = devm_kzalloc(&client->dev, sizeof(struct as3711), GFP_KERNEL);
if (!as3711) {
@@ -193,7 +211,8 @@ static struct i2c_driver as3711_i2c_driver = {
.driver = {
.name = "as3711",
.owner = THIS_MODULE,
- },
+ .of_match_table = of_match_ptr(as3711_of_match),
+ },
.probe = as3711_i2c_probe,
.remove = as3711_i2c_remove,
.id_table = as3711_i2c_id,
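
The as3711 change adds a device-tree match table guarded by CONFIG_OF; a brief sketch of the usual of_match_ptr() wiring, under an assumed "vendor,foo" compatible string rather than the real "ams,as3711" entry:

#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo" },		/* assumed compatible string */
	{ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static struct i2c_driver foo_driver = {
	.driver = {
		.name		= "foo",
		.owner		= THIS_MODULE,
		/* compiles to NULL when CONFIG_OF is not set */
		.of_match_table	= of_match_ptr(foo_of_match),
	},
};
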
diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c
new file mode 100644
index 0000000..10cd14e
--- /dev/null
+++ b/drivers/mfd/cros_ec.c
@@ -0,0 +1,196 @@
+/*
+ * ChromeOS EC multi-function device
+ *
+ * Copyright (C) 2012 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * The ChromeOS EC multi function device is used to mux all the requests
+ * to the EC device for its multiple features: keyboard controller,
+ * battery charging and regulator control, firmware update.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+
+int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
+ struct cros_ec_msg *msg)
+{
+ uint8_t *out;
+ int csum, i;
+
+ BUG_ON(msg->out_len > EC_HOST_PARAM_SIZE);
+ out = ec_dev->dout;
+ out[0] = EC_CMD_VERSION0 + msg->version;
+ out[1] = msg->cmd;
+ out[2] = msg->out_len;
+ csum = out[0] + out[1] + out[2];
+ for (i = 0; i < msg->out_len; i++)
+ csum += out[EC_MSG_TX_HEADER_BYTES + i] = msg->out_buf[i];
+ out[EC_MSG_TX_HEADER_BYTES + msg->out_len] = (uint8_t)(csum & 0xff);
+
+ return EC_MSG_TX_PROTO_BYTES + msg->out_len;
+}
+EXPORT_SYMBOL(cros_ec_prepare_tx);
+
+static int cros_ec_command_sendrecv(struct cros_ec_device *ec_dev,
+ uint16_t cmd, void *out_buf, int out_len,
+ void *in_buf, int in_len)
+{
+ struct cros_ec_msg msg;
+
+ msg.version = cmd >> 8;
+ msg.cmd = cmd & 0xff;
+ msg.out_buf = out_buf;
+ msg.out_len = out_len;
+ msg.in_buf = in_buf;
+ msg.in_len = in_len;
+
+ return ec_dev->command_xfer(ec_dev, &msg);
+}
+
+static int cros_ec_command_recv(struct cros_ec_device *ec_dev,
+ uint16_t cmd, void *buf, int buf_len)
+{
+ return cros_ec_command_sendrecv(ec_dev, cmd, NULL, 0, buf, buf_len);
+}
+
+static int cros_ec_command_send(struct cros_ec_device *ec_dev,
+ uint16_t cmd, void *buf, int buf_len)
+{
+ return cros_ec_command_sendrecv(ec_dev, cmd, buf, buf_len, NULL, 0);
+}
+
+static irqreturn_t ec_irq_thread(int irq, void *data)
+{
+ struct cros_ec_device *ec_dev = data;
+
+ if (device_may_wakeup(ec_dev->dev))
+ pm_wakeup_event(ec_dev->dev, 0);
+
+ blocking_notifier_call_chain(&ec_dev->event_notifier, 1, ec_dev);
+
+ return IRQ_HANDLED;
+}
+
+static struct mfd_cell cros_devs[] = {
+ {
+ .name = "cros-ec-keyb",
+ .id = 1,
+ .of_compatible = "google,cros-ec-keyb",
+ },
+};
+
+int cros_ec_register(struct cros_ec_device *ec_dev)
+{
+ struct device *dev = ec_dev->dev;
+ int err = 0;
+
+ BLOCKING_INIT_NOTIFIER_HEAD(&ec_dev->event_notifier);
+
+ ec_dev->command_send = cros_ec_command_send;
+ ec_dev->command_recv = cros_ec_command_recv;
+ ec_dev->command_sendrecv = cros_ec_command_sendrecv;
+
+ if (ec_dev->din_size) {
+ ec_dev->din = kmalloc(ec_dev->din_size, GFP_KERNEL);
+ if (!ec_dev->din) {
+ err = -ENOMEM;
+ goto fail_din;
+ }
+ }
+ if (ec_dev->dout_size) {
+ ec_dev->dout = kmalloc(ec_dev->dout_size, GFP_KERNEL);
+ if (!ec_dev->dout) {
+ err = -ENOMEM;
+ goto fail_dout;
+ }
+ }
+
+ if (!ec_dev->irq) {
+ dev_dbg(dev, "no valid IRQ: %d\n", ec_dev->irq);
+ goto fail_irq;
+ }
+
+ err = request_threaded_irq(ec_dev->irq, NULL, ec_irq_thread,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "chromeos-ec", ec_dev);
+ if (err) {
+ dev_err(dev, "request irq %d: error %d\n", ec_dev->irq, err);
+ goto fail_irq;
+ }
+
+ err = mfd_add_devices(dev, 0, cros_devs,
+ ARRAY_SIZE(cros_devs),
+ NULL, ec_dev->irq, NULL);
+ if (err) {
+ dev_err(dev, "failed to add mfd devices\n");
+ goto fail_mfd;
+ }
+
+ dev_info(dev, "Chrome EC (%s)\n", ec_dev->name);
+
+ return 0;
+
+fail_mfd:
+ free_irq(ec_dev->irq, ec_dev);
+fail_irq:
+ kfree(ec_dev->dout);
+fail_dout:
+ kfree(ec_dev->din);
+fail_din:
+ return err;
+}
+EXPORT_SYMBOL(cros_ec_register);
+
+int cros_ec_remove(struct cros_ec_device *ec_dev)
+{
+ mfd_remove_devices(ec_dev->dev);
+ free_irq(ec_dev->irq, ec_dev);
+ kfree(ec_dev->dout);
+ kfree(ec_dev->din);
+
+ return 0;
+}
+EXPORT_SYMBOL(cros_ec_remove);
+
+#ifdef CONFIG_PM_SLEEP
+int cros_ec_suspend(struct cros_ec_device *ec_dev)
+{
+ struct device *dev = ec_dev->dev;
+
+ if (device_may_wakeup(dev))
+ ec_dev->wake_enabled = !enable_irq_wake(ec_dev->irq);
+
+ disable_irq(ec_dev->irq);
+ ec_dev->was_wake_device = ec_dev->wake_enabled;
+
+ return 0;
+}
+EXPORT_SYMBOL(cros_ec_suspend);
+
+int cros_ec_resume(struct cros_ec_device *ec_dev)
+{
+ enable_irq(ec_dev->irq);
+
+ if (ec_dev->wake_enabled) {
+ disable_irq_wake(ec_dev->irq);
+ ec_dev->wake_enabled = 0;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(cros_ec_resume);
+
+#endif
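
cros_ec_prepare_tx() above frames every command as a version byte, a command byte, the payload length, the payload, and an 8-bit checksum of all preceding bytes; a self-contained sketch of that framing (frame_ec_msg() is illustrative, not part of the cros_ec API):

#include <linux/types.h>

static size_t frame_ec_msg(u8 *out, u8 version, u8 cmd,
			   const u8 *payload, u8 len)
{
	u8 csum;
	size_t i;

	out[0] = version;	/* the driver adds EC_CMD_VERSION0 here */
	out[1] = cmd;
	out[2] = len;
	csum = out[0] + out[1] + out[2];

	for (i = 0; i < len; i++) {
		out[3 + i] = payload[i];
		csum += payload[i];
	}
	out[3 + len] = csum;	/* 8-bit sum of header plus payload */

	return 4 + len;		/* header (3) + payload + checksum (1) */
}
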
diff --git a/drivers/mfd/cros_ec_i2c.c b/drivers/mfd/cros_ec_i2c.c
new file mode 100644
index 0000000..1230446
--- /dev/null
+++ b/drivers/mfd/cros_ec_i2c.c
@@ -0,0 +1,201 @@
+/*
+ * ChromeOS EC multi-function device (I2C)
+ *
+ * Copyright (C) 2012 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+static inline struct cros_ec_device *to_ec_dev(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ return i2c_get_clientdata(client);
+}
+
+static int cros_ec_command_xfer(struct cros_ec_device *ec_dev,
+ struct cros_ec_msg *msg)
+{
+ struct i2c_client *client = ec_dev->priv;
+ int ret = -ENOMEM;
+ int i;
+ int packet_len;
+ u8 *out_buf = NULL;
+ u8 *in_buf = NULL;
+ u8 sum;
+ struct i2c_msg i2c_msg[2];
+
+ i2c_msg[0].addr = client->addr;
+ i2c_msg[0].flags = 0;
+ i2c_msg[1].addr = client->addr;
+ i2c_msg[1].flags = I2C_M_RD;
+
+ /*
+ * allocate larger packet (one byte for checksum, one byte for
+ * length, and one for result code)
+ */
+ packet_len = msg->in_len + 3;
+ in_buf = kzalloc(packet_len, GFP_KERNEL);
+ if (!in_buf)
+ goto done;
+ i2c_msg[1].len = packet_len;
+ i2c_msg[1].buf = (char *)in_buf;
+
+ /*
+ * allocate larger packet (one byte for checksum, one for
+ * command code, one for length, and one for command version)
+ */
+ packet_len = msg->out_len + 4;
+ out_buf = kzalloc(packet_len, GFP_KERNEL);
+ if (!out_buf)
+ goto done;
+ i2c_msg[0].len = packet_len;
+ i2c_msg[0].buf = (char *)out_buf;
+
+ out_buf[0] = EC_CMD_VERSION0 + msg->version;
+ out_buf[1] = msg->cmd;
+ out_buf[2] = msg->out_len;
+
+ /* copy message payload and compute checksum */
+ sum = out_buf[0] + out_buf[1] + out_buf[2];
+ for (i = 0; i < msg->out_len; i++) {
+ out_buf[3 + i] = msg->out_buf[i];
+ sum += out_buf[3 + i];
+ }
+ out_buf[3 + msg->out_len] = sum;
+
+ /* send command to EC and read answer */
+ ret = i2c_transfer(client->adapter, i2c_msg, 2);
+ if (ret < 0) {
+ dev_err(ec_dev->dev, "i2c transfer failed: %d\n", ret);
+ goto done;
+ } else if (ret != 2) {
+ dev_err(ec_dev->dev, "failed to get response: %d\n", ret);
+ ret = -EIO;
+ goto done;
+ }
+
+ /* check response error code */
+ if (i2c_msg[1].buf[0]) {
+ dev_warn(ec_dev->dev, "command 0x%02x returned an error %d\n",
+ msg->cmd, i2c_msg[1].buf[0]);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* copy response packet payload and compute checksum */
+ sum = in_buf[0] + in_buf[1];
+ for (i = 0; i < msg->in_len; i++) {
+ msg->in_buf[i] = in_buf[2 + i];
+ sum += in_buf[2 + i];
+ }
+ dev_dbg(ec_dev->dev, "packet: %*ph, sum = %02x\n",
+ i2c_msg[1].len, in_buf, sum);
+ if (sum != in_buf[2 + msg->in_len]) {
+ dev_err(ec_dev->dev, "bad packet checksum\n");
+ ret = -EBADMSG;
+ goto done;
+ }
+
+ ret = 0;
+ done:
+ kfree(in_buf);
+ kfree(out_buf);
+ return ret;
+}
+
+static int cros_ec_probe_i2c(struct i2c_client *client,
+ const struct i2c_device_id *dev_id)
+{
+ struct device *dev = &client->dev;
+ struct cros_ec_device *ec_dev = NULL;
+ int err;
+
+ ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
+ if (!ec_dev)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, ec_dev);
+ ec_dev->name = "I2C";
+ ec_dev->dev = dev;
+ ec_dev->priv = client;
+ ec_dev->irq = client->irq;
+ ec_dev->command_xfer = cros_ec_command_xfer;
+ ec_dev->ec_name = client->name;
+ ec_dev->phys_name = client->adapter->name;
+ ec_dev->parent = &client->dev;
+
+ err = cros_ec_register(ec_dev);
+ if (err) {
+ dev_err(dev, "cannot register EC\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int cros_ec_remove_i2c(struct i2c_client *client)
+{
+ struct cros_ec_device *ec_dev = i2c_get_clientdata(client);
+
+ cros_ec_remove(ec_dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cros_ec_i2c_suspend(struct device *dev)
+{
+ struct cros_ec_device *ec_dev = to_ec_dev(dev);
+
+ return cros_ec_suspend(ec_dev);
+}
+
+static int cros_ec_i2c_resume(struct device *dev)
+{
+ struct cros_ec_device *ec_dev = to_ec_dev(dev);
+
+ return cros_ec_resume(ec_dev);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cros_ec_i2c_pm_ops, cros_ec_i2c_suspend,
+ cros_ec_i2c_resume);
+
+static const struct i2c_device_id cros_ec_i2c_id[] = {
+ { "cros-ec-i2c", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, cros_ec_i2c_id);
+
+static struct i2c_driver cros_ec_driver = {
+ .driver = {
+ .name = "cros-ec-i2c",
+ .owner = THIS_MODULE,
+ .pm = &cros_ec_i2c_pm_ops,
+ },
+ .probe = cros_ec_probe_i2c,
+ .remove = cros_ec_remove_i2c,
+ .id_table = cros_ec_i2c_id,
+};
+
+module_i2c_driver(cros_ec_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ChromeOS EC multi function device");
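The request framing built by cros_ec_command_xfer() above (version byte, command, length, payload, then an 8-bit additive checksum) can be reproduced in isolation. A minimal user-space sketch for illustration only; the helper name is hypothetical and the version byte passed in main() is just a placeholder for EC_CMD_VERSION0 + msg->version:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Frame an EC request the way the driver above does: the checksum is
 * the low 8 bits of the sum of every preceding byte.
 */
static size_t ec_i2c_frame_request(uint8_t version_byte, uint8_t cmd,
				   const uint8_t *payload, uint8_t len,
				   uint8_t *out)
{
	uint8_t sum;
	size_t i;

	out[0] = version_byte;		/* EC_CMD_VERSION0 + msg->version */
	out[1] = cmd;
	out[2] = len;
	sum = out[0] + out[1] + out[2];
	for (i = 0; i < len; i++) {
		out[3 + i] = payload[i];
		sum += payload[i];
	}
	out[3 + len] = sum;

	return (size_t)len + 4;		/* bytes to send on the bus */
}

int main(void)
{
	uint8_t payload[2] = { 0x01, 0x02 };
	uint8_t buf[16];
	size_t i, n;

	/* 0xdc is only a placeholder version byte for this demo */
	n = ec_i2c_frame_request(0xdc, 0x02, payload, sizeof(payload), buf);
	for (i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");

	return 0;
}

The response side is the mirror image: result code, length, payload, then a checksum over those bytes, which is what the loop after the i2c_transfer() call verifies.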
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
new file mode 100644
index 0000000..19193cf
--- /dev/null
+++ b/drivers/mfd/cros_ec_spi.c
@@ -0,0 +1,375 @@
+/*
+ * ChromeOS EC multi-function device (SPI)
+ *
+ * Copyright (C) 2012 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+
+
+/* The header byte, which follows the preamble */
+#define EC_MSG_HEADER 0xec
+
+/*
+ * Number of EC preamble bytes we read at a time. Since it takes
+ * about 400-500us for the EC to respond there is not a lot of
+ * point in tuning this. If the EC could respond faster then
+ * we could increase this so that we might expect the preamble and
+ * message to occur in a single transaction. However, the maximum
+ * SPI transfer size is 256 bytes, so at 5MHz we need a response
+ * time of perhaps <320us (200 bytes / 1600 bits).
+ */
+#define EC_MSG_PREAMBLE_COUNT 32
+
+/*
+ * We must get a response from the EC in 5ms. This is a very long
+ * time, but the flash write command can take 2-3ms. The EC command
+ * processing is currently not very fast (about 500us). We could
+ * look at speeding this up and making the flash write command a
+ * 'slow' command, requiring a GET_STATUS wait loop, like flash
+ * erase.
+ */
+#define EC_MSG_DEADLINE_MS 5
+
+/*
+ * Time between raising the SPI chip select (for the end of a
+ * transaction) and dropping it again (for the next transaction).
+ * If we go too fast, the EC will miss the transaction. It seems
+ * that 50us is enough with the 16MHz STM32 EC.
+ */
+#define EC_SPI_RECOVERY_TIME_NS (50 * 1000)
+
+/**
+ * struct cros_ec_spi - information about a SPI-connected EC
+ *
+ * @spi: SPI device we are connected to
+ * @last_transfer_ns: time that we last finished a transfer, or 0 if there
+ * if no record
+ */
+struct cros_ec_spi {
+ struct spi_device *spi;
+ s64 last_transfer_ns;
+};
+
+static void debug_packet(struct device *dev, const char *name, u8 *ptr,
+ int len)
+{
+#ifdef DEBUG
+ int i;
+
+ dev_dbg(dev, "%s: ", name);
+ for (i = 0; i < len; i++)
+ pr_cont(" %02x", ptr[i]);
+
+ pr_cont("\n");
+#endif
+}
+
+/**
+ * cros_ec_spi_receive_response - Receive a response from the EC.
+ *
+ * This function has two phases: reading the preamble bytes (since if we read
+ * data from the EC before it is ready to send, we just get preamble) and
+ * reading the actual message.
+ *
+ * The received data is placed into ec_dev->din.
+ *
+ * @ec_dev: ChromeOS EC device
+ * @need_len: Number of message bytes we need to read
+ */
+static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev,
+ int need_len)
+{
+ struct cros_ec_spi *ec_spi = ec_dev->priv;
+ struct spi_transfer trans;
+ struct spi_message msg;
+ u8 *ptr, *end;
+ int ret;
+ unsigned long deadline;
+ int todo;
+
+ /* Receive data until we see the header byte */
+ deadline = jiffies + msecs_to_jiffies(EC_MSG_DEADLINE_MS);
+ do {
+ memset(&trans, '\0', sizeof(trans));
+ trans.cs_change = 1;
+ trans.rx_buf = ptr = ec_dev->din;
+ trans.len = EC_MSG_PREAMBLE_COUNT;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&trans, &msg);
+ ret = spi_sync(ec_spi->spi, &msg);
+ if (ret < 0) {
+ dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
+ return ret;
+ }
+
+ for (end = ptr + EC_MSG_PREAMBLE_COUNT; ptr != end; ptr++) {
+ if (*ptr == EC_MSG_HEADER) {
+ dev_dbg(ec_dev->dev, "msg found at %ld\n",
+ ptr - ec_dev->din);
+ break;
+ }
+ }
+
+ if (time_after(jiffies, deadline)) {
+ dev_warn(ec_dev->dev, "EC failed to respond in time\n");
+ return -ETIMEDOUT;
+ }
+ } while (ptr == end);
+
+ /*
+ * ptr now points to the header byte. Copy any valid data to the
+ * start of our buffer
+ */
+ todo = end - ++ptr;
+ BUG_ON(todo < 0 || todo > ec_dev->din_size);
+ todo = min(todo, need_len);
+ memmove(ec_dev->din, ptr, todo);
+ ptr = ec_dev->din + todo;
+ dev_dbg(ec_dev->dev, "need %d, got %d bytes from preamble\n",
+ need_len, todo);
+ need_len -= todo;
+
+ /* Receive data until we have it all */
+ while (need_len > 0) {
+ /*
+ * We can't support transfers larger than the SPI FIFO size
+ * unless we have DMA. We don't have DMA on the ISP SPI ports
+ * for Exynos. We need a way of asking the SPI driver for the
+ * maximum-supported transfer size.
+ */
+ todo = min(need_len, 256);
+ dev_dbg(ec_dev->dev, "loop, todo=%d, need_len=%d, ptr=%ld\n",
+ todo, need_len, ptr - ec_dev->din);
+
+ memset(&trans, '\0', sizeof(trans));
+ trans.cs_change = 1;
+ trans.rx_buf = ptr;
+ trans.len = todo;
+ spi_message_init(&msg);
+ spi_message_add_tail(&trans, &msg);
+
+ /* send command to EC and read answer */
+ BUG_ON((u8 *)trans.rx_buf - ec_dev->din + todo >
+ ec_dev->din_size);
+ ret = spi_sync(ec_spi->spi, &msg);
+ if (ret < 0) {
+ dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
+ return ret;
+ }
+
+ debug_packet(ec_dev->dev, "interim", ptr, todo);
+ ptr += todo;
+ need_len -= todo;
+ }
+
+ dev_dbg(ec_dev->dev, "loop done, ptr=%ld\n", ptr - ec_dev->din);
+
+ return 0;
+}
+
+/**
+ * cros_ec_command_spi_xfer - Transfer a message over SPI and receive the reply
+ *
+ * @ec_dev: ChromeOS EC device
+ * @ec_msg: Message to transfer
+ */
+static int cros_ec_command_spi_xfer(struct cros_ec_device *ec_dev,
+ struct cros_ec_msg *ec_msg)
+{
+ struct cros_ec_spi *ec_spi = ec_dev->priv;
+ struct spi_transfer trans;
+ struct spi_message msg;
+ int i, len;
+ u8 *ptr;
+ int sum;
+ int ret = 0, final_ret;
+ struct timespec ts;
+
+ len = cros_ec_prepare_tx(ec_dev, ec_msg);
+ dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);
+
+ /* If it's too soon to do another transaction, wait */
+ if (ec_spi->last_transfer_ns) {
+ struct timespec ts;
+ unsigned long delay; /* The delay completed so far */
+
+ ktime_get_ts(&ts);
+ delay = timespec_to_ns(&ts) - ec_spi->last_transfer_ns;
+ if (delay < EC_SPI_RECOVERY_TIME_NS)
+ ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
+ }
+
+ /* Transmit phase - send our message */
+ debug_packet(ec_dev->dev, "out", ec_dev->dout, len);
+ memset(&trans, '\0', sizeof(trans));
+ trans.tx_buf = ec_dev->dout;
+ trans.len = len;
+ trans.cs_change = 1;
+ spi_message_init(&msg);
+ spi_message_add_tail(&trans, &msg);
+ ret = spi_sync(ec_spi->spi, &msg);
+
+ /* Get the response */
+ if (!ret) {
+ ret = cros_ec_spi_receive_response(ec_dev,
+ ec_msg->in_len + EC_MSG_TX_PROTO_BYTES);
+ } else {
+ dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
+ }
+
+ /* turn off CS */
+ spi_message_init(&msg);
+ final_ret = spi_sync(ec_spi->spi, &msg);
+ ktime_get_ts(&ts);
+ ec_spi->last_transfer_ns = timespec_to_ns(&ts);
+ if (!ret)
+ ret = final_ret;
+ if (ret < 0) {
+ dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
+ return ret;
+ }
+
+ /* check response error code */
+ ptr = ec_dev->din;
+ if (ptr[0]) {
+ dev_warn(ec_dev->dev, "command 0x%02x returned an error %d\n",
+ ec_msg->cmd, ptr[0]);
+ debug_packet(ec_dev->dev, "in_err", ptr, len);
+ return -EINVAL;
+ }
+ len = ptr[1];
+ sum = ptr[0] + ptr[1];
+ if (len > ec_msg->in_len) {
+ dev_err(ec_dev->dev, "packet too long (%d bytes, expected %d)",
+ len, ec_msg->in_len);
+ return -ENOSPC;
+ }
+
+ /* copy response packet payload and compute checksum */
+ for (i = 0; i < len; i++) {
+ sum += ptr[i + 2];
+ if (ec_msg->in_len)
+ ec_msg->in_buf[i] = ptr[i + 2];
+ }
+ sum &= 0xff;
+
+ debug_packet(ec_dev->dev, "in", ptr, len + 3);
+
+ if (sum != ptr[len + 2]) {
+ dev_err(ec_dev->dev,
+ "bad packet checksum, expected %02x, got %02x\n",
+ sum, ptr[len + 2]);
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
+static int cros_ec_probe_spi(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct cros_ec_device *ec_dev;
+ struct cros_ec_spi *ec_spi;
+ int err;
+
+ spi->bits_per_word = 8;
+ spi->mode = SPI_MODE_0;
+ err = spi_setup(spi);
+ if (err < 0)
+ return err;
+
+ ec_spi = devm_kzalloc(dev, sizeof(*ec_spi), GFP_KERNEL);
+ if (ec_spi == NULL)
+ return -ENOMEM;
+ ec_spi->spi = spi;
+ ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
+ if (!ec_dev)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, ec_dev);
+ ec_dev->name = "SPI";
+ ec_dev->dev = dev;
+ ec_dev->priv = ec_spi;
+ ec_dev->irq = spi->irq;
+ ec_dev->command_xfer = cros_ec_command_spi_xfer;
+ ec_dev->ec_name = ec_spi->spi->modalias;
+ ec_dev->phys_name = dev_name(&ec_spi->spi->dev);
+ ec_dev->parent = &ec_spi->spi->dev;
+ ec_dev->din_size = EC_MSG_BYTES + EC_MSG_PREAMBLE_COUNT;
+ ec_dev->dout_size = EC_MSG_BYTES;
+
+ err = cros_ec_register(ec_dev);
+ if (err) {
+ dev_err(dev, "cannot register EC\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int cros_ec_remove_spi(struct spi_device *spi)
+{
+ struct cros_ec_device *ec_dev;
+
+ ec_dev = spi_get_drvdata(spi);
+ cros_ec_remove(ec_dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cros_ec_spi_suspend(struct device *dev)
+{
+ struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
+
+ return cros_ec_suspend(ec_dev);
+}
+
+static int cros_ec_spi_resume(struct device *dev)
+{
+ struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
+
+ return cros_ec_resume(ec_dev);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cros_ec_spi_pm_ops, cros_ec_spi_suspend,
+ cros_ec_spi_resume);
+
+static const struct spi_device_id cros_ec_spi_id[] = {
+ { "cros-ec-spi", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, cros_ec_spi_id);
+
+static struct spi_driver cros_ec_driver_spi = {
+ .driver = {
+ .name = "cros-ec-spi",
+ .owner = THIS_MODULE,
+ .pm = &cros_ec_spi_pm_ops,
+ },
+ .probe = cros_ec_probe_spi,
+ .remove = cros_ec_remove_spi,
+ .id_table = cros_ec_spi_id,
+};
+
+module_spi_driver(cros_ec_driver_spi);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ChromeOS EC multi function device (SPI)");
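Combining the two halves of the SPI receive path above (the preamble scan in cros_ec_spi_receive_response() and the result/length/payload/checksum checks in cros_ec_command_spi_xfer()), the response format can be parsed from a captured byte buffer with a short sketch. This is illustrative only; the helper is hypothetical and leaves out the transfer splitting, chip-select handling and timeouts that the driver needs:

#include <stdint.h>
#include <stddef.h>

#define EXAMPLE_EC_MSG_HEADER	0xec	/* same value as EC_MSG_HEADER above */

/* Returns the payload length, or a negative value on error. */
static int ec_spi_parse_response(const uint8_t *raw, size_t raw_len,
				 uint8_t *payload, size_t payload_max)
{
	size_t i = 0, j;
	uint8_t sum, len;

	while (i < raw_len && raw[i] != EXAMPLE_EC_MSG_HEADER)
		i++;				/* skip preamble bytes */
	if (i == raw_len)
		return -1;			/* EC never became ready */
	i++;					/* step past the header byte */

	if (raw_len - i < 2)
		return -2;			/* truncated frame */
	if (raw[i] != 0)
		return -3;			/* non-zero EC result code */
	len = raw[i + 1];
	if (len > payload_max || raw_len - i < (size_t)len + 3)
		return -4;			/* frame shorter than advertised */

	sum = raw[i] + raw[i + 1];		/* result + length ... */
	for (j = 0; j < len; j++) {
		payload[j] = raw[i + 2 + j];
		sum += payload[j];		/* ... plus every payload byte */
	}
	if (sum != raw[i + 2 + len])
		return -5;			/* bad checksum */

	return len;
}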
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index 05176cd..f1a316e 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -499,7 +499,8 @@ static int da903x_probe(struct i2c_client *client,
unsigned int tmp;
int ret;
- chip = kzalloc(sizeof(struct da903x_chip), GFP_KERNEL);
+ chip = devm_kzalloc(&client->dev, sizeof(struct da903x_chip),
+ GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
@@ -515,33 +516,27 @@ static int da903x_probe(struct i2c_client *client,
ret = chip->ops->init_chip(chip);
if (ret)
- goto out_free_chip;
+ return ret;
/* mask and clear all IRQs */
chip->events_mask = 0xffffffff;
chip->ops->mask_events(chip, chip->events_mask);
chip->ops->read_events(chip, &tmp);
- ret = request_irq(client->irq, da903x_irq_handler,
+ ret = devm_request_irq(&client->dev, client->irq, da903x_irq_handler,
IRQF_TRIGGER_FALLING,
"da903x", chip);
if (ret) {
dev_err(&client->dev, "failed to request irq %d\n",
client->irq);
- goto out_free_chip;
+ return ret;
}
ret = da903x_add_subdevs(chip, pdata);
if (ret)
- goto out_free_irq;
+ return ret;
return 0;
-
-out_free_irq:
- free_irq(client->irq, chip);
-out_free_chip:
- kfree(chip);
- return ret;
}
static int da903x_remove(struct i2c_client *client)
@@ -549,8 +544,6 @@ static int da903x_remove(struct i2c_client *client)
struct da903x_chip *chip = i2c_get_clientdata(client);
da903x_remove_subdevs(chip);
- free_irq(client->irq, chip);
- kfree(chip);
return 0;
}
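The da903x changes above follow the managed-resource pattern that recurs in several of the conversions below (ezx-pcap, intel_msic, lm3533): allocations and IRQs are tied to the struct device with devm_* helpers, so the error paths and the remove() callback no longer free them by hand. A condensed sketch of that shape, with hypothetical names:

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

struct example_chip {
	struct i2c_client *client;
};

static irqreturn_t example_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct example_chip *chip;
	int ret;

	/* freed automatically when the device is unbound */
	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;
	chip->client = client;

	/* released automatically as well: no free_irq() in remove() */
	ret = devm_request_irq(&client->dev, client->irq, example_irq_handler,
			       IRQF_TRIGGER_FALLING, "example", chip);
	if (ret)
		return ret;

	i2c_set_clientdata(client, chip);
	return 0;
}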
diff --git a/drivers/mfd/da9052-spi.c b/drivers/mfd/da9052-spi.c
index 61d63b9..0680bcb 100644
--- a/drivers/mfd/da9052-spi.c
+++ b/drivers/mfd/da9052-spi.c
@@ -38,7 +38,7 @@ static int da9052_spi_probe(struct spi_device *spi)
da9052->dev = &spi->dev;
da9052->chip_irq = spi->irq;
- dev_set_drvdata(&spi->dev, da9052);
+ spi_set_drvdata(spi, da9052);
da9052_regmap_config.read_flag_mask = 1;
da9052_regmap_config.write_flag_mask = 0;
@@ -60,7 +60,7 @@ static int da9052_spi_probe(struct spi_device *spi)
static int da9052_spi_remove(struct spi_device *spi)
{
- struct da9052 *da9052 = dev_get_drvdata(&spi->dev);
+ struct da9052 *da9052 = spi_get_drvdata(spi);
da9052_device_exit(da9052);
return 0;
diff --git a/drivers/mfd/da9055-core.c b/drivers/mfd/da9055-core.c
index f56a1a9..49cb23d 100644
--- a/drivers/mfd/da9055-core.c
+++ b/drivers/mfd/da9055-core.c
@@ -391,7 +391,7 @@ int da9055_device_init(struct da9055 *da9055)
da9055->irq_base = pdata->irq_base;
ret = regmap_add_irq_chip(da9055->regmap, da9055->chip_irq,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
da9055->irq_base, &da9055_regmap_irq_chip,
&da9055->irq_data);
if (ret < 0)
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
index c0bcc87..c60ab0c 100644
--- a/drivers/mfd/davinci_voicecodec.c
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -177,17 +177,7 @@ static struct platform_driver davinci_vc_driver = {
.remove = davinci_vc_remove,
};
-static int __init davinci_vc_init(void)
-{
- return platform_driver_probe(&davinci_vc_driver, davinci_vc_probe);
-}
-module_init(davinci_vc_init);
-
-static void __exit davinci_vc_exit(void)
-{
- platform_driver_unregister(&davinci_vc_driver);
-}
-module_exit(davinci_vc_exit);
+module_platform_driver_probe(davinci_vc_driver, davinci_vc_probe);
MODULE_AUTHOR("Miguel Aguilar");
MODULE_DESCRIPTION("Texas Instruments DaVinci Voice Codec Core Interface");
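module_platform_driver_probe() is shorthand for the init/exit pair removed above; the same substitution is made for htc-pasic3 further down. Roughly what the macro stands for, using placeholder names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_driver = {
	.driver = { .name = "example" },
};

/* Equivalent open-coded form of
 * module_platform_driver_probe(example_driver, example_probe):
 */
static int __init example_driver_init(void)
{
	return platform_driver_probe(&example_driver, example_probe);
}
module_init(example_driver_init);

static void __exit example_driver_exit(void)
{
	platform_driver_unregister(&example_driver);
}
module_exit(example_driver_exit);

MODULE_LICENSE("GPL");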
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 21f261b..319b8ab 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -24,9 +24,9 @@
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/fs.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
-#include <linux/irqchip/arm-gic.h>
#include <linux/mfd/core.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <linux/mfd/abx500/ab8500.h>
@@ -34,9 +34,7 @@
#include <linux/regulator/machine.h>
#include <linux/cpufreq.h>
#include <linux/platform_data/ux500_wdt.h>
-#include <mach/hardware.h>
-#include <mach/irqs.h>
-#include <mach/db8500-regs.h>
+#include <linux/platform_data/db8500_thermal.h>
#include "dbx500-prcmu-regs.h"
/* Index of different voltages to be used when accessing AVSData */
@@ -276,8 +274,34 @@ static struct irq_domain *db8500_irq_domain;
* the bits in the bit field are not. (The bits also have a tendency to move
* around, to further complicate matters.)
*/
-#define IRQ_INDEX(_name) ((IRQ_PRCMU_##_name) - IRQ_PRCMU_BASE)
+#define IRQ_INDEX(_name) ((IRQ_PRCMU_##_name))
#define IRQ_ENTRY(_name)[IRQ_INDEX(_name)] = (WAKEUP_BIT_##_name)
+
+#define IRQ_PRCMU_RTC 0
+#define IRQ_PRCMU_RTT0 1
+#define IRQ_PRCMU_RTT1 2
+#define IRQ_PRCMU_HSI0 3
+#define IRQ_PRCMU_HSI1 4
+#define IRQ_PRCMU_CA_WAKE 5
+#define IRQ_PRCMU_USB 6
+#define IRQ_PRCMU_ABB 7
+#define IRQ_PRCMU_ABB_FIFO 8
+#define IRQ_PRCMU_ARM 9
+#define IRQ_PRCMU_MODEM_SW_RESET_REQ 10
+#define IRQ_PRCMU_GPIO0 11
+#define IRQ_PRCMU_GPIO1 12
+#define IRQ_PRCMU_GPIO2 13
+#define IRQ_PRCMU_GPIO3 14
+#define IRQ_PRCMU_GPIO4 15
+#define IRQ_PRCMU_GPIO5 16
+#define IRQ_PRCMU_GPIO6 17
+#define IRQ_PRCMU_GPIO7 18
+#define IRQ_PRCMU_GPIO8 19
+#define IRQ_PRCMU_CA_SLEEP 20
+#define IRQ_PRCMU_HOTMON_LOW 21
+#define IRQ_PRCMU_HOTMON_HIGH 22
+#define NUM_PRCMU_WAKEUPS 23
+
static u32 prcmu_irq_bit[NUM_PRCMU_WAKEUPS] = {
IRQ_ENTRY(RTC),
IRQ_ENTRY(RTT0),
@@ -422,9 +446,10 @@ static DEFINE_SPINLOCK(clkout_lock);
/* Global var to runtime determine TCDM base for v2 or v1 */
static __iomem void *tcdm_base;
+static __iomem void *prcmu_base;
struct clk_mgt {
- void __iomem *reg;
+ u32 offset;
u32 pllsw;
int branch;
bool clk38div;
@@ -599,9 +624,9 @@ int db8500_prcmu_set_display_clocks(void)
while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
cpu_relax();
- writel(PRCMU_DSI_CLOCK_SETTING, PRCM_HDMICLK_MGT);
- writel(PRCMU_DSI_LP_CLOCK_SETTING, PRCM_TVCLK_MGT);
- writel(PRCMU_DPI_CLOCK_SETTING, PRCM_LCDCLK_MGT);
+ writel(PRCMU_DSI_CLOCK_SETTING, prcmu_base + PRCM_HDMICLK_MGT);
+ writel(PRCMU_DSI_LP_CLOCK_SETTING, prcmu_base + PRCM_TVCLK_MGT);
+ writel(PRCMU_DPI_CLOCK_SETTING, prcmu_base + PRCM_LCDCLK_MGT);
/* Release the HW semaphore. */
writel(0, PRCM_SEM);
@@ -613,7 +638,7 @@ int db8500_prcmu_set_display_clocks(void)
u32 db8500_prcmu_read(unsigned int reg)
{
- return readl(_PRCMU_BASE + reg);
+ return readl(prcmu_base + reg);
}
void db8500_prcmu_write(unsigned int reg, u32 value)
@@ -621,7 +646,7 @@ void db8500_prcmu_write(unsigned int reg, u32 value)
unsigned long flags;
spin_lock_irqsave(&prcmu_lock, flags);
- writel(value, (_PRCMU_BASE + reg));
+ writel(value, (prcmu_base + reg));
spin_unlock_irqrestore(&prcmu_lock, flags);
}
@@ -631,9 +656,9 @@ void db8500_prcmu_write_masked(unsigned int reg, u32 mask, u32 value)
unsigned long flags;
spin_lock_irqsave(&prcmu_lock, flags);
- val = readl(_PRCMU_BASE + reg);
+ val = readl(prcmu_base + reg);
val = ((val & ~mask) | (value & mask));
- writel(val, (_PRCMU_BASE + reg));
+ writel(val, (prcmu_base + reg));
spin_unlock_irqrestore(&prcmu_lock, flags);
}
@@ -793,119 +818,6 @@ u8 db8500_prcmu_get_power_state_result(void)
return readb(tcdm_base + PRCM_ACK_MB0_AP_PWRSTTR_STATUS);
}
-/* This function decouple the gic from the prcmu */
-int db8500_prcmu_gic_decouple(void)
-{
- u32 val = readl(PRCM_A9_MASK_REQ);
-
- /* Set bit 0 register value to 1 */
- writel(val | PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ,
- PRCM_A9_MASK_REQ);
-
- /* Make sure the register is updated */
- readl(PRCM_A9_MASK_REQ);
-
- /* Wait a few cycles for the gic mask completion */
- udelay(1);
-
- return 0;
-}
-
-/* This function recouple the gic with the prcmu */
-int db8500_prcmu_gic_recouple(void)
-{
- u32 val = readl(PRCM_A9_MASK_REQ);
-
- /* Set bit 0 register value to 0 */
- writel(val & ~PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ, PRCM_A9_MASK_REQ);
-
- return 0;
-}
-
-#define PRCMU_GIC_NUMBER_REGS 5
-
-/*
- * This function checks if there are pending irq on the gic. It only
- * makes sense if the gic has been decoupled before with the
- * db8500_prcmu_gic_decouple function. Disabling an interrupt only
- * disables the forwarding of the interrupt to any CPU interface. It
- * does not prevent the interrupt from changing state, for example
- * becoming pending, or active and pending if it is already
- * active. Hence, we have to check the interrupt is pending *and* is
- * active.
- */
-bool db8500_prcmu_gic_pending_irq(void)
-{
- u32 pr; /* Pending register */
- u32 er; /* Enable register */
- void __iomem *dist_base = __io_address(U8500_GIC_DIST_BASE);
- int i;
-
- /* 5 registers. STI & PPI not skipped */
- for (i = 0; i < PRCMU_GIC_NUMBER_REGS; i++) {
-
- pr = readl_relaxed(dist_base + GIC_DIST_PENDING_SET + i * 4);
- er = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
-
- if (pr & er)
- return true; /* There is a pending interrupt */
- }
-
- return false;
-}
-
-/*
- * This function checks if there are pending interrupt on the
- * prcmu which has been delegated to monitor the irqs with the
- * db8500_prcmu_copy_gic_settings function.
- */
-bool db8500_prcmu_pending_irq(void)
-{
- u32 it, im;
- int i;
-
- for (i = 0; i < PRCMU_GIC_NUMBER_REGS - 1; i++) {
- it = readl(PRCM_ARMITVAL31TO0 + i * 4);
- im = readl(PRCM_ARMITMSK31TO0 + i * 4);
- if (it & im)
- return true; /* There is a pending interrupt */
- }
-
- return false;
-}
-
-/*
- * This function checks if the specified cpu is in in WFI. It's usage
- * makes sense only if the gic is decoupled with the db8500_prcmu_gic_decouple
- * function. Of course passing smp_processor_id() to this function will
- * always return false...
- */
-bool db8500_prcmu_is_cpu_in_wfi(int cpu)
-{
- return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 :
- PRCM_ARM_WFI_STANDBY_WFI0;
-}
-
-/*
- * This function copies the gic SPI settings to the prcmu in order to
- * monitor them and abort/finish the retention/off sequence or state.
- */
-int db8500_prcmu_copy_gic_settings(void)
-{
- u32 er; /* Enable register */
- void __iomem *dist_base = __io_address(U8500_GIC_DIST_BASE);
- int i;
-
- /* We skip the STI and PPI */
- for (i = 0; i < PRCMU_GIC_NUMBER_REGS - 1; i++) {
- er = readl_relaxed(dist_base +
- GIC_DIST_ENABLE_SET + (i + 1) * 4);
- writel(er, PRCM_ARMITMSK31TO0 + i * 4);
- }
-
- return 0;
-}
-
/* This function should only be called while mb0_transfer.lock is held. */
static void config_wakeups(void)
{
@@ -1059,7 +971,7 @@ int db8500_prcmu_set_ddr_opp(u8 opp)
/* Divide the frequency of certain clocks by 2 for APE_50_PARTLY_25_OPP. */
static void request_even_slower_clocks(bool enable)
{
- void __iomem *clock_reg[] = {
+ u32 clock_reg[] = {
PRCM_ACLK_MGT,
PRCM_DMACLK_MGT
};
@@ -1076,7 +988,7 @@ static void request_even_slower_clocks(bool enable)
u32 val;
u32 div;
- val = readl(clock_reg[i]);
+ val = readl(prcmu_base + clock_reg[i]);
div = (val & PRCM_CLK_MGT_CLKPLLDIV_MASK);
if (enable) {
if ((div <= 1) || (div > 15)) {
@@ -1092,7 +1004,7 @@ static void request_even_slower_clocks(bool enable)
}
val = ((val & ~PRCM_CLK_MGT_CLKPLLDIV_MASK) |
(div & PRCM_CLK_MGT_CLKPLLDIV_MASK));
- writel(val, clock_reg[i]);
+ writel(val, prcmu_base + clock_reg[i]);
}
unlock_and_return:
@@ -1446,14 +1358,14 @@ static int request_clock(u8 clock, bool enable)
while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
cpu_relax();
- val = readl(clk_mgt[clock].reg);
+ val = readl(prcmu_base + clk_mgt[clock].offset);
if (enable) {
val |= (PRCM_CLK_MGT_CLKEN | clk_mgt[clock].pllsw);
} else {
clk_mgt[clock].pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK);
val &= ~(PRCM_CLK_MGT_CLKEN | PRCM_CLK_MGT_CLKPLLSW_MASK);
}
- writel(val, clk_mgt[clock].reg);
+ writel(val, prcmu_base + clk_mgt[clock].offset);
/* Release the HW semaphore. */
writel(0, PRCM_SEM);
@@ -1629,7 +1541,7 @@ static unsigned long clock_rate(u8 clock)
u32 pllsw;
unsigned long rate = ROOT_CLOCK_RATE;
- val = readl(clk_mgt[clock].reg);
+ val = readl(prcmu_base + clk_mgt[clock].offset);
if (val & PRCM_CLK_MGT_CLK38) {
if (clk_mgt[clock].clk38div && (val & PRCM_CLK_MGT_CLK38DIV))
@@ -1785,7 +1697,7 @@ static long round_clock_rate(u8 clock, unsigned long rate)
unsigned long src_rate;
long rounded_rate;
- val = readl(clk_mgt[clock].reg);
+ val = readl(prcmu_base + clk_mgt[clock].offset);
src_rate = clock_source_rate((val | clk_mgt[clock].pllsw),
clk_mgt[clock].branch);
div = clock_divider(src_rate, rate);
@@ -1933,7 +1845,7 @@ static void set_clock_rate(u8 clock, unsigned long rate)
while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
cpu_relax();
- val = readl(clk_mgt[clock].reg);
+ val = readl(prcmu_base + clk_mgt[clock].offset);
src_rate = clock_source_rate((val | clk_mgt[clock].pllsw),
clk_mgt[clock].branch);
div = clock_divider(src_rate, rate);
@@ -1961,7 +1873,7 @@ static void set_clock_rate(u8 clock, unsigned long rate)
val &= ~PRCM_CLK_MGT_CLKPLLDIV_MASK;
val |= min(div, (u32)31);
}
- writel(val, clk_mgt[clock].reg);
+ writel(val, prcmu_base + clk_mgt[clock].offset);
/* Release the HW semaphore. */
writel(0, PRCM_SEM);
@@ -2764,14 +2676,13 @@ static struct irq_domain_ops db8500_irq_ops = {
.xlate = irq_domain_xlate_twocell,
};
-static int db8500_irq_init(struct device_node *np)
+static int db8500_irq_init(struct device_node *np, int irq_base)
{
- int irq_base = 0;
int i;
/* In the device tree case, just take some IRQs */
- if (!np)
- irq_base = IRQ_PRCMU_BASE;
+ if (np)
+ irq_base = 0;
db8500_irq_domain = irq_domain_add_simple(
np, NUM_PRCMU_WAKEUPS, irq_base,
@@ -2794,6 +2705,7 @@ static void dbx500_fw_version_init(struct platform_device *pdev,
{
struct resource *res;
void __iomem *tcpm_base;
+ u32 version;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"prcmu-tcpm");
@@ -2803,30 +2715,42 @@ static void dbx500_fw_version_init(struct platform_device *pdev,
return;
}
tcpm_base = ioremap(res->start, resource_size(res));
- if (tcpm_base != NULL) {
- u32 version;
-
- version = readl(tcpm_base + version_offset);
- fw_info.version.project = (version & 0xFF);
- fw_info.version.api_version = (version >> 8) & 0xFF;
- fw_info.version.func_version = (version >> 16) & 0xFF;
- fw_info.version.errata = (version >> 24) & 0xFF;
- strncpy(fw_info.version.project_name,
- fw_project_name(fw_info.version.project),
- PRCMU_FW_PROJECT_NAME_LEN);
- fw_info.valid = true;
- pr_info("PRCMU firmware: %s(%d), version %d.%d.%d\n",
- fw_info.version.project_name,
- fw_info.version.project,
- fw_info.version.api_version,
- fw_info.version.func_version,
- fw_info.version.errata);
- iounmap(tcpm_base);
+ if (!tcpm_base) {
+ dev_err(&pdev->dev, "no prcmu tcpm mem region provided\n");
+ return;
}
-}
-void __init db8500_prcmu_early_init(void)
+ version = readl(tcpm_base + version_offset);
+ fw_info.version.project = (version & 0xFF);
+ fw_info.version.api_version = (version >> 8) & 0xFF;
+ fw_info.version.func_version = (version >> 16) & 0xFF;
+ fw_info.version.errata = (version >> 24) & 0xFF;
+ strncpy(fw_info.version.project_name,
+ fw_project_name(fw_info.version.project),
+ PRCMU_FW_PROJECT_NAME_LEN);
+ fw_info.valid = true;
+ pr_info("PRCMU firmware: %s(%d), version %d.%d.%d\n",
+ fw_info.version.project_name,
+ fw_info.version.project,
+ fw_info.version.api_version,
+ fw_info.version.func_version,
+ fw_info.version.errata);
+ iounmap(tcpm_base);
+}
+
+void __init db8500_prcmu_early_init(u32 phy_base, u32 size)
{
+ /*
+ * This is a temporary remap to bring up the clocks. It is
+ * subsequently replaced with a real remap. After the merge of
+ * the mailbox subsystem all of this early code goes away, and the
+ * clock driver can probe independently. An early initcall will
+ * still be needed, but it can be diverted into drivers/clk/ux500.
+ */
+ prcmu_base = ioremap(phy_base, size);
+ if (!prcmu_base)
+ pr_err("%s: ioremap() of prcmu registers failed!\n", __func__);
+
spin_lock_init(&mb0_transfer.lock);
spin_lock_init(&mb0_transfer.dbb_irqs_lock);
mutex_init(&mb0_transfer.ac_wake_lock);
@@ -3092,18 +3016,66 @@ static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
},
};
-static struct resource ab8500_resources[] = {
- [0] = {
- .start = IRQ_DB8500_AB8500,
- .end = IRQ_DB8500_AB8500,
- .flags = IORESOURCE_IRQ
- }
-};
-
static struct ux500_wdt_data db8500_wdt_pdata = {
.timeout = 600, /* 10 minutes */
.has_28_bits_resolution = true,
};
+/*
+ * Thermal Sensor
+ */
+
+static struct resource db8500_thsens_resources[] = {
+ {
+ .name = "IRQ_HOTMON_LOW",
+ .start = IRQ_PRCMU_HOTMON_LOW,
+ .end = IRQ_PRCMU_HOTMON_LOW,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "IRQ_HOTMON_HIGH",
+ .start = IRQ_PRCMU_HOTMON_HIGH,
+ .end = IRQ_PRCMU_HOTMON_HIGH,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct db8500_thsens_platform_data db8500_thsens_data = {
+ .trip_points[0] = {
+ .temp = 70000,
+ .type = THERMAL_TRIP_ACTIVE,
+ .cdev_name = {
+ [0] = "thermal-cpufreq-0",
+ },
+ },
+ .trip_points[1] = {
+ .temp = 75000,
+ .type = THERMAL_TRIP_ACTIVE,
+ .cdev_name = {
+ [0] = "thermal-cpufreq-0",
+ },
+ },
+ .trip_points[2] = {
+ .temp = 80000,
+ .type = THERMAL_TRIP_ACTIVE,
+ .cdev_name = {
+ [0] = "thermal-cpufreq-0",
+ },
+ },
+ .trip_points[3] = {
+ .temp = 85000,
+ .type = THERMAL_TRIP_CRITICAL,
+ },
+ .num_trips = 4,
+};
+
+static struct mfd_cell common_prcmu_devs[] = {
+ {
+ .name = "ux500_wdt",
+ .platform_data = &db8500_wdt_pdata,
+ .pdata_size = sizeof(db8500_wdt_pdata),
+ .id = -1,
+ },
+};
static struct mfd_cell db8500_prcmu_devs[] = {
{
@@ -3119,17 +3091,10 @@ static struct mfd_cell db8500_prcmu_devs[] = {
.pdata_size = sizeof(db8500_cpufreq_table),
},
{
- .name = "ux500_wdt",
- .platform_data = &db8500_wdt_pdata,
- .pdata_size = sizeof(db8500_wdt_pdata),
- .id = -1,
- },
- {
- .name = "ab8500-core",
- .of_compatible = "stericsson,ab8500",
- .num_resources = ARRAY_SIZE(ab8500_resources),
- .resources = ab8500_resources,
- .id = AB8500_VERSION_AB8500,
+ .name = "db8500-thermal",
+ .num_resources = ARRAY_SIZE(db8500_thsens_resources),
+ .resources = db8500_thsens_resources,
+ .platform_data = &db8500_thsens_data,
},
};
@@ -3141,6 +3106,24 @@ static void db8500_prcmu_update_cpufreq(void)
}
}
+static int db8500_prcmu_register_ab8500(struct device *parent,
+ struct ab8500_platform_data *pdata,
+ int irq)
+{
+ struct resource ab8500_resource = DEFINE_RES_IRQ(irq);
+ struct mfd_cell ab8500_cell = {
+ .name = "ab8500-core",
+ .of_compatible = "stericsson,ab8500",
+ .id = AB8500_VERSION_AB8500,
+ .platform_data = pdata,
+ .pdata_size = sizeof(struct ab8500_platform_data),
+ .resources = &ab8500_resource,
+ .num_resources = 1,
+ };
+
+ return mfd_add_devices(parent, 0, &ab8500_cell, 1, NULL, 0, NULL);
+}
+
/**
* prcmu_fw_init - arch init call for the Linux PRCMU fw init logic
*
@@ -3149,11 +3132,21 @@ static int db8500_prcmu_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct prcmu_pdata *pdata = dev_get_platdata(&pdev->dev);
- int irq = 0, err = 0, i;
+ int irq = 0, err = 0;
struct resource *res;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "prcmu");
+ if (!res) {
+ dev_err(&pdev->dev, "no prcmu memory region provided\n");
+ return -ENOENT;
+ }
+ prcmu_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!prcmu_base) {
+ dev_err(&pdev->dev,
+ "failed to ioremap prcmu register memory\n");
+ return -ENOENT;
+ }
init_prcm_registers();
-
dbx500_fw_version_init(pdev, pdata->version_offset);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "prcmu-tcdm");
if (!res) {
@@ -3180,26 +3173,39 @@ static int db8500_prcmu_probe(struct platform_device *pdev)
goto no_irq_return;
}
- db8500_irq_init(np);
-
- for (i = 0; i < ARRAY_SIZE(db8500_prcmu_devs); i++) {
- if (!strcmp(db8500_prcmu_devs[i].name, "ab8500-core")) {
- db8500_prcmu_devs[i].platform_data = pdata->ab_platdata;
- db8500_prcmu_devs[i].pdata_size = sizeof(struct ab8500_platform_data);
- }
- }
+ db8500_irq_init(np, pdata->irq_base);
prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
db8500_prcmu_update_cpufreq();
- err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
- ARRAY_SIZE(db8500_prcmu_devs), NULL, 0, NULL);
+ err = mfd_add_devices(&pdev->dev, 0, common_prcmu_devs,
+ ARRAY_SIZE(common_prcmu_devs), NULL, 0, db8500_irq_domain);
if (err) {
pr_err("prcmu: Failed to add subdevices\n");
return err;
}
+ /* TODO: Remove restriction when clk definitions are available. */
+ if (!of_machine_is_compatible("st-ericsson,u8540")) {
+ err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
+ ARRAY_SIZE(db8500_prcmu_devs), NULL, 0,
+ db8500_irq_domain);
+ if (err) {
+ mfd_remove_devices(&pdev->dev);
+ pr_err("prcmu: Failed to add subdevices\n");
+ goto no_irq_return;
+ }
+ }
+
+ err = db8500_prcmu_register_ab8500(&pdev->dev, pdata->ab_platdata,
+ pdata->ab_irq);
+ if (err) {
+ mfd_remove_devices(&pdev->dev);
+ pr_err("prcmu: Failed to add ab8500 subdevice\n");
+ goto no_irq_return;
+ }
+
pr_info("DB8500 PRCMU initialized\n");
no_irq_return:
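Most of the churn in this file, and in dbx500-prcmu-regs.h below, is one mechanical change: register macros stop encoding an absolute virtual address and become offsets, and every access goes through the base that probe (or the early init) ioremaps from the "prcmu" region. A minimal sketch of the resulting accessor style, with hypothetical names:

#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_ACLK_MGT	0x004	/* offset into the PRCMU block, not an address */

static void __iomem *example_prcmu_base;	/* set via (devm_)ioremap() at probe time */

static u32 example_prcmu_read(unsigned int offset)
{
	return readl(example_prcmu_base + offset);
}

static void example_prcmu_write(unsigned int offset, u32 value)
{
	writel(value, example_prcmu_base + offset);
}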
diff --git a/drivers/mfd/dbx500-prcmu-regs.h b/drivers/mfd/dbx500-prcmu-regs.h
index 79c76eb..d14836e 100644
--- a/drivers/mfd/dbx500-prcmu-regs.h
+++ b/drivers/mfd/dbx500-prcmu-regs.h
@@ -13,136 +13,110 @@
#ifndef __DB8500_PRCMU_REGS_H
#define __DB8500_PRCMU_REGS_H
-#include <mach/hardware.h>
-
#define BITS(_start, _end) ((BIT(_end) - BIT(_start)) + BIT(_end))
-#define PRCM_CLK_MGT(_offset) (void __iomem *)(IO_ADDRESS(U8500_PRCMU_BASE) \
- + _offset)
-#define PRCM_ACLK_MGT PRCM_CLK_MGT(0x004)
-#define PRCM_SVACLK_MGT PRCM_CLK_MGT(0x008)
-#define PRCM_SIACLK_MGT PRCM_CLK_MGT(0x00C)
-#define PRCM_SGACLK_MGT PRCM_CLK_MGT(0x014)
-#define PRCM_UARTCLK_MGT PRCM_CLK_MGT(0x018)
-#define PRCM_MSP02CLK_MGT PRCM_CLK_MGT(0x01C)
-#define PRCM_I2CCLK_MGT PRCM_CLK_MGT(0x020)
-#define PRCM_SDMMCCLK_MGT PRCM_CLK_MGT(0x024)
-#define PRCM_SLIMCLK_MGT PRCM_CLK_MGT(0x028)
-#define PRCM_PER1CLK_MGT PRCM_CLK_MGT(0x02C)
-#define PRCM_PER2CLK_MGT PRCM_CLK_MGT(0x030)
-#define PRCM_PER3CLK_MGT PRCM_CLK_MGT(0x034)
-#define PRCM_PER5CLK_MGT PRCM_CLK_MGT(0x038)
-#define PRCM_PER6CLK_MGT PRCM_CLK_MGT(0x03C)
-#define PRCM_PER7CLK_MGT PRCM_CLK_MGT(0x040)
-#define PRCM_LCDCLK_MGT PRCM_CLK_MGT(0x044)
-#define PRCM_BMLCLK_MGT PRCM_CLK_MGT(0x04C)
-#define PRCM_HSITXCLK_MGT PRCM_CLK_MGT(0x050)
-#define PRCM_HSIRXCLK_MGT PRCM_CLK_MGT(0x054)
-#define PRCM_HDMICLK_MGT PRCM_CLK_MGT(0x058)
-#define PRCM_APEATCLK_MGT PRCM_CLK_MGT(0x05C)
-#define PRCM_APETRACECLK_MGT PRCM_CLK_MGT(0x060)
-#define PRCM_MCDECLK_MGT PRCM_CLK_MGT(0x064)
-#define PRCM_IPI2CCLK_MGT PRCM_CLK_MGT(0x068)
-#define PRCM_DSIALTCLK_MGT PRCM_CLK_MGT(0x06C)
-#define PRCM_DMACLK_MGT PRCM_CLK_MGT(0x074)
-#define PRCM_B2R2CLK_MGT PRCM_CLK_MGT(0x078)
-#define PRCM_TVCLK_MGT PRCM_CLK_MGT(0x07C)
-#define PRCM_UNIPROCLK_MGT PRCM_CLK_MGT(0x278)
-#define PRCM_SSPCLK_MGT PRCM_CLK_MGT(0x280)
-#define PRCM_RNGCLK_MGT PRCM_CLK_MGT(0x284)
-#define PRCM_UICCCLK_MGT PRCM_CLK_MGT(0x27C)
-#define PRCM_MSP1CLK_MGT PRCM_CLK_MGT(0x288)
-
-#define PRCM_ARM_PLLDIVPS (_PRCMU_BASE + 0x118)
+#define PRCM_ACLK_MGT (0x004)
+#define PRCM_SVACLK_MGT (0x008)
+#define PRCM_SIACLK_MGT (0x00C)
+#define PRCM_SGACLK_MGT (0x014)
+#define PRCM_UARTCLK_MGT (0x018)
+#define PRCM_MSP02CLK_MGT (0x01C)
+#define PRCM_I2CCLK_MGT (0x020)
+#define PRCM_SDMMCCLK_MGT (0x024)
+#define PRCM_SLIMCLK_MGT (0x028)
+#define PRCM_PER1CLK_MGT (0x02C)
+#define PRCM_PER2CLK_MGT (0x030)
+#define PRCM_PER3CLK_MGT (0x034)
+#define PRCM_PER5CLK_MGT (0x038)
+#define PRCM_PER6CLK_MGT (0x03C)
+#define PRCM_PER7CLK_MGT (0x040)
+#define PRCM_LCDCLK_MGT (0x044)
+#define PRCM_BMLCLK_MGT (0x04C)
+#define PRCM_HSITXCLK_MGT (0x050)
+#define PRCM_HSIRXCLK_MGT (0x054)
+#define PRCM_HDMICLK_MGT (0x058)
+#define PRCM_APEATCLK_MGT (0x05C)
+#define PRCM_APETRACECLK_MGT (0x060)
+#define PRCM_MCDECLK_MGT (0x064)
+#define PRCM_IPI2CCLK_MGT (0x068)
+#define PRCM_DSIALTCLK_MGT (0x06C)
+#define PRCM_DMACLK_MGT (0x074)
+#define PRCM_B2R2CLK_MGT (0x078)
+#define PRCM_TVCLK_MGT (0x07C)
+#define PRCM_UNIPROCLK_MGT (0x278)
+#define PRCM_SSPCLK_MGT (0x280)
+#define PRCM_RNGCLK_MGT (0x284)
+#define PRCM_UICCCLK_MGT (0x27C)
+#define PRCM_MSP1CLK_MGT (0x288)
+
+#define PRCM_ARM_PLLDIVPS (prcmu_base + 0x118)
#define PRCM_ARM_PLLDIVPS_ARM_BRM_RATE 0x3f
#define PRCM_ARM_PLLDIVPS_MAX_MASK 0xf
-#define PRCM_PLLARM_LOCKP (_PRCMU_BASE + 0x0a8)
+#define PRCM_PLLARM_LOCKP (prcmu_base + 0x0a8)
#define PRCM_PLLARM_LOCKP_PRCM_PLLARM_LOCKP3 0x2
-#define PRCM_ARM_CHGCLKREQ (_PRCMU_BASE + 0x114)
+#define PRCM_ARM_CHGCLKREQ (prcmu_base + 0x114)
#define PRCM_ARM_CHGCLKREQ_PRCM_ARM_CHGCLKREQ BIT(0)
#define PRCM_ARM_CHGCLKREQ_PRCM_ARM_DIVSEL BIT(16)
-#define PRCM_PLLARM_ENABLE (_PRCMU_BASE + 0x98)
+#define PRCM_PLLARM_ENABLE (prcmu_base + 0x98)
#define PRCM_PLLARM_ENABLE_PRCM_PLLARM_ENABLE 0x1
#define PRCM_PLLARM_ENABLE_PRCM_PLLARM_COUNTON 0x100
-#define PRCM_ARMCLKFIX_MGT (_PRCMU_BASE + 0x0)
-#define PRCM_A9PL_FORCE_CLKEN (_PRCMU_BASE + 0x19C)
-#define PRCM_A9_RESETN_CLR (_PRCMU_BASE + 0x1f4)
-#define PRCM_A9_RESETN_SET (_PRCMU_BASE + 0x1f0)
-#define PRCM_ARM_LS_CLAMP (_PRCMU_BASE + 0x30c)
-#define PRCM_SRAM_A9 (_PRCMU_BASE + 0x308)
+#define PRCM_ARMCLKFIX_MGT (prcmu_base + 0x0)
+#define PRCM_A9PL_FORCE_CLKEN (prcmu_base + 0x19C)
+#define PRCM_A9_RESETN_CLR (prcmu_base + 0x1f4)
+#define PRCM_A9_RESETN_SET (prcmu_base + 0x1f0)
+#define PRCM_ARM_LS_CLAMP (prcmu_base + 0x30c)
+#define PRCM_SRAM_A9 (prcmu_base + 0x308)
#define PRCM_A9PL_FORCE_CLKEN_PRCM_A9PL_FORCE_CLKEN BIT(0)
#define PRCM_A9PL_FORCE_CLKEN_PRCM_A9AXI_FORCE_CLKEN BIT(1)
-/* ARM WFI Standby signal register */
-#define PRCM_ARM_WFI_STANDBY (_PRCMU_BASE + 0x130)
-#define PRCM_ARM_WFI_STANDBY_WFI0 0x08
-#define PRCM_ARM_WFI_STANDBY_WFI1 0x10
-#define PRCM_IOCR (_PRCMU_BASE + 0x310)
-#define PRCM_IOCR_IOFORCE 0x1
-
/* CPU mailbox registers */
-#define PRCM_MBOX_CPU_VAL (_PRCMU_BASE + 0x0fc)
-#define PRCM_MBOX_CPU_SET (_PRCMU_BASE + 0x100)
-#define PRCM_MBOX_CPU_CLR (_PRCMU_BASE + 0x104)
-
-/* Dual A9 core interrupt management unit registers */
-#define PRCM_A9_MASK_REQ (_PRCMU_BASE + 0x328)
-#define PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ 0x1
-
-#define PRCM_A9_MASK_ACK (_PRCMU_BASE + 0x32c)
-#define PRCM_ARMITMSK31TO0 (_PRCMU_BASE + 0x11c)
-#define PRCM_ARMITMSK63TO32 (_PRCMU_BASE + 0x120)
-#define PRCM_ARMITMSK95TO64 (_PRCMU_BASE + 0x124)
-#define PRCM_ARMITMSK127TO96 (_PRCMU_BASE + 0x128)
-#define PRCM_POWER_STATE_VAL (_PRCMU_BASE + 0x25C)
-#define PRCM_ARMITVAL31TO0 (_PRCMU_BASE + 0x260)
-#define PRCM_ARMITVAL63TO32 (_PRCMU_BASE + 0x264)
-#define PRCM_ARMITVAL95TO64 (_PRCMU_BASE + 0x268)
-#define PRCM_ARMITVAL127TO96 (_PRCMU_BASE + 0x26C)
-
-#define PRCM_HOSTACCESS_REQ (_PRCMU_BASE + 0x334)
+#define PRCM_MBOX_CPU_VAL (prcmu_base + 0x0fc)
+#define PRCM_MBOX_CPU_SET (prcmu_base + 0x100)
+#define PRCM_MBOX_CPU_CLR (prcmu_base + 0x104)
+
+#define PRCM_HOSTACCESS_REQ (prcmu_base + 0x334)
#define PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ 0x1
#define PRCM_HOSTACCESS_REQ_WAKE_REQ BIT(16)
#define ARM_WAKEUP_MODEM 0x1
-#define PRCM_ARM_IT1_CLR (_PRCMU_BASE + 0x48C)
-#define PRCM_ARM_IT1_VAL (_PRCMU_BASE + 0x494)
-#define PRCM_HOLD_EVT (_PRCMU_BASE + 0x174)
+#define PRCM_ARM_IT1_CLR (prcmu_base + 0x48C)
+#define PRCM_ARM_IT1_VAL (prcmu_base + 0x494)
+#define PRCM_HOLD_EVT (prcmu_base + 0x174)
-#define PRCM_MOD_AWAKE_STATUS (_PRCMU_BASE + 0x4A0)
+#define PRCM_MOD_AWAKE_STATUS (prcmu_base + 0x4A0)
#define PRCM_MOD_AWAKE_STATUS_PRCM_MOD_COREPD_AWAKE BIT(0)
#define PRCM_MOD_AWAKE_STATUS_PRCM_MOD_AAPD_AWAKE BIT(1)
#define PRCM_MOD_AWAKE_STATUS_PRCM_MOD_VMODEM_OFF_ISO BIT(2)
-#define PRCM_ITSTATUS0 (_PRCMU_BASE + 0x148)
-#define PRCM_ITSTATUS1 (_PRCMU_BASE + 0x150)
-#define PRCM_ITSTATUS2 (_PRCMU_BASE + 0x158)
-#define PRCM_ITSTATUS3 (_PRCMU_BASE + 0x160)
-#define PRCM_ITSTATUS4 (_PRCMU_BASE + 0x168)
-#define PRCM_ITSTATUS5 (_PRCMU_BASE + 0x484)
-#define PRCM_ITCLEAR5 (_PRCMU_BASE + 0x488)
-#define PRCM_ARMIT_MASKXP70_IT (_PRCMU_BASE + 0x1018)
+#define PRCM_ITSTATUS0 (prcmu_base + 0x148)
+#define PRCM_ITSTATUS1 (prcmu_base + 0x150)
+#define PRCM_ITSTATUS2 (prcmu_base + 0x158)
+#define PRCM_ITSTATUS3 (prcmu_base + 0x160)
+#define PRCM_ITSTATUS4 (prcmu_base + 0x168)
+#define PRCM_ITSTATUS5 (prcmu_base + 0x484)
+#define PRCM_ITCLEAR5 (prcmu_base + 0x488)
+#define PRCM_ARMIT_MASKXP70_IT (prcmu_base + 0x1018)
/* System reset register */
-#define PRCM_APE_SOFTRST (_PRCMU_BASE + 0x228)
+#define PRCM_APE_SOFTRST (prcmu_base + 0x228)
/* Level shifter and clamp control registers */
-#define PRCM_MMIP_LS_CLAMP_SET (_PRCMU_BASE + 0x420)
-#define PRCM_MMIP_LS_CLAMP_CLR (_PRCMU_BASE + 0x424)
+#define PRCM_MMIP_LS_CLAMP_SET (prcmu_base + 0x420)
+#define PRCM_MMIP_LS_CLAMP_CLR (prcmu_base + 0x424)
#define PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMP BIT(11)
#define PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMPI BIT(22)
/* PRCMU clock/PLL/reset registers */
-#define PRCM_PLLSOC0_FREQ (_PRCMU_BASE + 0x080)
-#define PRCM_PLLSOC1_FREQ (_PRCMU_BASE + 0x084)
-#define PRCM_PLLARM_FREQ (_PRCMU_BASE + 0x088)
-#define PRCM_PLLDDR_FREQ (_PRCMU_BASE + 0x08C)
+#define PRCM_PLLSOC0_FREQ (prcmu_base + 0x080)
+#define PRCM_PLLSOC1_FREQ (prcmu_base + 0x084)
+#define PRCM_PLLARM_FREQ (prcmu_base + 0x088)
+#define PRCM_PLLDDR_FREQ (prcmu_base + 0x08C)
#define PRCM_PLL_FREQ_D_SHIFT 0
#define PRCM_PLL_FREQ_D_MASK BITS(0, 7)
#define PRCM_PLL_FREQ_N_SHIFT 8
@@ -152,14 +126,14 @@
#define PRCM_PLL_FREQ_SELDIV2 BIT(24)
#define PRCM_PLL_FREQ_DIV2EN BIT(25)
-#define PRCM_PLLDSI_FREQ (_PRCMU_BASE + 0x500)
-#define PRCM_PLLDSI_ENABLE (_PRCMU_BASE + 0x504)
-#define PRCM_PLLDSI_LOCKP (_PRCMU_BASE + 0x508)
-#define PRCM_DSI_PLLOUT_SEL (_PRCMU_BASE + 0x530)
-#define PRCM_DSITVCLK_DIV (_PRCMU_BASE + 0x52C)
-#define PRCM_PLLDSI_LOCKP (_PRCMU_BASE + 0x508)
-#define PRCM_APE_RESETN_SET (_PRCMU_BASE + 0x1E4)
-#define PRCM_APE_RESETN_CLR (_PRCMU_BASE + 0x1E8)
+#define PRCM_PLLDSI_FREQ (prcmu_base + 0x500)
+#define PRCM_PLLDSI_ENABLE (prcmu_base + 0x504)
+#define PRCM_PLLDSI_LOCKP (prcmu_base + 0x508)
+#define PRCM_DSI_PLLOUT_SEL (prcmu_base + 0x530)
+#define PRCM_DSITVCLK_DIV (prcmu_base + 0x52C)
+#define PRCM_PLLDSI_LOCKP (prcmu_base + 0x508)
+#define PRCM_APE_RESETN_SET (prcmu_base + 0x1E4)
+#define PRCM_APE_RESETN_CLR (prcmu_base + 0x1E8)
#define PRCM_PLLDSI_ENABLE_PRCM_PLLDSI_ENABLE BIT(0)
@@ -188,30 +162,30 @@
#define PRCM_APE_RESETN_DSIPLL_RESETN BIT(14)
-#define PRCM_CLKOCR (_PRCMU_BASE + 0x1CC)
+#define PRCM_CLKOCR (prcmu_base + 0x1CC)
#define PRCM_CLKOCR_CLKOUT0_REF_CLK (1 << 0)
#define PRCM_CLKOCR_CLKOUT0_MASK BITS(0, 13)
#define PRCM_CLKOCR_CLKOUT1_REF_CLK (1 << 16)
#define PRCM_CLKOCR_CLKOUT1_MASK BITS(16, 29)
/* ePOD and memory power signal control registers */
-#define PRCM_EPOD_C_SET (_PRCMU_BASE + 0x410)
-#define PRCM_SRAM_LS_SLEEP (_PRCMU_BASE + 0x304)
+#define PRCM_EPOD_C_SET (prcmu_base + 0x410)
+#define PRCM_SRAM_LS_SLEEP (prcmu_base + 0x304)
/* Debug power control unit registers */
-#define PRCM_POWER_STATE_SET (_PRCMU_BASE + 0x254)
+#define PRCM_POWER_STATE_SET (prcmu_base + 0x254)
/* Miscellaneous unit registers */
-#define PRCM_DSI_SW_RESET (_PRCMU_BASE + 0x324)
-#define PRCM_GPIOCR (_PRCMU_BASE + 0x138)
+#define PRCM_DSI_SW_RESET (prcmu_base + 0x324)
+#define PRCM_GPIOCR (prcmu_base + 0x138)
#define PRCM_GPIOCR_DBG_STM_MOD_CMD1 0x800
#define PRCM_GPIOCR_DBG_UARTMOD_CMD0 0x1
/* PRCMU HW semaphore */
-#define PRCM_SEM (_PRCMU_BASE + 0x400)
+#define PRCM_SEM (prcmu_base + 0x400)
#define PRCM_SEM_PRCM_SEM BIT(0)
-#define PRCM_TCR (_PRCMU_BASE + 0x1C8)
+#define PRCM_TCR (prcmu_base + 0x1C8)
#define PRCM_TCR_TENSEL_MASK BITS(0, 7)
#define PRCM_TCR_STOP_TIMERS BIT(16)
#define PRCM_TCR_DOZE_MODE BIT(17)
@@ -239,15 +213,15 @@
/* GPIOCR register */
#define PRCM_GPIOCR_SPI2_SELECT BIT(23)
-#define PRCM_DDR_SUBSYS_APE_MINBW (_PRCMU_BASE + 0x438)
-#define PRCM_CGATING_BYPASS (_PRCMU_BASE + 0x134)
+#define PRCM_DDR_SUBSYS_APE_MINBW (prcmu_base + 0x438)
+#define PRCM_CGATING_BYPASS (prcmu_base + 0x134)
#define PRCM_CGATING_BYPASS_ICN2 BIT(6)
/* Miscellaneous unit registers */
-#define PRCM_RESOUTN_SET (_PRCMU_BASE + 0x214)
-#define PRCM_RESOUTN_CLR (_PRCMU_BASE + 0x218)
+#define PRCM_RESOUTN_SET (prcmu_base + 0x214)
+#define PRCM_RESOUTN_CLR (prcmu_base + 0x218)
/* System reset register */
-#define PRCM_APE_SOFTRST (_PRCMU_BASE + 0x228)
+#define PRCM_APE_SOFTRST (prcmu_base + 0x228)
#endif /* __DB8500_PRCMU_REGS_H */
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index b7a61f0..5502106 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -393,7 +393,7 @@ static int pcap_add_subdev(struct pcap_chip *pcap,
static int ezx_pcap_remove(struct spi_device *spi)
{
- struct pcap_chip *pcap = dev_get_drvdata(&spi->dev);
+ struct pcap_chip *pcap = spi_get_drvdata(spi);
struct pcap_platform_data *pdata = spi->dev.platform_data;
int i, adc_irq;
@@ -403,7 +403,7 @@ static int ezx_pcap_remove(struct spi_device *spi)
/* cleanup ADC */
adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ?
PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE);
- free_irq(adc_irq, pcap);
+ devm_free_irq(&spi->dev, adc_irq, pcap);
mutex_lock(&pcap->adc_mutex);
for (i = 0; i < PCAP_ADC_MAXQ; i++)
kfree(pcap->adc_queue[i]);
@@ -415,8 +415,6 @@ static int ezx_pcap_remove(struct spi_device *spi)
destroy_workqueue(pcap->workqueue);
- kfree(pcap);
-
return 0;
}
@@ -431,7 +429,7 @@ static int ezx_pcap_probe(struct spi_device *spi)
if (!pdata)
goto ret;
- pcap = kzalloc(sizeof(*pcap), GFP_KERNEL);
+ pcap = devm_kzalloc(&spi->dev, sizeof(*pcap), GFP_KERNEL);
if (!pcap) {
ret = -ENOMEM;
goto ret;
@@ -441,14 +439,14 @@ static int ezx_pcap_probe(struct spi_device *spi)
mutex_init(&pcap->adc_mutex);
INIT_WORK(&pcap->isr_work, pcap_isr_work);
INIT_WORK(&pcap->msr_work, pcap_msr_work);
- dev_set_drvdata(&spi->dev, pcap);
+ spi_set_drvdata(spi, pcap);
/* setup spi */
spi->bits_per_word = 32;
spi->mode = SPI_MODE_0 | (pdata->config & PCAP_CS_AH ? SPI_CS_HIGH : 0);
ret = spi_setup(spi);
if (ret)
- goto free_pcap;
+ goto ret;
pcap->spi = spi;
@@ -458,7 +456,7 @@ static int ezx_pcap_probe(struct spi_device *spi)
if (!pcap->workqueue) {
ret = -ENOMEM;
dev_err(&spi->dev, "can't create pcap thread\n");
- goto free_pcap;
+ goto ret;
}
/* redirect interrupts to AP, except adcdone2 */
@@ -491,7 +489,8 @@ static int ezx_pcap_probe(struct spi_device *spi)
adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ?
PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE);
- ret = request_irq(adc_irq, pcap_adc_irq, 0, "ADC", pcap);
+ ret = devm_request_irq(&spi->dev, adc_irq, pcap_adc_irq, 0, "ADC",
+ pcap);
if (ret)
goto free_irqchip;
@@ -511,14 +510,12 @@ static int ezx_pcap_probe(struct spi_device *spi)
remove_subdevs:
device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);
/* free_adc: */
- free_irq(adc_irq, pcap);
+ devm_free_irq(&spi->dev, adc_irq, pcap);
free_irqchip:
for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
irq_set_chip_and_handler(i, NULL, NULL);
/* destroy_workqueue: */
destroy_workqueue(pcap->workqueue);
-free_pcap:
- kfree(pcap);
ret:
return ret;
}
diff --git a/drivers/mfd/htc-pasic3.c b/drivers/mfd/htc-pasic3.c
index 9e5453d..0285fce 100644
--- a/drivers/mfd/htc-pasic3.c
+++ b/drivers/mfd/htc-pasic3.c
@@ -208,18 +208,7 @@ static struct platform_driver pasic3_driver = {
.remove = pasic3_remove,
};
-static int __init pasic3_base_init(void)
-{
- return platform_driver_probe(&pasic3_driver, pasic3_probe);
-}
-
-static void __exit pasic3_base_exit(void)
-{
- platform_driver_unregister(&pasic3_driver);
-}
-
-module_init(pasic3_base_init);
-module_exit(pasic3_base_exit);
+module_platform_driver_probe(pasic3_driver, pasic3_probe);
MODULE_AUTHOR("Philipp Zabel <philipp.zabel@gmail.com>");
MODULE_DESCRIPTION("Core driver for HTC PASIC3");
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
index 1804331..5be3b5e 100644
--- a/drivers/mfd/intel_msic.c
+++ b/drivers/mfd/intel_msic.c
@@ -323,7 +323,8 @@ static int intel_msic_init_devices(struct intel_msic *msic)
if (pdata->ocd) {
unsigned gpio = pdata->ocd->gpio;
- ret = gpio_request_one(gpio, GPIOF_IN, "ocd_gpio");
+ ret = devm_gpio_request_one(&pdev->dev, gpio,
+ GPIOF_IN, "ocd_gpio");
if (ret) {
dev_err(&pdev->dev, "failed to register OCD GPIO\n");
return ret;
@@ -332,7 +333,6 @@ static int intel_msic_init_devices(struct intel_msic *msic)
ret = gpio_to_irq(gpio);
if (ret < 0) {
dev_err(&pdev->dev, "no IRQ number for OCD GPIO\n");
- gpio_free(gpio);
return ret;
}
@@ -359,8 +359,6 @@ static int intel_msic_init_devices(struct intel_msic *msic)
fail:
mfd_remove_devices(&pdev->dev);
- if (pdata->ocd)
- gpio_free(pdata->ocd->gpio);
return ret;
}
@@ -368,12 +366,8 @@ fail:
static void intel_msic_remove_devices(struct intel_msic *msic)
{
struct platform_device *pdev = msic->pdev;
- struct intel_msic_platform_data *pdata = pdev->dev.platform_data;
mfd_remove_devices(&pdev->dev);
-
- if (pdata->ocd)
- gpio_free(pdata->ocd->gpio);
}
static int intel_msic_probe(struct platform_device *pdev)
diff --git a/drivers/mfd/lm3533-core.c b/drivers/mfd/lm3533-core.c
index ceebf2c..4b7e6da 100644
--- a/drivers/mfd/lm3533-core.c
+++ b/drivers/mfd/lm3533-core.c
@@ -496,8 +496,8 @@ static int lm3533_device_init(struct lm3533 *lm3533)
dev_set_drvdata(lm3533->dev, lm3533);
if (gpio_is_valid(lm3533->gpio_hwen)) {
- ret = gpio_request_one(lm3533->gpio_hwen, GPIOF_OUT_INIT_LOW,
- "lm3533-hwen");
+ ret = devm_gpio_request_one(lm3533->dev, lm3533->gpio_hwen,
+ GPIOF_OUT_INIT_LOW, "lm3533-hwen");
if (ret < 0) {
dev_err(lm3533->dev,
"failed to request HWEN GPIO %d\n",
@@ -528,8 +528,6 @@ err_unregister:
mfd_remove_devices(lm3533->dev);
err_disable:
lm3533_disable(lm3533);
- if (gpio_is_valid(lm3533->gpio_hwen))
- gpio_free(lm3533->gpio_hwen);
return ret;
}
@@ -542,8 +540,6 @@ static void lm3533_device_exit(struct lm3533 *lm3533)
mfd_remove_devices(lm3533->dev);
lm3533_disable(lm3533);
- if (gpio_is_valid(lm3533->gpio_hwen))
- gpio_free(lm3533->gpio_hwen);
}
static bool lm3533_readable_register(struct device *dev, unsigned int reg)
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index 4d73963..1cbb176 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -46,7 +46,7 @@ static struct regmap_config max77686_regmap_config = {
#ifdef CONFIG_OF
static struct of_device_id max77686_pmic_dt_match[] = {
- {.compatible = "maxim,max77686", .data = 0},
+ {.compatible = "maxim,max77686", .data = NULL},
{},
};
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
index 3032bae..77189da 100644
--- a/drivers/mfd/mc13xxx-spi.c
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -131,7 +131,7 @@ static int mc13xxx_spi_probe(struct spi_device *spi)
if (!mc13xxx)
return -ENOMEM;
- dev_set_drvdata(&spi->dev, mc13xxx);
+ spi_set_drvdata(spi, mc13xxx);
spi->mode = SPI_MODE_0 | SPI_CS_HIGH;
mc13xxx->dev = &spi->dev;
@@ -144,7 +144,7 @@ static int mc13xxx_spi_probe(struct spi_device *spi)
ret = PTR_ERR(mc13xxx->regmap);
dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
ret);
- dev_set_drvdata(&spi->dev, NULL);
+ spi_set_drvdata(spi, NULL);
return ret;
}
@@ -164,7 +164,7 @@ static int mc13xxx_spi_probe(struct spi_device *spi)
static int mc13xxx_spi_remove(struct spi_device *spi)
{
- struct mc13xxx *mc13xxx = dev_get_drvdata(&spi->dev);
+ struct mc13xxx *mc13xxx = spi_get_drvdata(spi);
mc13xxx_common_cleanup(mc13xxx);
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 4febc5c..759fae3 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -1,8 +1,9 @@
/**
* omap-usb-host.c - The USBHS core driver for OMAP EHCI & OHCI
*
- * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2011-2013 Texas Instruments Incorporated - http://www.ti.com
* Author: Keshava Munegowda <keshava_mgowda@ti.com>
+ * Author: Roger Quadros <rogerq@ti.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 of
@@ -27,6 +28,9 @@
#include <linux/platform_device.h>
#include <linux/platform_data/usb-omap.h>
#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/err.h>
#include "omap-usb.h"
@@ -137,6 +141,49 @@ static inline u8 usbhs_readb(void __iomem *base, u8 reg)
/*-------------------------------------------------------------------------*/
+/**
+ * Map 'enum usbhs_omap_port_mode' found in <linux/platform_data/usb-omap.h>
+ * to the device tree binding portN-mode found in
+ * 'Documentation/devicetree/bindings/mfd/omap-usb-host.txt'
+ */
+static const char * const port_modes[] = {
+ [OMAP_USBHS_PORT_MODE_UNUSED] = "",
+ [OMAP_EHCI_PORT_MODE_PHY] = "ehci-phy",
+ [OMAP_EHCI_PORT_MODE_TLL] = "ehci-tll",
+ [OMAP_EHCI_PORT_MODE_HSIC] = "ehci-hsic",
+ [OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0] = "ohci-phy-6pin-datse0",
+ [OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM] = "ohci-phy-6pin-dpdm",
+ [OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0] = "ohci-phy-3pin-datse0",
+ [OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM] = "ohci-phy-4pin-dpdm",
+ [OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0] = "ohci-tll-6pin-datse0",
+ [OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM] = "ohci-tll-6pin-dpdm",
+ [OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0] = "ohci-tll-3pin-datse0",
+ [OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM] = "ohci-tll-4pin-dpdm",
+ [OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0] = "ohci-tll-2pin-datse0",
+ [OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM] = "ohci-tll-2pin-dpdm",
+};
+
+/**
+ * omap_usbhs_get_dt_port_mode - Get the 'enum usbhs_omap_port_mode'
+ * from the port mode string.
+ * @mode: The port mode string, usually obtained from device tree.
+ *
+ * The function returns the 'enum usbhs_omap_port_mode' that matches the
+ * provided port mode string as per the port_modes table.
+ * If no match is found it returns -ENODEV
+ */
+static int omap_usbhs_get_dt_port_mode(const char *mode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port_modes); i++) {
+ if (!strcmp(mode, port_modes[i]))
+ return i;
+ }
+
+ return -ENODEV;
+}
+
static struct platform_device *omap_usbhs_alloc_child(const char *name,
struct resource *res, int num_resources, void *pdata,
size_t pdata_size, struct device *dev)
@@ -278,7 +325,7 @@ static int usbhs_runtime_resume(struct device *dev)
dev_dbg(dev, "usbhs_runtime_resume\n");
- omap_tll_enable();
+ omap_tll_enable(pdata);
if (!IS_ERR(omap->ehci_logic_fck))
clk_enable(omap->ehci_logic_fck);
@@ -353,7 +400,7 @@ static int usbhs_runtime_suspend(struct device *dev)
if (!IS_ERR(omap->ehci_logic_fck))
clk_disable(omap->ehci_logic_fck);
- omap_tll_disable();
+ omap_tll_disable(pdata);
return 0;
}
@@ -430,24 +477,10 @@ static unsigned omap_usbhs_rev2_hostconfig(struct usbhs_hcd_omap *omap,
static void omap_usbhs_init(struct device *dev)
{
struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
- struct usbhs_omap_platform_data *pdata = omap->pdata;
unsigned reg;
dev_dbg(dev, "starting TI HSUSB Controller\n");
- if (pdata->phy_reset) {
- if (gpio_is_valid(pdata->reset_gpio_port[0]))
- gpio_request_one(pdata->reset_gpio_port[0],
- GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
-
- if (gpio_is_valid(pdata->reset_gpio_port[1]))
- gpio_request_one(pdata->reset_gpio_port[1],
- GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
-
- /* Hold the PHY in RESET for enough time till DIR is high */
- udelay(10);
- }
-
pm_runtime_get_sync(dev);
reg = usbhs_read(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
@@ -476,36 +509,59 @@ static void omap_usbhs_init(struct device *dev)
dev_dbg(dev, "UHH setup done, uhh_hostconfig=%x\n", reg);
pm_runtime_put_sync(dev);
- if (pdata->phy_reset) {
- /* Hold the PHY in RESET for enough time till
- * PHY is settled and ready
- */
- udelay(10);
+}
+
+static int usbhs_omap_get_dt_pdata(struct device *dev,
+ struct usbhs_omap_platform_data *pdata)
+{
+ int ret, i;
+ struct device_node *node = dev->of_node;
- if (gpio_is_valid(pdata->reset_gpio_port[0]))
- gpio_set_value_cansleep
- (pdata->reset_gpio_port[0], 1);
+ ret = of_property_read_u32(node, "num-ports", &pdata->nports);
+ if (ret)
+ pdata->nports = 0;
- if (gpio_is_valid(pdata->reset_gpio_port[1]))
- gpio_set_value_cansleep
- (pdata->reset_gpio_port[1], 1);
+ if (pdata->nports > OMAP3_HS_USB_PORTS) {
+ dev_warn(dev, "Too many num_ports <%d> in device tree. Max %d\n",
+ pdata->nports, OMAP3_HS_USB_PORTS);
+ return -ENODEV;
}
-}
-static void omap_usbhs_deinit(struct device *dev)
-{
- struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
- struct usbhs_omap_platform_data *pdata = omap->pdata;
+ /* get port modes */
+ for (i = 0; i < OMAP3_HS_USB_PORTS; i++) {
+ char prop[11];
+ const char *mode;
- if (pdata->phy_reset) {
- if (gpio_is_valid(pdata->reset_gpio_port[0]))
- gpio_free(pdata->reset_gpio_port[0]);
+ pdata->port_mode[i] = OMAP_USBHS_PORT_MODE_UNUSED;
+
+ snprintf(prop, sizeof(prop), "port%d-mode", i + 1);
+ ret = of_property_read_string(node, prop, &mode);
+ if (ret < 0)
+ continue;
+
+ ret = omap_usbhs_get_dt_port_mode(mode);
+ if (ret < 0) {
+ dev_warn(dev, "Invalid port%d-mode \"%s\" in device tree\n",
+ i + 1, mode);
+ return -ENODEV;
+ }
- if (gpio_is_valid(pdata->reset_gpio_port[1]))
- gpio_free(pdata->reset_gpio_port[1]);
+ dev_dbg(dev, "port%d-mode: %s -> %d\n", i, mode, ret);
+ pdata->port_mode[i] = ret;
}
+
+ /* get flags */
+ pdata->single_ulpi_bypass = of_property_read_bool(node,
+ "single-ulpi-bypass");
+
+ return 0;
}
+static struct of_device_id usbhs_child_match_table[] = {
+ { .compatible = "ti,omap-ehci", },
+ { .compatible = "ti,omap-ohci", },
+ { }
+};
/**
* usbhs_omap_probe - initialize TI-based HCDs
@@ -522,26 +578,46 @@ static int usbhs_omap_probe(struct platform_device *pdev)
int i;
bool need_logic_fck;
+ if (dev->of_node) {
+ /* For DT boot we populate platform data from OF node */
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ ret = usbhs_omap_get_dt_pdata(dev, pdata);
+ if (ret)
+ return ret;
+
+ dev->platform_data = pdata;
+ }
+
if (!pdata) {
dev_err(dev, "Missing platform data\n");
return -ENODEV;
}
+ if (pdata->nports > OMAP3_HS_USB_PORTS) {
+ dev_info(dev, "Too many num_ports <%d> in platform_data. Max %d\n",
+ pdata->nports, OMAP3_HS_USB_PORTS);
+ return -ENODEV;
+ }
+
omap = devm_kzalloc(dev, sizeof(*omap), GFP_KERNEL);
if (!omap) {
dev_err(dev, "Memory allocation failed\n");
return -ENOMEM;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "uhh");
- omap->uhh_base = devm_request_and_ioremap(dev, res);
- if (!omap->uhh_base) {
- dev_err(dev, "Resource request/ioremap failed\n");
- return -EADDRNOTAVAIL;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ omap->uhh_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(omap->uhh_base))
+ return PTR_ERR(omap->uhh_base);
omap->pdata = pdata;
+ /* Initialize the TLL subsystem */
+ omap_tll_init(pdata);
+
pm_runtime_enable(dev);
platform_set_drvdata(pdev, omap);
@@ -575,6 +651,7 @@ static int usbhs_omap_probe(struct platform_device *pdev)
omap->usbhs_rev, omap->nports);
break;
}
+ pdata->nports = omap->nports;
}
i = sizeof(struct clk *) * omap->nports;
@@ -700,17 +777,28 @@ static int usbhs_omap_probe(struct platform_device *pdev)
}
omap_usbhs_init(dev);
- ret = omap_usbhs_alloc_children(pdev);
- if (ret) {
- dev_err(dev, "omap_usbhs_alloc_children failed\n");
- goto err_alloc;
+
+ if (dev->of_node) {
+ ret = of_platform_populate(dev->of_node,
+ usbhs_child_match_table, NULL, dev);
+
+ if (ret) {
+ dev_err(dev, "Failed to create DT children: %d\n", ret);
+ goto err_alloc;
+ }
+
+ } else {
+ ret = omap_usbhs_alloc_children(pdev);
+ if (ret) {
+ dev_err(dev, "omap_usbhs_alloc_children failed: %d\n",
+ ret);
+ goto err_alloc;
+ }
}
return 0;
err_alloc:
- omap_usbhs_deinit(&pdev->dev);
-
for (i = 0; i < omap->nports; i++) {
if (!IS_ERR(omap->utmi_clk[i]))
clk_put(omap->utmi_clk[i]);
@@ -744,6 +832,13 @@ err_mem:
return ret;
}
+static int usbhs_omap_remove_child(struct device *dev, void *data)
+{
+ dev_info(dev, "unregistering\n");
+ platform_device_unregister(to_platform_device(dev));
+ return 0;
+}
+
/**
* usbhs_omap_remove - shutdown processing for UHH & TLL HCDs
* @pdev: USB Host Controller being removed
@@ -755,8 +850,6 @@ static int usbhs_omap_remove(struct platform_device *pdev)
struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
int i;
- omap_usbhs_deinit(&pdev->dev);
-
for (i = 0; i < omap->nports; i++) {
if (!IS_ERR(omap->utmi_clk[i]))
clk_put(omap->utmi_clk[i]);
@@ -777,6 +870,8 @@ static int usbhs_omap_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
+ /* remove children */
+ device_for_each_child(&pdev->dev, NULL, usbhs_omap_remove_child);
return 0;
}
@@ -785,16 +880,26 @@ static const struct dev_pm_ops usbhsomap_dev_pm_ops = {
.runtime_resume = usbhs_runtime_resume,
};
+static const struct of_device_id usbhs_omap_dt_ids[] = {
+ { .compatible = "ti,usbhs-host" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, usbhs_omap_dt_ids);
+
+
static struct platform_driver usbhs_omap_driver = {
.driver = {
.name = (char *)usbhs_driver_name,
.owner = THIS_MODULE,
.pm = &usbhsomap_dev_pm_ops,
+ .of_match_table = of_match_ptr(usbhs_omap_dt_ids),
},
.remove = usbhs_omap_remove,
};
MODULE_AUTHOR("Keshava Munegowda <keshava_mgowda@ti.com>");
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
MODULE_ALIAS("platform:" USBHS_DRIVER_NAME);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI");
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index 0aef1a7..e59ac4c 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -1,8 +1,9 @@
/**
* omap-usb-tll.c - The USB TLL driver for OMAP EHCI & OHCI
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2012-2013 Texas Instruments Incorporated - http://www.ti.com
* Author: Keshava Munegowda <keshava_mgowda@ti.com>
+ * Author: Roger Quadros <rogerq@ti.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 of
@@ -27,6 +28,7 @@
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/usb-omap.h>
+#include <linux/of.h>
#define USBTLL_DRIVER_NAME "usbhs_tll"
@@ -105,8 +107,8 @@
struct usbtll_omap {
int nch; /* num. of channels */
- struct usbhs_omap_platform_data *pdata;
struct clk **ch_clk;
+ void __iomem *base;
};
/*-------------------------------------------------------------------------*/
@@ -210,14 +212,10 @@ static unsigned ohci_omap3_fslsmode(enum usbhs_omap_port_mode mode)
static int usbtll_omap_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct usbhs_omap_platform_data *pdata = dev->platform_data;
- void __iomem *base;
struct resource *res;
struct usbtll_omap *tll;
- unsigned reg;
int ret = 0;
int i, ver;
- bool needs_tll;
dev_dbg(dev, "starting TI HSUSB TLL Controller\n");
@@ -227,26 +225,16 @@ static int usbtll_omap_probe(struct platform_device *pdev)
return -ENOMEM;
}
- if (!pdata) {
- dev_err(dev, "Platform data missing\n");
- return -ENODEV;
- }
-
- tll->pdata = pdata;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_request_and_ioremap(dev, res);
- if (!base) {
- ret = -EADDRNOTAVAIL;
- dev_err(dev, "Resource request/ioremap failed:%d\n", ret);
- return ret;
- }
+ tll->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(tll->base))
+ return PTR_ERR(tll->base);
platform_set_drvdata(pdev, tll);
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
- ver = usbtll_read(base, OMAP_USBTLL_REVISION);
+ ver = usbtll_read(tll->base, OMAP_USBTLL_REVISION);
switch (ver) {
case OMAP_USBTLL_REV1:
case OMAP_USBTLL_REV4:
@@ -283,11 +271,85 @@ static int usbtll_omap_probe(struct platform_device *pdev)
dev_dbg(dev, "can't get clock : %s\n", clkname);
}
+ pm_runtime_put_sync(dev);
+ /* only after this can omap_tll_enable/disable work */
+ spin_lock(&tll_lock);
+ tll_dev = dev;
+ spin_unlock(&tll_lock);
+
+ return 0;
+
+err_clk_alloc:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+/**
+ * usbtll_omap_remove - shutdown processing for UHH & TLL HCDs
+ * @pdev: USB Host Controller being removed
+ *
+ * Reverses the effect of usbtll_omap_probe().
+ */
+static int usbtll_omap_remove(struct platform_device *pdev)
+{
+ struct usbtll_omap *tll = platform_get_drvdata(pdev);
+ int i;
+
+ spin_lock(&tll_lock);
+ tll_dev = NULL;
+ spin_unlock(&tll_lock);
+
+ for (i = 0; i < tll->nch; i++)
+ if (!IS_ERR(tll->ch_clk[i]))
+ clk_put(tll->ch_clk[i]);
+
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id usbtll_omap_dt_ids[] = {
+ { .compatible = "ti,usbhs-tll" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, usbtll_omap_dt_ids);
+
+static struct platform_driver usbtll_omap_driver = {
+ .driver = {
+ .name = (char *)usbtll_driver_name,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(usbtll_omap_dt_ids),
+ },
+ .probe = usbtll_omap_probe,
+ .remove = usbtll_omap_remove,
+};
+
+int omap_tll_init(struct usbhs_omap_platform_data *pdata)
+{
+ int i;
+ bool needs_tll;
+ unsigned reg;
+ struct usbtll_omap *tll;
+
+ spin_lock(&tll_lock);
+
+ if (!tll_dev) {
+ spin_unlock(&tll_lock);
+ return -ENODEV;
+ }
+
+ tll = dev_get_drvdata(tll_dev);
+
needs_tll = false;
for (i = 0; i < tll->nch; i++)
needs_tll |= omap_usb_mode_needs_tll(pdata->port_mode[i]);
+ pm_runtime_get_sync(tll_dev);
+
if (needs_tll) {
+ void __iomem *base = tll->base;
/* Program Common TLL register */
reg = usbtll_read(base, OMAP_TLL_SHARED_CONF);
@@ -336,51 +398,29 @@ static int usbtll_omap_probe(struct platform_device *pdev)
}
}
- pm_runtime_put_sync(dev);
- /* only after this can omap_tll_enable/disable work */
- spin_lock(&tll_lock);
- tll_dev = dev;
+ pm_runtime_put_sync(tll_dev);
+
spin_unlock(&tll_lock);
return 0;
-
-err_clk_alloc:
- pm_runtime_put_sync(dev);
- pm_runtime_disable(dev);
-
- return ret;
}
+EXPORT_SYMBOL_GPL(omap_tll_init);
-/**
- * usbtll_omap_remove - shutdown processing for UHH & TLL HCDs
- * @pdev: USB Host Controller being removed
- *
- * Reverses the effect of usbtll_omap_probe().
- */
-static int usbtll_omap_remove(struct platform_device *pdev)
+int omap_tll_enable(struct usbhs_omap_platform_data *pdata)
{
- struct usbtll_omap *tll = platform_get_drvdata(pdev);
int i;
+ struct usbtll_omap *tll;
spin_lock(&tll_lock);
- tll_dev = NULL;
- spin_unlock(&tll_lock);
- for (i = 0; i < tll->nch; i++)
- if (!IS_ERR(tll->ch_clk[i]))
- clk_put(tll->ch_clk[i]);
-
- pm_runtime_disable(&pdev->dev);
- return 0;
-}
+ if (!tll_dev) {
+ spin_unlock(&tll_lock);
+ return -ENODEV;
+ }
-static int usbtll_runtime_resume(struct device *dev)
-{
- struct usbtll_omap *tll = dev_get_drvdata(dev);
- struct usbhs_omap_platform_data *pdata = tll->pdata;
- int i;
+ tll = dev_get_drvdata(tll_dev);
- dev_dbg(dev, "usbtll_runtime_resume\n");
+ pm_runtime_get_sync(tll_dev);
for (i = 0; i < tll->nch; i++) {
if (omap_usb_mode_needs_tll(pdata->port_mode[i])) {
@@ -391,22 +431,31 @@ static int usbtll_runtime_resume(struct device *dev)
r = clk_enable(tll->ch_clk[i]);
if (r) {
- dev_err(dev,
+ dev_err(tll_dev,
"Error enabling ch %d clock: %d\n", i, r);
}
}
}
+ spin_unlock(&tll_lock);
+
return 0;
}
+EXPORT_SYMBOL_GPL(omap_tll_enable);
-static int usbtll_runtime_suspend(struct device *dev)
+int omap_tll_disable(struct usbhs_omap_platform_data *pdata)
{
- struct usbtll_omap *tll = dev_get_drvdata(dev);
- struct usbhs_omap_platform_data *pdata = tll->pdata;
int i;
+ struct usbtll_omap *tll;
- dev_dbg(dev, "usbtll_runtime_suspend\n");
+ spin_lock(&tll_lock);
+
+ if (!tll_dev) {
+ spin_unlock(&tll_lock);
+ return -ENODEV;
+ }
+
+ tll = dev_get_drvdata(tll_dev);
for (i = 0; i < tll->nch; i++) {
if (omap_usb_mode_needs_tll(pdata->port_mode[i])) {
@@ -415,64 +464,16 @@ static int usbtll_runtime_suspend(struct device *dev)
}
}
- return 0;
-}
-
-static const struct dev_pm_ops usbtllomap_dev_pm_ops = {
- SET_RUNTIME_PM_OPS(usbtll_runtime_suspend,
- usbtll_runtime_resume,
- NULL)
-};
-
-static struct platform_driver usbtll_omap_driver = {
- .driver = {
- .name = (char *)usbtll_driver_name,
- .owner = THIS_MODULE,
- .pm = &usbtllomap_dev_pm_ops,
- },
- .probe = usbtll_omap_probe,
- .remove = usbtll_omap_remove,
-};
-
-int omap_tll_enable(void)
-{
- int ret;
-
- spin_lock(&tll_lock);
-
- if (!tll_dev) {
- pr_err("%s: OMAP USB TLL not initialized\n", __func__);
- ret = -ENODEV;
- } else {
- ret = pm_runtime_get_sync(tll_dev);
- }
-
- spin_unlock(&tll_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(omap_tll_enable);
-
-int omap_tll_disable(void)
-{
- int ret;
-
- spin_lock(&tll_lock);
-
- if (!tll_dev) {
- pr_err("%s: OMAP USB TLL not initialized\n", __func__);
- ret = -ENODEV;
- } else {
- ret = pm_runtime_put_sync(tll_dev);
- }
+ pm_runtime_put_sync(tll_dev);
spin_unlock(&tll_lock);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(omap_tll_disable);
MODULE_AUTHOR("Keshava Munegowda <keshava_mgowda@ti.com>");
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
MODULE_ALIAS("platform:" USBHS_DRIVER_NAME);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("usb tll driver for TI OMAP EHCI and OHCI controllers");
diff --git a/drivers/mfd/omap-usb.h b/drivers/mfd/omap-usb.h
index 972aa96..2a508b6 100644
--- a/drivers/mfd/omap-usb.h
+++ b/drivers/mfd/omap-usb.h
@@ -1,2 +1,3 @@
-extern int omap_tll_enable(void);
-extern int omap_tll_disable(void);
+extern int omap_tll_init(struct usbhs_omap_platform_data *pdata);
+extern int omap_tll_enable(struct usbhs_omap_platform_data *pdata);
+extern int omap_tll_disable(struct usbhs_omap_platform_data *pdata);
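
For reference, a minimal sketch of the call order this reworked TLL interface expects from the host driver, assuming a populated struct usbhs_omap_platform_data *pdata and with error handling trimmed:

	/* Once, from usbhs_omap_probe(): program the TLL channels. */
	ret = omap_tll_init(pdata);	/* returns -ENODEV until the TLL driver has probed */
	if (ret)
		return ret;

	/* Then from the host driver's runtime PM callbacks. */
	omap_tll_enable(pdata);		/* usbhs_runtime_resume() */
	/* ... host controllers active ... */
	omap_tll_disable(pdata);	/* usbhs_runtime_suspend() */
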
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 73bf76d..53e9fe6 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -278,20 +278,20 @@ static void palmas_dt_to_pdata(struct i2c_client *i2c,
int ret;
u32 prop;
- ret = of_property_read_u32(node, "ti,mux_pad1", &prop);
+ ret = of_property_read_u32(node, "ti,mux-pad1", &prop);
if (!ret) {
pdata->mux_from_pdata = 1;
pdata->pad1 = prop;
}
- ret = of_property_read_u32(node, "ti,mux_pad2", &prop);
+ ret = of_property_read_u32(node, "ti,mux-pad2", &prop);
if (!ret) {
pdata->mux_from_pdata = 1;
pdata->pad2 = prop;
}
/* The default for this register is all masked */
- ret = of_property_read_u32(node, "ti,power_ctrl", &prop);
+ ret = of_property_read_u32(node, "ti,power-ctrl", &prop);
if (!ret)
pdata->power_ctrl = prop;
else
@@ -349,6 +349,7 @@ static int palmas_i2c_probe(struct i2c_client *i2c,
ret = -ENOMEM;
goto err;
}
+ palmas->i2c_clients[i]->dev.of_node = of_node_get(node);
}
palmas->regmap[i] = devm_regmap_init_i2c(palmas->i2c_clients[i],
&palmas_regmap_config[i]);
diff --git a/drivers/mfd/retu-mfd.c b/drivers/mfd/retu-mfd.c
index 3ba0486..a183098 100644
--- a/drivers/mfd/retu-mfd.c
+++ b/drivers/mfd/retu-mfd.c
@@ -1,5 +1,5 @@
/*
- * Retu MFD driver
+ * Retu/Tahvo MFD driver
*
* Copyright (C) 2004, 2005 Nokia Corporation
*
@@ -33,7 +33,8 @@
#define RETU_REG_ASICR 0x00 /* ASIC ID and revision */
#define RETU_REG_ASICR_VILMA (1 << 7) /* Bit indicating Vilma */
#define RETU_REG_IDR 0x01 /* Interrupt ID */
-#define RETU_REG_IMR 0x02 /* Interrupt mask */
+#define RETU_REG_IMR 0x02 /* Interrupt mask (Retu) */
+#define TAHVO_REG_IMR 0x03 /* Interrupt mask (Tahvo) */
/* Interrupt sources */
#define RETU_INT_PWR 0 /* Power button */
@@ -84,6 +85,62 @@ static struct regmap_irq_chip retu_irq_chip = {
/* Retu device registered for the power off. */
static struct retu_dev *retu_pm_power_off;
+static struct resource tahvo_usb_res[] = {
+ {
+ .name = "tahvo-usb",
+ .start = TAHVO_INT_VBUS,
+ .end = TAHVO_INT_VBUS,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell tahvo_devs[] = {
+ {
+ .name = "tahvo-usb",
+ .resources = tahvo_usb_res,
+ .num_resources = ARRAY_SIZE(tahvo_usb_res),
+ },
+};
+
+static struct regmap_irq tahvo_irqs[] = {
+ [TAHVO_INT_VBUS] = {
+ .mask = 1 << TAHVO_INT_VBUS,
+ }
+};
+
+static struct regmap_irq_chip tahvo_irq_chip = {
+ .name = "TAHVO",
+ .irqs = tahvo_irqs,
+ .num_irqs = ARRAY_SIZE(tahvo_irqs),
+ .num_regs = 1,
+ .status_base = RETU_REG_IDR,
+ .mask_base = TAHVO_REG_IMR,
+ .ack_base = RETU_REG_IDR,
+};
+
+static const struct retu_data {
+ char *chip_name;
+ char *companion_name;
+ struct regmap_irq_chip *irq_chip;
+ struct mfd_cell *children;
+ int nchildren;
+} retu_data[] = {
+ [0] = {
+ .chip_name = "Retu",
+ .companion_name = "Vilma",
+ .irq_chip = &retu_irq_chip,
+ .children = retu_devs,
+ .nchildren = ARRAY_SIZE(retu_devs),
+ },
+ [1] = {
+ .chip_name = "Tahvo",
+ .companion_name = "Betty",
+ .irq_chip = &tahvo_irq_chip,
+ .children = tahvo_devs,
+ .nchildren = ARRAY_SIZE(tahvo_devs),
+ }
+};
+
int retu_read(struct retu_dev *rdev, u8 reg)
{
int ret;
@@ -173,9 +230,14 @@ static struct regmap_config retu_config = {
static int retu_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
{
+ struct retu_data const *rdat;
struct retu_dev *rdev;
int ret;
+ if (i2c->addr > ARRAY_SIZE(retu_data))
+ return -ENODEV;
+ rdat = &retu_data[i2c->addr - 1];
+
rdev = devm_kzalloc(&i2c->dev, sizeof(*rdev), GFP_KERNEL);
if (rdev == NULL)
return -ENOMEM;
@@ -190,25 +252,27 @@ static int retu_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
ret = retu_read(rdev, RETU_REG_ASICR);
if (ret < 0) {
- dev_err(rdev->dev, "could not read Retu revision: %d\n", ret);
+ dev_err(rdev->dev, "could not read %s revision: %d\n",
+ rdat->chip_name, ret);
return ret;
}
- dev_info(rdev->dev, "Retu%s v%d.%d found\n",
- (ret & RETU_REG_ASICR_VILMA) ? " & Vilma" : "",
+ dev_info(rdev->dev, "%s%s%s v%d.%d found\n", rdat->chip_name,
+ (ret & RETU_REG_ASICR_VILMA) ? " & " : "",
+ (ret & RETU_REG_ASICR_VILMA) ? rdat->companion_name : "",
(ret >> 4) & 0x7, ret & 0xf);
- /* Mask all RETU interrupts. */
- ret = retu_write(rdev, RETU_REG_IMR, 0xffff);
+ /* Mask all interrupts. */
+ ret = retu_write(rdev, rdat->irq_chip->mask_base, 0xffff);
if (ret < 0)
return ret;
ret = regmap_add_irq_chip(rdev->regmap, i2c->irq, IRQF_ONESHOT, -1,
- &retu_irq_chip, &rdev->irq_data);
+ rdat->irq_chip, &rdev->irq_data);
if (ret < 0)
return ret;
- ret = mfd_add_devices(rdev->dev, -1, retu_devs, ARRAY_SIZE(retu_devs),
+ ret = mfd_add_devices(rdev->dev, -1, rdat->children, rdat->nchildren,
NULL, regmap_irq_chip_get_base(rdev->irq_data),
NULL);
if (ret < 0) {
@@ -216,7 +280,7 @@ static int retu_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
return ret;
}
- if (!pm_power_off) {
+ if (i2c->addr == 1 && !pm_power_off) {
retu_pm_power_off = rdev;
pm_power_off = retu_power_off;
}
@@ -240,6 +304,7 @@ static int retu_remove(struct i2c_client *i2c)
static const struct i2c_device_id retu_id[] = {
{ "retu-mfd", 0 },
+ { "tahvo-mfd", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, retu_id);
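
The probe path above selects the chip variant from the I2C address (retu_data[addr - 1]: address 1 is Retu, address 2 is Tahvo). A minimal board-code sketch, where the bus number is an assumption:

	static struct i2c_board_info retu_tahvo_board_info[] __initdata = {
		{ I2C_BOARD_INFO("retu-mfd",  0x01) },	/* retu_data[0]: Retu (+ Vilma) */
		{ I2C_BOARD_INFO("tahvo-mfd", 0x02) },	/* retu_data[1]: Tahvo (+ Betty) */
	};

	/* e.g. i2c_register_board_info(1, retu_tahvo_board_info,
	 *			       ARRAY_SIZE(retu_tahvo_board_info)); */
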
diff --git a/drivers/mfd/rts5249.c b/drivers/mfd/rts5249.c
new file mode 100644
index 0000000..15dc848
--- /dev/null
+++ b/drivers/mfd/rts5249.c
@@ -0,0 +1,241 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG <wei_wang@realsil.com.cn>
+ * No. 128, West Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mfd/rtsx_pci.h>
+
+#include "rtsx_pcr.h"
+
+static u8 rts5249_get_ic_version(struct rtsx_pcr *pcr)
+{
+ u8 val;
+
+ rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
+ return val & 0x0F;
+}
+
+static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_init_cmd(pcr);
+
+ /* Configure GPIO as output */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, GPIO_CTL, 0x02, 0x02);
+ /* Switch LDO3318 source from DV33 to card_3v3 */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x00);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x01);
+ /* LED shine disabled, set initial shine cycle period */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OLT_LED_CTL, 0x0F, 0x02);
+ /* Correct driving */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ SD30_CLK_DRIVE_SEL, 0xFF, 0x99);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ SD30_CMD_DRIVE_SEL, 0xFF, 0x99);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ SD30_DAT_DRIVE_SEL, 0xFF, 0x92);
+
+ return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rts5249_optimize_phy(struct rtsx_pcr *pcr)
+{
+ int err;
+
+ err = rtsx_pci_write_phy_register(pcr, PHY_REG_REV, 0xFE46);
+ if (err < 0)
+ return err;
+
+ msleep(1);
+
+ return rtsx_pci_write_phy_register(pcr, PHY_BPCR, 0x05C0);
+}
+
+static int rts5249_turn_on_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x02);
+}
+
+static int rts5249_turn_off_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x00);
+}
+
+static int rts5249_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x08);
+}
+
+static int rts5249_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x00);
+}
+
+static int rts5249_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+ int err;
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+ SD_POWER_MASK, SD_VCC_PARTIAL_POWER_ON);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, 0x02);
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ msleep(5);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+ SD_POWER_MASK, SD_VCC_POWER_ON);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, 0x06);
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int rts5249_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+ SD_POWER_MASK, SD_POWER_OFF);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, 0x00);
+ return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rts5249_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ int err;
+ u8 clk_drive, cmd_drive, dat_drive;
+
+ if (voltage == OUTPUT_3V3) {
+ err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, 0x4FC0 | 0x24);
+ if (err < 0)
+ return err;
+ clk_drive = 0x99;
+ cmd_drive = 0x99;
+ dat_drive = 0x92;
+ } else if (voltage == OUTPUT_1V8) {
+ err = rtsx_pci_write_phy_register(pcr, PHY_BACR, 0x3C02);
+ if (err < 0)
+ return err;
+ err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, 0x4C40 | 0x24);
+ if (err < 0)
+ return err;
+ clk_drive = 0xb3;
+ cmd_drive = 0xb3;
+ dat_drive = 0xb3;
+ } else {
+ return -EINVAL;
+ }
+
+ /* set pad drive */
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
+ 0xFF, clk_drive);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
+ 0xFF, cmd_drive);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
+ 0xFF, dat_drive);
+ return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static const struct pcr_ops rts5249_pcr_ops = {
+ .extra_init_hw = rts5249_extra_init_hw,
+ .optimize_phy = rts5249_optimize_phy,
+ .turn_on_led = rts5249_turn_on_led,
+ .turn_off_led = rts5249_turn_off_led,
+ .enable_auto_blink = rts5249_enable_auto_blink,
+ .disable_auto_blink = rts5249_disable_auto_blink,
+ .card_power_on = rts5249_card_power_on,
+ .card_power_off = rts5249_card_power_off,
+ .switch_output_voltage = rts5249_switch_output_voltage,
+};
+
+/* SD Pull Control Enable:
+ * SD_DAT[3:0] ==> pull up
+ * SD_CD ==> pull up
+ * SD_WP ==> pull up
+ * SD_CMD ==> pull up
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5249_sd_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66),
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0xAA),
+ 0,
+};
+
+/* SD Pull Control Disable:
+ * SD_DAT[3:0] ==> pull down
+ * SD_CD ==> pull up
+ * SD_WP ==> pull down
+ * SD_CMD ==> pull down
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5249_sd_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66),
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+ 0,
+};
+
+/* MS Pull Control Enable:
+ * MS CD ==> pull up
+ * others ==> pull down
+ */
+static const u32 rts5249_ms_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+ 0,
+};
+
+/* MS Pull Control Disable:
+ * MS CD ==> pull up
+ * others ==> pull down
+ */
+static const u32 rts5249_ms_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+ 0,
+};
+
+void rts5249_init_params(struct rtsx_pcr *pcr)
+{
+ pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+ pcr->num_slots = 2;
+ pcr->ops = &rts5249_pcr_ops;
+
+ pcr->ic_version = rts5249_get_ic_version(pcr);
+ pcr->sd_pull_ctl_enable_tbl = rts5249_sd_pull_ctl_enable_tbl;
+ pcr->sd_pull_ctl_disable_tbl = rts5249_sd_pull_ctl_disable_tbl;
+ pcr->ms_pull_ctl_enable_tbl = rts5249_ms_pull_ctl_enable_tbl;
+ pcr->ms_pull_ctl_disable_tbl = rts5249_ms_pull_ctl_disable_tbl;
+}
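
The pull-control tables above are zero-terminated lists of RTSX_REG_PAIR entries. Assuming RTSX_REG_PAIR() packs the register address into the upper 16 bits and the value into the low byte, a hypothetical walker (sketch only, not necessarily how rtsx_pcr.c applies them) would look like:

	static int rts5249_apply_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
	{
		rtsx_pci_init_cmd(pcr);
		for (; *tbl; tbl++)
			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
					 (u16)(*tbl >> 16), 0xFF, (u8)*tbl);
		return rtsx_pci_send_cmd(pcr, 100);
	}
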
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index 2f12cc1..e968c01 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -56,6 +56,7 @@ static DEFINE_PCI_DEVICE_TABLE(rtsx_pci_ids) = {
{ PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ 0, }
};
@@ -1033,6 +1034,10 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
case 0x5227:
rts5227_init_params(pcr);
break;
+
+ case 0x5249:
+ rts5249_init_params(pcr);
+ break;
}
dev_dbg(&(pcr->pci->dev), "PID: 0x%04x, IC version: 0x%02x\n",
@@ -1138,7 +1143,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
ret = rtsx_pci_acquire_irq(pcr);
if (ret < 0)
- goto free_dma;
+ goto disable_msi;
pci_set_master(pcidev);
synchronize_irq(pcr->irq);
@@ -1162,7 +1167,9 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
disable_irq:
free_irq(pcr->irq, (void *)pcr);
-free_dma:
+disable_msi:
+ if (pcr->msi_en)
+ pci_disable_msi(pcr->pci);
dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
unmap:
diff --git a/drivers/mfd/rtsx_pcr.h b/drivers/mfd/rtsx_pcr.h
index 2b3ab8a..55fcfc2 100644
--- a/drivers/mfd/rtsx_pcr.h
+++ b/drivers/mfd/rtsx_pcr.h
@@ -32,5 +32,6 @@ void rts5209_init_params(struct rtsx_pcr *pcr);
void rts5229_init_params(struct rtsx_pcr *pcr);
void rtl8411_init_params(struct rtsx_pcr *pcr);
void rts5227_init_params(struct rtsx_pcr *pcr);
+void rts5249_init_params(struct rtsx_pcr *pcr);
#endif
diff --git a/drivers/mfd/si476x-cmd.c b/drivers/mfd/si476x-cmd.c
new file mode 100644
index 0000000..de48b4e
--- /dev/null
+++ b/drivers/mfd/si476x-cmd.c
@@ -0,0 +1,1553 @@
+/*
+ * drivers/mfd/si476x-cmd.c -- Subroutines implementing command
+ * protocol of si476x series of chips
+ *
+ * Copyright (C) 2012 Innovative Converged Devices(ICD)
+ * Copyright (C) 2013 Andrey Smirnov
+ *
+ * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/i2c.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/videodev2.h>
+
+#include <linux/mfd/si476x-core.h>
+
+#define msb(x) ((u8)((u16)(x) >> 8))
+#define lsb(x) ((u8)((u16)(x) & 0x00FF))
+
+
+
+#define CMD_POWER_UP 0x01
+#define CMD_POWER_UP_A10_NRESP 1
+#define CMD_POWER_UP_A10_NARGS 5
+
+#define CMD_POWER_UP_A20_NRESP 1
+#define CMD_POWER_UP_A20_NARGS 5
+
+#define POWER_UP_DELAY_MS 110
+
+#define CMD_POWER_DOWN 0x11
+#define CMD_POWER_DOWN_A10_NRESP 1
+
+#define CMD_POWER_DOWN_A20_NRESP 1
+#define CMD_POWER_DOWN_A20_NARGS 1
+
+#define CMD_FUNC_INFO 0x12
+#define CMD_FUNC_INFO_NRESP 7
+
+#define CMD_SET_PROPERTY 0x13
+#define CMD_SET_PROPERTY_NARGS 5
+#define CMD_SET_PROPERTY_NRESP 1
+
+#define CMD_GET_PROPERTY 0x14
+#define CMD_GET_PROPERTY_NARGS 3
+#define CMD_GET_PROPERTY_NRESP 4
+
+#define CMD_AGC_STATUS 0x17
+#define CMD_AGC_STATUS_NRESP_A10 2
+#define CMD_AGC_STATUS_NRESP_A20 6
+
+#define PIN_CFG_BYTE(x) (0x7F & (x))
+#define CMD_DIG_AUDIO_PIN_CFG 0x18
+#define CMD_DIG_AUDIO_PIN_CFG_NARGS 4
+#define CMD_DIG_AUDIO_PIN_CFG_NRESP 5
+
+#define CMD_ZIF_PIN_CFG 0x19
+#define CMD_ZIF_PIN_CFG_NARGS 4
+#define CMD_ZIF_PIN_CFG_NRESP 5
+
+#define CMD_IC_LINK_GPO_CTL_PIN_CFG 0x1A
+#define CMD_IC_LINK_GPO_CTL_PIN_CFG_NARGS 4
+#define CMD_IC_LINK_GPO_CTL_PIN_CFG_NRESP 5
+
+#define CMD_ANA_AUDIO_PIN_CFG 0x1B
+#define CMD_ANA_AUDIO_PIN_CFG_NARGS 1
+#define CMD_ANA_AUDIO_PIN_CFG_NRESP 2
+
+#define CMD_INTB_PIN_CFG 0x1C
+#define CMD_INTB_PIN_CFG_NARGS 2
+#define CMD_INTB_PIN_CFG_A10_NRESP 6
+#define CMD_INTB_PIN_CFG_A20_NRESP 3
+
+#define CMD_FM_TUNE_FREQ 0x30
+#define CMD_FM_TUNE_FREQ_A10_NARGS 5
+#define CMD_FM_TUNE_FREQ_A20_NARGS 3
+#define CMD_FM_TUNE_FREQ_NRESP 1
+
+#define CMD_FM_RSQ_STATUS 0x32
+
+#define CMD_FM_RSQ_STATUS_A10_NARGS 1
+#define CMD_FM_RSQ_STATUS_A10_NRESP 17
+#define CMD_FM_RSQ_STATUS_A30_NARGS 1
+#define CMD_FM_RSQ_STATUS_A30_NRESP 23
+
+
+#define CMD_FM_SEEK_START 0x31
+#define CMD_FM_SEEK_START_NARGS 1
+#define CMD_FM_SEEK_START_NRESP 1
+
+#define CMD_FM_RDS_STATUS 0x36
+#define CMD_FM_RDS_STATUS_NARGS 1
+#define CMD_FM_RDS_STATUS_NRESP 16
+
+#define CMD_FM_RDS_BLOCKCOUNT 0x37
+#define CMD_FM_RDS_BLOCKCOUNT_NARGS 1
+#define CMD_FM_RDS_BLOCKCOUNT_NRESP 8
+
+#define CMD_FM_PHASE_DIVERSITY 0x38
+#define CMD_FM_PHASE_DIVERSITY_NARGS 1
+#define CMD_FM_PHASE_DIVERSITY_NRESP 1
+
+#define CMD_FM_PHASE_DIV_STATUS 0x39
+#define CMD_FM_PHASE_DIV_STATUS_NRESP 2
+
+#define CMD_AM_TUNE_FREQ 0x40
+#define CMD_AM_TUNE_FREQ_NARGS 3
+#define CMD_AM_TUNE_FREQ_NRESP 1
+
+#define CMD_AM_RSQ_STATUS 0x42
+#define CMD_AM_RSQ_STATUS_NARGS 1
+#define CMD_AM_RSQ_STATUS_NRESP 13
+
+#define CMD_AM_SEEK_START 0x41
+#define CMD_AM_SEEK_START_NARGS 1
+#define CMD_AM_SEEK_START_NRESP 1
+
+
+#define CMD_AM_ACF_STATUS 0x45
+#define CMD_AM_ACF_STATUS_NRESP 6
+#define CMD_AM_ACF_STATUS_NARGS 1
+
+#define CMD_FM_ACF_STATUS 0x35
+#define CMD_FM_ACF_STATUS_NRESP 8
+#define CMD_FM_ACF_STATUS_NARGS 1
+
+#define CMD_MAX_ARGS_COUNT (10)
+
+
+enum si476x_acf_status_report_bits {
+ SI476X_ACF_BLEND_INT = (1 << 4),
+ SI476X_ACF_HIBLEND_INT = (1 << 3),
+ SI476X_ACF_HICUT_INT = (1 << 2),
+ SI476X_ACF_CHBW_INT = (1 << 1),
+ SI476X_ACF_SOFTMUTE_INT = (1 << 0),
+
+ SI476X_ACF_SMUTE = (1 << 0),
+ SI476X_ACF_SMATTN = 0b11111,
+ SI476X_ACF_PILOT = (1 << 7),
+ SI476X_ACF_STBLEND = ~SI476X_ACF_PILOT,
+};
+
+enum si476x_agc_status_report_bits {
+ SI476X_AGC_MXHI = (1 << 5),
+ SI476X_AGC_MXLO = (1 << 4),
+ SI476X_AGC_LNAHI = (1 << 3),
+ SI476X_AGC_LNALO = (1 << 2),
+};
+
+enum si476x_errors {
+ SI476X_ERR_BAD_COMMAND = 0x10,
+ SI476X_ERR_BAD_ARG1 = 0x11,
+ SI476X_ERR_BAD_ARG2 = 0x12,
+ SI476X_ERR_BAD_ARG3 = 0x13,
+ SI476X_ERR_BAD_ARG4 = 0x14,
+ SI476X_ERR_BUSY = 0x18,
+ SI476X_ERR_BAD_INTERNAL_MEMORY = 0x20,
+ SI476X_ERR_BAD_PATCH = 0x30,
+ SI476X_ERR_BAD_BOOT_MODE = 0x31,
+ SI476X_ERR_BAD_PROPERTY = 0x40,
+};
+
+static int si476x_core_parse_and_nag_about_error(struct si476x_core *core)
+{
+ int err;
+ char *cause;
+ u8 buffer[2];
+
+ if (core->revision != SI476X_REVISION_A10) {
+ err = si476x_core_i2c_xfer(core, SI476X_I2C_RECV,
+ buffer, sizeof(buffer));
+ if (err == sizeof(buffer)) {
+ switch (buffer[1]) {
+ case SI476X_ERR_BAD_COMMAND:
+ cause = "Bad command";
+ err = -EINVAL;
+ break;
+ case SI476X_ERR_BAD_ARG1:
+ cause = "Bad argument #1";
+ err = -EINVAL;
+ break;
+ case SI476X_ERR_BAD_ARG2:
+ cause = "Bad argument #2";
+ err = -EINVAL;
+ break;
+ case SI476X_ERR_BAD_ARG3:
+ cause = "Bad argument #3";
+ err = -EINVAL;
+ break;
+ case SI476X_ERR_BAD_ARG4:
+ cause = "Bad argument #4";
+ err = -EINVAL;
+ break;
+ case SI476X_ERR_BUSY:
+ cause = "Chip is busy";
+ err = -EBUSY;
+ break;
+ case SI476X_ERR_BAD_INTERNAL_MEMORY:
+ cause = "Bad internal memory";
+ err = -EIO;
+ break;
+ case SI476X_ERR_BAD_PATCH:
+ cause = "Bad patch";
+ err = -EINVAL;
+ break;
+ case SI476X_ERR_BAD_BOOT_MODE:
+ cause = "Bad boot mode";
+ err = -EINVAL;
+ break;
+ case SI476X_ERR_BAD_PROPERTY:
+ cause = "Bad property";
+ err = -EINVAL;
+ break;
+ default:
+ cause = "Unknown";
+ err = -EIO;
+ }
+
+ dev_err(&core->client->dev,
+ "[Chip error status]: %s\n", cause);
+ } else {
+ dev_err(&core->client->dev,
+ "Failed to fetch error code\n");
+ err = (err >= 0) ? -EIO : err;
+ }
+ } else {
+ err = -EIO;
+ }
+
+ return err;
+}
+
+/**
+ * si476x_core_send_command() - sends a command to si476x and waits for
+ * its response
+ * @core: si476x_device structure for the device we are
+ * communicating with
+ * @command: command id
+ * @args: command arguments we are sending
+ * @argn: actual size of @args
+ * @response: buffer to place the expected response from the device
+ * @respn: actual size of @response
+ * @usecs: amount of time to wait before reading the response (in
+ * usecs)
+ *
+ * Function returns 0 on success and negative error code on
+ * failure
+ */
+static int si476x_core_send_command(struct si476x_core *core,
+ const u8 command,
+ const u8 args[],
+ const int argn,
+ u8 resp[],
+ const int respn,
+ const int usecs)
+{
+ struct i2c_client *client = core->client;
+ int err;
+ u8 data[CMD_MAX_ARGS_COUNT + 1];
+
+ if (argn > CMD_MAX_ARGS_COUNT) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ if (!client->adapter) {
+ err = -ENODEV;
+ goto exit;
+ }
+
+ /* First send the command and its arguments */
+ data[0] = command;
+ memcpy(&data[1], args, argn);
+ dev_dbg(&client->dev, "Command:\n %*ph\n", argn + 1, data);
+
+ err = si476x_core_i2c_xfer(core, SI476X_I2C_SEND,
+ (char *) data, argn + 1);
+ if (err != argn + 1) {
+ dev_err(&core->client->dev,
+ "Error while sending command 0x%02x\n",
+ command);
+ err = (err >= 0) ? -EIO : err;
+ goto exit;
+ }
+ /* Set CTS to zero only after the command is sent to avoid
+ * possible race conditions when working in polling mode */
+ atomic_set(&core->cts, 0);
+
+ /* if (unlikely(command == CMD_POWER_DOWN) */
+ if (!wait_event_timeout(core->command,
+ atomic_read(&core->cts),
+ usecs_to_jiffies(usecs) + 1))
+ dev_warn(&core->client->dev,
+ "(%s) [CMD 0x%02x] Answer timeout.\n",
+ __func__, command);
+
+ /*
+ * When working in polling mode, for some reason the tuner will
+ * report the CTS bit as set in the first status byte read,
+ * but all the consecutive ones will return zeros until the
+ * tuner has actually completed the POWER_UP command. To work
+ * around that we wait for a second CTS to be reported.
+ */
+ if (unlikely(!core->client->irq && command == CMD_POWER_UP)) {
+ if (!wait_event_timeout(core->command,
+ atomic_read(&core->cts),
+ usecs_to_jiffies(usecs) + 1))
+ dev_warn(&core->client->dev,
+ "(%s) Power up took too much time.\n",
+ __func__);
+ }
+
+ /* Then get the response */
+ err = si476x_core_i2c_xfer(core, SI476X_I2C_RECV, resp, respn);
+ if (err != respn) {
+ dev_err(&core->client->dev,
+ "Error while reading response for command 0x%02x\n",
+ command);
+ err = (err >= 0) ? -EIO : err;
+ goto exit;
+ }
+ dev_dbg(&client->dev, "Response:\n %*ph\n", respn, resp);
+
+ err = 0;
+
+ if (resp[0] & SI476X_ERR) {
+ dev_err(&core->client->dev,
+ "[CMD 0x%02x] Chip set error flag\n", command);
+ err = si476x_core_parse_and_nag_about_error(core);
+ goto exit;
+ }
+
+ if (!(resp[0] & SI476X_CTS))
+ err = -EBUSY;
+exit:
+ return err;
+}
+
+static int si476x_cmd_clear_stc(struct si476x_core *core)
+{
+ int err;
+ struct si476x_rsq_status_args args = {
+ .primary = false,
+ .rsqack = false,
+ .attune = false,
+ .cancel = false,
+ .stcack = true,
+ };
+
+ switch (core->power_up_parameters.func) {
+ case SI476X_FUNC_FM_RECEIVER:
+ err = si476x_core_cmd_fm_rsq_status(core, &args, NULL);
+ break;
+ case SI476X_FUNC_AM_RECEIVER:
+ err = si476x_core_cmd_am_rsq_status(core, &args, NULL);
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int si476x_cmd_tune_seek_freq(struct si476x_core *core,
+ uint8_t cmd,
+ const uint8_t args[], size_t argn,
+ uint8_t *resp, size_t respn)
+{
+ int err;
+
+
+ atomic_set(&core->stc, 0);
+ err = si476x_core_send_command(core, cmd, args, argn, resp, respn,
+ SI476X_TIMEOUT_TUNE);
+ if (!err) {
+ wait_event_killable(core->tuning,
+ atomic_read(&core->stc));
+ si476x_cmd_clear_stc(core);
+ }
+
+ return err;
+}
+
+/**
+ * si476x_cmd_func_info() - send 'FUNC_INFO' command to the device
+ * @core: device to send the command to
+ * @info: struct si476x_func_info to fill all the information
+ * returned by the command
+ *
+ * The command requests the firmware and patch version for currently
+ * loaded firmware (dependent on the function of the device FM/AM/WB)
+ *
+ * Function returns 0 on success and negative error code on
+ * failure
+ */
+int si476x_core_cmd_func_info(struct si476x_core *core,
+ struct si476x_func_info *info)
+{
+ int err;
+ u8 resp[CMD_FUNC_INFO_NRESP];
+
+ err = si476x_core_send_command(core, CMD_FUNC_INFO,
+ NULL, 0,
+ resp, ARRAY_SIZE(resp),
+ SI476X_DEFAULT_TIMEOUT);
+
+ info->firmware.major = resp[1];
+ info->firmware.minor[0] = resp[2];
+ info->firmware.minor[1] = resp[3];
+
+ info->patch_id = ((u16) resp[4] << 8) | resp[5];
+ info->func = resp[6];
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(si476x_core_cmd_func_info);
+
+/**
+ * si476x_cmd_set_property() - send 'SET_PROPERTY' command to the device
+ * @core: device to send the command to
+ * @property: property address
+ * @value: property value
+ *
+ * Function returns 0 on success and negative error code on
+ * failure
+ */
+int si476x_core_cmd_set_property(struct si476x_core *core,
+ u16 property, u16 value)
+{
+ u8 resp[CMD_SET_PROPERTY_NRESP];
+ const u8 args[CMD_SET_PROPERTY_NARGS] = {
+ 0x00,
+ msb(property),
+ lsb(property),
+ msb(value),
+ lsb(value),
+ };
+
+ return si476x_core_send_command(core, CMD_SET_PROPERTY,
+ args, ARRAY_SIZE(args),
+ resp, ARRAY_SIZE(resp),
+ SI476X_DEFAULT_TIMEOUT);
+}
+EXPORT_SYMBOL_GPL(si476x_core_cmd_set_property);
+
+/**
+ * si476x_cmd_get_property() - send 'GET_PROPERTY' command to the device
+ * @core: device to send the command to
+ * @property: property address
+ *
+ * Function returns the value of the property as a u16 on success or a
+ * negative error on failure
+ */
+int si476x_core_cmd_get_property(struct si476x_core *core, u16 property)
+{
+ int err;
+ u8 resp[CMD_GET_PROPERTY_NRESP];
+ const u8 args[CMD_GET_PROPERTY_NARGS] = {
+ 0x00,
+ msb(property),
+ lsb(property),
+ };
+
+ err = si476x_core_send_command(core, CMD_GET_PROPERTY,
+ args, ARRAY_SIZE(args),
+ resp, ARRAY_SIZE(resp),
+ SI476X_DEFAULT_TIMEOUT);
+ if (err < 0)
+ return err;
+ else
+ return be16_to_cpup((__be16 *)(resp + 2));
+}
+EXPORT_SYMBOL_GPL(si476x_core_cmd_get_property);
+
+/**
+ * si476x_cmd_dig_audio_pin_cfg() - send 'DIG_AUDIO_PIN_CFG' command to
+ * the device
+ * @core: device to send the command to
+ * @dclk: DCLK pin function configuration:
+ * #SI476X_DCLK_NOOP - do not modify the behaviour
+ * #SI476X_DCLK_TRISTATE - put the pin in tristate condition,
+ * enable 1MOhm pulldown
+ * #SI476X_DCLK_DAUDIO - set the pin to be a part of digital
+ * audio interface
+ * @dfs: DFS pin function configuration:
+ * #SI476X_DFS_NOOP - do not modify the behaviour
+ * #SI476X_DFS_TRISTATE - put the pin in tristate condition,
+ * enable 1MOhm pulldown
+ * SI476X_DFS_DAUDIO - set the pin to be a part of digital
+ * audio interface
+ * @dout - DOUT pin function configuration:
+ * SI476X_DOUT_NOOP - do not modify the behaviour
+ * SI476X_DOUT_TRISTATE - put the pin in tristate condition,
+ * enable 1MOhm pulldown
+ * SI476X_DOUT_I2S_OUTPUT - set this pin to be digital out on I2S
+ * port 1
+ * SI476X_DOUT_I2S_INPUT - set this pin to be digital in on I2S
+ * port 1
+ * @xout - XOUT pin function configuration:
+ * SI476X_XOUT_NOOP - do not modify the behaviour
+ * SI476X_XOUT_TRISTATE - put the pin in tristate condition,
+ * enable 1MOhm pulldown
+ * SI476X_XOUT_I2S_INPUT - set this pin to be digital in on I2S
+ * port 1
+ * SI476X_XOUT_MODE_SELECT - set this pin to be the input that
+ * selects the mode of the I2S audio
+ * combiner (analog or HD)
+ * [SI4761/63/65/67 Only]
+ *
+ * Function returns 0 on success and negative error code on failure
+ */
+int si476x_core_cmd_dig_audio_pin_cfg(struct si476x_core *core,
+ enum si476x_dclk_config dclk,
+ enum si476x_dfs_config dfs,
+ enum si476x_dout_config dout,
+ enum si476x_xout_config xout)
+{
+ u8 resp[CMD_DIG_AUDIO_PIN_CFG_NRESP];
+ const u8 args[CMD_DIG_AUDIO_PIN_CFG_NARGS] = {
+ PIN_CFG_BYTE(dclk),
+ PIN_CFG_BYTE(dfs),
+ PIN_CFG_BYTE(dout),
+ PIN_CFG_BYTE(xout),
+ };
+
+ return si476x_core_send_command(core, CMD_DIG_AUDIO_PIN_CFG,
+ args, ARRAY_SIZE(args),
+ resp, ARRAY_SIZE(resp),
+ SI476X_DEFAULT_TIMEOUT);
+}
+EXPORT_SYMBOL_GPL(si476x_core_cmd_dig_audio_pin_cfg);
+
+/**
+ * si476x_cmd_zif_pin_cfg - send 'ZIF_PIN_CFG_COMMAND'
+ * @core - device to send the command to
+ * @iqclk - IQCL pin function configuration:
+ * SI476X_IQCLK_NOOP - do not modify the behaviour
+ * SI476X_IQCLK_TRISTATE - put the pin in tristate condition,
+ * enable 1MOhm pulldown
+ * SI476X_IQCLK_IQ - set pin to be a part of I/Q interface
+ * in master mode
+ * @iqfs - IQFS pin function configuration:
+ * SI476X_IQFS_NOOP - do not modify the behaviour
+ * SI476X_IQFS_TRISTATE - put the pin in tristate condition,
+ * enable 1MOhm pulldown
+ * SI476X_IQFS_IQ - set pin to be a part of I/Q interface
+ * in master mode
+ * @iout - IOUT pin function configuration:
+ * SI476X_IOUT_NOOP - do not modify the behaviour
+ * SI476X_IOUT_TRISTATE - put the pin in tristate condition,
+ * enable 1MOhm pulldown
+ * SI476X_IOUT_OUTPUT - set pin to be I out
+ * @qout - QOUT pin function configuration:
+ * SI476X_QOUT_NOOP - do not modify the behaviour
+ * SI476X_QOUT_TRISTATE - put the pin in tristate condition,
+ * enable 1MOhm pulldown
+ * SI476X_QOUT_OUTPUT - set pin to be Q out
+ *
+ * Function returns 0 on success and negative error code on failure
+ */
+int si476x_core_cmd_zif_pin_cfg(struct si476x_core *core,
+ enum si476x_iqclk_config iqclk,
+ enum si476x_iqfs_config iqfs,
+ enum si476x_iout_config iout,
+ enum si476x_qout_config qout)
+{
+ u8 resp[CMD_ZIF_PIN_CFG_NRESP];
+ const u8 args[CMD_ZIF_PIN_CFG_NARGS] = {
+ PIN_CFG_BYTE(iqclk),
+ PIN_CFG_BYTE(iqfs),
+ PIN_CFG_BYTE(iout),
+ PIN_CFG_BYTE(qout),
+ };
+
+ return si476x_core_send_command(core, CMD_ZIF_PIN_CFG,
+ args, ARRAY_SIZE(args),
+ resp, ARRAY_SIZE(resp),
+ SI476X_DEFAULT_TIMEOUT);
+}
+EXPORT_SYMBOL_GPL(si476x_core_cmd_zif_pin_cfg);
+
+/**
+ * si476x_cmd_ic_link_gpo_ctl_pin_cfg - send
+ * 'IC_LINK_GPO_CTL_PIN_CFG' command to the device
+ * @core - device to send the command to
+ * @icin - ICIN pin function configuration:
+ * SI476X_ICIN_NOOP - do not modify the behaviour
+ * SI476X_ICIN_TRISTATE - put the pin in tristate condition,
+ * enable 1MOhm pulldown
+ * SI476X_ICIN_GPO1_HIGH - set pin to be an output, drive it high
+ * SI476X_ICIN_GPO1_LOW - set pin to be an output, drive it low
+ * SI476X_ICIN_IC_LINK - set the pin to be a part of Inter-Chip link
+ * @icip - ICIP pin function configuration:
+ * SI476X_ICIP_NOOP - do not modify the behaviour
+ * SI476X_ICIP_TRISTATE - put the pin in tristate condition,
+ * enable 1MOhm pulldown
+ * SI476X_ICIP_GPO1_HIGH - set pin to be an output, drive it high
+ * SI476X_ICIP_GPO1_LOW - set pin to be an output, drive it low
+ * SI476X_ICIP_IC_LINK - set the pin to be a part of Inter-Chip link
+ * @icon - ICON pin function configuration:
+ * SI476X_ICON_NOOP - do not modify the behaviour
+ * SI476X_ICON_TRISTATE - put the pin in tristate condition,
+ * enable 1MOhm pulldown
+ * SI476X_ICON_I2S - set the pin to be a part of audio
+ * interface in slave mode (DCLK)
+ * SI476X_ICON_IC_LINK - set the pin to be a part of Inter-Chip link
+ * @icop - ICOP pin function configuration:
+ * SI476X_ICOP_NOOP - do not modify the behaviour
+ * SI476X_ICOP_TRISTATE - put the pin in tristate condition,
+ * enable 1MOhm pulldown
+ * SI476X_ICOP_I2S - set the pin to be a part of audio
+ * interface in slave mode (DOUT)
+ * [Si4761/63/65/67 Only]
+ * SI476X_ICOP_IC_LINK - set the pin to be a part of Inter-Chip link
+ *
+ * Function returns 0 on success and negative error code on failure
+ */
+int si476x_core_cmd_ic_link_gpo_ctl_pin_cfg(struct si476x_core *core,
+ enum si476x_icin_config icin,
+ enum si476x_icip_config icip,
+ enum si476x_icon_config icon,
+ enum si476x_icop_config icop)
+{
+ u8 resp[CMD_IC_LINK_GPO_CTL_PIN_CFG_NRESP];
+ const u8 args[CMD_IC_LINK_GPO_CTL_PIN_CFG_NARGS] = {
+ PIN_CFG_BYTE(icin),
+ PIN_CFG_BYTE(icip),
+ PIN_CFG_BYTE(icon),
+ PIN_CFG_BYTE(icop),
+ };
+
+ return si476x_core_send_command(core, CMD_IC_LINK_GPO_CTL_PIN_CFG,
+ args, ARRAY_SIZE(args),
+ resp, ARRAY_SIZE(resp),
+ SI476X_DEFAULT_TIMEOUT);
+}
+EXPORT_SYMBOL_GPL(si476x_core_cmd_ic_link_gpo_ctl_pin_cfg);
+
+/**
+ * si476x_cmd_ana_audio_pin_cfg - send 'ANA_AUDIO_PIN_CFG' to the
+ * device
+ * @core - device to send the command to
+ * @lrout - LROUT pin function configuration:
+ * SI476X_LROUT_NOOP - do not modify the behaviour
+ * SI476X_LROUT_TRISTATE - put the pin in tristate condition,
+ * enable 1MOhm pulldown
+ * SI476X_LROUT_AUDIO - set pin to be audio output
+ * SI476X_LROUT_MPX - set pin to be MPX output
+ *
+ * Function returns 0 on success and negative error code on failure
+ */
+int si476x_core_cmd_ana_audio_pin_cfg(struct si476x_core *core,
+ enum si476x_lrout_config lrout)
+{
+ u8 resp[CMD_ANA_AUDIO_PIN_CFG_NRESP];
+ const u8 args[CMD_ANA_AUDIO_PIN_CFG_NARGS] = {
+ PIN_CFG_BYTE(lrout),
+ };
+
+ return si476x_core_send_command(core, CMD_ANA_AUDIO_PIN_CFG,
+ args, ARRAY_SIZE(args),
+ resp, ARRAY_SIZE(resp),
+ SI476X_DEFAULT_TIMEOUT);
+}
+EXPORT_SYMBOL_GPL(si476x_core_cmd_ana_audio_pin_cfg);
+
+
+/**
+ * si476x_cmd_intb_pin_cfg - send 'INTB_PIN_CFG' command to the device
+ * @core - device to send the command to
+ * @intb - INTB pin function configuration:
+ * SI476X_INTB_NOOP - do not modify the behaviour
+ * SI476X_INTB_TRISTATE - put the pin in tristate condition,
+ * enable 1MOhm pulldown
+ * SI476X_INTB_DAUDIO - set pin to be a part of digital
+ * audio interface in slave mode
+ * SI476X_INTB_IRQ - set pin to be an interrupt request line
+ * @a1 - A1 pin function configuration:
+ * SI476X_A1_NOOP - do not modify the behaviour
+ * SI476X_A1_TRISTATE - put the pin in tristate condition,
+ * enable 1MOhm pulldown
+ * SI476X_A1_IRQ - set pin to be an interrupt request line
+ *
+ * Function returns 0 on success and negative error code on failure
+ */
+static int si476x_core_cmd_intb_pin_cfg_a10(struct si476x_core *core,
+ enum si476x_intb_config intb,
+ enum si476x_a1_config a1)
+{
+ u8 resp[CMD_INTB_PIN_CFG_A10_NRESP];
+ const u8 args[CMD_INTB_PIN_CFG_NARGS] = {
+ PIN_CFG_BYTE(intb),
+ PIN_CFG_BYTE(a1),
+ };
+
+ return si476x_core_send_command(core, CMD_INTB_PIN_CFG,
+ args, ARRAY_SIZE(args),
+ resp, ARRAY_SIZE(resp),
+ SI476X_DEFAULT_TIMEOUT);
+}
+
+static int si476x_core_cmd_intb_pin_cfg_a20(struct si476x_core *core,
+ enum si476x_intb_config intb,
+ enum si476x_a1_config a1)
+{
+ u8 resp[CMD_INTB_PIN_CFG_A20_NRESP];
+ const u8 args[CMD_INTB_PIN_CFG_NARGS] = {
+ PIN_CFG_BYTE(intb),
+ PIN_CFG_BYTE(a1),
+ };
+
+ return si476x_core_send_command(core, CMD_INTB_PIN_CFG,
+ args, ARRAY_SIZE(args),
+ resp, ARRAY_SIZE(resp),
+ SI476X_DEFAULT_TIMEOUT);
+}
+
+
+
+/**
+ * si476x_cmd_am_rsq_status - send 'AM_RSQ_STATUS' command to the
+ * device
+ * @core - device to send the command to
+ * @rsqack - if set command clears RSQINT, SNRINT, SNRLINT, RSSIHINT,
+ * RSSILINT, BLENDINT, MULTHINT and MULTLINT
+ * @attune - when set the values in the status report are the values
+ * that were calculated at tune
+ * @cancel - abort ongoing seek/tune operation
+ * @stcack - clear the STCINT bit in the status register
+ * @report - all signal quality information returned by the command
+ * (if NULL then the output of the command is ignored)
+ *
+ * Function returns 0 on success and negative error code on failure
+ */
+int si476x_core_cmd_am_rsq_status(struct si476x_core *core,
+ struct si476x_rsq_status_args *rsqargs,
+ struct si476x_rsq_status_report *report)
+{
+ int err;
+ u8 resp[CMD_AM_RSQ_STATUS_NRESP];
+ const u8 args[CMD_AM_RSQ_STATUS_NARGS] = {
+ rsqargs->rsqack << 3 | rsqargs->attune << 2 |
+ rsqargs->cancel << 1 | rsqargs->stcack,
+ };
+
+ err = si476x_core_send_command(core, CMD_AM_RSQ_STATUS,
+ args, ARRAY_SIZE(args),
+ resp, ARRAY_SIZE(resp),
+ SI476X_DEFAULT_TIMEOUT);
+ /*
+ * Besides getting received signal quality information, this
+ * command can be used to just acknowledge different interrupt
+ * flags. In those cases it is useless to copy and parse the
+ * received data, so the user can pass NULL and thus avoid
+ * unnecessary copying.
+ */
+ if (!report)
+ return err;
+
+ report->snrhint = 0b00001000 & resp[1];
+ report->snrlint = 0b00000100 & resp[1];
+ report->rssihint = 0b00000010 & resp[1];
+ report->rssilint = 0b00000001 & resp[1];
+
+ report->bltf = 0b10000000 & resp[2];
+ report->snr_ready = 0b00100000 & resp[2];
+ report->rssiready = 0b00001000 & resp[2];
+ report->afcrl = 0b00000010 & resp[2];
+ report->valid = 0b00000001 & resp[2];
+
+ report->readfreq = be16_to_cpup((__be16 *)(resp + 3));
+ report->freqoff = resp[5];
+ report->rssi = resp[6];
+ report->snr = resp[7];
+ report->lassi = resp[9];
+ report->hassi = resp[10];
+ report->mult = resp[11];
+ report->dev = resp[12];
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(si476x_core_cmd_am_rsq_status);
+
+int si476x_core_cmd_fm_acf_status(struct si476x_core *core,
+ struct si476x_acf_status_report *report)
+{
+ int err;
+ u8 resp[CMD_FM_ACF_STATUS_NRESP];
+ const u8 args[CMD_FM_ACF_STATUS_NARGS] = {
+ 0x0,
+ };
+
+ if (!report)
+ return -EINVAL;
+
+ err = si476x_core_send_command(core, CMD_FM_ACF_STATUS,
+ args, ARRAY_SIZE(args),
+ resp, ARRAY_SIZE(resp),
+ SI476X_DEFAULT_TIMEOUT);
+ if (err < 0)
+ return err;
+
+ report->blend_int = resp[1] & SI476X_ACF_BLEND_INT;
+ report->hblend_int = resp[1] & SI476X_ACF_HIBLEND_INT;
+ report->hicut_int = resp[1] & SI476X_ACF_HICUT_INT;
+ report->chbw_int = resp[1] & SI476X_ACF_CHBW_INT;
+ report->softmute_int = resp[1] & SI476X_ACF_SOFTMUTE_INT;
+ report->smute = resp[2] & SI476X_ACF_SMUTE;
+ report->smattn = resp[3] & SI476X_ACF_SMATTN;
+ report->chbw = resp[4];
+ report->hicut = resp[5];
+ report->hiblend = resp[6];
+ report->pilot = resp[7] & SI476X_ACF_PILOT;
+ report->stblend = resp[7] & SI476X_ACF_STBLEND;
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_acf_status);
+
+int si476x_core_cmd_am_acf_status(struct si476x_core *core,
+ struct si476x_acf_status_report *report)
+{
+ int err;
+ u8 resp[CMD_AM_ACF_STATUS_NRESP];
+ const u8 args[CMD_AM_ACF_STATUS_NARGS] = {
+ 0x0,
+ };
+
+ if (!report)
+ return -EINVAL;
+
+ err = si476x_core_send_command(core, CMD_AM_ACF_STATUS,
+ args, ARRAY_SIZE(args),
+ resp, ARRAY_SIZE(resp),
+ SI476X_DEFAULT_TIMEOUT);
+ if (err < 0)
+ return err;
+
+ report->blend_int = resp[1] & SI476X_ACF_BLEND_INT;
+ report->hblend_int = resp[1] & SI476X_ACF_HIBLEND_INT;
+ report->hicut_int = resp[1] & SI476X_ACF_HICUT_INT;
+ report->chbw_int = resp[1] & SI476X_ACF_CHBW_INT;
+ report->softmute_int = resp[1] & SI476X_ACF_SOFTMUTE_INT;
+ report->smute = resp[2] & SI476X_ACF_SMUTE;
+ report->smattn = resp[3] & SI476X_ACF_SMATTN;
+ report->chbw = resp[4];
+ report->hicut = resp[5];
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(si476x_core_cmd_am_acf_status);
+
+
+/**
+ * si476x_cmd_fm_seek_start - send 'FM_SEEK_START' command to the
+ * device
+ * @core - device to send the command to
+ * @seekup - if set the direction of the search is 'up'
+ * @wrap - if set seek wraps when hitting band limit
+ *
+ * This function begins search for a valid station. The station is
+ * considered valid when 'FM_VALID_SNR_THRESHOLD' and
+ * 'FM_VALID_RSSI_THRESHOLD' and 'FM_VALID_MAX_TUNE_ERROR' criteria
+ * are met.
+ *
+ * Function returns 0 on success and negative error code on failure
+ */
+int si476x_core_cmd_fm_seek_start(struct si476x_core *core,
+ bool seekup, bool wrap)
+{
+ u8 resp[CMD_FM_SEEK_START_NRESP];
+ const u8 args[CMD_FM_SEEK_START_NARGS] = {
+ seekup << 3 | wrap << 2,
+ };
+
+ return si476x_cmd_tune_seek_freq(core, CMD_FM_SEEK_START,
+ args, sizeof(args),
+ resp, sizeof(resp));
+}
+EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_seek_start);
+
+/**
+ * si476x_cmd_fm_rds_status - send 'FM_RDS_STATUS' command to the
+ * device
+ * @core - device to send the command to
+ * @status_only - if set the data is not removed from the RDS FIFO,
+ * RDSFIFOUSED is not decremented and the rest of the RDS
+ * data fields contain the last valid info received
+ * @mtfifo - if set the command clears the RDS receive FIFO
+ * @intack - if set the command clears the RDSINT bit.
+ *
+ * Function returns 0 on success and negative error code on failure
+ */
+int si476x_core_cmd_fm_rds_status(struct si476x_core *core,
+ bool status_only,
+ bool mtfifo,
+ bool intack,
+ struct si476x_rds_status_report *report)
+{
+ int err;
+ u8 resp[CMD_FM_RDS_STATUS_NRESP];
+ const u8 args[CMD_FM_RDS_STATUS_NARGS] = {
+ status_only << 2 | mtfifo << 1 | intack,
+ };
+
+ err = si476x_core_send_command(core, CMD_FM_RDS_STATUS,
+ args, ARRAY_SIZE(args),
+ resp, ARRAY_SIZE(resp),
+ SI476X_DEFAULT_TIMEOUT);
+ /*
+ * Besides getting RDS status information, this command can be
+ * used to just acknowledge different interrupt flags. In those
+ * cases it is useless to copy and parse the received data, so
+ * the user can pass NULL and thus avoid unnecessary copying.
+ */
+ if (err < 0 || report == NULL)
+ return err;
+
+ report->rdstpptyint = 0b00010000 & resp[1];
+ report->rdspiint = 0b00001000 & resp[1];
+ report->rdssyncint = 0b00000010 & resp[1];
+ report->rdsfifoint = 0b00000001 & resp[1];
+
+ report->tpptyvalid = 0b00010000 & resp[2];
+ report->pivalid = 0b00001000 & resp[2];
+ report->rdssync = 0b00000010 & resp[2];
+ report->rdsfifolost = 0b00000001 & resp[2];
+
+ report->tp = 0b00100000 & resp[3];
+ report->pty = 0b00011111 & resp[3];
+
+ report->pi = be16_to_cpup((__be16 *)(resp + 4));
+ report->rdsfifoused = resp[6];
+
+ report->ble[V4L2_RDS_BLOCK_A] = 0b11000000 & resp[7];
+ report->ble[V4L2_RDS_BLOCK_B] = 0b00110000 & resp[7];
+ report->ble[V4L2_RDS_BLOCK_C] = 0b00001100 & resp[7];
+ report->ble[V4L2_RDS_BLOCK_D] = 0b00000011 & resp[7];
+
+ report->rds[V4L2_RDS_BLOCK_A].block = V4L2_RDS_BLOCK_A;
+ report->rds[V4L2_RDS_BLOCK_A].msb = resp[8];
+ report->rds[V4L2_RDS_BLOCK_A].lsb = resp[9];
+
+ report->rds[V4L2_RDS_BLOCK_B].block = V4L2_RDS_BLOCK_B;
+ report->rds[V4L2_RDS_BLOCK_B].msb = resp[10];
+ report->rds[V4L2_RDS_BLOCK_B].lsb = resp[11];
+
+ report->rds[V4L2_RDS_BLOCK_C].block = V4L2_RDS_BLOCK_C;
+ report->rds[V4L2_RDS_BLOCK_C].msb = resp[12];
+ report->rds[V4L2_RDS_BLOCK_C].lsb = resp[13];
+
+ report->rds[V4L2_RDS_BLOCK_D].block = V4L2_RDS_BLOCK_D;
+ report->rds[V4L2_RDS_BLOCK_D].msb = resp[14];
+ report->rds[V4L2_RDS_BLOCK_D].lsb = resp[15];
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_rds_status);
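
A sketch of how a caller might drain the RDS FIFO with this helper, acknowledging RDSINT on each pass; the termination condition and any locking are assumptions:

	struct si476x_rds_status_report report;
	int err;

	do {
		err = si476x_core_cmd_fm_rds_status(core, false, false, true, &report);
		if (err < 0)
			break;
		/* hand report.rds[V4L2_RDS_BLOCK_A..D] to the RDS consumer here */
	} while (report.rdsfifoused > 0);
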
+
+int si476x_core_cmd_fm_rds_blockcount(struct si476x_core *core,
+ bool clear,
+ struct si476x_rds_blockcount_report *report)
+{
+ int err;
+ u8 resp[CMD_FM_RDS_BLOCKCOUNT_NRESP];
+ const u8 args[CMD_FM_RDS_BLOCKCOUNT_NARGS] = {
+ clear,
+ };
+
+ if (!report)
+ return -EINVAL;
+
+ err = si476x_core_send_command(core, CMD_FM_RDS_BLOCKCOUNT,
+ args, ARRAY_SIZE(args),
+ resp, ARRAY_SIZE(resp),
+ SI476X_DEFAULT_TIMEOUT);
+
+ if (!err) {
+ report->expected = be16_to_cpup((__be16 *)(resp + 2));
+ report->received = be16_to_cpup((__be16 *)(resp + 4));
+ report->uncorrectable = be16_to_cpup((__be16 *)(resp + 6));
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_rds_blockcount);
+
+int si476x_core_cmd_fm_phase_diversity(struct si476x_core *core,
+ enum si476x_phase_diversity_mode mode)
+{
+ u8 resp[CMD_FM_PHASE_DIVERSITY_NRESP];
+ const u8 args[CMD_FM_PHASE_DIVERSITY_NARGS] = {
+ mode & 0b111,
+ };
+
+ return si476x_core_send_command(core, CMD_FM_PHASE_DIVERSITY,
+ args, ARRAY_SIZE(args),
+ resp, ARRAY_SIZE(resp),
+ SI476X_DEFAULT_TIMEOUT);
+}
+EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_phase_diversity);
+/**
+ * si476x_core_cmd_fm_phase_div_status() - get the phase diversity
+ * status
+ *
+ * @core: si476x device
+ *
+ * NOTE caller must hold core lock
+ *
+ * Function returns the value of the status bit in case of success and
+ * negative error code in case of failure.
+ */
+int si476x_core_cmd_fm_phase_div_status(struct si476x_core *core)
+{
+ int err;
+ u8 resp[CMD_FM_PHASE_DIV_STATUS_NRESP];
+
+ err = si476x_core_send_command(core, CMD_FM_PHASE_DIV_STATUS,
+ NULL, 0,
+ resp, ARRAY_SIZE(resp),
+ SI476X_DEFAULT_TIMEOUT);
+
+ return (err < 0) ? err : resp[1];
+}
+EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_phase_div_status);
+
+
+/**
+ * si476x_core_cmd_am_seek_start - send 'AM_SEEK_START' command to the
+ * device
+ * @core - device to send the command to
+ * @seekup - if set the direction of the search is 'up'
+ * @wrap - if set seek wraps when hitting band limit
+ *
+ * This function begins search for a valid station. The station is
+ * considered valid when 'FM_VALID_SNR_THRESHOLD' and
+ * 'FM_VALID_RSSI_THRESHOLD' and 'FM_VALID_MAX_TUNE_ERROR' criteria
+ * are met.
+ *
+ * Function returns 0 on success and negative error code on failure
+ */
+int si476x_core_cmd_am_seek_start(struct si476x_core *core,
+ bool seekup, bool wrap)
+{
+ u8 resp[CMD_AM_SEEK_START_NRESP];
+ const u8 args[CMD_AM_SEEK_START_NARGS] = {
+ seekup << 3 | wrap << 2,
+ };
+
+ return si476x_cmd_tune_seek_freq(core, CMD_AM_SEEK_START,
+ args, sizeof(args),
+ resp, sizeof(resp));
+}
+EXPORT_SYMBOL_GPL(si476x_core_cmd_am_seek_start);
+
+
+
+static int si476x_core_cmd_power_up_a10(struct si476x_core *core,
+ struct si476x_power_up_args *puargs)
+{
+ u8 resp[CMD_POWER_UP_A10_NRESP];
+ const bool intsel = (core->pinmux.a1 == SI476X_A1_IRQ);
+ const bool ctsen = (core->client->irq != 0);
+ const u8 args[CMD_POWER_UP_A10_NARGS] = {
+ 0xF7, /* Reserved, always 0xF7 */
+ 0x3F & puargs->xcload, /* First two bits are reserved to be
+ * zeros */
+ ctsen << 7 | intsel << 6 | 0x07, /* Last five bits
+ * are reserved to
+ * be written as 0x7 */
+ puargs->func << 4 | puargs->freq,
+ 0x11, /* Reserved, always 0x11 */
+ };
+
+ return si476x_core_send_command(core, CMD_POWER_UP,
+ args, ARRAY_SIZE(args),
+ resp, ARRAY_SIZE(resp),
+ SI476X_TIMEOUT_POWER_UP);
+}
+
+static int si476x_core_cmd_power_up_a20(struct si476x_core *core,
+ struct si476x_power_up_args *puargs)
+{
+ u8 resp[CMD_POWER_UP_A20_NRESP];
+ const bool intsel = (core->pinmux.a1 == SI476X_A1_IRQ);
+ const bool ctsen = (core->client->irq != 0);
+ const u8 args[CMD_POWER_UP_A20_NARGS] = {
+ puargs->ibias6x << 7 | puargs->xstart,
+ 0x3F & puargs->xcload, /* First two bits are reserved to be
+ * zeros */
+ ctsen << 7 | intsel << 6 | puargs->fastboot << 5 |
+ puargs->xbiashc << 3 | puargs->xbias,
+ puargs->func << 4 | puargs->freq,
+ 0x10 | puargs->xmode,
+ };
+
+ return si476x_core_send_command(core, CMD_POWER_UP,
+ args, ARRAY_SIZE(args),
+ resp, ARRAY_SIZE(resp),
+ SI476X_TIMEOUT_POWER_UP);
+}
+
+static int si476x_core_cmd_power_down_a10(struct si476x_core *core,
+ struct si476x_power_down_args *pdargs)
+{
+ u8 resp[CMD_POWER_DOWN_A10_NRESP];
+
+ return si476x_core_send_command(core, CMD_POWER_DOWN,
+ NULL, 0,
+ resp, ARRAY_SIZE(resp),
+ SI476X_DEFAULT_TIMEOUT);
+}
+
+static int si476x_core_cmd_power_down_a20(struct si476x_core *core,
+ struct si476x_power_down_args *pdargs)
+{
+ u8 resp[CMD_POWER_DOWN_A20_NRESP];
+ const u8 args[CMD_POWER_DOWN_A20_NARGS] = {
+ pdargs->xosc,
+ };
+ return si476x_core_send_command(core, CMD_POWER_DOWN,
+ args, ARRAY_SIZE(args),
+ resp, ARRAY_SIZE(resp),
+ SI476X_DEFAULT_TIMEOUT);
+}
+
+static int si476x_core_cmd_am_tune_freq_a10(struct si476x_core *core,
+ struct si476x_tune_freq_args *tuneargs)
+{
+
+ const int am_freq = tuneargs->freq;
+ u8 resp[CMD_AM_TUNE_FREQ_NRESP];
+ const u8 args[CMD_AM_TUNE_FREQ_NARGS] = {
+ (tuneargs->hd << 6),
+ msb(am_freq),
+ lsb(am_freq),
+ };
+
+ return si476x_cmd_tune_seek_freq(core, CMD_AM_TUNE_FREQ, args,
+ sizeof(args),
+ resp, sizeof(resp));
+}
+
+static int si476x_core_cmd_am_tune_freq_a20(struct si476x_core *core,
+ struct si476x_tune_freq_args *tuneargs)
+{
+ const int am_freq = tuneargs->freq;
+ u8 resp[CMD_AM_TUNE_FREQ_NRESP];
+ const u8 args[CMD_AM_TUNE_FREQ_NARGS] = {
+ (tuneargs->zifsr << 6) | (tuneargs->injside & 0b11),
+ msb(am_freq),
+ lsb(am_freq),
+ };
+
+ return si476x_cmd_tune_seek_freq(core, CMD_AM_TUNE_FREQ,
+ args, sizeof(args),
+ resp, sizeof(resp));
+}
+
+static int si476x_core_cmd_fm_rsq_status_a10(struct si476x_core *core,
+ struct si476x_rsq_status_args *rsqargs,
+ struct si476x_rsq_status_report *report)
+{
+ int err;
+ u8 resp[CMD_FM_RSQ_STATUS_A10_NRESP];
+ const u8 args[CMD_FM_RSQ_STATUS_A10_NARGS] = {
+ rsqargs->rsqack << 3 | rsqargs->attune << 2 |
+ rsqargs->cancel << 1 | rsqargs->stcack,
+ };
+
+ err = si476x_core_send_command(core, CMD_FM_RSQ_STATUS,
+ args, ARRAY_SIZE(args),
+ resp, ARRAY_SIZE(resp),
+ SI476X_DEFAULT_TIMEOUT);
+ /*
+ * Besides getting received signal quality information this
+ * command can be used to just acknowledge different interrupt
+ * flags; in those cases it is useless to copy and parse the
+ * received data, so the user can pass NULL and thus avoid
+ * unnecessary copying.
+ */
+ if (err < 0 || report == NULL)
+ return err;
+
+ report->multhint = 0b10000000 & resp[1];
+ report->multlint = 0b01000000 & resp[1];
+ report->snrhint = 0b00001000 & resp[1];
+ report->snrlint = 0b00000100 & resp[1];
+ report->rssihint = 0b00000010 & resp[1];
+ report->rssilint = 0b00000001 & resp[1];
+
+ report->bltf = 0b10000000 & resp[2];
+ report->snr_ready = 0b00100000 & resp[2];
+ report->rssiready = 0b00001000 & resp[2];
+ report->afcrl = 0b00000010 & resp[2];
+ report->valid = 0b00000001 & resp[2];
+
+ report->readfreq = be16_to_cpup((__be16 *)(resp + 3));
+ report->freqoff = resp[5];
+ report->rssi = resp[6];
+ report->snr = resp[7];
+ report->lassi = resp[9];
+ report->hassi = resp[10];
+ report->mult = resp[11];
+ report->dev = resp[12];
+ report->readantcap = be16_to_cpup((__be16 *)(resp + 13));
+ report->assi = resp[15];
+ report->usn = resp[16];
+
+ return err;
+}
+
+static int si476x_core_cmd_fm_rsq_status_a20(struct si476x_core *core,
+ struct si476x_rsq_status_args *rsqargs,
+ struct si476x_rsq_status_report *report)
+{
+ int err;
+ u8 resp[CMD_FM_RSQ_STATUS_A10_NRESP];
+ const u8 args[CMD_FM_RSQ_STATUS_A30_NARGS] = {
+ rsqargs->primary << 4 | rsqargs->rsqack << 3 |
+ rsqargs->attune << 2 | rsqargs->cancel << 1 |
+ rsqargs->stcack,
+ };
+
+ err = si476x_core_send_command(core, CMD_FM_RSQ_STATUS,
+ args, ARRAY_SIZE(args),
+ resp, ARRAY_SIZE(resp),
+ SI476X_DEFAULT_TIMEOUT);
+ /*
+ * Besides getting received signal quality information this
+ * command can be used to just acknowledge different interrupt
+ * flags; in those cases it is useless to copy and parse the
+ * received data, so the user can pass NULL and thus avoid
+ * unnecessary copying.
+ */
+ if (err < 0 || report == NULL)
+ return err;
+
+ report->multhint = 0b10000000 & resp[1];
+ report->multlint = 0b01000000 & resp[1];
+ report->snrhint = 0b00001000 & resp[1];
+ report->snrlint = 0b00000100 & resp[1];
+ report->rssihint = 0b00000010 & resp[1];
+ report->rssilint = 0b00000001 & resp[1];
+
+ report->bltf = 0b10000000 & resp[2];
+ report->snr_ready = 0b00100000 & resp[2];
+ report->rssiready = 0b00001000 & resp[2];
+ report->afcrl = 0b00000010 & resp[2];
+ report->valid = 0b00000001 & resp[2];
+
+ report->readfreq = be16_to_cpup((__be16 *)(resp + 3));
+ report->freqoff = resp[5];
+ report->rssi = resp[6];
+ report->snr = resp[7];
+ report->lassi = resp[9];
+ report->hassi = resp[10];
+ report->mult = resp[11];
+ report->dev = resp[12];
+ report->readantcap = be16_to_cpup((__be16 *)(resp + 13));
+ report->assi = resp[15];
+ report->usn = resp[16];
+
+ return err;
+}
+
+
+static int si476x_core_cmd_fm_rsq_status_a30(struct si476x_core *core,
+ struct si476x_rsq_status_args *rsqargs,
+ struct si476x_rsq_status_report *report)
+{
+ int err;
+ u8 resp[CMD_FM_RSQ_STATUS_A30_NRESP];
+ const u8 args[CMD_FM_RSQ_STATUS_A30_NARGS] = {
+ rsqargs->primary << 4 | rsqargs->rsqack << 3 |
+ rsqargs->attune << 2 | rsqargs->cancel << 1 |
+ rsqargs->stcack,
+ };
+
+ err = si476x_core_send_command(core, CMD_FM_RSQ_STATUS,
+ args, ARRAY_SIZE(args),
+ resp, ARRAY_SIZE(resp),
+ SI476X_DEFAULT_TIMEOUT);
+ /*
+ * Besides getting received signal quality information this
+ * command can be used to just acknowledge different interrupt
+ * flags; in those cases it is useless to copy and parse the
+ * received data, so the user can pass NULL and thus avoid
+ * unnecessary copying.
+ */
+ if (err < 0 || report == NULL)
+ return err;
+
+ report->multhint = 0b10000000 & resp[1];
+ report->multlint = 0b01000000 & resp[1];
+ report->snrhint = 0b00001000 & resp[1];
+ report->snrlint = 0b00000100 & resp[1];
+ report->rssihint = 0b00000010 & resp[1];
+ report->rssilint = 0b00000001 & resp[1];
+
+ report->bltf = 0b10000000 & resp[2];
+ report->snr_ready = 0b00100000 & resp[2];
+ report->rssiready = 0b00001000 & resp[2];
+ report->injside = 0b00000100 & resp[2];
+ report->afcrl = 0b00000010 & resp[2];
+ report->valid = 0b00000001 & resp[2];
+
+ report->readfreq = be16_to_cpup((__be16 *)(resp + 3));
+ report->freqoff = resp[5];
+ report->rssi = resp[6];
+ report->snr = resp[7];
+ report->issi = resp[8];
+ report->lassi = resp[9];
+ report->hassi = resp[10];
+ report->mult = resp[11];
+ report->dev = resp[12];
+ report->readantcap = be16_to_cpup((__be16 *)(resp + 13));
+ report->assi = resp[15];
+ report->usn = resp[16];
+
+ report->pilotdev = resp[17];
+ report->rdsdev = resp[18];
+ report->assidev = resp[19];
+ report->strongdev = resp[20];
+ report->rdspi = be16_to_cpup((__be16 *)(resp + 21));
+
+ return err;
+}
+
+static int si476x_core_cmd_fm_tune_freq_a10(struct si476x_core *core,
+ struct si476x_tune_freq_args *tuneargs)
+{
+ u8 resp[CMD_FM_TUNE_FREQ_NRESP];
+ const u8 args[CMD_FM_TUNE_FREQ_A10_NARGS] = {
+ (tuneargs->hd << 6) | (tuneargs->tunemode << 4)
+ | (tuneargs->smoothmetrics << 2),
+ msb(tuneargs->freq),
+ lsb(tuneargs->freq),
+ msb(tuneargs->antcap),
+ lsb(tuneargs->antcap)
+ };
+
+ return si476x_cmd_tune_seek_freq(core, CMD_FM_TUNE_FREQ,
+ args, sizeof(args),
+ resp, sizeof(resp));
+}
+
+static int si476x_core_cmd_fm_tune_freq_a20(struct si476x_core *core,
+ struct si476x_tune_freq_args *tuneargs)
+{
+ u8 resp[CMD_FM_TUNE_FREQ_NRESP];
+ const u8 args[CMD_FM_TUNE_FREQ_A20_NARGS] = {
+ (tuneargs->hd << 6) | (tuneargs->tunemode << 4)
+ | (tuneargs->smoothmetrics << 2) | (tuneargs->injside),
+ msb(tuneargs->freq),
+ lsb(tuneargs->freq),
+ };
+
+ return si476x_cmd_tune_seek_freq(core, CMD_FM_TUNE_FREQ,
+ args, sizeof(args),
+ resp, sizeof(resp));
+}
+
+static int si476x_core_cmd_agc_status_a20(struct si476x_core *core,
+ struct si476x_agc_status_report *report)
+{
+ int err;
+ u8 resp[CMD_AGC_STATUS_NRESP_A20];
+
+ if (!report)
+ return -EINVAL;
+
+ err = si476x_core_send_command(core, CMD_AGC_STATUS,
+ NULL, 0,
+ resp, ARRAY_SIZE(resp),
+ SI476X_DEFAULT_TIMEOUT);
+ if (err < 0)
+ return err;
+
+ report->mxhi = resp[1] & SI476X_AGC_MXHI;
+ report->mxlo = resp[1] & SI476X_AGC_MXLO;
+ report->lnahi = resp[1] & SI476X_AGC_LNAHI;
+ report->lnalo = resp[1] & SI476X_AGC_LNALO;
+ report->fmagc1 = resp[2];
+ report->fmagc2 = resp[3];
+ report->pgagain = resp[4];
+ report->fmwblang = resp[5];
+
+ return err;
+}
+
+static int si476x_core_cmd_agc_status_a10(struct si476x_core *core,
+ struct si476x_agc_status_report *report)
+{
+ int err;
+ u8 resp[CMD_AGC_STATUS_NRESP_A10];
+
+ if (!report)
+ return -EINVAL;
+
+ err = si476x_core_send_command(core, CMD_AGC_STATUS,
+ NULL, 0,
+ resp, ARRAY_SIZE(resp),
+ SI476X_DEFAULT_TIMEOUT);
+ if (err < 0)
+ return err;
+
+ report->mxhi = resp[1] & SI476X_AGC_MXHI;
+ report->mxlo = resp[1] & SI476X_AGC_MXLO;
+ report->lnahi = resp[1] & SI476X_AGC_LNAHI;
+ report->lnalo = resp[1] & SI476X_AGC_LNALO;
+
+ return err;
+}
+
+typedef int (*tune_freq_func_t) (struct si476x_core *core,
+ struct si476x_tune_freq_args *tuneargs);
+
+static struct {
+ int (*power_up) (struct si476x_core *,
+ struct si476x_power_up_args *);
+ int (*power_down) (struct si476x_core *,
+ struct si476x_power_down_args *);
+
+ tune_freq_func_t fm_tune_freq;
+ tune_freq_func_t am_tune_freq;
+
+ int (*fm_rsq_status)(struct si476x_core *,
+ struct si476x_rsq_status_args *,
+ struct si476x_rsq_status_report *);
+
+ int (*agc_status)(struct si476x_core *,
+ struct si476x_agc_status_report *);
+ int (*intb_pin_cfg)(struct si476x_core *core,
+ enum si476x_intb_config intb,
+ enum si476x_a1_config a1);
+} si476x_cmds_vtable[] = {
+ [SI476X_REVISION_A10] = {
+ .power_up = si476x_core_cmd_power_up_a10,
+ .power_down = si476x_core_cmd_power_down_a10,
+ .fm_tune_freq = si476x_core_cmd_fm_tune_freq_a10,
+ .am_tune_freq = si476x_core_cmd_am_tune_freq_a10,
+ .fm_rsq_status = si476x_core_cmd_fm_rsq_status_a10,
+ .agc_status = si476x_core_cmd_agc_status_a10,
+ .intb_pin_cfg = si476x_core_cmd_intb_pin_cfg_a10,
+ },
+ [SI476X_REVISION_A20] = {
+ .power_up = si476x_core_cmd_power_up_a20,
+ .power_down = si476x_core_cmd_power_down_a20,
+ .fm_tune_freq = si476x_core_cmd_fm_tune_freq_a20,
+ .am_tune_freq = si476x_core_cmd_am_tune_freq_a20,
+ .fm_rsq_status = si476x_core_cmd_fm_rsq_status_a20,
+ .agc_status = si476x_core_cmd_agc_status_a20,
+ .intb_pin_cfg = si476x_core_cmd_intb_pin_cfg_a20,
+ },
+ [SI476X_REVISION_A30] = {
+ .power_up = si476x_core_cmd_power_up_a20,
+ .power_down = si476x_core_cmd_power_down_a20,
+ .fm_tune_freq = si476x_core_cmd_fm_tune_freq_a20,
+ .am_tune_freq = si476x_core_cmd_am_tune_freq_a20,
+ .fm_rsq_status = si476x_core_cmd_fm_rsq_status_a30,
+ .agc_status = si476x_core_cmd_agc_status_a20,
+ .intb_pin_cfg = si476x_core_cmd_intb_pin_cfg_a20,
+ },
+};
+
+int si476x_core_cmd_power_up(struct si476x_core *core,
+ struct si476x_power_up_args *args)
+{
+ BUG_ON(core->revision > SI476X_REVISION_A30 ||
+ core->revision == -1);
+ return si476x_cmds_vtable[core->revision].power_up(core, args);
+}
+EXPORT_SYMBOL_GPL(si476x_core_cmd_power_up);
+
+int si476x_core_cmd_power_down(struct si476x_core *core,
+ struct si476x_power_down_args *args)
+{
+ BUG_ON(core->revision > SI476X_REVISION_A30 ||
+ core->revision == -1);
+ return si476x_cmds_vtable[core->revision].power_down(core, args);
+}
+EXPORT_SYMBOL_GPL(si476x_core_cmd_power_down);
+
+int si476x_core_cmd_fm_tune_freq(struct si476x_core *core,
+ struct si476x_tune_freq_args *args)
+{
+ BUG_ON(core->revision > SI476X_REVISION_A30 ||
+ core->revision == -1);
+ return si476x_cmds_vtable[core->revision].fm_tune_freq(core, args);
+}
+EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_tune_freq);
+
+int si476x_core_cmd_am_tune_freq(struct si476x_core *core,
+ struct si476x_tune_freq_args *args)
+{
+ BUG_ON(core->revision > SI476X_REVISION_A30 ||
+ core->revision == -1);
+ return si476x_cmds_vtable[core->revision].am_tune_freq(core, args);
+}
+EXPORT_SYMBOL_GPL(si476x_core_cmd_am_tune_freq);
+
+int si476x_core_cmd_fm_rsq_status(struct si476x_core *core,
+ struct si476x_rsq_status_args *args,
+ struct si476x_rsq_status_report *report)
+
+{
+ BUG_ON(core->revision > SI476X_REVISION_A30 ||
+ core->revision == -1);
+ return si476x_cmds_vtable[core->revision].fm_rsq_status(core, args,
+ report);
+}
+EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_rsq_status);
+
+int si476x_core_cmd_agc_status(struct si476x_core *core,
+ struct si476x_agc_status_report *report)
+
+{
+ BUG_ON(core->revision > SI476X_REVISION_A30 ||
+ core->revision == -1);
+ return si476x_cmds_vtable[core->revision].agc_status(core, report);
+}
+EXPORT_SYMBOL_GPL(si476x_core_cmd_agc_status);
+
+int si476x_core_cmd_intb_pin_cfg(struct si476x_core *core,
+ enum si476x_intb_config intb,
+ enum si476x_a1_config a1)
+{
+ BUG_ON(core->revision > SI476X_REVISION_A30 ||
+ core->revision == -1);
+
+ return si476x_cmds_vtable[core->revision].intb_pin_cfg(core, intb, a1);
+}
+EXPORT_SYMBOL_GPL(si476x_core_cmd_intb_pin_cfg);
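The exported wrappers above hide the revision-specific command encodings behind the vtable, so a user of the core API tunes the same way on A10, A20 and A30 parts. A minimal sketch, assuming the core lock is held and the chip is powered up as an FM receiver; the helper name is invented and the zero-initialised fields are placeholders, not recommended settings:

/* Illustrative only: tune to a given frequency regardless of the
 * chip revision; the vtable picks the right command encoding. */
static int example_fm_tune(struct si476x_core *core, int freq)
{
	struct si476x_tune_freq_args args = {
		.freq = freq,	/* device frequency units, placeholder */
	};

	return si476x_core_cmd_fm_tune_freq(core, &args);
}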
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>");
+MODULE_DESCRIPTION("API for command exchange for si476x");
diff --git a/drivers/mfd/si476x-i2c.c b/drivers/mfd/si476x-i2c.c
new file mode 100644
index 0000000..f5bc8e4
--- /dev/null
+++ b/drivers/mfd/si476x-i2c.c
@@ -0,0 +1,886 @@
+/*
+ * drivers/mfd/si476x-i2c.c -- Core device driver for si476x MFD
+ * device
+ *
+ * Copyright (C) 2012 Innovative Converged Devices(ICD)
+ * Copyright (C) 2013 Andrey Smirnov
+ *
+ * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+
+#include <linux/mfd/si476x-core.h>
+
+#define SI476X_MAX_IO_ERRORS 10
+#define SI476X_DRIVER_RDS_FIFO_DEPTH 128
+
+/**
+ * si476x_core_config_pinmux() - pin function configuration function
+ *
+ * @core: Core device structure
+ *
+ * Configure the functions of the pins of the radio chip.
+ *
+ * The function returns zero in case of success or a negative error code
+ * otherwise.
+ */
+static int si476x_core_config_pinmux(struct si476x_core *core)
+{
+ int err;
+ dev_dbg(&core->client->dev, "Configuring pinmux\n");
+ err = si476x_core_cmd_dig_audio_pin_cfg(core,
+ core->pinmux.dclk,
+ core->pinmux.dfs,
+ core->pinmux.dout,
+ core->pinmux.xout);
+ if (err < 0) {
+ dev_err(&core->client->dev,
+ "Failed to configure digital audio pins(err = %d)\n",
+ err);
+ return err;
+ }
+
+ err = si476x_core_cmd_zif_pin_cfg(core,
+ core->pinmux.iqclk,
+ core->pinmux.iqfs,
+ core->pinmux.iout,
+ core->pinmux.qout);
+ if (err < 0) {
+ dev_err(&core->client->dev,
+ "Failed to configure ZIF pins(err = %d)\n",
+ err);
+ return err;
+ }
+
+ err = si476x_core_cmd_ic_link_gpo_ctl_pin_cfg(core,
+ core->pinmux.icin,
+ core->pinmux.icip,
+ core->pinmux.icon,
+ core->pinmux.icop);
+ if (err < 0) {
+ dev_err(&core->client->dev,
+ "Failed to configure IC-Link/GPO pins(err = %d)\n",
+ err);
+ return err;
+ }
+
+ err = si476x_core_cmd_ana_audio_pin_cfg(core,
+ core->pinmux.lrout);
+ if (err < 0) {
+ dev_err(&core->client->dev,
+ "Failed to configure analog audio pins(err = %d)\n",
+ err);
+ return err;
+ }
+
+ err = si476x_core_cmd_intb_pin_cfg(core,
+ core->pinmux.intb,
+ core->pinmux.a1);
+ if (err < 0) {
+ dev_err(&core->client->dev,
+ "Failed to configure interrupt pins(err = %d)\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+static inline void si476x_core_schedule_polling_work(struct si476x_core *core)
+{
+ schedule_delayed_work(&core->status_monitor,
+ usecs_to_jiffies(SI476X_STATUS_POLL_US));
+}
+
+/**
+ * si476x_core_start() - early chip startup function
+ * @core: Core device structure
+ * @soft: When set, this flag forces a "soft" startup, to be used after
+ * a "soft" power down, i.e. one performed by sending the appropriate
+ * command instead of toggling the reset pin of the tuner
+ *
+ * Perform the startup sequence required to correctly power
+ * up the chip and perform initial configuration. It does the
+ * following sequence of actions:
+ * 1. Claims and enables the power supplies VD and VIO1 required
+ * for operation of the chip's I2C interface.
+ * 2. Waits for 100us, pulls the reset line up, enables irq,
+ * waits for another 100us as specified by the datasheet.
+ * 3. Sends the 'POWER_UP' command to the device with all provided
+ * power-up parameters.
+ * 4. Configures the pin multiplexer, disables digital audio and
+ * configures interrupt sources.
+ *
+ * The function returns zero in case of success or a negative error code
+ * otherwise.
+ */
+int si476x_core_start(struct si476x_core *core, bool soft)
+{
+ struct i2c_client *client = core->client;
+ int err;
+
+ if (!soft) {
+ if (gpio_is_valid(core->gpio_reset))
+ gpio_set_value_cansleep(core->gpio_reset, 1);
+
+ if (client->irq)
+ enable_irq(client->irq);
+
+ udelay(100);
+
+ if (!client->irq) {
+ atomic_set(&core->is_alive, 1);
+ si476x_core_schedule_polling_work(core);
+ }
+ } else {
+ if (client->irq)
+ enable_irq(client->irq);
+ else {
+ atomic_set(&core->is_alive, 1);
+ si476x_core_schedule_polling_work(core);
+ }
+ }
+
+ err = si476x_core_cmd_power_up(core,
+ &core->power_up_parameters);
+
+ if (err < 0) {
+ dev_err(&core->client->dev,
+ "Power up failure(err = %d)\n",
+ err);
+ goto disable_irq;
+ }
+
+ if (client->irq)
+ atomic_set(&core->is_alive, 1);
+
+ err = si476x_core_config_pinmux(core);
+ if (err < 0) {
+ dev_err(&core->client->dev,
+ "Failed to configure pinmux(err = %d)\n",
+ err);
+ goto disable_irq;
+ }
+
+ if (client->irq) {
+ err = regmap_write(core->regmap,
+ SI476X_PROP_INT_CTL_ENABLE,
+ SI476X_RDSIEN |
+ SI476X_STCIEN |
+ SI476X_CTSIEN);
+ if (err < 0) {
+ dev_err(&core->client->dev,
+ "Failed to configure interrupt sources"
+ "(err = %d)\n", err);
+ goto disable_irq;
+ }
+ }
+
+ return 0;
+
+disable_irq:
+ if (err == -ENODEV)
+ atomic_set(&core->is_alive, 0);
+
+ if (client->irq)
+ disable_irq(client->irq);
+ else
+ cancel_delayed_work_sync(&core->status_monitor);
+
+ if (gpio_is_valid(core->gpio_reset))
+ gpio_set_value_cansleep(core->gpio_reset, 0);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(si476x_core_start);
+
+/**
+ * si476x_core_stop() - chip power-down function
+ * @core: Core device structure
+ * @soft: When set, function sends a POWER_DOWN command instead of
+ * bringing reset line low
+ *
+ * Power down the chip by performing the following actions:
+ * 1. Disable IRQ or stop the polling worker
+ * 2. Send the POWER_DOWN command if the power down is soft or bring
+ * reset line low if not.
+ *
+ * The function returns zero in case of success or a negative error code
+ * otherwise.
+ */
+int si476x_core_stop(struct si476x_core *core, bool soft)
+{
+ int err = 0;
+ atomic_set(&core->is_alive, 0);
+
+ if (soft) {
+ /* TODO: This probably should be a configurable option,
+ * so it is possible to have the chips keep their
+ * oscillators running
+ */
+ struct si476x_power_down_args args = {
+ .xosc = false,
+ };
+ err = si476x_core_cmd_power_down(core, &args);
+ }
+
+ /* We couldn't disable those before
+ * 'si476x_core_cmd_power_down' since we expect to get CTS
+ * interrupt */
+ if (core->client->irq)
+ disable_irq(core->client->irq);
+ else
+ cancel_delayed_work_sync(&core->status_monitor);
+
+ if (!soft) {
+ if (gpio_is_valid(core->gpio_reset))
+ gpio_set_value_cansleep(core->gpio_reset, 0);
+ }
+ return err;
+}
+EXPORT_SYMBOL_GPL(si476x_core_stop);
+
+/**
+ * si476x_core_set_power_state() - set the level at which the power is
+ * supplied for the chip.
+ * @core: Core device structure
+ * @next_state: enum si476x_power_state describing power state to
+ * switch to.
+ *
+ * Switch on all the required power supplies
+ *
+ * This function returns 0 in case of success and a negative error code
+ * otherwise.
+ */
+int si476x_core_set_power_state(struct si476x_core *core,
+ enum si476x_power_state next_state)
+{
+ /*
+ It is not clear from the datasheet if it is possible to
+ work with the device if not all power domains are operational.
+ So for now the power-up policy is "power-up all the things!"
+ */
+ int err = 0;
+
+ if (core->power_state == SI476X_POWER_INCONSISTENT) {
+ dev_err(&core->client->dev,
+ "The device in inconsistent power state\n");
+ return -EINVAL;
+ }
+
+ if (next_state != core->power_state) {
+ switch (next_state) {
+ case SI476X_POWER_UP_FULL:
+ err = regulator_bulk_enable(ARRAY_SIZE(core->supplies),
+ core->supplies);
+ if (err < 0) {
+ core->power_state = SI476X_POWER_INCONSISTENT;
+ break;
+ }
+ /*
+ * The startup timing diagram recommends a 100 us
+ * delay between enabling the power supplies and
+ * turning the tuner on.
+ */
+ udelay(100);
+
+ err = si476x_core_start(core, false);
+ if (err < 0)
+ goto disable_regulators;
+
+ core->power_state = next_state;
+ break;
+
+ case SI476X_POWER_DOWN:
+ core->power_state = next_state;
+ err = si476x_core_stop(core, false);
+ if (err < 0)
+ core->power_state = SI476X_POWER_INCONSISTENT;
+disable_regulators:
+ err = regulator_bulk_disable(ARRAY_SIZE(core->supplies),
+ core->supplies);
+ if (err < 0)
+ core->power_state = SI476X_POWER_INCONSISTENT;
+ break;
+ default:
+ BUG();
+ }
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(si476x_core_set_power_state);
+
+/**
+ * si476x_core_report_drainer_stop() - mark the completion of the RDS
+ * buffer drain process by the worker.
+ *
+ * @core: Core device structure
+ */
+static inline void si476x_core_report_drainer_stop(struct si476x_core *core)
+{
+ mutex_lock(&core->rds_drainer_status_lock);
+ core->rds_drainer_is_working = false;
+ mutex_unlock(&core->rds_drainer_status_lock);
+}
+
+/**
+ * si476x_core_start_rds_drainer_once() - start the RDS drainer worker if
+ * there is none working, do nothing otherwise
+ *
+ * @core: Datastructure corresponding to the chip.
+ */
+static inline void si476x_core_start_rds_drainer_once(struct si476x_core *core)
+{
+ mutex_lock(&core->rds_drainer_status_lock);
+ if (!core->rds_drainer_is_working) {
+ core->rds_drainer_is_working = true;
+ schedule_work(&core->rds_fifo_drainer);
+ }
+ mutex_unlock(&core->rds_drainer_status_lock);
+}
+/**
+ * si476x_core_drain_rds_fifo() - RDS buffer drainer.
+ * @work: struct work_struct being passed to the function by the
+ * kernel.
+ *
+ * Drain the contents of the chip's RDS FIFO into the driver's FIFO.
+ */
+static void si476x_core_drain_rds_fifo(struct work_struct *work)
+{
+ int err;
+
+ struct si476x_core *core = container_of(work, struct si476x_core,
+ rds_fifo_drainer);
+
+ struct si476x_rds_status_report report;
+
+ si476x_core_lock(core);
+ err = si476x_core_cmd_fm_rds_status(core, true, false, false, &report);
+ if (!err) {
+ int i = report.rdsfifoused;
+ dev_dbg(&core->client->dev,
+ "%d elements in RDS FIFO. Draining.\n", i);
+ for (; i > 0; --i) {
+ err = si476x_core_cmd_fm_rds_status(core, false, false,
+ (i == 1), &report);
+ if (err < 0)
+ goto unlock;
+
+ kfifo_in(&core->rds_fifo, report.rds,
+ sizeof(report.rds));
+ dev_dbg(&core->client->dev, "RDS data:\n %*ph\n",
+ (int)sizeof(report.rds), report.rds);
+ }
+ dev_dbg(&core->client->dev, "Drrrrained!\n");
+ wake_up_interruptible(&core->rds_read_queue);
+ }
+
+unlock:
+ si476x_core_unlock(core);
+ si476x_core_report_drainer_stop(core);
+}
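The drainer above only fills core->rds_fifo; the consumer side lives in the radio sub-device. A simplified sketch of what such a consumer might look like, assuming the byte-granular kfifo set up in probe() below; the helper name is invented for illustration and it omits locking of the kfifo against concurrent readers:

/* Illustrative consumer: sleep until the drainer has queued data,
 * then pull whole RDS blocks out of the driver FIFO. */
static int example_read_rds(struct si476x_core *core,
			    struct v4l2_rds_data *out, size_t n)
{
	unsigned int copied;

	if (wait_event_interruptible(core->rds_read_queue,
				     !kfifo_is_empty(&core->rds_fifo)))
		return -ERESTARTSYS;

	copied = kfifo_out(&core->rds_fifo, out, n * sizeof(*out));

	return copied / sizeof(*out);
}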
+
+/**
+ * si476x_core_pronounce_dead()
+ *
+ * @core: Core device structure
+ *
+ * Mark the device as being dead and wake up all potentially waiting
+ * threads of execution.
+ *
+ */
+static void si476x_core_pronounce_dead(struct si476x_core *core)
+{
+ dev_info(&core->client->dev, "Core device is dead.\n");
+
+ atomic_set(&core->is_alive, 0);
+
+ /* Wake up all possible waiting processes */
+ wake_up_interruptible(&core->rds_read_queue);
+
+ atomic_set(&core->cts, 1);
+ wake_up(&core->command);
+
+ atomic_set(&core->stc, 1);
+ wake_up(&core->tuning);
+}
+
+/**
+ * si476x_core_i2c_xfer()
+ *
+ * @core: Core device structure
+ * @type: Transfer type
+ * @buf: Transfer buffer for/with data
+ * @count: Transfer buffer size
+ *
+ * Perform an I2C transfer (either read or write) and keep a counter
+ * of I/O errors. If the error counter rises above the threshold,
+ * pronounce the device dead.
+ *
+ * The function returns the number of transferred bytes on success or
+ * a negative error code on failure.
+ */
+int si476x_core_i2c_xfer(struct si476x_core *core,
+ enum si476x_i2c_type type,
+ char *buf, int count)
+{
+ static int io_errors_count;
+ int err;
+ if (type == SI476X_I2C_SEND)
+ err = i2c_master_send(core->client, buf, count);
+ else
+ err = i2c_master_recv(core->client, buf, count);
+
+ if (err < 0) {
+ if (io_errors_count++ > SI476X_MAX_IO_ERRORS)
+ si476x_core_pronounce_dead(core);
+ } else {
+ io_errors_count = 0;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(si476x_core_i2c_xfer);
+
+/**
+ * si476x_core_get_status()
+ * @core: Core device structure
+ *
+ * Get the status byte of the core device by performing a one-byte I2C
+ * read.
+ *
+ * The function returns a status value or a negative error code on
+ * error.
+ */
+static int si476x_core_get_status(struct si476x_core *core)
+{
+ u8 response;
+ int err = si476x_core_i2c_xfer(core, SI476X_I2C_RECV,
+ &response, sizeof(response));
+
+ return (err < 0) ? err : response;
+}
+
+/**
+ * si476x_core_get_and_signal_status() - IRQ dispatcher
+ * @core: Core device structure
+ *
+ * Dispatch the arrived interrupt request based on the value of the
+ * status byte reported by the tuner.
+ *
+ */
+static void si476x_core_get_and_signal_status(struct si476x_core *core)
+{
+ int status = si476x_core_get_status(core);
+ if (status < 0) {
+ dev_err(&core->client->dev, "Failed to get status\n");
+ return;
+ }
+
+ if (status & SI476X_CTS) {
+ /* Unfortunately completions could not be used for
+ * signalling CTS since this flag cannot be cleared
+ * in status byte, and therefore once it becomes true
+ * multiple calls to 'complete' would cause the
+ * commands following the current one to be completed
+ * before they actually are */
+ dev_dbg(&core->client->dev, "[interrupt] CTSINT\n");
+ atomic_set(&core->cts, 1);
+ wake_up(&core->command);
+ }
+
+ if (status & SI476X_FM_RDS_INT) {
+ dev_dbg(&core->client->dev, "[interrupt] RDSINT\n");
+ si476x_core_start_rds_drainer_once(core);
+ }
+
+ if (status & SI476X_STC_INT) {
+ dev_dbg(&core->client->dev, "[interrupt] STCINT\n");
+ atomic_set(&core->stc, 1);
+ wake_up(&core->tuning);
+ }
+}
+
+static void si476x_core_poll_loop(struct work_struct *work)
+{
+ struct si476x_core *core = SI476X_WORK_TO_CORE(work);
+
+ si476x_core_get_and_signal_status(core);
+
+ if (atomic_read(&core->is_alive))
+ si476x_core_schedule_polling_work(core);
+}
+
+static irqreturn_t si476x_core_interrupt(int irq, void *dev)
+{
+ struct si476x_core *core = dev;
+
+ si476x_core_get_and_signal_status(core);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * si476x_core_fwver_to_revision()
+ * @core: Core device structure
+ * @major: Firmware major number
+ * @minor1: Firmware first minor number
+ * @minor2: Firmware second minor number
+ *
+ * Convert a chip's firmware version number into an offset that is
+ * later used as an index into the "vtable" of tuner functions
+ *
+ * This function returns a positive offset in case of success and a -1
+ * in case of failure.
+ */
+static int si476x_core_fwver_to_revision(struct si476x_core *core,
+ int func, int major,
+ int minor1, int minor2)
+{
+ switch (func) {
+ case SI476X_FUNC_FM_RECEIVER:
+ switch (major) {
+ case 5:
+ return SI476X_REVISION_A10;
+ case 8:
+ return SI476X_REVISION_A20;
+ case 10:
+ return SI476X_REVISION_A30;
+ default:
+ goto unknown_revision;
+ }
+ case SI476X_FUNC_AM_RECEIVER:
+ switch (major) {
+ case 5:
+ return SI476X_REVISION_A10;
+ case 7:
+ return SI476X_REVISION_A20;
+ case 9:
+ return SI476X_REVISION_A30;
+ default:
+ goto unknown_revision;
+ }
+ case SI476X_FUNC_WB_RECEIVER:
+ switch (major) {
+ case 3:
+ return SI476X_REVISION_A10;
+ case 5:
+ return SI476X_REVISION_A20;
+ case 7:
+ return SI476X_REVISION_A30;
+ default:
+ goto unknown_revision;
+ }
+ case SI476X_FUNC_BOOTLOADER:
+ default: /* FALLTHROUGH */
+ BUG();
+ return -1;
+ }
+
+unknown_revision:
+ dev_err(&core->client->dev,
+ "Unsupported version of the firmware: %d.%d.%d, "
+ "reverting to A10 comptible functions\n",
+ major, minor1, minor2);
+
+ return SI476X_REVISION_A10;
+}
+
+/**
+ * si476x_core_get_revision_info()
+ * @core: Core device structure
+ *
+ * Get the firmware version number of the device. It is done in
+ * the following three steps:
+ * 1. Power up the device
+ * 2. Send the 'FUNC_INFO' command
+ * 3. Power the device down.
+ *
+ * The function returns zero on success and a negative error code on
+ * failure.
+ */
+static int si476x_core_get_revision_info(struct si476x_core *core)
+{
+ int rval;
+ struct si476x_func_info info;
+
+ si476x_core_lock(core);
+ rval = si476x_core_set_power_state(core, SI476X_POWER_UP_FULL);
+ if (rval < 0)
+ goto exit;
+
+ rval = si476x_core_cmd_func_info(core, &info);
+ if (rval < 0)
+ goto power_down;
+
+ core->revision = si476x_core_fwver_to_revision(core, info.func,
+ info.firmware.major,
+ info.firmware.minor[0],
+ info.firmware.minor[1]);
+power_down:
+ si476x_core_set_power_state(core, SI476X_POWER_DOWN);
+exit:
+ si476x_core_unlock(core);
+
+ return rval;
+}
+
+bool si476x_core_has_am(struct si476x_core *core)
+{
+ return core->chip_id == SI476X_CHIP_SI4761 ||
+ core->chip_id == SI476X_CHIP_SI4764;
+}
+EXPORT_SYMBOL_GPL(si476x_core_has_am);
+
+bool si476x_core_has_diversity(struct si476x_core *core)
+{
+ return core->chip_id == SI476X_CHIP_SI4764;
+}
+EXPORT_SYMBOL_GPL(si476x_core_has_diversity);
+
+bool si476x_core_is_a_secondary_tuner(struct si476x_core *core)
+{
+ return si476x_core_has_diversity(core) &&
+ (core->diversity_mode == SI476X_PHDIV_SECONDARY_ANTENNA ||
+ core->diversity_mode == SI476X_PHDIV_SECONDARY_COMBINING);
+}
+EXPORT_SYMBOL_GPL(si476x_core_is_a_secondary_tuner);
+
+bool si476x_core_is_a_primary_tuner(struct si476x_core *core)
+{
+ return si476x_core_has_diversity(core) &&
+ (core->diversity_mode == SI476X_PHDIV_PRIMARY_ANTENNA ||
+ core->diversity_mode == SI476X_PHDIV_PRIMARY_COMBINING);
+}
+EXPORT_SYMBOL_GPL(si476x_core_is_a_primary_tuner);
+
+bool si476x_core_is_in_am_receiver_mode(struct si476x_core *core)
+{
+ return si476x_core_has_am(core) &&
+ (core->power_up_parameters.func == SI476X_FUNC_AM_RECEIVER);
+}
+EXPORT_SYMBOL_GPL(si476x_core_is_in_am_receiver_mode);
+
+bool si476x_core_is_powered_up(struct si476x_core *core)
+{
+ return core->power_state == SI476X_POWER_UP_FULL;
+}
+EXPORT_SYMBOL_GPL(si476x_core_is_powered_up);
+
+static int si476x_core_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rval;
+ struct si476x_core *core;
+ struct si476x_platform_data *pdata;
+ struct mfd_cell *cell;
+ int cell_num;
+
+ core = devm_kzalloc(&client->dev, sizeof(*core), GFP_KERNEL);
+ if (!core) {
+ dev_err(&client->dev,
+ "failed to allocate 'struct si476x_core'\n");
+ return -ENOMEM;
+ }
+ core->client = client;
+
+ core->regmap = devm_regmap_init_si476x(core);
+ if (IS_ERR(core->regmap)) {
+ rval = PTR_ERR(core->regmap);
+ dev_err(&client->dev,
+ "Failed to allocate register map: %d\n",
+ rval);
+ return rval;
+ }
+
+ i2c_set_clientdata(client, core);
+
+ atomic_set(&core->is_alive, 0);
+ core->power_state = SI476X_POWER_DOWN;
+
+ pdata = client->dev.platform_data;
+ if (pdata) {
+ memcpy(&core->power_up_parameters,
+ &pdata->power_up_parameters,
+ sizeof(core->power_up_parameters));
+
+ core->gpio_reset = -1;
+ if (gpio_is_valid(pdata->gpio_reset)) {
+ rval = gpio_request(pdata->gpio_reset, "si476x reset");
+ if (rval) {
+ dev_err(&client->dev,
+ "Failed to request gpio: %d\n", rval);
+ return rval;
+ }
+ core->gpio_reset = pdata->gpio_reset;
+ gpio_direction_output(core->gpio_reset, 0);
+ }
+
+ core->diversity_mode = pdata->diversity_mode;
+ memcpy(&core->pinmux, &pdata->pinmux,
+ sizeof(struct si476x_pinmux));
+ } else {
+ dev_err(&client->dev, "No platform data provided\n");
+ return -EINVAL;
+ }
+
+ core->supplies[0].supply = "vd";
+ core->supplies[1].supply = "va";
+ core->supplies[2].supply = "vio1";
+ core->supplies[3].supply = "vio2";
+
+ rval = devm_regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(core->supplies),
+ core->supplies);
+ if (rval) {
+ dev_err(&client->dev, "Failet to gett all of the regulators\n");
+ goto free_gpio;
+ }
+
+ mutex_init(&core->cmd_lock);
+ init_waitqueue_head(&core->command);
+ init_waitqueue_head(&core->tuning);
+
+ rval = kfifo_alloc(&core->rds_fifo,
+ SI476X_DRIVER_RDS_FIFO_DEPTH *
+ sizeof(struct v4l2_rds_data),
+ GFP_KERNEL);
+ if (rval) {
+ dev_err(&client->dev, "Could not alloate the FIFO\n");
+ goto free_gpio;
+ }
+ mutex_init(&core->rds_drainer_status_lock);
+ init_waitqueue_head(&core->rds_read_queue);
+ INIT_WORK(&core->rds_fifo_drainer, si476x_core_drain_rds_fifo);
+
+ if (client->irq) {
+ rval = devm_request_threaded_irq(&client->dev,
+ client->irq, NULL,
+ si476x_core_interrupt,
+ IRQF_TRIGGER_FALLING,
+ client->name, core);
+ if (rval < 0) {
+ dev_err(&client->dev, "Could not request IRQ %d\n",
+ client->irq);
+ goto free_kfifo;
+ }
+ disable_irq(client->irq);
+ dev_dbg(&client->dev, "IRQ requested.\n");
+
+ core->rds_fifo_depth = 20;
+ } else {
+ INIT_DELAYED_WORK(&core->status_monitor,
+ si476x_core_poll_loop);
+ dev_info(&client->dev,
+ "No IRQ number specified, will use polling\n");
+
+ core->rds_fifo_depth = 5;
+ }
+
+ core->chip_id = id->driver_data;
+
+ rval = si476x_core_get_revision_info(core);
+ if (rval < 0) {
+ rval = -ENODEV;
+ goto free_kfifo;
+ }
+
+ cell_num = 0;
+
+ cell = &core->cells[SI476X_RADIO_CELL];
+ cell->name = "si476x-radio";
+ cell_num++;
+
+#ifdef CONFIG_SND_SOC_SI476X
+ if ((core->chip_id == SI476X_CHIP_SI4761 ||
+ core->chip_id == SI476X_CHIP_SI4764) &&
+ core->pinmux.dclk == SI476X_DCLK_DAUDIO &&
+ core->pinmux.dfs == SI476X_DFS_DAUDIO &&
+ core->pinmux.dout == SI476X_DOUT_I2S_OUTPUT &&
+ core->pinmux.xout == SI476X_XOUT_TRISTATE) {
+ cell = &core->cells[SI476X_CODEC_CELL];
+ cell->name = "si476x-codec";
+ cell_num++;
+ }
+#endif
+ rval = mfd_add_devices(&client->dev,
+ (client->adapter->nr << 8) + client->addr,
+ core->cells, cell_num,
+ NULL, 0, NULL);
+ if (!rval)
+ return 0;
+
+free_kfifo:
+ kfifo_free(&core->rds_fifo);
+
+free_gpio:
+ if (gpio_is_valid(core->gpio_reset))
+ gpio_free(core->gpio_reset);
+
+ return rval;
+}
+
+static int si476x_core_remove(struct i2c_client *client)
+{
+ struct si476x_core *core = i2c_get_clientdata(client);
+
+ si476x_core_pronounce_dead(core);
+ mfd_remove_devices(&client->dev);
+
+ if (client->irq)
+ disable_irq(client->irq);
+ else
+ cancel_delayed_work_sync(&core->status_monitor);
+
+ kfifo_free(&core->rds_fifo);
+
+ if (gpio_is_valid(core->gpio_reset))
+ gpio_free(core->gpio_reset);
+
+ return 0;
+}
+
+
+static const struct i2c_device_id si476x_id[] = {
+ { "si4761", SI476X_CHIP_SI4761 },
+ { "si4764", SI476X_CHIP_SI4764 },
+ { "si4768", SI476X_CHIP_SI4768 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, si476x_id);
+
+static struct i2c_driver si476x_core_driver = {
+ .driver = {
+ .name = "si476x-core",
+ .owner = THIS_MODULE,
+ },
+ .probe = si476x_core_probe,
+ .remove = si476x_core_remove,
+ .id_table = si476x_id,
+};
+module_i2c_driver(si476x_core_driver);
+
+
+MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>");
+MODULE_DESCRIPTION("Si4761/64/68 AM/FM MFD core device driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/si476x-prop.c b/drivers/mfd/si476x-prop.c
new file mode 100644
index 0000000..cfeffa6
--- /dev/null
+++ b/drivers/mfd/si476x-prop.c
@@ -0,0 +1,241 @@
+/*
+ * drivers/mfd/si476x-prop.c -- Subroutines to access
+ * properties of si476x chips
+ *
+ * Copyright (C) 2012 Innovative Converged Devices(ICD)
+ * Copyright (C) 2013 Andrey Smirnov
+ *
+ * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/module.h>
+
+#include <linux/mfd/si476x-core.h>
+
+struct si476x_property_range {
+ u16 low, high;
+};
+
+static bool si476x_core_element_is_in_array(u16 element,
+ const u16 array[],
+ size_t size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ if (element == array[i])
+ return true;
+
+ return false;
+}
+
+static bool si476x_core_element_is_in_range(u16 element,
+ const struct si476x_property_range range[],
+ size_t size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ if (element <= range[i].high && element >= range[i].low)
+ return true;
+
+ return false;
+}
+
+static bool si476x_core_is_valid_property_a10(struct si476x_core *core,
+ u16 property)
+{
+ static const u16 valid_properties[] = {
+ 0x0000,
+ 0x0500, 0x0501,
+ 0x0600,
+ 0x0709, 0x070C, 0x070D, 0x070E, 0x0710,
+ 0x0718,
+ 0x1207, 0x1208,
+ 0x2007,
+ 0x2300,
+ };
+
+ static const struct si476x_property_range valid_ranges[] = {
+ { 0x0200, 0x0203 },
+ { 0x0300, 0x0303 },
+ { 0x0400, 0x0404 },
+ { 0x0700, 0x0707 },
+ { 0x1100, 0x1102 },
+ { 0x1200, 0x1204 },
+ { 0x1300, 0x1306 },
+ { 0x2000, 0x2005 },
+ { 0x2100, 0x2104 },
+ { 0x2106, 0x2106 },
+ { 0x2200, 0x220E },
+ { 0x3100, 0x3104 },
+ { 0x3207, 0x320F },
+ { 0x3300, 0x3304 },
+ { 0x3500, 0x3517 },
+ { 0x3600, 0x3617 },
+ { 0x3700, 0x3717 },
+ { 0x4000, 0x4003 },
+ };
+
+ return si476x_core_element_is_in_range(property, valid_ranges,
+ ARRAY_SIZE(valid_ranges)) ||
+ si476x_core_element_is_in_array(property, valid_properties,
+ ARRAY_SIZE(valid_properties));
+}
+
+static bool si476x_core_is_valid_property_a20(struct si476x_core *core,
+ u16 property)
+{
+ static const u16 valid_properties[] = {
+ 0x071B,
+ 0x1006,
+ 0x2210,
+ 0x3401,
+ };
+
+ static const struct si476x_property_range valid_ranges[] = {
+ { 0x2215, 0x2219 },
+ };
+
+ return si476x_core_is_valid_property_a10(core, property) ||
+ si476x_core_element_is_in_range(property, valid_ranges,
+ ARRAY_SIZE(valid_ranges)) ||
+ si476x_core_element_is_in_array(property, valid_properties,
+ ARRAY_SIZE(valid_properties));
+}
+
+static bool si476x_core_is_valid_property_a30(struct si476x_core *core,
+ u16 property)
+{
+ static const u16 valid_properties[] = {
+ 0x071C, 0x071D,
+ 0x1007, 0x1008,
+ 0x220F, 0x2214,
+ 0x2301,
+ 0x3105, 0x3106,
+ 0x3402,
+ };
+
+ static const struct si476x_property_range valid_ranges[] = {
+ { 0x0405, 0x0411 },
+ { 0x2008, 0x200B },
+ { 0x2220, 0x2223 },
+ { 0x3100, 0x3106 },
+ };
+
+ return si476x_core_is_valid_property_a20(core, property) ||
+ si476x_core_element_is_in_range(property, valid_ranges,
+ ARRAY_SIZE(valid_ranges)) ||
+ si476x_core_element_is_in_array(property, valid_properties,
+ ARRAY_SIZE(valid_properties));
+}
+
+typedef bool (*valid_property_pred_t) (struct si476x_core *, u16);
+
+static bool si476x_core_is_valid_property(struct si476x_core *core,
+ u16 property)
+{
+ static const valid_property_pred_t is_valid_property[] = {
+ [SI476X_REVISION_A10] = si476x_core_is_valid_property_a10,
+ [SI476X_REVISION_A20] = si476x_core_is_valid_property_a20,
+ [SI476X_REVISION_A30] = si476x_core_is_valid_property_a30,
+ };
+
+ BUG_ON(core->revision > SI476X_REVISION_A30 ||
+ core->revision == -1);
+ return is_valid_property[core->revision](core, property);
+}
+
+
+static bool si476x_core_is_readonly_property(struct si476x_core *core,
+ u16 property)
+{
+ BUG_ON(core->revision > SI476X_REVISION_A30 ||
+ core->revision == -1);
+
+ switch (core->revision) {
+ case SI476X_REVISION_A10:
+ return (property == 0x3200);
+ case SI476X_REVISION_A20:
+ return (property == 0x1006 ||
+ property == 0x2210 ||
+ property == 0x3200);
+ case SI476X_REVISION_A30:
+ return false;
+ }
+
+ return false;
+}
+
+static bool si476x_core_regmap_readable_register(struct device *dev,
+ unsigned int reg)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct si476x_core *core = i2c_get_clientdata(client);
+
+ return si476x_core_is_valid_property(core, (u16) reg);
+
+}
+
+static bool si476x_core_regmap_writable_register(struct device *dev,
+ unsigned int reg)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct si476x_core *core = i2c_get_clientdata(client);
+
+ return si476x_core_is_valid_property(core, (u16) reg) &&
+ !si476x_core_is_readonly_property(core, (u16) reg);
+}
+
+
+static int si476x_core_regmap_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ return si476x_core_cmd_set_property(context, reg, val);
+}
+
+static int si476x_core_regmap_read(void *context, unsigned int reg,
+ unsigned *val)
+{
+ struct si476x_core *core = context;
+ int err;
+
+ err = si476x_core_cmd_get_property(core, reg);
+ if (err < 0)
+ return err;
+
+ *val = err;
+
+ return 0;
+}
+
+
+static const struct regmap_config si476x_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 16,
+
+ .max_register = 0x4003,
+
+ .writeable_reg = si476x_core_regmap_writable_register,
+ .readable_reg = si476x_core_regmap_readable_register,
+
+ .reg_read = si476x_core_regmap_read,
+ .reg_write = si476x_core_regmap_write,
+
+ .cache_type = REGCACHE_RBTREE,
+};
+
+struct regmap *devm_regmap_init_si476x(struct si476x_core *core)
+{
+ return devm_regmap_init(&core->client->dev, NULL,
+ core, &si476x_regmap_config);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_si476x);
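With the regmap registered this way, chip properties are read and written through the standard regmap calls, while the readable/writeable callbacks above filter out property numbers the detected revision does not support. A minimal sketch, assuming the device is powered up; the helper name is invented, and property 0x0000 is used only because it appears in the A10 valid-property list above:

/* Illustrative only: read a property and write the same value back
 * through the regmap registered by devm_regmap_init_si476x(). */
static int example_touch_property(struct si476x_core *core)
{
	unsigned int val;
	int err;

	err = regmap_read(core->regmap, 0x0000, &val);
	if (err < 0)
		return err;

	return regmap_write(core->regmap, 0x0000, val);
}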
diff --git a/drivers/mfd/sta2x11-mfd.c b/drivers/mfd/sta2x11-mfd.c
index 9bd3316..d70a3430 100644
--- a/drivers/mfd/sta2x11-mfd.c
+++ b/drivers/mfd/sta2x11-mfd.c
@@ -98,17 +98,6 @@ static int sta2x11_mfd_add(struct pci_dev *pdev, gfp_t flags)
return 0;
}
-static int mfd_remove(struct pci_dev *pdev)
-{
- struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
-
- if (!mfd)
- return -ENODEV;
- list_del(&mfd->list);
- kfree(mfd);
- return 0;
-}
-
/* This function is exported and is not expected to fail */
u32 __sta2x11_mfd_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val,
enum sta2x11_mfd_plat_dev index)
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index fd5fcb6..0da02e1 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -75,6 +75,7 @@ static const struct i2c_device_id stmpe_i2c_id[] = {
{ "stmpe801", STMPE801 },
{ "stmpe811", STMPE811 },
{ "stmpe1601", STMPE1601 },
+ { "stmpe1801", STMPE1801 },
{ "stmpe2401", STMPE2401 },
{ "stmpe2403", STMPE2403 },
{ }
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index 973659f..a81badb 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -103,7 +103,7 @@ stmpe_spi_probe(struct spi_device *spi)
static int stmpe_spi_remove(struct spi_device *spi)
{
- struct stmpe *stmpe = dev_get_drvdata(&spi->dev);
+ struct stmpe *stmpe = spi_get_drvdata(spi);
return stmpe_remove(stmpe);
}
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 4b11202..bbccd51 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -19,6 +19,7 @@
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/mfd/core.h>
+#include <linux/delay.h>
#include "stmpe.h"
static int __stmpe_enable(struct stmpe *stmpe, unsigned int blocks)
@@ -643,6 +644,88 @@ static struct stmpe_variant_info stmpe1601 = {
};
/*
+ * STMPE1801
+ */
+static const u8 stmpe1801_regs[] = {
+ [STMPE_IDX_CHIP_ID] = STMPE1801_REG_CHIP_ID,
+ [STMPE_IDX_ICR_LSB] = STMPE1801_REG_INT_CTRL_LOW,
+ [STMPE_IDX_IER_LSB] = STMPE1801_REG_INT_EN_MASK_LOW,
+ [STMPE_IDX_ISR_LSB] = STMPE1801_REG_INT_STA_LOW,
+ [STMPE_IDX_GPMR_LSB] = STMPE1801_REG_GPIO_MP_LOW,
+ [STMPE_IDX_GPSR_LSB] = STMPE1801_REG_GPIO_SET_LOW,
+ [STMPE_IDX_GPCR_LSB] = STMPE1801_REG_GPIO_CLR_LOW,
+ [STMPE_IDX_GPDR_LSB] = STMPE1801_REG_GPIO_SET_DIR_LOW,
+ [STMPE_IDX_GPRER_LSB] = STMPE1801_REG_GPIO_RE_LOW,
+ [STMPE_IDX_GPFER_LSB] = STMPE1801_REG_GPIO_FE_LOW,
+ [STMPE_IDX_IEGPIOR_LSB] = STMPE1801_REG_INT_EN_GPIO_MASK_LOW,
+ [STMPE_IDX_ISGPIOR_LSB] = STMPE1801_REG_INT_STA_GPIO_LOW,
+};
+
+static struct stmpe_variant_block stmpe1801_blocks[] = {
+ {
+ .cell = &stmpe_gpio_cell,
+ .irq = STMPE1801_IRQ_GPIOC,
+ .block = STMPE_BLOCK_GPIO,
+ },
+ {
+ .cell = &stmpe_keypad_cell,
+ .irq = STMPE1801_IRQ_KEYPAD,
+ .block = STMPE_BLOCK_KEYPAD,
+ },
+};
+
+static int stmpe1801_enable(struct stmpe *stmpe, unsigned int blocks,
+ bool enable)
+{
+ unsigned int mask = 0;
+ if (blocks & STMPE_BLOCK_GPIO)
+ mask |= STMPE1801_MSK_INT_EN_GPIO;
+
+ if (blocks & STMPE_BLOCK_KEYPAD)
+ mask |= STMPE1801_MSK_INT_EN_KPC;
+
+ return __stmpe_set_bits(stmpe, STMPE1801_REG_INT_EN_MASK_LOW, mask,
+ enable ? mask : 0);
+}
+
+static int stmpe1801_reset(struct stmpe *stmpe)
+{
+ unsigned long timeout;
+ int ret = 0;
+
+ ret = __stmpe_set_bits(stmpe, STMPE1801_REG_SYS_CTRL,
+ STMPE1801_MSK_SYS_CTRL_RESET, STMPE1801_MSK_SYS_CTRL_RESET);
+ if (ret < 0)
+ return ret;
+
+ timeout = jiffies + msecs_to_jiffies(100);
+ while (time_before(jiffies, timeout)) {
+ ret = __stmpe_reg_read(stmpe, STMPE1801_REG_SYS_CTRL);
+ if (ret < 0)
+ return ret;
+ if (!(ret & STMPE1801_MSK_SYS_CTRL_RESET))
+ return 0;
+ usleep_range(100, 200);
+ }
+ return -EIO;
+}
+
+static struct stmpe_variant_info stmpe1801 = {
+ .name = "stmpe1801",
+ .id_val = STMPE1801_ID,
+ .id_mask = 0xfff0,
+ .num_gpios = 18,
+ .af_bits = 0,
+ .regs = stmpe1801_regs,
+ .blocks = stmpe1801_blocks,
+ .num_blocks = ARRAY_SIZE(stmpe1801_blocks),
+ .num_irqs = STMPE1801_NR_INTERNAL_IRQS,
+ .enable = stmpe1801_enable,
+ /* stmpe1801 does not have any gpio alternate function */
+ .get_altfunc = NULL,
+};
+
+/*
* STMPE24XX
*/
@@ -740,6 +823,7 @@ static struct stmpe_variant_info *stmpe_variant_info[STMPE_NBR_PARTS] = {
[STMPE801] = &stmpe801,
[STMPE811] = &stmpe811,
[STMPE1601] = &stmpe1601,
+ [STMPE1801] = &stmpe1801,
[STMPE2401] = &stmpe2401,
[STMPE2403] = &stmpe2403,
};
@@ -759,7 +843,7 @@ static irqreturn_t stmpe_irq(int irq, void *data)
struct stmpe *stmpe = data;
struct stmpe_variant_info *variant = stmpe->variant;
int num = DIV_ROUND_UP(variant->num_irqs, 8);
- u8 israddr = stmpe->regs[STMPE_IDX_ISR_MSB];
+ u8 israddr;
u8 isr[num];
int ret;
int i;
@@ -771,6 +855,11 @@ static irqreturn_t stmpe_irq(int irq, void *data)
return IRQ_HANDLED;
}
+ if (variant->id_val == STMPE1801_ID)
+ israddr = stmpe->regs[STMPE_IDX_ISR_LSB];
+ else
+ israddr = stmpe->regs[STMPE_IDX_ISR_MSB];
+
ret = stmpe_block_read(stmpe, israddr, num, isr);
if (ret < 0)
return IRQ_NONE;
@@ -938,6 +1027,12 @@ static int stmpe_chip_init(struct stmpe *stmpe)
if (ret)
return ret;
+ if (id == STMPE1801_ID) {
+ ret = stmpe1801_reset(stmpe);
+ if (ret < 0)
+ return ret;
+ }
+
if (stmpe->irq >= 0) {
if (id == STMPE801_ID)
icr = STMPE801_REG_SYS_CTRL_INT_EN;
@@ -1015,7 +1110,10 @@ void stmpe_of_probe(struct stmpe_platform_data *pdata, struct device_node *np)
{
struct device_node *child;
- pdata->id = -1;
+ pdata->id = of_alias_get_id(np, "stmpe-i2c");
+ if (pdata->id < 0)
+ pdata->id = -1;
+
pdata->irq_trigger = IRQF_TRIGGER_NONE;
of_property_read_u32(np, "st,autosleep-timeout",
@@ -1057,6 +1155,9 @@ int stmpe_probe(struct stmpe_client_info *ci, int partnum)
return -ENOMEM;
stmpe_of_probe(pdata, np);
+
+ if (of_find_property(np, "interrupts", NULL) == NULL)
+ ci->irq = -1;
}
stmpe = devm_kzalloc(ci->dev, sizeof(struct stmpe), GFP_KERNEL);
diff --git a/drivers/mfd/stmpe.h b/drivers/mfd/stmpe.h
index 7b8e13f..ff2b09b 100644
--- a/drivers/mfd/stmpe.h
+++ b/drivers/mfd/stmpe.h
@@ -199,6 +199,55 @@ int stmpe_remove(struct stmpe *stmpe);
#define STPME1601_AUTOSLEEP_ENABLE (1 << 3)
/*
+ * STMPE1801
+ */
+#define STMPE1801_ID 0xc110
+#define STMPE1801_NR_INTERNAL_IRQS 5
+#define STMPE1801_IRQ_KEYPAD_COMBI 4
+#define STMPE1801_IRQ_GPIOC 3
+#define STMPE1801_IRQ_KEYPAD_OVER 2
+#define STMPE1801_IRQ_KEYPAD 1
+#define STMPE1801_IRQ_WAKEUP 0
+
+#define STMPE1801_REG_CHIP_ID 0x00
+#define STMPE1801_REG_SYS_CTRL 0x02
+#define STMPE1801_REG_INT_CTRL_LOW 0x04
+#define STMPE1801_REG_INT_EN_MASK_LOW 0x06
+#define STMPE1801_REG_INT_STA_LOW 0x08
+#define STMPE1801_REG_INT_EN_GPIO_MASK_LOW 0x0A
+#define STMPE1801_REG_INT_EN_GPIO_MASK_MID 0x0B
+#define STMPE1801_REG_INT_EN_GPIO_MASK_HIGH 0x0C
+#define STMPE1801_REG_INT_STA_GPIO_LOW 0x0D
+#define STMPE1801_REG_INT_STA_GPIO_MID 0x0E
+#define STMPE1801_REG_INT_STA_GPIO_HIGH 0x0F
+#define STMPE1801_REG_GPIO_SET_LOW 0x10
+#define STMPE1801_REG_GPIO_SET_MID 0x11
+#define STMPE1801_REG_GPIO_SET_HIGH 0x12
+#define STMPE1801_REG_GPIO_CLR_LOW 0x13
+#define STMPE1801_REG_GPIO_CLR_MID 0x14
+#define STMPE1801_REG_GPIO_CLR_HIGH 0x15
+#define STMPE1801_REG_GPIO_MP_LOW 0x16
+#define STMPE1801_REG_GPIO_MP_MID 0x17
+#define STMPE1801_REG_GPIO_MP_HIGH 0x18
+#define STMPE1801_REG_GPIO_SET_DIR_LOW 0x19
+#define STMPE1801_REG_GPIO_SET_DIR_MID 0x1A
+#define STMPE1801_REG_GPIO_SET_DIR_HIGH 0x1B
+#define STMPE1801_REG_GPIO_RE_LOW 0x1C
+#define STMPE1801_REG_GPIO_RE_MID 0x1D
+#define STMPE1801_REG_GPIO_RE_HIGH 0x1E
+#define STMPE1801_REG_GPIO_FE_LOW 0x1F
+#define STMPE1801_REG_GPIO_FE_MID 0x20
+#define STMPE1801_REG_GPIO_FE_HIGH 0x21
+#define STMPE1801_REG_GPIO_PULL_UP_LOW 0x22
+#define STMPE1801_REG_GPIO_PULL_UP_MID 0x23
+#define STMPE1801_REG_GPIO_PULL_UP_HIGH 0x24
+
+#define STMPE1801_MSK_SYS_CTRL_RESET (1 << 7)
+
+#define STMPE1801_MSK_INT_EN_KPC (1 << 1)
+#define STMPE1801_MSK_INT_EN_GPIO (1 << 3)
+
+/*
* STMPE24xx
*/
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 61aea63..962a6e1 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -25,17 +25,15 @@
static struct platform_driver syscon_driver;
struct syscon {
- struct device *dev;
void __iomem *base;
struct regmap *regmap;
};
-static int syscon_match(struct device *dev, void *data)
+static int syscon_match_node(struct device *dev, void *data)
{
- struct syscon *syscon = dev_get_drvdata(dev);
struct device_node *dn = data;
- return (syscon->dev->of_node == dn) ? 1 : 0;
+ return (dev->of_node == dn) ? 1 : 0;
}
struct regmap *syscon_node_to_regmap(struct device_node *np)
@@ -44,7 +42,7 @@ struct regmap *syscon_node_to_regmap(struct device_node *np)
struct device *dev;
dev = driver_find_device(&syscon_driver.driver, NULL, np,
- syscon_match);
+ syscon_match_node);
if (!dev)
return ERR_PTR(-EPROBE_DEFER);
@@ -70,6 +68,34 @@ struct regmap *syscon_regmap_lookup_by_compatible(const char *s)
}
EXPORT_SYMBOL_GPL(syscon_regmap_lookup_by_compatible);
+static int syscon_match_pdevname(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ const struct platform_device_id *id = platform_get_device_id(pdev);
+
+ if (id)
+ if (!strcmp(id->name, (const char *)data))
+ return 1;
+
+ return !strcmp(dev_name(dev), (const char *)data);
+}
+
+struct regmap *syscon_regmap_lookup_by_pdevname(const char *s)
+{
+ struct device *dev;
+ struct syscon *syscon;
+
+ dev = driver_find_device(&syscon_driver.driver, NULL, (void *)s,
+ syscon_match_pdevname);
+ if (!dev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ syscon = dev_get_drvdata(dev);
+
+ return syscon->regmap;
+}
+EXPORT_SYMBOL_GPL(syscon_regmap_lookup_by_pdevname);
+
struct regmap *syscon_regmap_lookup_by_phandle(struct device_node *np,
const char *property)
{
@@ -101,28 +127,22 @@ static struct regmap_config syscon_regmap_config = {
static int syscon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
struct syscon *syscon;
- struct resource res;
- int ret;
-
- if (!np)
- return -ENOENT;
+ struct resource *res;
- syscon = devm_kzalloc(dev, sizeof(struct syscon),
- GFP_KERNEL);
+ syscon = devm_kzalloc(dev, sizeof(*syscon), GFP_KERNEL);
if (!syscon)
return -ENOMEM;
- syscon->base = of_iomap(np, 0);
- if (!syscon->base)
- return -EADDRNOTAVAIL;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOENT;
- ret = of_address_to_resource(np, 0, &res);
- if (ret)
- return ret;
+ syscon->base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!syscon->base)
+ return -ENOMEM;
- syscon_regmap_config.max_register = res.end - res.start - 3;
+ syscon_regmap_config.max_register = res->end - res->start - 3;
syscon->regmap = devm_regmap_init_mmio(dev, syscon->base,
&syscon_regmap_config);
if (IS_ERR(syscon->regmap)) {
@@ -130,25 +150,17 @@ static int syscon_probe(struct platform_device *pdev)
return PTR_ERR(syscon->regmap);
}
- syscon->dev = dev;
platform_set_drvdata(pdev, syscon);
- dev_info(dev, "syscon regmap start 0x%x end 0x%x registered\n",
- res.start, res.end);
+ dev_info(dev, "regmap %pR registered\n", res);
return 0;
}
-static int syscon_remove(struct platform_device *pdev)
-{
- struct syscon *syscon;
-
- syscon = platform_get_drvdata(pdev);
- iounmap(syscon->base);
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
+static const struct platform_device_id syscon_ids[] = {
+ { "syscon", },
+ { }
+};
static struct platform_driver syscon_driver = {
.driver = {
@@ -157,7 +169,7 @@ static struct platform_driver syscon_driver = {
.of_match_table = of_syscon_match,
},
.probe = syscon_probe,
- .remove = syscon_remove,
+ .id_table = syscon_ids,
};
static int __init syscon_init(void)
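The syscon change above adds a lookup keyed on the platform device name, so boards that instantiate "syscon" devices without device tree can still hand the shared regmap to other drivers. A minimal consumer sketch follows, assuming the matching declaration lands in include/linux/mfd/syscon.h as part of the same series; the device name "syscon.0", the probe name and the register offset are placeholders, not part of this patch.

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static int example_consumer_probe(struct platform_device *pdev)
{
	struct regmap *map;

	/* Matches either the id_table name ("syscon") or dev_name(),
	 * e.g. "syscon.0" for the first instance. */
	map = syscon_regmap_lookup_by_pdevname("syscon.0");
	if (IS_ERR(map))
		return PTR_ERR(map);	/* -EPROBE_DEFER until syscon binds */

	/* Placeholder register access: set bit 0 of an assumed register. */
	return regmap_update_bits(map, 0x10, BIT(0), BIT(0));
}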
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index ecc092c..4cb92bb 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -350,7 +350,8 @@ static int tc3589x_probe(struct i2c_client *i2c,
| I2C_FUNC_SMBUS_I2C_BLOCK))
return -EIO;
- tc3589x = kzalloc(sizeof(struct tc3589x), GFP_KERNEL);
+ tc3589x = devm_kzalloc(&i2c->dev, sizeof(struct tc3589x),
+ GFP_KERNEL);
if (!tc3589x)
return -ENOMEM;
@@ -366,33 +367,27 @@ static int tc3589x_probe(struct i2c_client *i2c,
ret = tc3589x_chip_init(tc3589x);
if (ret)
- goto out_free;
+ return ret;
ret = tc3589x_irq_init(tc3589x, np);
if (ret)
- goto out_free;
+ return ret;
ret = request_threaded_irq(tc3589x->i2c->irq, NULL, tc3589x_irq,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"tc3589x", tc3589x);
if (ret) {
dev_err(tc3589x->dev, "failed to request IRQ: %d\n", ret);
- goto out_free;
+ return ret;
}
ret = tc3589x_device_init(tc3589x);
if (ret) {
dev_err(tc3589x->dev, "failed to add child devices\n");
- goto out_freeirq;
+ return ret;
}
return 0;
-
-out_freeirq:
- free_irq(tc3589x->i2c->irq, tc3589x);
-out_free:
- kfree(tc3589x);
- return ret;
}
static int tc3589x_remove(struct i2c_client *client)
@@ -401,10 +396,6 @@ static int tc3589x_remove(struct i2c_client *client)
mfd_remove_devices(tc3589x->dev);
- free_irq(tc3589x->i2c->irq, tc3589x);
-
- kfree(tc3589x);
-
return 0;
}

diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index 98edb5be..fbd6ee6 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -56,12 +56,23 @@
#define TPS65090_INT2_MASK_OVERLOAD_FET6 6
#define TPS65090_INT2_MASK_OVERLOAD_FET7 7
+static struct resource charger_resources[] = {
+ {
+ .start = TPS65090_IRQ_VAC_STATUS_CHANGE,
+ .end = TPS65090_IRQ_VAC_STATUS_CHANGE,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
static struct mfd_cell tps65090s[] = {
{
.name = "tps65090-pmic",
},
{
.name = "tps65090-charger",
+ .num_resources = ARRAY_SIZE(charger_resources),
+ .resources = &charger_resources[0],
+ .of_compatible = "ti,tps65090-charger",
},
};
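With the IRQ resource attached to the charger cell above, the tps65090-charger sub-driver can pick the interrupt up through the normal platform helpers instead of reaching into the parent. A hedged sketch; the probe and handler names are placeholders, not the actual charger driver.

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_vac_irq(int irq, void *data)
{
	/* React to a VAC status change here. */
	return IRQ_HANDLED;
}

static int example_charger_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);	/* resource from the cell */

	if (irq < 0)
		return irq;

	return devm_request_threaded_irq(&pdev->dev, irq, NULL,
					 example_vac_irq, IRQF_ONESHOT,
					 "tps65090-charger", pdev);
}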
diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
index 942b666..42bd3ea 100644
--- a/drivers/mfd/twl4030-madc.c
+++ b/drivers/mfd/twl4030-madc.c
@@ -211,12 +211,14 @@ static int twl4030battery_current(int raw_volt)
* @reg_base - Base address of the first channel
* @Channels - 16 bit bitmap. If the bit is set, channel value is read
* @buf - The channel values are stored here. if read fails error
 * value is stored
+ * @raw - Return raw values without conversion
* Returns the number of successfully read channels.
*/
static int twl4030_madc_read_channels(struct twl4030_madc_data *madc,
u8 reg_base, unsigned
- long channels, int *buf)
+ long channels, int *buf,
+ bool raw)
{
int count = 0, count_req = 0, i;
u8 reg;
@@ -230,6 +232,10 @@ static int twl4030_madc_read_channels(struct twl4030_madc_data *madc,
count_req++;
continue;
}
+ if (raw) {
+ count++;
+ continue;
+ }
switch (i) {
case 10:
buf[i] = twl4030battery_current(buf[i]);
@@ -371,7 +377,7 @@ static irqreturn_t twl4030_madc_threaded_irq_handler(int irq, void *_madc)
method = &twl4030_conversion_methods[r->method];
/* Read results */
len = twl4030_madc_read_channels(madc, method->rbase,
- r->channels, r->rbuf);
+ r->channels, r->rbuf, r->raw);
/* Return results to caller */
if (r->func_cb != NULL) {
r->func_cb(len, r->channels, r->rbuf);
@@ -397,7 +403,7 @@ err_i2c:
method = &twl4030_conversion_methods[r->method];
/* Read results */
len = twl4030_madc_read_channels(madc, method->rbase,
- r->channels, r->rbuf);
+ r->channels, r->rbuf, r->raw);
/* Return results to caller */
if (r->func_cb != NULL) {
r->func_cb(len, r->channels, r->rbuf);
@@ -585,7 +591,7 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
goto out;
}
ret = twl4030_madc_read_channels(twl4030_madc, method->rbase,
- req->channels, req->rbuf);
+ req->channels, req->rbuf, req->raw);
twl4030_madc->requests[req->method].active = 0;
out:
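The new raw flag skips the per-channel conversions (battery current, temperature, voltage scaling) and hands back the bare ADC codes. A caller sketch, assuming the matching `raw` member added to struct twl4030_madc_request in the header for this series; the channel mask and conversion method are examples only.

#include <linux/i2c/twl4030-madc.h>

static int example_read_raw_adc(void)
{
	struct twl4030_madc_request req = {
		.channels	= (1 << 0),		/* ADCIN0 only */
		.method		= TWL4030_MADC_SW2,
		.type		= TWL4030_MADC_WAIT,
		.raw		= true,			/* skip unit conversion */
	};
	int ret;

	ret = twl4030_madc_conversion(&req);
	if (ret < 0)
		return ret;

	/* req.rbuf[0] now holds the unconverted ADC code. */
	return req.rbuf[0];
}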
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index f361bf3..492ee2c 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -554,7 +554,7 @@ static int twl6040_probe(struct i2c_client *client,
twl6040->supplies[0].supply = "vio";
twl6040->supplies[1].supply = "v2v1";
- ret = regulator_bulk_get(&client->dev, TWL6040_NUM_SUPPLIES,
+ ret = devm_regulator_bulk_get(&client->dev, TWL6040_NUM_SUPPLIES,
twl6040->supplies);
if (ret != 0) {
dev_err(&client->dev, "Failed to get supplies: %d\n", ret);
@@ -564,7 +564,7 @@ static int twl6040_probe(struct i2c_client *client,
ret = regulator_bulk_enable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
if (ret != 0) {
dev_err(&client->dev, "Failed to enable supplies: %d\n", ret);
- goto power_err;
+ goto regulator_get_err;
}
twl6040->dev = &client->dev;
@@ -586,8 +586,8 @@ static int twl6040_probe(struct i2c_client *client,
twl6040->audpwron = -EINVAL;
if (gpio_is_valid(twl6040->audpwron)) {
- ret = gpio_request_one(twl6040->audpwron, GPIOF_OUT_INIT_LOW,
- "audpwron");
+ ret = devm_gpio_request_one(&client->dev, twl6040->audpwron,
+ GPIOF_OUT_INIT_LOW, "audpwron");
if (ret)
goto gpio_err;
}
@@ -596,14 +596,14 @@ static int twl6040_probe(struct i2c_client *client,
IRQF_ONESHOT, 0, &twl6040_irq_chip,
&twl6040->irq_data);
if (ret < 0)
- goto irq_init_err;
+ goto gpio_err;
twl6040->irq_ready = regmap_irq_get_virq(twl6040->irq_data,
TWL6040_IRQ_READY);
twl6040->irq_th = regmap_irq_get_virq(twl6040->irq_data,
TWL6040_IRQ_TH);
- ret = request_threaded_irq(twl6040->irq_ready, NULL,
+ ret = devm_request_threaded_irq(twl6040->dev, twl6040->irq_ready, NULL,
twl6040_readyint_handler, IRQF_ONESHOT,
"twl6040_irq_ready", twl6040);
if (ret) {
@@ -611,7 +611,7 @@ static int twl6040_probe(struct i2c_client *client,
goto readyirq_err;
}
- ret = request_threaded_irq(twl6040->irq_th, NULL,
+ ret = devm_request_threaded_irq(twl6040->dev, twl6040->irq_th, NULL,
twl6040_thint_handler, IRQF_ONESHOT,
"twl6040_irq_th", twl6040);
if (ret) {
@@ -681,18 +681,13 @@ static int twl6040_probe(struct i2c_client *client,
return 0;
mfd_err:
- free_irq(twl6040->irq_th, twl6040);
+ devm_free_irq(&client->dev, twl6040->irq_th, twl6040);
thirq_err:
- free_irq(twl6040->irq_ready, twl6040);
+ devm_free_irq(&client->dev, twl6040->irq_ready, twl6040);
readyirq_err:
regmap_del_irq_chip(twl6040->irq, twl6040->irq_data);
-irq_init_err:
- if (gpio_is_valid(twl6040->audpwron))
- gpio_free(twl6040->audpwron);
gpio_err:
regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
-power_err:
- regulator_bulk_free(TWL6040_NUM_SUPPLIES, twl6040->supplies);
regulator_get_err:
i2c_set_clientdata(client, NULL);
err:
@@ -706,18 +701,14 @@ static int twl6040_remove(struct i2c_client *client)
if (twl6040->power_count)
twl6040_power(twl6040, 0);
- if (gpio_is_valid(twl6040->audpwron))
- gpio_free(twl6040->audpwron);
-
- free_irq(twl6040->irq_ready, twl6040);
- free_irq(twl6040->irq_th, twl6040);
+ devm_free_irq(&client->dev, twl6040->irq_ready, twl6040);
+ devm_free_irq(&client->dev, twl6040->irq_th, twl6040);
regmap_del_irq_chip(twl6040->irq, twl6040->irq_data);
mfd_remove_devices(&client->dev);
i2c_set_clientdata(client, NULL);
regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
- regulator_bulk_free(TWL6040_NUM_SUPPLIES, twl6040->supplies);
return 0;
}
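The error paths shrink above because the regulators, the audpwron GPIO and both threaded IRQs are now device-managed. A condensed sketch of that pattern; the argument list and names are invented for illustration and are not the real twl6040 probe.

#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/regulator/consumer.h>

static int example_probe(struct i2c_client *client, int gpio, int irq,
			 struct regulator_bulk_data *supplies, int num,
			 irq_handler_t thread_fn, void *ctx)
{
	int ret;

	ret = devm_regulator_bulk_get(&client->dev, num, supplies);
	if (ret)
		return ret;

	ret = devm_gpio_request_one(&client->dev, gpio,
				    GPIOF_OUT_INIT_LOW, "audpwron");
	if (ret)
		return ret;

	ret = devm_request_threaded_irq(&client->dev, irq, NULL, thread_fn,
					IRQF_ONESHOT, "example", ctx);
	if (ret)
		return ret;

	/* No matching free_*() calls: the devm core releases everything in
	 * reverse order if probe fails later or when the device unbinds. */
	return 0;
}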
diff --git a/drivers/mfd/ucb1400_core.c b/drivers/mfd/ucb1400_core.c
index daf6952..e9031fa 100644
--- a/drivers/mfd/ucb1400_core.c
+++ b/drivers/mfd/ucb1400_core.c
@@ -75,6 +75,11 @@ static int ucb1400_core_probe(struct device *dev)
/* GPIO */
ucb_gpio.ac97 = ac97;
+ if (pdata) {
+ ucb_gpio.gpio_setup = pdata->gpio_setup;
+ ucb_gpio.gpio_teardown = pdata->gpio_teardown;
+ ucb_gpio.gpio_offset = pdata->gpio_offset;
+ }
ucb->ucb1400_gpio = platform_device_alloc("ucb1400_gpio", -1);
if (!ucb->ucb1400_gpio) {
err = -ENOMEM;
diff --git a/drivers/mfd/vexpress-config.c b/drivers/mfd/vexpress-config.c
index 3c1723aa..84ce6b9 100644
--- a/drivers/mfd/vexpress-config.c
+++ b/drivers/mfd/vexpress-config.c
@@ -184,13 +184,14 @@ static int vexpress_config_schedule(struct vexpress_config_trans *trans)
spin_lock_irqsave(&bridge->transactions_lock, flags);
- vexpress_config_dump_trans("Executing", trans);
-
- if (list_empty(&bridge->transactions))
+ if (list_empty(&bridge->transactions)) {
+ vexpress_config_dump_trans("Executing", trans);
status = bridge->info->func_exec(trans->func->func,
trans->offset, trans->write, trans->data);
- else
+ } else {
+ vexpress_config_dump_trans("Queuing", trans);
status = VEXPRESS_CONFIG_STATUS_WAIT;
+ }
switch (status) {
case VEXPRESS_CONFIG_STATUS_DONE:
@@ -212,25 +213,31 @@ void vexpress_config_complete(struct vexpress_config_bridge *bridge,
{
struct vexpress_config_trans *trans;
unsigned long flags;
+ const char *message = "Completed";
spin_lock_irqsave(&bridge->transactions_lock, flags);
trans = list_first_entry(&bridge->transactions,
struct vexpress_config_trans, list);
- vexpress_config_dump_trans("Completed", trans);
-
trans->status = status;
- list_del(&trans->list);
- if (!list_empty(&bridge->transactions)) {
- vexpress_config_dump_trans("Pending", trans);
+ do {
+ vexpress_config_dump_trans(message, trans);
+ list_del(&trans->list);
+ complete(&trans->completion);
- bridge->info->func_exec(trans->func->func, trans->offset,
- trans->write, trans->data);
- }
- spin_unlock_irqrestore(&bridge->transactions_lock, flags);
+ if (list_empty(&bridge->transactions))
+ break;
+
+ trans = list_first_entry(&bridge->transactions,
+ struct vexpress_config_trans, list);
+ vexpress_config_dump_trans("Executing pending", trans);
+ trans->status = bridge->info->func_exec(trans->func->func,
+ trans->offset, trans->write, trans->data);
+ message = "Finished pending";
+ } while (trans->status == VEXPRESS_CONFIG_STATUS_DONE);
- complete(&trans->completion);
+ spin_unlock_irqrestore(&bridge->transactions_lock, flags);
}
EXPORT_SYMBOL(vexpress_config_complete);
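The completion path now finishes the head transaction and then keeps executing queued ones for as long as the bridge reports an immediate DONE, all while holding the transactions lock. A stripped-down sketch of that drain loop with placeholder types; the real code also logs each step and uses the bridge's status codes rather than a bare zero.

#include <linux/completion.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct example_trans {
	struct list_head list;
	struct completion done;
	int status;
};

static void example_complete(struct list_head *queue, spinlock_t *lock,
			     int status, int (*exec)(struct example_trans *))
{
	struct example_trans *t;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	t = list_first_entry(queue, struct example_trans, list);
	t->status = status;

	do {
		list_del(&t->list);
		complete(&t->done);

		if (list_empty(queue))
			break;

		/* Kick the next queued transaction; if it finishes
		 * immediately, loop and complete it as well. */
		t = list_first_entry(queue, struct example_trans, list);
		t->status = exec(t);
	} while (t->status == 0 /* i.e. an immediate DONE */);

	spin_unlock_irqrestore(lock, flags);
}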
diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c
index bf75e96..96a020b 100644
--- a/drivers/mfd/vexpress-sysreg.c
+++ b/drivers/mfd/vexpress-sysreg.c
@@ -490,12 +490,12 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
return err;
}
+ vexpress_sysreg_dev = &pdev->dev;
+
platform_device_register_data(vexpress_sysreg_dev, "leds-gpio",
PLATFORM_DEVID_AUTO, &vexpress_sysreg_leds_pdata,
sizeof(vexpress_sysreg_leds_pdata));
- vexpress_sysreg_dev = &pdev->dev;
-
device_create_file(vexpress_sysreg_dev, &dev_attr_sys_id);
return 0;
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index ca2aed6..155c4a1 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -10,6 +10,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/device.h>
#include <linux/module.h>
#include <linux/mfd/arizona/core.h>
@@ -57,31 +58,54 @@ static const struct reg_default wm5102_reva_patch[] = {
};
static const struct reg_default wm5102_revb_patch[] = {
+ { 0x19, 0x0001 },
{ 0x80, 0x0003 },
{ 0x081, 0xE022 },
- { 0x410, 0x4080 },
- { 0x418, 0x4080 },
- { 0x420, 0x4080 },
- { 0x428, 0xC000 },
+ { 0x410, 0x6080 },
+ { 0x418, 0xa080 },
+ { 0x420, 0xa080 },
+ { 0x428, 0xe000 },
+ { 0x443, 0xDC1A },
{ 0x4B0, 0x0066 },
{ 0x458, 0x000b },
{ 0x212, 0x0000 },
+ { 0x171, 0x0000 },
+ { 0x35E, 0x000C },
+ { 0x2D4, 0x0000 },
{ 0x80, 0x0000 },
};
/* We use a function so we can use ARRAY_SIZE() */
int wm5102_patch(struct arizona *arizona)
{
+ const struct reg_default *wm5102_patch;
+ int ret = 0;
+ int i, patch_size;
+
switch (arizona->rev) {
case 0:
- return regmap_register_patch(arizona->regmap,
- wm5102_reva_patch,
- ARRAY_SIZE(wm5102_reva_patch));
+		wm5102_patch = wm5102_reva_patch;
+		patch_size = ARRAY_SIZE(wm5102_reva_patch);
+		break;
default:
- return regmap_register_patch(arizona->regmap,
- wm5102_revb_patch,
- ARRAY_SIZE(wm5102_revb_patch));
+ wm5102_patch = wm5102_revb_patch;
+ patch_size = ARRAY_SIZE(wm5102_revb_patch);
+ }
+
+ regcache_cache_bypass(arizona->regmap, true);
+
+ for (i = 0; i < patch_size; i++) {
+ ret = regmap_write(arizona->regmap, wm5102_patch[i].reg,
+ wm5102_patch[i].def);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to write %x = %x: %d\n",
+ wm5102_patch[i].reg, wm5102_patch[i].def, ret);
+ goto out;
+ }
}
+
+out:
+ regcache_cache_bypass(arizona->regmap, false);
+ return ret;
}
static const struct regmap_irq wm5102_aod_irqs[ARIZONA_NUM_IRQ] = {
@@ -282,7 +306,7 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000155, 0x0000 }, /* R341 - Rate Estimator 4 */
{ 0x00000156, 0x0000 }, /* R342 - Rate Estimator 5 */
{ 0x00000161, 0x0000 }, /* R353 - Dynamic Frequency Scaling 1 */
- { 0x00000171, 0x0002 }, /* R369 - FLL1 Control 1 */
+ { 0x00000171, 0x0000 }, /* R369 - FLL1 Control 1 */
{ 0x00000172, 0x0008 }, /* R370 - FLL1 Control 2 */
{ 0x00000173, 0x0018 }, /* R371 - FLL1 Control 3 */
{ 0x00000174, 0x007D }, /* R372 - FLL1 Control 4 */
@@ -290,12 +314,14 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000176, 0x0000 }, /* R374 - FLL1 Control 6 */
{ 0x00000177, 0x0181 }, /* R375 - FLL1 Loop Filter Test 1 */
{ 0x00000178, 0x0000 }, /* R376 - FLL1 NCO Test 0 */
+ { 0x00000179, 0x0000 }, /* R377 - FLL1 Control 7 */
{ 0x00000181, 0x0000 }, /* R385 - FLL1 Synchroniser 1 */
{ 0x00000182, 0x0000 }, /* R386 - FLL1 Synchroniser 2 */
{ 0x00000183, 0x0000 }, /* R387 - FLL1 Synchroniser 3 */
{ 0x00000184, 0x0000 }, /* R388 - FLL1 Synchroniser 4 */
{ 0x00000185, 0x0000 }, /* R389 - FLL1 Synchroniser 5 */
{ 0x00000186, 0x0000 }, /* R390 - FLL1 Synchroniser 6 */
+ { 0x00000187, 0x0001 }, /* R391 - FLL1 Synchroniser 7 */
{ 0x00000189, 0x0000 }, /* R393 - FLL1 Spread Spectrum */
{ 0x0000018A, 0x0004 }, /* R394 - FLL1 GPIO Clock */
{ 0x00000191, 0x0000 }, /* R401 - FLL2 Control 1 */
@@ -306,12 +332,14 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000196, 0x0000 }, /* R406 - FLL2 Control 6 */
{ 0x00000197, 0x0000 }, /* R407 - FLL2 Loop Filter Test 1 */
{ 0x00000198, 0x0000 }, /* R408 - FLL2 NCO Test 0 */
+ { 0x00000199, 0x0000 }, /* R409 - FLL2 Control 7 */
{ 0x000001A1, 0x0000 }, /* R417 - FLL2 Synchroniser 1 */
{ 0x000001A2, 0x0000 }, /* R418 - FLL2 Synchroniser 2 */
{ 0x000001A3, 0x0000 }, /* R419 - FLL2 Synchroniser 3 */
{ 0x000001A4, 0x0000 }, /* R420 - FLL2 Synchroniser 4 */
{ 0x000001A5, 0x0000 }, /* R421 - FLL2 Synchroniser 5 */
{ 0x000001A6, 0x0000 }, /* R422 - FLL2 Synchroniser 6 */
+ { 0x000001A7, 0x0001 }, /* R423 - FLL2 Synchroniser 7 */
{ 0x000001A9, 0x0000 }, /* R425 - FLL2 Spread Spectrum */
{ 0x000001AA, 0x0004 }, /* R426 - FLL2 GPIO Clock */
{ 0x00000200, 0x0006 }, /* R512 - Mic Charge Pump 1 */
@@ -362,7 +390,7 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000400, 0x0000 }, /* R1024 - Output Enables 1 */
{ 0x00000408, 0x0000 }, /* R1032 - Output Rate 1 */
{ 0x00000409, 0x0022 }, /* R1033 - Output Volume Ramp */
- { 0x00000410, 0x4080 }, /* R1040 - Output Path Config 1L */
+ { 0x00000410, 0x6080 }, /* R1040 - Output Path Config 1L */
{ 0x00000411, 0x0180 }, /* R1041 - DAC Digital Volume 1L */
{ 0x00000412, 0x0081 }, /* R1042 - DAC Volume Limit 1L */
{ 0x00000413, 0x0001 }, /* R1043 - Noise Gate Select 1L */
@@ -370,7 +398,7 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000415, 0x0180 }, /* R1045 - DAC Digital Volume 1R */
{ 0x00000416, 0x0081 }, /* R1046 - DAC Volume Limit 1R */
{ 0x00000417, 0x0002 }, /* R1047 - Noise Gate Select 1R */
- { 0x00000418, 0x4080 }, /* R1048 - Output Path Config 2L */
+ { 0x00000418, 0xA080 }, /* R1048 - Output Path Config 2L */
{ 0x00000419, 0x0180 }, /* R1049 - DAC Digital Volume 2L */
{ 0x0000041A, 0x0081 }, /* R1050 - DAC Volume Limit 2L */
{ 0x0000041B, 0x0004 }, /* R1051 - Noise Gate Select 2L */
@@ -378,11 +406,11 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x0000041D, 0x0180 }, /* R1053 - DAC Digital Volume 2R */
{ 0x0000041E, 0x0081 }, /* R1054 - DAC Volume Limit 2R */
{ 0x0000041F, 0x0008 }, /* R1055 - Noise Gate Select 2R */
- { 0x00000420, 0x4080 }, /* R1056 - Output Path Config 3L */
+ { 0x00000420, 0xA080 }, /* R1056 - Output Path Config 3L */
{ 0x00000421, 0x0180 }, /* R1057 - DAC Digital Volume 3L */
{ 0x00000422, 0x0081 }, /* R1058 - DAC Volume Limit 3L */
{ 0x00000423, 0x0010 }, /* R1059 - Noise Gate Select 3L */
- { 0x00000428, 0xC000 }, /* R1064 - Output Path Config 4L */
+ { 0x00000428, 0xE000 }, /* R1064 - Output Path Config 4L */
{ 0x00000429, 0x0180 }, /* R1065 - DAC Digital Volume 4L */
{ 0x0000042A, 0x0081 }, /* R1066 - Out Volume 4L */
{ 0x0000042B, 0x0040 }, /* R1067 - Noise Gate Select 4L */
@@ -397,7 +425,7 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000436, 0x0081 }, /* R1078 - DAC Volume Limit 5R */
{ 0x00000437, 0x0200 }, /* R1079 - Noise Gate Select 5R */
{ 0x00000450, 0x0000 }, /* R1104 - DAC AEC Control 1 */
- { 0x00000458, 0x0001 }, /* R1112 - Noise Gate Control */
+ { 0x00000458, 0x000B }, /* R1112 - Noise Gate Control */
{ 0x00000490, 0x0069 }, /* R1168 - PDM SPK1 CTRL 1 */
{ 0x00000491, 0x0000 }, /* R1169 - PDM SPK1 CTRL 2 */
{ 0x00000500, 0x000C }, /* R1280 - AIF1 BCLK Ctrl */
@@ -1055,12 +1083,14 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL1_CONTROL_6:
case ARIZONA_FLL1_LOOP_FILTER_TEST_1:
case ARIZONA_FLL1_NCO_TEST_0:
+ case ARIZONA_FLL1_CONTROL_7:
case ARIZONA_FLL1_SYNCHRONISER_1:
case ARIZONA_FLL1_SYNCHRONISER_2:
case ARIZONA_FLL1_SYNCHRONISER_3:
case ARIZONA_FLL1_SYNCHRONISER_4:
case ARIZONA_FLL1_SYNCHRONISER_5:
case ARIZONA_FLL1_SYNCHRONISER_6:
+ case ARIZONA_FLL1_SYNCHRONISER_7:
case ARIZONA_FLL1_SPREAD_SPECTRUM:
case ARIZONA_FLL1_GPIO_CLOCK:
case ARIZONA_FLL2_CONTROL_1:
@@ -1071,12 +1101,14 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL2_CONTROL_6:
case ARIZONA_FLL2_LOOP_FILTER_TEST_1:
case ARIZONA_FLL2_NCO_TEST_0:
+ case ARIZONA_FLL2_CONTROL_7:
case ARIZONA_FLL2_SYNCHRONISER_1:
case ARIZONA_FLL2_SYNCHRONISER_2:
case ARIZONA_FLL2_SYNCHRONISER_3:
case ARIZONA_FLL2_SYNCHRONISER_4:
case ARIZONA_FLL2_SYNCHRONISER_5:
case ARIZONA_FLL2_SYNCHRONISER_6:
+ case ARIZONA_FLL2_SYNCHRONISER_7:
case ARIZONA_FLL2_SPREAD_SPECTRUM:
case ARIZONA_FLL2_GPIO_CLOCK:
case ARIZONA_MIC_CHARGE_PUMP_1:
@@ -1169,6 +1201,8 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_NOISE_GATE_CONTROL:
case ARIZONA_PDM_SPK1_CTRL_1:
case ARIZONA_PDM_SPK1_CTRL_2:
+ case ARIZONA_SPK_CTRL_2:
+ case ARIZONA_SPK_CTRL_3:
case ARIZONA_DAC_COMP_1:
case ARIZONA_DAC_COMP_2:
case ARIZONA_DAC_COMP_3:
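Instead of regmap_register_patch(), the revision-specific table is now written with the register cache bypassed, so the values reach the hardware directly and are not replayed on a later cache sync. The generic shape of that pattern, as a sketch with placeholder names:

#include <linux/regmap.h>

static int example_apply_patch(struct regmap *map,
			       const struct reg_default *patch, int count)
{
	int i, ret = 0;

	/* Write straight to the hardware, leaving the register cache alone. */
	regcache_cache_bypass(map, true);

	for (i = 0; i < count; i++) {
		ret = regmap_write(map, patch[i].reg, patch[i].def);
		if (ret != 0)
			break;
	}

	regcache_cache_bypass(map, false);
	return ret;
}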
diff --git a/drivers/mfd/wm831x-spi.c b/drivers/mfd/wm831x-spi.c
index 4e70e15..e7ed14f66 100644
--- a/drivers/mfd/wm831x-spi.c
+++ b/drivers/mfd/wm831x-spi.c
@@ -37,7 +37,7 @@ static int wm831x_spi_probe(struct spi_device *spi)
spi->bits_per_word = 16;
spi->mode = SPI_MODE_0;
- dev_set_drvdata(&spi->dev, wm831x);
+ spi_set_drvdata(spi, wm831x);
wm831x->dev = &spi->dev;
wm831x->regmap = devm_regmap_init_spi(spi, &wm831x_regmap_config);
@@ -53,7 +53,7 @@ static int wm831x_spi_probe(struct spi_device *spi)
static int wm831x_spi_remove(struct spi_device *spi)
{
- struct wm831x *wm831x = dev_get_drvdata(&spi->dev);
+ struct wm831x *wm831x = spi_get_drvdata(spi);
wm831x_device_exit(wm831x);
@@ -69,7 +69,7 @@ static int wm831x_spi_suspend(struct device *dev)
static void wm831x_spi_shutdown(struct spi_device *spi)
{
- struct wm831x *wm831x = dev_get_drvdata(&spi->dev);
+ struct wm831x *wm831x = spi_get_drvdata(spi);
wm831x_device_shutdown(wm831x);
}
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 803e93f..00e4fe2 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -19,6 +19,9 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/mfd/core.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
@@ -191,7 +194,7 @@ static const char *wm8958_main_supplies[] = {
"SPKVDD2",
};
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_RUNTIME
static int wm8994_suspend(struct device *dev)
{
struct wm8994 *wm8994 = dev_get_drvdata(dev);
@@ -396,6 +399,60 @@ static const struct reg_default wm1811_reva_patch[] = {
{ 0x102, 0x0 },
};
+#ifdef CONFIG_OF
+static int wm8994_set_pdata_from_of(struct wm8994 *wm8994)
+{
+ struct device_node *np = wm8994->dev->of_node;
+ struct wm8994_pdata *pdata = &wm8994->pdata;
+ int i;
+
+ if (!np)
+ return 0;
+
+ if (of_property_read_u32_array(np, "wlf,gpio-cfg", pdata->gpio_defaults,
+ ARRAY_SIZE(pdata->gpio_defaults)) >= 0) {
+ for (i = 0; i < ARRAY_SIZE(pdata->gpio_defaults); i++) {
+ if (wm8994->pdata.gpio_defaults[i] == 0)
+ pdata->gpio_defaults[i]
+ = WM8994_CONFIGURE_GPIO;
+ }
+ }
+
+ of_property_read_u32_array(np, "wlf,micbias-cfg", pdata->micbias,
+ ARRAY_SIZE(pdata->micbias));
+
+ pdata->lineout1_diff = true;
+ pdata->lineout2_diff = true;
+ if (of_find_property(np, "wlf,lineout1-se", NULL))
+ pdata->lineout1_diff = false;
+ if (of_find_property(np, "wlf,lineout2-se", NULL))
+ pdata->lineout2_diff = false;
+
+ if (of_find_property(np, "wlf,lineout1-feedback", NULL))
+ pdata->lineout1fb = true;
+ if (of_find_property(np, "wlf,lineout2-feedback", NULL))
+ pdata->lineout2fb = true;
+
+	if (of_find_property(np, "wlf,ldoena-always-driven", NULL))
+		pdata->ldo_ena_always_driven = true;
+
+ pdata->ldo[0].enable = of_get_named_gpio(np, "wlf,ldo1ena", 0);
+ if (pdata->ldo[0].enable < 0)
+ pdata->ldo[0].enable = 0;
+
+ pdata->ldo[1].enable = of_get_named_gpio(np, "wlf,ldo2ena", 0);
+ if (pdata->ldo[1].enable < 0)
+ pdata->ldo[1].enable = 0;
+
+ return 0;
+}
+#else
+static int wm8994_set_pdata_from_of(struct wm8994 *wm8994)
+{
+ return 0;
+}
+#endif
+
/*
* Instantiate the generic non-control parts of the device.
*/
@@ -405,7 +462,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
struct regmap_config *regmap_config;
const struct reg_default *regmap_patch = NULL;
const char *devname;
- int ret, i, patch_regs;
+ int ret, i, patch_regs = 0;
int pulls = 0;
if (dev_get_platdata(wm8994->dev)) {
@@ -414,6 +471,10 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
}
pdata = &wm8994->pdata;
+ ret = wm8994_set_pdata_from_of(wm8994);
+ if (ret != 0)
+ return ret;
+
dev_set_drvdata(wm8994->dev, wm8994);
/* Add the on-chip regulators first for bootstrapping */
@@ -673,9 +734,9 @@ static void wm8994_device_exit(struct wm8994 *wm8994)
}
static const struct of_device_id wm8994_of_match[] = {
- { .compatible = "wlf,wm1811", },
- { .compatible = "wlf,wm8994", },
- { .compatible = "wlf,wm8958", },
+ { .compatible = "wlf,wm1811", .data = (void *)WM1811 },
+ { .compatible = "wlf,wm8994", .data = (void *)WM8994 },
+ { .compatible = "wlf,wm8958", .data = (void *)WM8958 },
{ }
};
MODULE_DEVICE_TABLE(of, wm8994_of_match);
@@ -683,6 +744,7 @@ MODULE_DEVICE_TABLE(of, wm8994_of_match);
static int wm8994_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
+ const struct of_device_id *of_id;
struct wm8994 *wm8994;
int ret;
@@ -693,7 +755,14 @@ static int wm8994_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, wm8994);
wm8994->dev = &i2c->dev;
wm8994->irq = i2c->irq;
- wm8994->type = id->driver_data;
+
+ if (i2c->dev.of_node) {
+ of_id = of_match_device(wm8994_of_match, &i2c->dev);
+ if (of_id)
+ wm8994->type = (int)of_id->data;
+ } else {
+ wm8994->type = id->driver_data;
+ }
wm8994->regmap = devm_regmap_init_i2c(i2c, &wm8994_base_regmap_config);
if (IS_ERR(wm8994->regmap)) {
@@ -724,15 +793,16 @@ static const struct i2c_device_id wm8994_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wm8994_i2c_id);
-static UNIVERSAL_DEV_PM_OPS(wm8994_pm_ops, wm8994_suspend, wm8994_resume,
- NULL);
+static const struct dev_pm_ops wm8994_pm_ops = {
+ SET_RUNTIME_PM_OPS(wm8994_suspend, wm8994_resume, NULL)
+};
static struct i2c_driver wm8994_i2c_driver = {
.driver = {
.name = "wm8994",
.owner = THIS_MODULE,
.pm = &wm8994_pm_ops,
- .of_match_table = wm8994_of_match,
+ .of_match_table = of_match_ptr(wm8994_of_match),
},
.probe = wm8994_i2c_probe,
.remove = wm8994_i2c_remove,
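With .data filled in, the probe can derive the chip variant from the matched OF entry on device tree boots and keep using the i2c_device_id on legacy board files. A condensed sketch of that selection; the enum and helper name are placeholders, not the wm8994 code itself.

#include <linux/i2c.h>
#include <linux/of.h>
#include <linux/of_device.h>

enum example_type { EX_WM1811, EX_WM8994, EX_WM8958 };

static const struct of_device_id example_of_match[] = {
	{ .compatible = "wlf,wm1811", .data = (void *)EX_WM1811 },
	{ .compatible = "wlf,wm8994", .data = (void *)EX_WM8994 },
	{ .compatible = "wlf,wm8958", .data = (void *)EX_WM8958 },
	{ }
};

static int example_get_type(struct i2c_client *i2c,
			    const struct i2c_device_id *id)
{
	const struct of_device_id *of_id;

	if (i2c->dev.of_node) {
		of_id = of_match_device(example_of_match, &i2c->dev);
		if (of_id)
			return (long)of_id->data;	/* DT boot */
	}

	return id ? id->driver_data : 0;	/* legacy board-file boot */
}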
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index 4a87e5c..4cd4a3d 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -593,7 +593,6 @@ static int lis3lv02d_misc_release(struct inode *inode, struct file *file)
struct lis3lv02d *lis3 = container_of(file->private_data,
struct lis3lv02d, miscdev);
- fasync_helper(-1, file, 0, &lis3->async_queue);
clear_bit(0, &lis3->misc_opened); /* release the device */
if (lis3->pm_dev)
pm_runtime_put(lis3->pm_dev);
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index 950dbe9..797d796 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -355,7 +355,7 @@ static void delete_proc_files(void)
for (p = proc_files; p->name; p++)
if (p->entry)
remove_proc_entry(p->name, proc_gru);
- remove_proc_entry("gru", proc_gru->parent);
+ proc_remove(proc_gru);
}
}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 5bab73b..dd27b07 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -304,14 +304,13 @@ static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
return ret;
}
-static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
+static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
struct mmc_blk_data *md = disk->private_data;
mutex_lock(&block_mutex);
mmc_blk_put(md);
mutex_unlock(&block_mutex);
- return 0;
}
static int
@@ -1932,8 +1931,14 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
}
out:
- if (!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST))
- /* release host only when there are no more requests */
+ if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
+ (req && (req->cmd_flags & MMC_REQ_SPECIAL_MASK)))
+ /*
+ * Release host when there are no more requests
+		 * and after a special request (discard, flush) is done.
+		 * In case of a special request, there is no re-entry into
+		 * 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
+ */
mmc_release_host(card->host);
return ret;
}
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index fa4e44e..9447a0e 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -22,9 +22,6 @@
#define MMC_QUEUE_BOUNCESZ 65536
-
-#define MMC_REQ_SPECIAL_MASK (REQ_DISCARD | REQ_FLUSH)
-
/*
* Prepare a MMC request. This just filters out odd stuff.
*/
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 031bf63..5752d50 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -1,6 +1,8 @@
#ifndef MMC_QUEUE_H
#define MMC_QUEUE_H
+#define MMC_REQ_SPECIAL_MASK (REQ_DISCARD | REQ_FLUSH)
+
struct request;
struct task_struct;
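Moving the mask into queue.h lets block.c test the same definition when deciding to release the host after a discard or flush. A minimal sketch of the shared check; the helper name is made up.

#include <linux/blkdev.h>
#include "queue.h"		/* for MMC_REQ_SPECIAL_MASK */

static bool example_is_special_req(struct request *req)
{
	return req && (req->cmd_flags & MMC_REQ_SPECIAL_MASK);
}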
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 9290bb5..c40396f 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2289,6 +2289,19 @@ int _mmc_detect_card_removed(struct mmc_host *host)
return 1;
ret = host->bus_ops->alive(host);
+
+ /*
+ * Card detect status and alive check may be out of sync if card is
+	 * removed slowly, when the card detect switch changes while the
+	 * card/slot pads are still in contact (refer to "SD Card Mechanical
+	 * Addendum, Appendix C: Card Detection Switch"). So reschedule the
+	 * detect work 200ms later for this case.
+ */
+ if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
+ mmc_detect_change(host, msecs_to_jiffies(200));
+ pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
+ }
+
if (ret) {
mmc_card_set_removed(host->card);
pr_debug("%s: card remove detected\n", mmc_hostname(host));
@@ -2403,7 +2416,10 @@ void mmc_start_host(struct mmc_host *host)
{
host->f_init = max(freqs[0], host->f_min);
host->rescan_disable = 0;
- mmc_power_up(host);
+ if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
+ mmc_power_off(host);
+ else
+ mmc_power_up(host);
mmc_detect_change(host, 0);
}
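MMC_CAP2_NO_PRESCAN_POWERUP lets a controller start powered off instead of doing the usual power-up before the first rescan. A host driver opts in by setting the cap before registration, roughly as sketched below; the helper name is a placeholder and 'mmc' would come from mmc_alloc_host() in a real probe.

#include <linux/mmc/host.h>

static void example_init_caps(struct mmc_host *mmc)
{
	/* Keep the slot unpowered until the core actually scans a card. */
	mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
}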
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index c8f3d6e..0cbd1ef 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -96,6 +96,7 @@ static int mmc_decode_cid(struct mmc_card *card)
card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
+ card->cid.prv = UNSTUFF_BITS(resp, 48, 8);
card->cid.serial = UNSTUFF_BITS(resp, 16, 32);
card->cid.month = UNSTUFF_BITS(resp, 12, 4);
card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
@@ -368,13 +369,13 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
card->ext_csd.raw_trim_mult =
ext_csd[EXT_CSD_TRIM_MULT];
+ card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
if (card->ext_csd.rev >= 4) {
/*
* Enhanced area feature support -- check whether the eMMC
* card has the Enhanced area enabled. If so, export enhanced
* area offset and size to user by adding sysfs interface.
*/
- card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
(ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
hc_erase_grp_sz =
@@ -627,6 +628,7 @@ MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
+MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
card->ext_csd.enhanced_area_offset);
@@ -645,6 +647,7 @@ static struct attribute *mmc_std_attrs[] = {
&dev_attr_manfid.attr,
&dev_attr_name.attr,
&dev_attr_oemid.attr,
+ &dev_attr_prv.attr,
&dev_attr_serial.attr,
&dev_attr_enhanced_area_offset.attr,
&dev_attr_enhanced_area_size.attr,
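The new prv attribute exposes the CID product revision next to the existing name, oemid and serial entries. From userspace it reads like any other card attribute; a small example, where the sysfs path depends on the host and RCA numbering and is only illustrative.

#include <stdio.h>

int main(void)
{
	char buf[16];
	FILE *f = fopen("/sys/bus/mmc/devices/mmc0:0001/prv", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("product revision: %s", buf);	/* e.g. "0x8" */
	fclose(f);
	return 0;
}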
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index aa0719a..6889a82 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -861,8 +861,10 @@ static void mmc_sdio_detect(struct mmc_host *host)
/* Make sure card is powered before detecting it */
if (host->caps & MMC_CAP_POWER_OFF_CARD) {
err = pm_runtime_get_sync(&host->card->dev);
- if (err < 0)
+ if (err < 0) {
+ pm_runtime_put_noidle(&host->card->dev);
goto out;
+ }
}
mmc_claim_host(host);
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 5e57048..546c67c 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -16,6 +16,7 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/acpi.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -137,7 +138,7 @@ static int sdio_bus_probe(struct device *dev)
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) {
ret = pm_runtime_get_sync(dev);
if (ret < 0)
- goto out;
+ goto disable_runtimepm;
}
/* Set the default block size so the driver is sure it's something
@@ -157,7 +158,6 @@ static int sdio_bus_probe(struct device *dev)
disable_runtimepm:
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
pm_runtime_put_noidle(dev);
-out:
return ret;
}
@@ -299,6 +299,19 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card)
return func;
}
+#ifdef CONFIG_ACPI
+static void sdio_acpi_set_handle(struct sdio_func *func)
+{
+ struct mmc_host *host = func->card->host;
+ u64 addr = (host->slotno << 16) | func->num;
+
+ ACPI_HANDLE_SET(&func->dev,
+ acpi_get_child(ACPI_HANDLE(host->parent), addr));
+}
+#else
+static inline void sdio_acpi_set_handle(struct sdio_func *func) {}
+#endif
+
/*
* Register a new SDIO function with the driver model.
*/
@@ -308,9 +321,12 @@ int sdio_add_func(struct sdio_func *func)
dev_set_name(&func->dev, "%s:%d", mmc_card_id(func->card), func->num);
+ sdio_acpi_set_handle(func);
ret = device_add(&func->dev);
- if (ret == 0)
+ if (ret == 0) {
sdio_func_set_present(func);
+ acpi_dev_pm_attach(&func->dev, false);
+ }
return ret;
}
@@ -326,6 +342,7 @@ void sdio_remove_func(struct sdio_func *func)
if (!sdio_func_present(func))
return;
+ acpi_dev_pm_detach(&func->dev, false);
device_del(&func->dev);
put_device(&func->dev);
}
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index d88219e..9ab8f8d 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -190,6 +190,17 @@ config MMC_SDHCI_S3C
If unsure, say N.
+config MMC_SDHCI_SIRF
+ tristate "SDHCI support on CSR SiRFprimaII and SiRFmarco SoCs"
+ depends on ARCH_SIRF
+ depends on MMC_SDHCI_PLTFM
+ help
+ This selects the SDHCI support for SiRF System-on-Chip devices.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_SDHCI_PXAV3
tristate "Marvell MMP2 SD Host Controller support (PXAV3)"
depends on CLKDEV_LOOKUP
@@ -300,16 +311,6 @@ config MMC_ATMELMCI
If unsure, say N.
-config MMC_ATMELMCI_DMA
- bool "Atmel MCI DMA support"
- depends on MMC_ATMELMCI && (AVR32 || ARCH_AT91SAM9G45) && DMA_ENGINE
- help
- Say Y here to have the Atmel MCI driver use a DMA engine to
- do data transfers and thus increase the throughput and
- reduce the CPU utilization.
-
- If unsure, say N.
-
config MMC_MSM
tristate "Qualcomm SDCC Controller Support"
depends on MMC && ARCH_MSM
@@ -319,12 +320,12 @@ config MMC_MSM
support for SDIO devices.
config MMC_MXC
- tristate "Freescale i.MX21/27/31 Multimedia Card Interface support"
- depends on ARCH_MXC
+ tristate "Freescale i.MX21/27/31 or MPC512x Multimedia Card support"
+ depends on ARCH_MXC || PPC_MPC512x
help
- This selects the Freescale i.MX21, i.MX27 and i.MX31 Multimedia card
- Interface. If you have a i.MX platform with a Multimedia Card slot,
- say Y or M here.
+ This selects the Freescale i.MX21, i.MX27, i.MX31 or MPC512x
+ Multimedia Card Interface. If you have an i.MX or MPC512x platform
+ with a Multimedia Card slot, say Y or M here.
If unsure, say N.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index c380e3c..cd32280 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o
obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o
obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
+obj-$(CONFIG_MMC_SDHCI_SIRF) += sdhci-sirf.o
obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c
index ef3aef0..7780c14 100644
--- a/drivers/mmc/host/android-goldfish.c
+++ b/drivers/mmc/host/android-goldfish.c
@@ -476,7 +476,7 @@ static int goldfish_mmc_probe(struct platform_device *pdev)
host->mmc = mmc;
pr_err("mmc: Mapping %lX to %lX\n", (long)res->start, (long)res->end);
- host->reg_base = ioremap(res->start, res->end - res->start + 1);
+ host->reg_base = ioremap(res->start, resource_size(res));
if (host->reg_base == NULL) {
ret = -ENOMEM;
goto ioremap_failed;
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 722af1d..e75774f 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -178,6 +178,7 @@ struct atmel_mci {
void __iomem *regs;
struct scatterlist *sg;
+ unsigned int sg_len;
unsigned int pio_offset;
unsigned int *buffer;
unsigned int buf_size;
@@ -892,6 +893,7 @@ static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
data->error = -EINPROGRESS;
host->sg = data->sg;
+ host->sg_len = data->sg_len;
host->data = data;
host->data_chan = NULL;
@@ -1826,7 +1828,8 @@ static void atmci_read_data_pio(struct atmel_mci *host)
if (offset == sg->length) {
flush_dcache_page(sg_page(sg));
host->sg = sg = sg_next(sg);
- if (!sg)
+ host->sg_len--;
+ if (!sg || !host->sg_len)
goto done;
offset = 0;
@@ -1839,7 +1842,8 @@ static void atmci_read_data_pio(struct atmel_mci *host)
flush_dcache_page(sg_page(sg));
host->sg = sg = sg_next(sg);
- if (!sg)
+ host->sg_len--;
+ if (!sg || !host->sg_len)
goto done;
offset = 4 - remaining;
@@ -1890,7 +1894,8 @@ static void atmci_write_data_pio(struct atmel_mci *host)
nbytes += 4;
if (offset == sg->length) {
host->sg = sg = sg_next(sg);
- if (!sg)
+ host->sg_len--;
+ if (!sg || !host->sg_len)
goto done;
offset = 0;
@@ -1904,7 +1909,8 @@ static void atmci_write_data_pio(struct atmel_mci *host)
nbytes += remaining;
host->sg = sg = sg_next(sg);
- if (!sg) {
+ host->sg_len--;
+ if (!sg || !host->sg_len) {
atmci_writel(host, ATMCI_TDR, value);
goto done;
}
@@ -2487,10 +2493,8 @@ static int __exit atmci_remove(struct platform_device *pdev)
atmci_readl(host, ATMCI_SR);
clk_disable(host->mck);
-#ifdef CONFIG_MMC_ATMELMCI_DMA
if (host->dma.chan)
dma_release_channel(host->dma.chan);
-#endif
free_irq(platform_get_irq(pdev, 0), host);
iounmap(host->regs);
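The atmel-mci PIO paths now carry an explicit sg_len alongside host->sg, so the loop stops after the last mapped entry even if the scatterlist table has further chained entries. A generic sketch of a walk bounded by both the chain and a count; the helper name is a placeholder.

#include <linux/scatterlist.h>

static size_t example_total_bytes(struct scatterlist *sg, unsigned int sg_len)
{
	size_t bytes = 0;

	/* Stop on either a NULL next entry or when sg_len runs out. */
	for (; sg && sg_len; sg = sg_next(sg), sg_len--)
		bytes += sg->length;

	return bytes;
}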
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 2063677..3946a0e 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -34,6 +34,8 @@
#include <linux/dma-mapping.h>
#include <linux/edma.h>
#include <linux/mmc/mmc.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_data/mmc-davinci.h>
@@ -522,14 +524,16 @@ static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
dma_cap_set(DMA_SLAVE, mask);
host->dma_tx =
- dma_request_channel(mask, edma_filter_fn, &host->txdma);
+ dma_request_slave_channel_compat(mask, edma_filter_fn,
+ &host->txdma, mmc_dev(host->mmc), "tx");
if (!host->dma_tx) {
dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n");
return -ENODEV;
}
host->dma_rx =
- dma_request_channel(mask, edma_filter_fn, &host->rxdma);
+ dma_request_slave_channel_compat(mask, edma_filter_fn,
+ &host->rxdma, mmc_dev(host->mmc), "rx");
if (!host->dma_rx) {
dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n");
r = -ENODEV;
@@ -1157,16 +1161,86 @@ static void __init init_mmcsd_host(struct mmc_davinci_host *host)
mmc_davinci_reset_ctrl(host, 0);
}
-static int __init davinci_mmcsd_probe(struct platform_device *pdev)
+static struct platform_device_id davinci_mmc_devtype[] = {
+ {
+ .name = "dm6441-mmc",
+ .driver_data = MMC_CTLR_VERSION_1,
+ }, {
+ .name = "da830-mmc",
+ .driver_data = MMC_CTLR_VERSION_2,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, davinci_mmc_devtype);
+
+static const struct of_device_id davinci_mmc_dt_ids[] = {
+ {
+ .compatible = "ti,dm6441-mmc",
+ .data = &davinci_mmc_devtype[MMC_CTLR_VERSION_1],
+ },
+ {
+ .compatible = "ti,da830-mmc",
+ .data = &davinci_mmc_devtype[MMC_CTLR_VERSION_2],
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, davinci_mmc_dt_ids);
+
+static struct davinci_mmc_config
+ *mmc_parse_pdata(struct platform_device *pdev)
{
+ struct device_node *np;
struct davinci_mmc_config *pdata = pdev->dev.platform_data;
+ const struct of_device_id *match =
+ of_match_device(of_match_ptr(davinci_mmc_dt_ids), &pdev->dev);
+ u32 data;
+
+ np = pdev->dev.of_node;
+ if (!np)
+ return pdata;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev, "Failed to allocate memory for struct davinci_mmc_config\n");
+ goto nodata;
+ }
+
+ if (match)
+ pdev->id_entry = match->data;
+
+ if (of_property_read_u32(np, "max-frequency", &pdata->max_freq))
+ dev_info(&pdev->dev, "'max-frequency' property not specified, defaulting to 25MHz\n");
+
+ of_property_read_u32(np, "bus-width", &data);
+ switch (data) {
+ case 1:
+ case 4:
+ case 8:
+ pdata->wires = data;
+ break;
+ default:
+ pdata->wires = 1;
+ dev_info(&pdev->dev, "Unsupported buswidth, defaulting to 1 bit\n");
+ }
+nodata:
+ return pdata;
+}
+
+static int __init davinci_mmcsd_probe(struct platform_device *pdev)
+{
+ struct davinci_mmc_config *pdata = NULL;
struct mmc_davinci_host *host = NULL;
struct mmc_host *mmc = NULL;
struct resource *r, *mem = NULL;
int ret = 0, irq = 0;
size_t mem_size;
+ const struct platform_device_id *id_entry;
- /* REVISIT: when we're fully converted, fail if pdata is NULL */
+ pdata = mmc_parse_pdata(pdev);
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "Couldn't get platform data\n");
+ return -ENOENT;
+ }
ret = -ENODEV;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1190,13 +1264,15 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (!r)
- goto out;
- host->rxdma = r->start;
+ dev_warn(&pdev->dev, "RX DMA resource not specified\n");
+ else
+ host->rxdma = r->start;
r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
if (!r)
- goto out;
- host->txdma = r->start;
+ dev_warn(&pdev->dev, "TX DMA resource not specified\n");
+ else
+ host->txdma = r->start;
host->mem_res = mem;
host->base = ioremap(mem->start, mem_size);
@@ -1237,7 +1313,9 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
if (pdata && (pdata->wires == 8))
mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA);
- host->version = pdata->version;
+ id_entry = platform_get_device_id(pdev);
+ if (id_entry)
+ host->version = id_entry->driver_data;
mmc->ops = &mmc_davinci_ops;
mmc->f_min = 312500;
@@ -1406,22 +1484,13 @@ static struct platform_driver davinci_mmcsd_driver = {
.name = "davinci_mmc",
.owner = THIS_MODULE,
.pm = davinci_mmcsd_pm_ops,
+ .of_match_table = of_match_ptr(davinci_mmc_dt_ids),
},
.remove = __exit_p(davinci_mmcsd_remove),
+ .id_table = davinci_mmc_devtype,
};
-static int __init davinci_mmcsd_init(void)
-{
- return platform_driver_probe(&davinci_mmcsd_driver,
- davinci_mmcsd_probe);
-}
-module_init(davinci_mmcsd_init);
-
-static void __exit davinci_mmcsd_exit(void)
-{
- platform_driver_unregister(&davinci_mmcsd_driver);
-}
-module_exit(davinci_mmcsd_exit);
+module_platform_driver_probe(davinci_mmcsd_driver, davinci_mmcsd_probe);
MODULE_AUTHOR("Texas Instruments India");
MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 72fd0f2..f013e7e 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -152,45 +152,8 @@ static int dw_mci_exynos_parse_dt(struct dw_mci *host)
return 0;
}
-static int dw_mci_exynos_setup_bus(struct dw_mci *host,
- struct device_node *slot_np, u8 bus_width)
-{
- int idx, gpio, ret;
-
- if (!slot_np)
- return -EINVAL;
-
- /* cmd + clock + bus-width pins */
- for (idx = 0; idx < NUM_PINS(bus_width); idx++) {
- gpio = of_get_gpio(slot_np, idx);
- if (!gpio_is_valid(gpio)) {
- dev_err(host->dev, "invalid gpio: %d\n", gpio);
- return -EINVAL;
- }
-
- ret = devm_gpio_request(host->dev, gpio, "dw-mci-bus");
- if (ret) {
- dev_err(host->dev, "gpio [%d] request failed\n", gpio);
- return -EBUSY;
- }
- }
-
- if (host->pdata->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
- return 0;
-
- gpio = of_get_named_gpio(slot_np, "samsung,cd-pinmux-gpio", 0);
- if (gpio_is_valid(gpio)) {
- if (devm_gpio_request(host->dev, gpio, "dw-mci-cd"))
- dev_err(host->dev, "gpio [%d] request failed\n", gpio);
- } else {
- dev_info(host->dev, "cd gpio not available");
- }
-
- return 0;
-}
-
-/* Exynos5250 controller specific capabilities */
-static unsigned long exynos5250_dwmmc_caps[4] = {
+/* Common capabilities of Exynos4/Exynos5 SoC */
+static unsigned long exynos_dwmmc_caps[4] = {
MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR |
MMC_CAP_8_BIT_DATA | MMC_CAP_CMD23,
MMC_CAP_CMD23,
@@ -198,24 +161,25 @@ static unsigned long exynos5250_dwmmc_caps[4] = {
MMC_CAP_CMD23,
};
-static const struct dw_mci_drv_data exynos5250_drv_data = {
- .caps = exynos5250_dwmmc_caps,
+static const struct dw_mci_drv_data exynos_drv_data = {
+ .caps = exynos_dwmmc_caps,
.init = dw_mci_exynos_priv_init,
.setup_clock = dw_mci_exynos_setup_clock,
.prepare_command = dw_mci_exynos_prepare_command,
.set_ios = dw_mci_exynos_set_ios,
.parse_dt = dw_mci_exynos_parse_dt,
- .setup_bus = dw_mci_exynos_setup_bus,
};
static const struct of_device_id dw_mci_exynos_match[] = {
+ { .compatible = "samsung,exynos4412-dw-mshc",
+ .data = &exynos_drv_data, },
{ .compatible = "samsung,exynos5250-dw-mshc",
- .data = &exynos5250_drv_data, },
+ .data = &exynos_drv_data, },
{},
};
MODULE_DEVICE_TABLE(of, dw_mci_exynos_match);
-int dw_mci_exynos_probe(struct platform_device *pdev)
+static int dw_mci_exynos_probe(struct platform_device *pdev)
{
const struct dw_mci_drv_data *drv_data;
const struct of_device_id *match;
@@ -230,7 +194,7 @@ static struct platform_driver dw_mci_exynos_pltfm_driver = {
.remove = __exit_p(dw_mci_pltfm_remove),
.driver = {
.name = "dwmmc_exynos",
- .of_match_table = of_match_ptr(dw_mci_exynos_match),
+ .of_match_table = dw_mci_exynos_match,
.pm = &dw_mci_pltfm_pmops,
},
};
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 9834221..bc3a1bc 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -795,9 +795,9 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
/* DDR mode set */
if (ios->timing == MMC_TIMING_UHS_DDR50)
- regs |= (0x1 << slot->id) << 16;
+ regs |= ((0x1 << slot->id) << 16);
else
- regs &= ~(0x1 << slot->id) << 16;
+ regs &= ~((0x1 << slot->id) << 16);
mci_writel(slot->host, UHS_REG, regs);
@@ -818,6 +818,20 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->power_mode) {
case MMC_POWER_UP:
set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
+ /* Power up slot */
+ if (slot->host->pdata->setpower)
+ slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
+ regs = mci_readl(slot->host, PWREN);
+ regs |= (1 << slot->id);
+ mci_writel(slot->host, PWREN, regs);
+ break;
+ case MMC_POWER_OFF:
+ /* Power down slot */
+ if (slot->host->pdata->setpower)
+ slot->host->pdata->setpower(slot->id, 0);
+ regs = mci_readl(slot->host, PWREN);
+ regs &= ~(1 << slot->id);
+ mci_writel(slot->host, PWREN, regs);
break;
default:
break;
@@ -1191,12 +1205,15 @@ static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
+ struct mmc_data *data = host->data;
+ int init_cnt = cnt;
+
/* try and push anything in the part_buf */
if (unlikely(host->part_buf_count)) {
int len = dw_mci_push_part_bytes(host, buf, cnt);
buf += len;
cnt -= len;
- if (!sg_next(host->sg) || host->part_buf_count == 2) {
+ if (host->part_buf_count == 2) {
mci_writew(host, DATA(host->data_offset),
host->part_buf16);
host->part_buf_count = 0;
@@ -1229,9 +1246,11 @@ static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
/* put anything remaining in the part_buf */
if (cnt) {
dw_mci_set_part_bytes(host, buf, cnt);
- if (!sg_next(host->sg))
+ /* Push data if we have reached the expected data length */
+ if ((data->bytes_xfered + init_cnt) ==
+ (data->blksz * data->blocks))
mci_writew(host, DATA(host->data_offset),
- host->part_buf16);
+ host->part_buf16);
}
}
@@ -1269,12 +1288,15 @@ static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
+ struct mmc_data *data = host->data;
+ int init_cnt = cnt;
+
/* try and push anything in the part_buf */
if (unlikely(host->part_buf_count)) {
int len = dw_mci_push_part_bytes(host, buf, cnt);
buf += len;
cnt -= len;
- if (!sg_next(host->sg) || host->part_buf_count == 4) {
+ if (host->part_buf_count == 4) {
mci_writel(host, DATA(host->data_offset),
host->part_buf32);
host->part_buf_count = 0;
@@ -1307,9 +1329,11 @@ static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
/* put anything remaining in the part_buf */
if (cnt) {
dw_mci_set_part_bytes(host, buf, cnt);
- if (!sg_next(host->sg))
+ /* Push data if we have reached the expected data length */
+ if ((data->bytes_xfered + init_cnt) ==
+ (data->blksz * data->blocks))
mci_writel(host, DATA(host->data_offset),
- host->part_buf32);
+ host->part_buf32);
}
}
@@ -1347,13 +1371,17 @@ static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
+ struct mmc_data *data = host->data;
+ int init_cnt = cnt;
+
/* try and push anything in the part_buf */
if (unlikely(host->part_buf_count)) {
int len = dw_mci_push_part_bytes(host, buf, cnt);
buf += len;
cnt -= len;
- if (!sg_next(host->sg) || host->part_buf_count == 8) {
- mci_writew(host, DATA(host->data_offset),
+
+ if (host->part_buf_count == 8) {
+ mci_writeq(host, DATA(host->data_offset),
host->part_buf);
host->part_buf_count = 0;
}
@@ -1385,9 +1413,11 @@ static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
/* put anything remaining in the part_buf */
if (cnt) {
dw_mci_set_part_bytes(host, buf, cnt);
- if (!sg_next(host->sg))
+ /* Push data if we have reached the expected data length */
+ if ((data->bytes_xfered + init_cnt) ==
+ (data->blksz * data->blocks))
mci_writeq(host, DATA(host->data_offset),
- host->part_buf);
+ host->part_buf);
}
}
@@ -1438,7 +1468,7 @@ static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
host->pull_data(host, buf, cnt);
}
-static void dw_mci_read_data_pio(struct dw_mci *host)
+static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
struct sg_mapping_iter *sg_miter = &host->sg_miter;
void *buf;
@@ -1446,7 +1476,7 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
struct mmc_data *data = host->data;
int shift = host->data_shift;
u32 status;
- unsigned int nbytes = 0, len;
+ unsigned int len;
unsigned int remain, fcnt;
do {
@@ -1465,16 +1495,17 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
if (!len)
break;
dw_mci_pull_data(host, (void *)(buf + offset), len);
+ data->bytes_xfered += len;
offset += len;
- nbytes += len;
remain -= len;
} while (remain);
sg_miter->consumed = offset;
status = mci_readl(host, MINTSTS);
mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
- } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
- data->bytes_xfered += nbytes;
+ /* if the RXDR is ready read again */
+ } while ((status & SDMMC_INT_RXDR) ||
+ (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
if (!remain) {
if (!sg_miter_next(sg_miter))
@@ -1485,7 +1516,6 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
return;
done:
- data->bytes_xfered += nbytes;
sg_miter_stop(sg_miter);
host->sg = NULL;
smp_wmb();
@@ -1500,7 +1530,7 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
struct mmc_data *data = host->data;
int shift = host->data_shift;
u32 status;
- unsigned int nbytes = 0, len;
+ unsigned int len;
unsigned int fifo_depth = host->fifo_depth;
unsigned int remain, fcnt;
@@ -1521,8 +1551,8 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
if (!len)
break;
host->push_data(host, (void *)(buf + offset), len);
+ data->bytes_xfered += len;
offset += len;
- nbytes += len;
remain -= len;
} while (remain);
@@ -1530,7 +1560,6 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
status = mci_readl(host, MINTSTS);
mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
- data->bytes_xfered += nbytes;
if (!remain) {
if (!sg_miter_next(sg_miter))
@@ -1541,7 +1570,6 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
return;
done:
- data->bytes_xfered += nbytes;
sg_miter_stop(sg_miter);
host->sg = NULL;
smp_wmb();
@@ -1563,11 +1591,11 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
struct dw_mci *host = dev_id;
u32 pending;
- unsigned int pass_count = 0;
int i;
- do {
- pending = mci_readl(host, MINTSTS); /* read-only mask reg */
+ pending = mci_readl(host, MINTSTS); /* read-only mask reg */
+
+ if (pending) {
/*
* DTO fix - version 2.10a and below, and only if internal DMA
@@ -1579,9 +1607,6 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
pending |= SDMMC_INT_DATA_OVER;
}
- if (!pending)
- break;
-
if (pending & DW_MCI_CMD_ERROR_FLAGS) {
mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
host->cmd_status = pending;
@@ -1605,7 +1630,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
smp_wmb();
if (host->dir_status == DW_MCI_RECV_STATUS) {
if (host->sg != NULL)
- dw_mci_read_data_pio(host);
+ dw_mci_read_data_pio(host, true);
}
set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
@@ -1614,7 +1639,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
if (pending & SDMMC_INT_RXDR) {
mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
- dw_mci_read_data_pio(host);
+ dw_mci_read_data_pio(host, false);
}
if (pending & SDMMC_INT_TXDR) {
@@ -1642,7 +1667,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
}
- } while (pass_count++ < 5);
+ }
#ifdef CONFIG_MMC_DW_IDMAC
/* Handle DMA interrupts */
@@ -1674,10 +1699,6 @@ static void dw_mci_work_routine_card(struct work_struct *work)
dev_dbg(&slot->mmc->class_dev, "card %s\n",
present ? "inserted" : "removed");
- /* Power up slot (before spin_lock, may sleep) */
- if (present != 0 && host->pdata->setpower)
- host->pdata->setpower(slot->id, mmc->ocr_avail);
-
spin_lock_bh(&host->lock);
/* Card change detected */
@@ -1760,10 +1781,6 @@ static void dw_mci_work_routine_card(struct work_struct *work)
spin_unlock_bh(&host->lock);
- /* Power down slot (after spin_unlock, may sleep) */
- if (present == 0 && host->pdata->setpower)
- host->pdata->setpower(slot->id, 0);
-
present = dw_mci_get_cd(mmc);
}
@@ -1935,14 +1952,6 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
else
bus_width = 1;
- if (drv_data && drv_data->setup_bus) {
- struct device_node *slot_np;
- slot_np = dw_mci_of_find_slot_node(host->dev, slot->id);
- ret = drv_data->setup_bus(host, slot_np, bus_width);
- if (ret)
- goto err_setup_bus;
- }
-
switch (bus_width) {
case 8:
mmc->caps |= MMC_CAP_8_BIT_DATA;
@@ -1980,8 +1989,14 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
if (IS_ERR(host->vmmc)) {
pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
host->vmmc = NULL;
- } else
- regulator_enable(host->vmmc);
+ } else {
+ ret = regulator_enable(host->vmmc);
+ if (ret) {
+ dev_err(host->dev,
+ "failed to enable regulator: %d\n", ret);
+ goto err_setup_bus;
+ }
+ }
if (dw_mci_get_cd(mmc))
set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
@@ -1990,7 +2005,9 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto err_setup_bus;
#if defined(CONFIG_DEBUG_FS)
dw_mci_init_debugfs(slot);
@@ -2289,6 +2306,18 @@ int dw_mci_probe(struct dw_mci *host)
mci_writel(host, CLKENA, 0);
mci_writel(host, CLKSRC, 0);
+ /*
+ * In 2.40a spec, Data offset is changed.
+ * Need to check the version-id and set data-offset for DATA register.
+ */
+ host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
+ dev_info(host->dev, "Version ID is %04x\n", host->verid);
+
+ if (host->verid < DW_MMC_240A)
+ host->data_offset = DATA_OFFSET;
+ else
+ host->data_offset = DATA_240A_OFFSET;
+
tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
host->card_workqueue = alloc_workqueue("dw-mci-card",
WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
@@ -2337,18 +2366,6 @@ int dw_mci_probe(struct dw_mci *host)
goto err_workqueue;
}
- /*
- * In 2.40a spec, Data offset is changed.
- * Need to check the version-id and set data-offset for DATA register.
- */
- host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
- dev_info(host->dev, "Version ID is %04x\n", host->verid);
-
- if (host->verid < DW_MMC_240A)
- host->data_offset = DATA_OFFSET;
- else
- host->data_offset = DATA_240A_OFFSET;
-
if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
@@ -2445,8 +2462,14 @@ int dw_mci_resume(struct dw_mci *host)
{
int i, ret;
- if (host->vmmc)
- regulator_enable(host->vmmc);
+ if (host->vmmc) {
+ ret = regulator_enable(host->vmmc);
+ if (ret) {
+ dev_err(host->dev,
+ "failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+ }
if (!mci_wait_reset(host->dev, host)) {
ret = -ENODEV;
@@ -2485,7 +2508,7 @@ EXPORT_SYMBOL(dw_mci_resume);
static int __init dw_mci_init(void)
{
- printk(KERN_INFO "Synopsys Designware Multimedia Card Interface Driver");
+ pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
return 0;
}
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 53b8fd9..0b74189 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -190,7 +190,6 @@ extern int dw_mci_resume(struct dw_mci *host);
* @prepare_command: handle CMD register extensions.
* @set_ios: handle bus specific extensions.
* @parse_dt: parse implementation specific device tree properties.
- * @setup_bus: initialize io-interface
*
* Provide controller implementation specific extensions. The usage of this
* data structure is fully optional and usage of each member in this structure
@@ -203,7 +202,5 @@ struct dw_mci_drv_data {
void (*prepare_command)(struct dw_mci *host, u32 *cmdr);
void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
int (*parse_dt)(struct dw_mci *host);
- int (*setup_bus)(struct dw_mci *host,
- struct device_node *slot_np, u8 bus_width);
};
#endif /* _DW_MMC_H_ */
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 372e921..375c109 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1141,6 +1141,11 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_POWER_OFF:
if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+
+ if (!IS_ERR(mmc->supply.vqmmc) &&
+ regulator_is_enabled(mmc->supply.vqmmc))
+ regulator_disable(mmc->supply.vqmmc);
+
break;
case MMC_POWER_UP:
if (!IS_ERR(mmc->supply.vmmc))
@@ -1155,6 +1160,10 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
break;
case MMC_POWER_ON:
+ if (!IS_ERR(mmc->supply.vqmmc) &&
+ !regulator_is_enabled(mmc->supply.vqmmc))
+ regulator_enable(mmc->supply.vqmmc);
+
pwr |= MCI_PWR_ON;
break;
}
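vqmmc is the card's I/O-line supply, separate from vmmc, which powers the card itself. Because the regulator may be shared or already left on by the boot loader, the MMC_POWER_ON and MMC_POWER_OFF cases only toggle it when regulator_is_enabled() disagrees with the requested state, avoiding unbalanced enable/disable calls. A minimal sketch of that guard, assuming mmc->supply has been filled in by mmc_regulator_get_supply() and with power_on standing for the ios->power_mode comparison in set_ios():

	/* Sketch: flip the I/O supply only on a real state change. */
	if (!IS_ERR(mmc->supply.vqmmc)) {
		if (power_on && !regulator_is_enabled(mmc->supply.vqmmc))
			regulator_enable(mmc->supply.vqmmc); /* return value ignored, as above */
		else if (!power_on && regulator_is_enabled(mmc->supply.vqmmc))
			regulator_disable(mmc->supply.vqmmc);
	}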
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 7c0af0e..0ee4a57 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -43,7 +43,6 @@
#include <asm/sizes.h>
#include <linux/platform_data/mmc-msm_sdcc.h>
-#include <mach/msm_iomap.h>
#include <mach/dma.h>
#include <mach/clk.h>
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 145cdaf..8960fc8 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -119,10 +119,8 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
host->pio_size = data->blocks * data->blksz;
host->pio_ptr = sg_virt(data->sg);
if (!nodma)
- pr_debug("%s: fallback to PIO for data "
- "at 0x%p size %d\n",
- mmc_hostname(host->mmc),
- host->pio_ptr, host->pio_size);
+ dev_dbg(host->dev, "fallback to PIO for data at 0x%p size %d\n",
+ host->pio_ptr, host->pio_size);
return 1;
} else {
dma_addr_t phys_addr;
@@ -473,8 +471,8 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
if (mrq->data)
err_status = mvsd_finish_data(host, mrq->data, err_status);
if (err_status) {
- pr_err("%s: unhandled error status %#04x\n",
- mmc_hostname(host->mmc), err_status);
+ dev_err(host->dev, "unhandled error status %#04x\n",
+ err_status);
cmd->error = -ENOMSG;
}
@@ -491,9 +489,8 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
if (irq_handled)
return IRQ_HANDLED;
- pr_err("%s: unhandled interrupt status=0x%04x en=0x%04x "
- "pio=%d\n", mmc_hostname(host->mmc), intr_status,
- host->intr_en, host->pio_size);
+ dev_err(host->dev, "unhandled interrupt status=0x%04x en=0x%04x pio=%d\n",
+ intr_status, host->intr_en, host->pio_size);
return IRQ_NONE;
}
@@ -507,13 +504,11 @@ static void mvsd_timeout_timer(unsigned long data)
spin_lock_irqsave(&host->lock, flags);
mrq = host->mrq;
if (mrq) {
- pr_err("%s: Timeout waiting for hardware interrupt.\n",
- mmc_hostname(host->mmc));
- pr_err("%s: hw_state=0x%04x, intr_status=0x%04x "
- "intr_en=0x%04x\n", mmc_hostname(host->mmc),
- mvsd_read(MVSD_HW_STATE),
- mvsd_read(MVSD_NOR_INTR_STATUS),
- mvsd_read(MVSD_NOR_INTR_EN));
+ dev_err(host->dev, "Timeout waiting for hardware interrupt.\n");
+ dev_err(host->dev, "hw_state=0x%04x, intr_status=0x%04x intr_en=0x%04x\n",
+ mvsd_read(MVSD_HW_STATE),
+ mvsd_read(MVSD_NOR_INTR_STATUS),
+ mvsd_read(MVSD_NOR_INTR_EN));
host->mrq = NULL;
@@ -741,8 +736,8 @@ static int __init mvsd_probe(struct platform_device *pdev)
goto out;
}
host->base_clock = mvsd_data->clock / 2;
- gpio_card_detect = mvsd_data->gpio_card_detect;
- gpio_write_protect = mvsd_data->gpio_write_protect;
+ gpio_card_detect = mvsd_data->gpio_card_detect ? : -EINVAL;
+ gpio_write_protect = mvsd_data->gpio_write_protect ? : -EINVAL;
}
mmc->ops = &mvsd_ops;
@@ -778,7 +773,7 @@ static int __init mvsd_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev, irq, mvsd_irq, 0, DRIVER_NAME, host);
if (ret) {
- pr_err("%s: cannot assign irq %d\n", DRIVER_NAME, irq);
+ dev_err(&pdev->dev, "cannot assign irq %d\n", irq);
goto out;
}
@@ -797,13 +792,11 @@ static int __init mvsd_probe(struct platform_device *pdev)
if (ret)
goto out;
- pr_notice("%s: %s driver initialized, ",
- mmc_hostname(mmc), DRIVER_NAME);
if (!(mmc->caps & MMC_CAP_NEEDS_POLL))
- printk("using GPIO %d for card detection\n",
- gpio_card_detect);
+ dev_notice(&pdev->dev, "using GPIO %d for card detection\n",
+ gpio_card_detect);
else
- printk("lacking card detect (fall back to polling)\n");
+ dev_notice(&pdev->dev, "lacking card detect (fall back to polling)\n");
return 0;
out:
@@ -881,18 +874,7 @@ static struct platform_driver mvsd_driver = {
},
};
-static int __init mvsd_init(void)
-{
- return platform_driver_probe(&mvsd_driver, mvsd_probe);
-}
-
-static void __exit mvsd_exit(void)
-{
- platform_driver_unregister(&mvsd_driver);
-}
-
-module_init(mvsd_init);
-module_exit(mvsd_exit);
+module_platform_driver_probe(mvsd_driver, mvsd_probe);
/* maximum card clock frequency (default 50MHz) */
module_param(maxfreq, int, 0);
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index a72936e..d503635 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -34,10 +34,14 @@
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/of_gpio.h>
+#include <linux/mmc/slot-gpio.h>
#include <asm/dma.h>
#include <asm/irq.h>
-#include <asm/sizes.h>
#include <linux/platform_data/mmc-mxcmmc.h>
#include <linux/platform_data/dma-imx.h>
@@ -115,6 +119,7 @@
enum mxcmci_type {
IMX21_MMC,
IMX31_MMC,
+ MPC512X_MMC,
};
struct mxcmci_host {
@@ -160,7 +165,7 @@ struct mxcmci_host {
enum mxcmci_type devtype;
};
-static struct platform_device_id mxcmci_devtype[] = {
+static const struct platform_device_id mxcmci_devtype[] = {
{
.name = "imx21-mmc",
.driver_data = IMX21_MMC,
@@ -168,16 +173,72 @@ static struct platform_device_id mxcmci_devtype[] = {
.name = "imx31-mmc",
.driver_data = IMX31_MMC,
}, {
+ .name = "mpc512x-sdhc",
+ .driver_data = MPC512X_MMC,
+ }, {
/* sentinel */
}
};
MODULE_DEVICE_TABLE(platform, mxcmci_devtype);
+static const struct of_device_id mxcmci_of_match[] = {
+ {
+ .compatible = "fsl,imx21-mmc",
+ .data = &mxcmci_devtype[IMX21_MMC],
+ }, {
+ .compatible = "fsl,imx31-mmc",
+ .data = &mxcmci_devtype[IMX31_MMC],
+ }, {
+ .compatible = "fsl,mpc5121-sdhc",
+ .data = &mxcmci_devtype[MPC512X_MMC],
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, mxcmci_of_match);
+
static inline int is_imx31_mmc(struct mxcmci_host *host)
{
return host->devtype == IMX31_MMC;
}
+static inline int is_mpc512x_mmc(struct mxcmci_host *host)
+{
+ return host->devtype == MPC512X_MMC;
+}
+
+static inline u32 mxcmci_readl(struct mxcmci_host *host, int reg)
+{
+ if (IS_ENABLED(CONFIG_PPC_MPC512x))
+ return ioread32be(host->base + reg);
+ else
+ return readl(host->base + reg);
+}
+
+static inline void mxcmci_writel(struct mxcmci_host *host, u32 val, int reg)
+{
+ if (IS_ENABLED(CONFIG_PPC_MPC512x))
+ iowrite32be(val, host->base + reg);
+ else
+ writel(val, host->base + reg);
+}
+
+static inline u16 mxcmci_readw(struct mxcmci_host *host, int reg)
+{
+ if (IS_ENABLED(CONFIG_PPC_MPC512x))
+ return ioread32be(host->base + reg);
+ else
+ return readw(host->base + reg);
+}
+
+static inline void mxcmci_writew(struct mxcmci_host *host, u16 val, int reg)
+{
+ if (IS_ENABLED(CONFIG_PPC_MPC512x))
+ iowrite32be(val, host->base + reg);
+ else
+ writew(val, host->base + reg);
+}
+
static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
static inline void mxcmci_init_ocr(struct mxcmci_host *host)
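The MPC512x SDHC controller is the same Freescale IP as the i.MX one but sits behind a big-endian bus, so every register access now goes through a small wrapper that selects ioread32be()/iowrite32be() when CONFIG_PPC_MPC512x is set and plain readl()/writel()/readw()/writew() otherwise. Because IS_ENABLED() evaluates to a compile-time constant, the unused branch is optimized away and i.MX builds keep their old code paths. A short consumer sketch (the function name is hypothetical; the register and bit names come from the driver):

	/* Sketch: callers never see the bus endianness. */
	static bool mxcmci_xfer_done(struct mxcmci_host *host)
	{
		u32 stat = mxcmci_readl(host, MMC_REG_STATUS);

		return stat & STATUS_DATA_TRANS_DONE;
	}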
@@ -229,17 +290,40 @@ static void mxcmci_softreset(struct mxcmci_host *host)
dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n");
/* reset sequence */
- writew(STR_STP_CLK_RESET, host->base + MMC_REG_STR_STP_CLK);
- writew(STR_STP_CLK_RESET | STR_STP_CLK_START_CLK,
- host->base + MMC_REG_STR_STP_CLK);
+ mxcmci_writew(host, STR_STP_CLK_RESET, MMC_REG_STR_STP_CLK);
+ mxcmci_writew(host, STR_STP_CLK_RESET | STR_STP_CLK_START_CLK,
+ MMC_REG_STR_STP_CLK);
for (i = 0; i < 8; i++)
- writew(STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
+ mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK);
- writew(0xff, host->base + MMC_REG_RES_TO);
+ mxcmci_writew(host, 0xff, MMC_REG_RES_TO);
}
static int mxcmci_setup_dma(struct mmc_host *mmc);
+#if IS_ENABLED(CONFIG_PPC_MPC512x)
+static inline void buffer_swap32(u32 *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < ((len + 3) / 4); i++) {
+ st_le32(buf, *buf);
+ buf++;
+ }
+}
+
+static void mxcmci_swap_buffers(struct mmc_data *data)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(data->sg, sg, data->sg_len, i)
+ buffer_swap32(sg_virt(sg), sg->length);
+}
+#else
+static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
+#endif
+
static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
{
unsigned int nob = data->blocks;
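On MPC512x the data path also sees byte-swapped words, so write buffers are swapped in place per 32-bit word before being mapped for DMA, and in mxcmci_finish_data() further down the buffers are swapped again after unmapping, which converts read data and restores write buffers. st_le32() is the PowerPC store-little-endian helper; on every other architecture mxcmci_swap_buffers() compiles to an empty inline. A rough portable equivalent of the inner loop, shown only to illustrate the swap:

	/* Sketch: in-place 32-bit byte swap of one sg segment. */
	static void swap_segment_words(u32 *buf, unsigned int len)
	{
		unsigned int i;

		for (i = 0; i < DIV_ROUND_UP(len, 4); i++)
			buf[i] = swab32(buf[i]);
	}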
@@ -255,8 +339,8 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
host->data = data;
data->bytes_xfered = 0;
- writew(nob, host->base + MMC_REG_NOB);
- writew(blksz, host->base + MMC_REG_BLK_LEN);
+ mxcmci_writew(host, nob, MMC_REG_NOB);
+ mxcmci_writew(host, blksz, MMC_REG_BLK_LEN);
host->datasize = datasize;
if (!mxcmci_use_dma(host))
@@ -275,6 +359,8 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
} else {
host->dma_dir = DMA_TO_DEVICE;
slave_dirn = DMA_MEM_TO_DEV;
+
+ mxcmci_swap_buffers(data);
}
nents = dma_map_sg(host->dma->device->dev, data->sg,
@@ -312,13 +398,13 @@ static void mxcmci_dma_callback(void *data)
del_timer(&host->watchdog);
- stat = readl(host->base + MMC_REG_STATUS);
- writel(stat & ~STATUS_DATA_TRANS_DONE, host->base + MMC_REG_STATUS);
+ stat = mxcmci_readl(host, MMC_REG_STATUS);
+ mxcmci_writel(host, stat & ~STATUS_DATA_TRANS_DONE, MMC_REG_STATUS);
dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
if (stat & STATUS_READ_OP_DONE)
- writel(STATUS_READ_OP_DONE, host->base + MMC_REG_STATUS);
+ mxcmci_writel(host, STATUS_READ_OP_DONE, MMC_REG_STATUS);
mxcmci_data_done(host, stat);
}
@@ -366,12 +452,12 @@ static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
spin_lock_irqsave(&host->lock, flags);
if (host->use_sdio)
int_cntr |= INT_SDIO_IRQ_EN;
- writel(int_cntr, host->base + MMC_REG_INT_CNTR);
+ mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
spin_unlock_irqrestore(&host->lock, flags);
- writew(cmd->opcode, host->base + MMC_REG_CMD);
- writel(cmd->arg, host->base + MMC_REG_ARG);
- writew(cmdat, host->base + MMC_REG_CMD_DAT_CONT);
+ mxcmci_writew(host, cmd->opcode, MMC_REG_CMD);
+ mxcmci_writel(host, cmd->arg, MMC_REG_ARG);
+ mxcmci_writew(host, cmdat, MMC_REG_CMD_DAT_CONT);
return 0;
}
@@ -385,7 +471,7 @@ static void mxcmci_finish_request(struct mxcmci_host *host,
spin_lock_irqsave(&host->lock, flags);
if (host->use_sdio)
int_cntr |= INT_SDIO_IRQ_EN;
- writel(int_cntr, host->base + MMC_REG_INT_CNTR);
+ mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
spin_unlock_irqrestore(&host->lock, flags);
host->req = NULL;
@@ -400,9 +486,11 @@ static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
struct mmc_data *data = host->data;
int data_error;
- if (mxcmci_use_dma(host))
+ if (mxcmci_use_dma(host)) {
dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
host->dma_dir);
+ mxcmci_swap_buffers(data);
+ }
if (stat & STATUS_ERR_MASK) {
dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
@@ -460,14 +548,14 @@ static void mxcmci_read_response(struct mxcmci_host *host, unsigned int stat)
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136) {
for (i = 0; i < 4; i++) {
- a = readw(host->base + MMC_REG_RES_FIFO);
- b = readw(host->base + MMC_REG_RES_FIFO);
+ a = mxcmci_readw(host, MMC_REG_RES_FIFO);
+ b = mxcmci_readw(host, MMC_REG_RES_FIFO);
cmd->resp[i] = a << 16 | b;
}
} else {
- a = readw(host->base + MMC_REG_RES_FIFO);
- b = readw(host->base + MMC_REG_RES_FIFO);
- c = readw(host->base + MMC_REG_RES_FIFO);
+ a = mxcmci_readw(host, MMC_REG_RES_FIFO);
+ b = mxcmci_readw(host, MMC_REG_RES_FIFO);
+ c = mxcmci_readw(host, MMC_REG_RES_FIFO);
cmd->resp[0] = a << 24 | b << 8 | c >> 8;
}
}
@@ -479,7 +567,7 @@ static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask)
unsigned long timeout = jiffies + HZ;
do {
- stat = readl(host->base + MMC_REG_STATUS);
+ stat = mxcmci_readl(host, MMC_REG_STATUS);
if (stat & STATUS_ERR_MASK)
return stat;
if (time_after(jiffies, timeout)) {
@@ -503,7 +591,7 @@ static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes)
STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
if (stat)
return stat;
- *buf++ = readl(host->base + MMC_REG_BUFFER_ACCESS);
+ *buf++ = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS));
bytes -= 4;
}
@@ -515,7 +603,7 @@ static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes)
STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
if (stat)
return stat;
- tmp = readl(host->base + MMC_REG_BUFFER_ACCESS);
+ tmp = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS));
memcpy(b, &tmp, bytes);
}
@@ -531,7 +619,7 @@ static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)
stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
if (stat)
return stat;
- writel(*buf++, host->base + MMC_REG_BUFFER_ACCESS);
+ mxcmci_writel(host, cpu_to_le32(*buf++), MMC_REG_BUFFER_ACCESS);
bytes -= 4;
}
@@ -544,7 +632,7 @@ static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)
return stat;
memcpy(&tmp, b, bytes);
- writel(tmp, host->base + MMC_REG_BUFFER_ACCESS);
+ mxcmci_writel(host, cpu_to_le32(tmp), MMC_REG_BUFFER_ACCESS);
}
stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
@@ -590,8 +678,8 @@ static void mxcmci_datawork(struct work_struct *work)
datawork);
int datastat = mxcmci_transfer_data(host);
- writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
- host->base + MMC_REG_STATUS);
+ mxcmci_writel(host, STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
+ MMC_REG_STATUS);
mxcmci_finish_data(host, datastat);
if (host->req->stop) {
@@ -606,24 +694,40 @@ static void mxcmci_datawork(struct work_struct *work)
static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
{
- struct mmc_data *data = host->data;
+ struct mmc_request *req;
int data_error;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (!host->data) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
+ }
- if (!data)
+ if (!host->req) {
+ spin_unlock_irqrestore(&host->lock, flags);
return;
+ }
+
+ req = host->req;
+ if (!req->stop)
+ host->req = NULL; /* we will handle finish req below */
data_error = mxcmci_finish_data(host, stat);
+ spin_unlock_irqrestore(&host->lock, flags);
+
mxcmci_read_response(host, stat);
host->cmd = NULL;
- if (host->req->stop) {
- if (mxcmci_start_cmd(host, host->req->stop, 0)) {
- mxcmci_finish_request(host, host->req);
+ if (req->stop) {
+ if (mxcmci_start_cmd(host, req->stop, 0)) {
+ mxcmci_finish_request(host, req);
return;
}
} else {
- mxcmci_finish_request(host, host->req);
+ mxcmci_finish_request(host, req);
}
}
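The reworked mxcmci_data_done() takes host->lock before looking at host->data and host->req, most likely to guard against paths such as the watchdog timer patched further down, which also clears those pointers on a timeout; re-checking under the lock keeps the DMA-completion path from finishing a request the other side has already torn down, and stashing req locally lets host->req be cleared before the lock is dropped when no stop command follows. The essential shape, as a sketch:

	/* Sketch: completion and timeout paths race for the request. */
	spin_lock_irqsave(&host->lock, flags);
	if (!host->data || !host->req) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;			/* the other path already finished it */
	}
	req = host->req;
	if (!req->stop)
		host->req = NULL;	/* finished below, outside the lock */
	spin_unlock_irqrestore(&host->lock, flags);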
@@ -653,9 +757,11 @@ static irqreturn_t mxcmci_irq(int irq, void *devid)
bool sdio_irq;
u32 stat;
- stat = readl(host->base + MMC_REG_STATUS);
- writel(stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE |
- STATUS_WRITE_OP_DONE), host->base + MMC_REG_STATUS);
+ stat = mxcmci_readl(host, MMC_REG_STATUS);
+ mxcmci_writel(host,
+ stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE |
+ STATUS_WRITE_OP_DONE),
+ MMC_REG_STATUS);
dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
@@ -665,11 +771,11 @@ static irqreturn_t mxcmci_irq(int irq, void *devid)
if (mxcmci_use_dma(host) &&
(stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE)))
- writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
- host->base + MMC_REG_STATUS);
+ mxcmci_writel(host, STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
+ MMC_REG_STATUS);
if (sdio_irq) {
- writel(STATUS_SDIO_INT_ACTIVE, host->base + MMC_REG_STATUS);
+ mxcmci_writel(host, STATUS_SDIO_INT_ACTIVE, MMC_REG_STATUS);
mmc_signal_sdio_irq(host->mmc);
}
@@ -751,7 +857,7 @@ static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
prescaler <<= 1;
}
- writew((prescaler << 4) | divider, host->base + MMC_REG_CLK_RATE);
+ mxcmci_writew(host, (prescaler << 4) | divider, MMC_REG_CLK_RATE);
dev_dbg(mmc_dev(host->mmc), "scaler: %d divider: %d in: %d out: %d\n",
prescaler, divider, clk_in, clk_ios);
@@ -814,9 +920,9 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->clock) {
mxcmci_set_clk_rate(host, ios->clock);
- writew(STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
+ mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK);
} else {
- writew(STR_STP_CLK_STOP_CLK, host->base + MMC_REG_STR_STP_CLK);
+ mxcmci_writew(host, STR_STP_CLK_STOP_CLK, MMC_REG_STR_STP_CLK);
}
host->clock = ios->clock;
@@ -839,10 +945,11 @@ static int mxcmci_get_ro(struct mmc_host *mmc)
if (host->pdata && host->pdata->get_ro)
return !!host->pdata->get_ro(mmc_dev(mmc));
/*
- * Board doesn't support read only detection; let the mmc core
- * decide what to do.
+ * If board doesn't support read only detection (no mmc_gpio
+ * context or gpio is invalid), then let the mmc core decide
+ * what to do.
*/
- return -ENOSYS;
+ return mmc_gpio_get_ro(mmc);
}
static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
@@ -853,14 +960,14 @@ static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
spin_lock_irqsave(&host->lock, flags);
host->use_sdio = enable;
- int_cntr = readl(host->base + MMC_REG_INT_CNTR);
+ int_cntr = mxcmci_readl(host, MMC_REG_INT_CNTR);
if (enable)
int_cntr |= INT_SDIO_IRQ_EN;
else
int_cntr &= ~INT_SDIO_IRQ_EN;
- writel(int_cntr, host->base + MMC_REG_INT_CNTR);
+ mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -898,7 +1005,7 @@ static void mxcmci_watchdog(unsigned long data)
struct mmc_host *mmc = (struct mmc_host *)data;
struct mxcmci_host *host = mmc_priv(mmc);
struct mmc_request *req = host->req;
- unsigned int stat = readl(host->base + MMC_REG_STATUS);
+ unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS);
if (host->dma_dir == DMA_FROM_DEVICE) {
dmaengine_terminate_all(host->dma);
@@ -914,7 +1021,8 @@ static void mxcmci_watchdog(unsigned long data)
	/* Mark transfer as erroneous and inform the upper layers */
- host->data->error = -ETIMEDOUT;
+ if (host->data)
+ host->data->error = -ETIMEDOUT;
host->req = NULL;
host->cmd = NULL;
host->data = NULL;
@@ -935,9 +1043,14 @@ static int mxcmci_probe(struct platform_device *pdev)
struct mxcmci_host *host = NULL;
struct resource *iores, *r;
int ret = 0, irq;
+ bool dat3_card_detect = false;
dma_cap_mask_t mask;
+ const struct of_device_id *of_id;
+ struct imxmmc_platform_data *pdata = pdev->dev.platform_data;
- pr_info("i.MX SDHC driver\n");
+ pr_info("i.MX/MPC512x SDHC driver\n");
+
+ of_id = of_match_device(mxcmci_of_match, &pdev->dev);
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
@@ -954,11 +1067,16 @@ static int mxcmci_probe(struct platform_device *pdev)
goto out_release_mem;
}
+ mmc_of_parse(mmc);
mmc->ops = &mxcmci_ops;
- mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
+
+ /* For devicetree parsing, the bus width is read from devicetree */
+ if (pdata)
+ mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
+ else
+ mmc->caps |= MMC_CAP_SDIO_IRQ;
/* MMC core transfer sizes tunable parameters */
- mmc->max_segs = 64;
mmc->max_blk_size = 2048;
mmc->max_blk_count = 65535;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
@@ -971,14 +1089,30 @@ static int mxcmci_probe(struct platform_device *pdev)
goto out_free;
}
+ if (of_id) {
+ const struct platform_device_id *id_entry = of_id->data;
+ host->devtype = id_entry->driver_data;
+ } else {
+ host->devtype = pdev->id_entry->driver_data;
+ }
+
+ /* adjust max_segs after devtype detection */
+ if (!is_mpc512x_mmc(host))
+ mmc->max_segs = 64;
+
host->mmc = mmc;
- host->pdata = pdev->dev.platform_data;
- host->devtype = pdev->id_entry->driver_data;
+ host->pdata = pdata;
spin_lock_init(&host->lock);
+ if (pdata)
+ dat3_card_detect = pdata->dat3_card_detect;
+ else if (!(mmc->caps & MMC_CAP_NONREMOVABLE)
+ && !of_property_read_bool(pdev->dev.of_node, "cd-gpios"))
+ dat3_card_detect = true;
+
mxcmci_init_ocr(host);
- if (host->pdata && host->pdata->dat3_card_detect)
+ if (dat3_card_detect)
host->default_irq_mask =
INT_CARD_INSERTION_EN | INT_CARD_REMOVAL_EN;
else
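When the device comes from the device tree there is no pdev->id_entry, so the devtype is recovered through of_match_device(): each mxcmci_of_match entry's .data points back into mxcmci_devtype[], keeping a single source of per-variant driver_data for both probe paths. A condensed sketch of that lookup, with names taken from the tables above:

	/* Sketch: one devtype lookup serving DT and board-file probes. */
	const struct of_device_id *of_id =
		of_match_device(mxcmci_of_match, &pdev->dev);
	const struct platform_device_id *id_entry =
		of_id ? of_id->data : pdev->id_entry;

	host->devtype = id_entry->driver_data;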
@@ -1004,7 +1138,7 @@ static int mxcmci_probe(struct platform_device *pdev)
mxcmci_softreset(host);
- host->rev_no = readw(host->base + MMC_REG_REV_NO);
+ host->rev_no = mxcmci_readw(host, MMC_REG_REV_NO);
if (host->rev_no != 0x400) {
ret = -ENODEV;
dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
@@ -1016,25 +1150,28 @@ static int mxcmci_probe(struct platform_device *pdev)
mmc->f_max = clk_get_rate(host->clk_per) >> 1;
/* recommended in data sheet */
- writew(0x2db4, host->base + MMC_REG_READ_TO);
-
- writel(host->default_irq_mask, host->base + MMC_REG_INT_CNTR);
-
- r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (r) {
- host->dmareq = r->start;
- host->dma_data.peripheral_type = IMX_DMATYPE_SDHC;
- host->dma_data.priority = DMA_PRIO_LOW;
- host->dma_data.dma_request = host->dmareq;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- host->dma = dma_request_channel(mask, filter, host);
- if (host->dma)
- mmc->max_seg_size = dma_get_max_seg_size(
- host->dma->device->dev);
- }
+ mxcmci_writew(host, 0x2db4, MMC_REG_READ_TO);
+
+ mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR);
- if (!host->dma)
+ if (!host->pdata) {
+ host->dma = dma_request_slave_channel(&pdev->dev, "rx-tx");
+ } else {
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (r) {
+ host->dmareq = r->start;
+ host->dma_data.peripheral_type = IMX_DMATYPE_SDHC;
+ host->dma_data.priority = DMA_PRIO_LOW;
+ host->dma_data.dma_request = host->dmareq;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma = dma_request_channel(mask, filter, host);
+ }
+ }
+ if (host->dma)
+ mmc->max_seg_size = dma_get_max_seg_size(
+ host->dma->device->dev);
+ else
dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n");
INIT_WORK(&host->datawork, mxcmci_datawork);
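For DT probes the DMA channel is no longer located with a filter function and an IORESOURCE_DMA entry; dma_request_slave_channel() resolves the "rx-tx" name against the node's dmas/dma-names properties. It returns NULL rather than an ERR_PTR when no channel is available, so a plain pointer test is enough to fall back to PIO, while the legacy board-file path keeps the old dma_request_channel() lookup. A sketch of the DT side only:

	/* Sketch: DT-described DMA with a graceful PIO fallback. */
	host->dma = dma_request_slave_channel(&pdev->dev, "rx-tx");
	if (host->dma)
		mmc->max_seg_size =
			dma_get_max_seg_size(host->dma->device->dev);
	else
		dev_info(&pdev->dev, "dma not available, using PIO\n");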
@@ -1052,12 +1189,12 @@ static int mxcmci_probe(struct platform_device *pdev)
goto out_free_irq;
}
- mmc_add_host(mmc);
-
init_timer(&host->watchdog);
host->watchdog.function = &mxcmci_watchdog;
host->watchdog.data = (unsigned long)mmc;
+ mmc_add_host(mmc);
+
return 0;
out_free_irq:
@@ -1153,6 +1290,7 @@ static struct platform_driver mxcmci_driver = {
#ifdef CONFIG_PM
.pm = &mxcmci_pm_ops,
#endif
+ .of_match_table = mxcmci_of_match,
}
};
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 4efe302..4278a17 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -72,6 +72,9 @@ struct mxs_mmc_host {
int sdio_irq_en;
int wp_gpio;
bool wp_inverted;
+ bool cd_inverted;
+ bool broken_cd;
+ bool non_removable;
};
static int mxs_mmc_get_ro(struct mmc_host *mmc)
@@ -95,8 +98,9 @@ static int mxs_mmc_get_cd(struct mmc_host *mmc)
struct mxs_mmc_host *host = mmc_priv(mmc);
struct mxs_ssp *ssp = &host->ssp;
- return !(readl(ssp->base + HW_SSP_STATUS(ssp)) &
- BM_SSP_STATUS_CARD_DETECT);
+ return host->non_removable || host->broken_cd ||
+ !(readl(ssp->base + HW_SSP_STATUS(ssp)) &
+ BM_SSP_STATUS_CARD_DETECT) ^ host->cd_inverted;
}
static void mxs_mmc_reset(struct mxs_mmc_host *host)
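Card detect now honours three device-tree properties: "non-removable" and "broken-cd" make the slot always report a card, while "cd-inverted" flips the sense of the hardware CARD_DETECT bit with an XOR. Since '^' binds more tightly than '||', the expression groups the way it reads, but the long-hand form below may be easier to follow (the function name is hypothetical; the registers and flags are the driver's own):

	/* Sketch: mxs_mmc_get_cd() written out step by step. */
	static int mxs_mmc_card_present(struct mxs_mmc_host *host)
	{
		struct mxs_ssp *ssp = &host->ssp;
		int detected;

		if (host->non_removable || host->broken_cd)
			return 1;

		detected = !(readl(ssp->base + HW_SSP_STATUS(ssp)) &
			     BM_SSP_STATUS_CARD_DETECT);

		return detected ^ host->cd_inverted;
	}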
@@ -548,22 +552,6 @@ static const struct mmc_host_ops mxs_mmc_ops = {
.enable_sdio_irq = mxs_mmc_enable_sdio_irq,
};
-static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param)
-{
- struct mxs_mmc_host *host = param;
- struct mxs_ssp *ssp = &host->ssp;
-
- if (!mxs_dma_is_apbh(chan))
- return false;
-
- if (chan->chan_id != ssp->dma_channel)
- return false;
-
- chan->private = &ssp->dma_data;
-
- return true;
-}
-
static struct platform_device_id mxs_ssp_ids[] = {
{
.name = "imx23-mmc",
@@ -591,20 +579,17 @@ static int mxs_mmc_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct mxs_mmc_host *host;
struct mmc_host *mmc;
- struct resource *iores, *dmares;
+ struct resource *iores;
struct pinctrl *pinctrl;
- int ret = 0, irq_err, irq_dma;
- dma_cap_mask_t mask;
+ int ret = 0, irq_err;
struct regulator *reg_vmmc;
enum of_gpio_flags flags;
struct mxs_ssp *ssp;
u32 bus_width = 0;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
irq_err = platform_get_irq(pdev, 0);
- irq_dma = platform_get_irq(pdev, 1);
- if (!iores || irq_err < 0 || irq_dma < 0)
+ if (!iores || irq_err < 0)
return -EINVAL;
mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
@@ -620,23 +605,7 @@ static int mxs_mmc_probe(struct platform_device *pdev)
goto out_mmc_free;
}
- if (np) {
- ssp->devid = (enum mxs_ssp_id) of_id->data;
- /*
- * TODO: This is a temporary solution and should be changed
- * to use generic DMA binding later when the helpers get in.
- */
- ret = of_property_read_u32(np, "fsl,ssp-dma-channel",
- &ssp->dma_channel);
- if (ret) {
- dev_err(mmc_dev(host->mmc),
- "failed to get dma channel\n");
- goto out_mmc_free;
- }
- } else {
- ssp->devid = pdev->id_entry->driver_data;
- ssp->dma_channel = dmares->start;
- }
+ ssp->devid = (enum mxs_ssp_id) of_id->data;
host->mmc = mmc;
host->sdio_irq_en = 0;
@@ -666,10 +635,7 @@ static int mxs_mmc_probe(struct platform_device *pdev)
mxs_mmc_reset(host);
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- ssp->dma_data.chan_irq = irq_dma;
- ssp->dmach = dma_request_channel(mask, mxs_mmc_dma_filter, host);
+ ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
if (!ssp->dmach) {
dev_err(mmc_dev(host->mmc),
"%s: failed to request dma\n", __func__);
@@ -686,11 +652,16 @@ static int mxs_mmc_probe(struct platform_device *pdev)
mmc->caps |= MMC_CAP_4_BIT_DATA;
else if (bus_width == 8)
mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
+ host->broken_cd = of_property_read_bool(np, "broken-cd");
+ host->non_removable = of_property_read_bool(np, "non-removable");
+ if (host->non_removable)
+ mmc->caps |= MMC_CAP_NONREMOVABLE;
host->wp_gpio = of_get_named_gpio_flags(np, "wp-gpios", 0, &flags);
-
if (flags & OF_GPIO_ACTIVE_LOW)
host->wp_inverted = 1;
+ host->cd_inverted = of_property_read_bool(np, "cd-inverted");
+
mmc->f_min = 400000;
mmc->f_max = 288000000;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index bc58078..6e44025 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1717,6 +1717,12 @@ static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
struct omap_mmc_platform_data *pdata;
struct device_node *np = dev->of_node;
u32 bus_width, max_freq;
+ int cd_gpio, wp_gpio;
+
+ cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
+ wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
+ if (cd_gpio == -EPROBE_DEFER || wp_gpio == -EPROBE_DEFER)
+ return ERR_PTR(-EPROBE_DEFER);
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
@@ -1727,8 +1733,8 @@ static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
/* This driver only supports 1 slot */
pdata->nr_slots = 1;
- pdata->slots[0].switch_pin = of_get_named_gpio(np, "cd-gpios", 0);
- pdata->slots[0].gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
+ pdata->slots[0].switch_pin = cd_gpio;
+ pdata->slots[0].gpio_wp = wp_gpio;
if (of_find_property(np, "ti,non-removable", NULL)) {
pdata->slots[0].nonremovable = true;
@@ -1774,6 +1780,10 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev);
if (match) {
pdata = of_get_hsmmc_pdata(&pdev->dev);
+
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
if (match->data) {
const u16 *offsetp = match->data;
pdata->reg_offset = *offsetp;
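of_get_named_gpio() returns -EPROBE_DEFER when the controller providing "cd-gpios" or "wp-gpios" has not been probed yet. By checking both lines before any allocation, of_get_hsmmc_pdata() can hand ERR_PTR(-EPROBE_DEFER) back to omap_hsmmc_probe(), which returns it so the driver core retries the whole probe later, rather than registering a host with no card-detect or write-protect support. The caller side reduces to:

	/* Sketch: let the driver core retry probe once the GPIOs exist. */
	pdata = of_get_hsmmc_pdata(&pdev->dev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);	/* typically -EPROBE_DEFER */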
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index f981f7d..ad13f42 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -57,6 +57,9 @@ struct realtek_pci_sdmmc {
bool eject;
bool initial_mode;
bool ddr_mode;
+ int power_state;
+#define SDMMC_POWER_ON 1
+#define SDMMC_POWER_OFF 0
};
static inline struct device *sdmmc_dev(struct realtek_pci_sdmmc *host)
@@ -765,6 +768,9 @@ static int sd_power_on(struct realtek_pci_sdmmc *host)
struct rtsx_pcr *pcr = host->pcr;
int err;
+ if (host->power_state == SDMMC_POWER_ON)
+ return 0;
+
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SELECT, 0x07, SD_MOD_SEL);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SHARE_MODE,
@@ -787,6 +793,7 @@ static int sd_power_on(struct realtek_pci_sdmmc *host)
if (err < 0)
return err;
+ host->power_state = SDMMC_POWER_ON;
return 0;
}
@@ -795,6 +802,8 @@ static int sd_power_off(struct realtek_pci_sdmmc *host)
struct rtsx_pcr *pcr = host->pcr;
int err;
+ host->power_state = SDMMC_POWER_OFF;
+
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, SD_CLK_EN, 0);
@@ -1260,6 +1269,7 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
host->pcr = pcr;
host->mmc = mmc;
host->pdev = pdev;
+ host->power_state = SDMMC_POWER_OFF;
platform_set_drvdata(pdev, host);
pcr->slots[RTSX_SD_CARD].p_dev = pdev;
pcr->slots[RTSX_SD_CARD].card_event = rtsx_pci_sdmmc_card_event;
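Tracking power_state makes sd_power_on() idempotent: the MMC core may call set_ios() with MMC_POWER_ON repeatedly, and the register writes that select and power the SD slot now run only on an actual off-to-on transition, while sd_power_off() clears the flag up front so a failed power-down is simply retried next time. The guard reduces to a sketch like this (the wrapper name is made up; the state constants are the ones defined above):

	/* Sketch: skip redundant power-up sequences. */
	static int sd_set_power(struct realtek_pci_sdmmc *host, bool on)
	{
		if (on && host->power_state == SDMMC_POWER_ON)
			return 0;		/* nothing to do */

		if (!on)
			host->power_state = SDMMC_POWER_OFF;

		/* ... program card select, clock and power here ... */

		if (on)
			host->power_state = SDMMC_POWER_ON;
		return 0;
	}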
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 63fb265..8d6794c 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -25,14 +25,93 @@
#include <mach/dma.h>
-#include <mach/regs-sdi.h>
-
#include <linux/platform_data/mmc-s3cmci.h>
#include "s3cmci.h"
#define DRIVER_NAME "s3c-mci"
+#define S3C2410_SDICON (0x00)
+#define S3C2410_SDIPRE (0x04)
+#define S3C2410_SDICMDARG (0x08)
+#define S3C2410_SDICMDCON (0x0C)
+#define S3C2410_SDICMDSTAT (0x10)
+#define S3C2410_SDIRSP0 (0x14)
+#define S3C2410_SDIRSP1 (0x18)
+#define S3C2410_SDIRSP2 (0x1C)
+#define S3C2410_SDIRSP3 (0x20)
+#define S3C2410_SDITIMER (0x24)
+#define S3C2410_SDIBSIZE (0x28)
+#define S3C2410_SDIDCON (0x2C)
+#define S3C2410_SDIDCNT (0x30)
+#define S3C2410_SDIDSTA (0x34)
+#define S3C2410_SDIFSTA (0x38)
+
+#define S3C2410_SDIDATA (0x3C)
+#define S3C2410_SDIIMSK (0x40)
+
+#define S3C2440_SDIDATA (0x40)
+#define S3C2440_SDIIMSK (0x3C)
+
+#define S3C2440_SDICON_SDRESET (1 << 8)
+#define S3C2410_SDICON_SDIOIRQ (1 << 3)
+#define S3C2410_SDICON_FIFORESET (1 << 1)
+#define S3C2410_SDICON_CLOCKTYPE (1 << 0)
+
+#define S3C2410_SDICMDCON_LONGRSP (1 << 10)
+#define S3C2410_SDICMDCON_WAITRSP (1 << 9)
+#define S3C2410_SDICMDCON_CMDSTART (1 << 8)
+#define S3C2410_SDICMDCON_SENDERHOST (1 << 6)
+#define S3C2410_SDICMDCON_INDEX (0x3f)
+
+#define S3C2410_SDICMDSTAT_CRCFAIL (1 << 12)
+#define S3C2410_SDICMDSTAT_CMDSENT (1 << 11)
+#define S3C2410_SDICMDSTAT_CMDTIMEOUT (1 << 10)
+#define S3C2410_SDICMDSTAT_RSPFIN (1 << 9)
+
+#define S3C2440_SDIDCON_DS_WORD (2 << 22)
+#define S3C2410_SDIDCON_TXAFTERRESP (1 << 20)
+#define S3C2410_SDIDCON_RXAFTERCMD (1 << 19)
+#define S3C2410_SDIDCON_BLOCKMODE (1 << 17)
+#define S3C2410_SDIDCON_WIDEBUS (1 << 16)
+#define S3C2410_SDIDCON_DMAEN (1 << 15)
+#define S3C2410_SDIDCON_STOP (1 << 14)
+#define S3C2440_SDIDCON_DATSTART (1 << 14)
+
+#define S3C2410_SDIDCON_XFER_RXSTART (2 << 12)
+#define S3C2410_SDIDCON_XFER_TXSTART (3 << 12)
+
+#define S3C2410_SDIDCON_BLKNUM_MASK (0xFFF)
+
+#define S3C2410_SDIDSTA_SDIOIRQDETECT (1 << 9)
+#define S3C2410_SDIDSTA_FIFOFAIL (1 << 8)
+#define S3C2410_SDIDSTA_CRCFAIL (1 << 7)
+#define S3C2410_SDIDSTA_RXCRCFAIL (1 << 6)
+#define S3C2410_SDIDSTA_DATATIMEOUT (1 << 5)
+#define S3C2410_SDIDSTA_XFERFINISH (1 << 4)
+#define S3C2410_SDIDSTA_TXDATAON (1 << 1)
+#define S3C2410_SDIDSTA_RXDATAON (1 << 0)
+
+#define S3C2440_SDIFSTA_FIFORESET (1 << 16)
+#define S3C2440_SDIFSTA_FIFOFAIL (3 << 14)
+#define S3C2410_SDIFSTA_TFDET (1 << 13)
+#define S3C2410_SDIFSTA_RFDET (1 << 12)
+#define S3C2410_SDIFSTA_COUNTMASK (0x7f)
+
+#define S3C2410_SDIIMSK_RESPONSECRC (1 << 17)
+#define S3C2410_SDIIMSK_CMDSENT (1 << 16)
+#define S3C2410_SDIIMSK_CMDTIMEOUT (1 << 15)
+#define S3C2410_SDIIMSK_RESPONSEND (1 << 14)
+#define S3C2410_SDIIMSK_SDIOIRQ (1 << 12)
+#define S3C2410_SDIIMSK_FIFOFAIL (1 << 11)
+#define S3C2410_SDIIMSK_CRCSTATUS (1 << 10)
+#define S3C2410_SDIIMSK_DATACRC (1 << 9)
+#define S3C2410_SDIIMSK_DATATIMEOUT (1 << 8)
+#define S3C2410_SDIIMSK_DATAFINISH (1 << 7)
+#define S3C2410_SDIIMSK_TXFIFOHALF (1 << 4)
+#define S3C2410_SDIIMSK_RXFIFOLAST (1 << 2)
+#define S3C2410_SDIIMSK_RXFIFOHALF (1 << 0)
+
enum dbg_channels {
dbg_err = (1 << 0),
dbg_debug = (1 << 1),
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 2592ddd..7bcf74b 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -195,6 +195,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
host->mmc->pm_caps |= c->slot->pm_caps;
}
+ host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
+
err = sdhci_add_host(host);
if (err)
goto err_free;
diff --git a/drivers/mmc/host/sdhci-bcm2835.c b/drivers/mmc/host/sdhci-bcm2835.c
index 8ffea05..d49bc95 100644
--- a/drivers/mmc/host/sdhci-bcm2835.c
+++ b/drivers/mmc/host/sdhci-bcm2835.c
@@ -124,7 +124,7 @@ unsigned int bcm2835_sdhci_get_min_clock(struct sdhci_host *host)
return MIN_FREQ;
}
-static struct sdhci_ops bcm2835_sdhci_ops = {
+static const struct sdhci_ops bcm2835_sdhci_ops = {
.write_l = bcm2835_sdhci_writel,
.write_w = bcm2835_sdhci_writew,
.write_b = bcm2835_sdhci_writeb,
@@ -135,7 +135,7 @@ static struct sdhci_ops bcm2835_sdhci_ops = {
.get_min_clock = bcm2835_sdhci_get_min_clock,
};
-static struct sdhci_pltfm_data bcm2835_sdhci_pdata = {
+static const struct sdhci_pltfm_data bcm2835_sdhci_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
.ops = &bcm2835_sdhci_ops,
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
index 30bfdc4..8ebb6b6 100644
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -16,7 +16,6 @@
#include <linux/device.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
-#include <mach/cns3xxx.h>
#include "sdhci-pltfm.h"
static unsigned int sdhci_cns3xxx_get_max_clk(struct sdhci_host *host)
@@ -80,12 +79,12 @@ out:
host->clock = clock;
}
-static struct sdhci_ops sdhci_cns3xxx_ops = {
+static const struct sdhci_ops sdhci_cns3xxx_ops = {
.get_max_clock = sdhci_cns3xxx_get_max_clk,
.set_clock = sdhci_cns3xxx_set_clock,
};
-static struct sdhci_pltfm_data sdhci_cns3xxx_pdata = {
+static const struct sdhci_pltfm_data sdhci_cns3xxx_pdata = {
.ops = &sdhci_cns3xxx_ops,
.quirks = SDHCI_QUIRK_BROKEN_DMA |
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index 169fab9..15e7803 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -83,12 +83,12 @@ static u32 sdhci_dove_readl(struct sdhci_host *host, int reg)
return ret;
}
-static struct sdhci_ops sdhci_dove_ops = {
+static const struct sdhci_ops sdhci_dove_ops = {
.read_w = sdhci_dove_readw,
.read_l = sdhci_dove_readl,
};
-static struct sdhci_pltfm_data sdhci_dove_pdata = {
+static const struct sdhci_pltfm_data sdhci_dove_pdata = {
.ops = &sdhci_dove_ops,
.quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
SDHCI_QUIRK_NO_BUSY_IRQ |
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 78ac002..67d6dde 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -399,7 +399,7 @@ static int esdhc_pltfm_bus_width(struct sdhci_host *host, int width)
return 0;
}
-static struct sdhci_ops sdhci_esdhc_ops = {
+static const struct sdhci_ops sdhci_esdhc_ops = {
.read_l = esdhc_readl_le,
.read_w = esdhc_readw_le,
.write_l = esdhc_writel_le,
@@ -412,7 +412,7 @@ static struct sdhci_ops sdhci_esdhc_ops = {
.platform_bus_width = esdhc_pltfm_bus_width,
};
-static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
+static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
.quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_NO_HISPD_BIT
| SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
| SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index f32526d..5e68adc 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -230,7 +230,7 @@ static void esdhc_of_platform_init(struct sdhci_host *host)
host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
}
-static struct sdhci_ops sdhci_esdhc_ops = {
+static const struct sdhci_ops sdhci_esdhc_ops = {
.read_l = esdhc_readl,
.read_w = esdhc_readw,
.read_b = esdhc_readb,
@@ -249,7 +249,7 @@ static struct sdhci_ops sdhci_esdhc_ops = {
.adma_workaround = esdhci_of_adma_workaround,
};
-static struct sdhci_pltfm_data sdhci_esdhc_pdata = {
+static const struct sdhci_pltfm_data sdhci_esdhc_pdata = {
/*
* card detection could be handled via GPIO
* eSDHC cannot support End Attribute in NOP ADMA descriptor
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index c3d3715..200a6a9 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -51,7 +51,7 @@ static void sdhci_hlwd_writeb(struct sdhci_host *host, u8 val, int reg)
udelay(SDHCI_HLWD_WRITE_DELAY);
}
-static struct sdhci_ops sdhci_hlwd_ops = {
+static const struct sdhci_ops sdhci_hlwd_ops = {
.read_l = sdhci_be32bs_readl,
.read_w = sdhci_be32bs_readw,
.read_b = sdhci_be32bs_readb,
@@ -60,7 +60,7 @@ static struct sdhci_ops sdhci_hlwd_ops = {
.write_b = sdhci_hlwd_writeb,
};
-static struct sdhci_pltfm_data sdhci_hlwd_pdata = {
+static const struct sdhci_pltfm_data sdhci_hlwd_pdata = {
.quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
SDHCI_QUIRK_32BIT_DMA_SIZE,
.ops = &sdhci_hlwd_ops,
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index c7ccf30..0012d3f 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -975,7 +975,7 @@ static void sdhci_pci_hw_reset(struct sdhci_host *host)
usleep_range(300, 1000);
}
-static struct sdhci_ops sdhci_pci_ops = {
+static const struct sdhci_ops sdhci_pci_ops = {
.enable_dma = sdhci_pci_enable_dma,
.platform_bus_width = sdhci_pci_bus_width,
.hw_reset = sdhci_pci_hw_reset,
@@ -1279,6 +1279,8 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
}
host->mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
+ host->mmc->slotno = slotno;
+ host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
ret = sdhci_add_host(host);
if (ret)
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 3145a78..cd0f1f6 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -44,7 +44,7 @@ unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host)
}
EXPORT_SYMBOL_GPL(sdhci_pltfm_clk_get_max_clock);
-static struct sdhci_ops sdhci_pltfm_ops = {
+static const struct sdhci_ops sdhci_pltfm_ops = {
};
#ifdef CONFIG_OF
@@ -94,6 +94,7 @@ void sdhci_get_of_property(struct platform_device *pdev)
if (of_device_is_compatible(np, "fsl,p2020-esdhc") ||
of_device_is_compatible(np, "fsl,p1010-esdhc") ||
+ of_device_is_compatible(np, "fsl,t4240-esdhc") ||
of_device_is_compatible(np, "fsl,mpc8536-esdhc"))
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
@@ -114,7 +115,7 @@ void sdhci_get_of_property(struct platform_device *pdev) {}
EXPORT_SYMBOL_GPL(sdhci_get_of_property);
struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
- struct sdhci_pltfm_data *pdata)
+ const struct sdhci_pltfm_data *pdata)
{
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
@@ -201,7 +202,7 @@ void sdhci_pltfm_free(struct platform_device *pdev)
EXPORT_SYMBOL_GPL(sdhci_pltfm_free);
int sdhci_pltfm_register(struct platform_device *pdev,
- struct sdhci_pltfm_data *pdata)
+ const struct sdhci_pltfm_data *pdata)
{
struct sdhci_host *host;
int ret = 0;
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 153b6c5..1210ed1 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -16,7 +16,7 @@
#include "sdhci.h"
struct sdhci_pltfm_data {
- struct sdhci_ops *ops;
+ const struct sdhci_ops *ops;
unsigned int quirks;
};
@@ -91,11 +91,11 @@ static inline void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg)
extern void sdhci_get_of_property(struct platform_device *pdev);
extern struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
- struct sdhci_pltfm_data *pdata);
+ const struct sdhci_pltfm_data *pdata);
extern void sdhci_pltfm_free(struct platform_device *pdev);
extern int sdhci_pltfm_register(struct platform_device *pdev,
- struct sdhci_pltfm_data *pdata);
+ const struct sdhci_pltfm_data *pdata);
extern int sdhci_pltfm_unregister(struct platform_device *pdev);
extern unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host);
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index eeb7d43..6a3f702 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -111,7 +111,7 @@ static int pxav2_mmc_set_width(struct sdhci_host *host, int width)
return 0;
}
-static struct sdhci_ops pxav2_sdhci_ops = {
+static const struct sdhci_ops pxav2_sdhci_ops = {
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.platform_reset_exit = pxav2_set_private_registers,
.platform_bus_width = pxav2_mmc_set_width,
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index a0cdbc5..1ae358e 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -167,13 +167,21 @@ static int pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
return 0;
}
-static struct sdhci_ops pxav3_sdhci_ops = {
+static const struct sdhci_ops pxav3_sdhci_ops = {
.platform_reset_exit = pxav3_set_private_registers,
.set_uhs_signaling = pxav3_set_uhs_signaling,
.platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
};
+static struct sdhci_pltfm_data sdhci_pxav3_pdata = {
+ .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK
+ | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
+ | SDHCI_QUIRK_32BIT_ADMA_SIZE
+ | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .ops = &pxav3_sdhci_ops,
+};
+
#ifdef CONFIG_OF
static const struct of_device_id sdhci_pxav3_of_match[] = {
{
@@ -187,29 +195,16 @@ static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev)
{
struct sdhci_pxa_platdata *pdata;
struct device_node *np = dev->of_node;
- u32 bus_width;
u32 clk_delay_cycles;
- enum of_gpio_flags gpio_flags;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
- if (of_find_property(np, "non-removable", NULL))
- pdata->flags |= PXA_FLAG_CARD_PERMANENT;
-
- of_property_read_u32(np, "bus-width", &bus_width);
- if (bus_width == 8)
- pdata->flags |= PXA_FLAG_SD_8_BIT_CAPABLE_SLOT;
-
of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles);
if (clk_delay_cycles > 0)
pdata->clk_delay_cycles = clk_delay_cycles;
- pdata->ext_cd_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &gpio_flags);
- if (gpio_flags != OF_GPIO_ACTIVE_LOW)
- pdata->host_caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
-
return pdata;
}
#else
@@ -235,7 +230,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
if (!pxa)
return -ENOMEM;
- host = sdhci_pltfm_init(pdev, NULL);
+ host = sdhci_pltfm_init(pdev, &sdhci_pxav3_pdata);
if (IS_ERR(host)) {
kfree(pxa);
return PTR_ERR(host);
@@ -252,24 +247,18 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
pltfm_host->clk = clk;
clk_prepare_enable(clk);
- host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
- | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
- | SDHCI_QUIRK_32BIT_ADMA_SIZE
- | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
-
/* enable 1/8V DDR capable */
host->mmc->caps |= MMC_CAP_1_8V_DDR;
match = of_match_device(of_match_ptr(sdhci_pxav3_of_match), &pdev->dev);
- if (match)
+ if (match) {
+ mmc_of_parse(host->mmc);
+ sdhci_get_of_property(pdev);
pdata = pxav3_get_mmc_pdata(dev);
-
- if (pdata) {
- if (pdata->flags & PXA_FLAG_CARD_PERMANENT) {
- /* on-chip device */
- host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ } else if (pdata) {
+ /* on-chip device */
+ if (pdata->flags & PXA_FLAG_CARD_PERMANENT)
host->mmc->caps |= MMC_CAP_NONREMOVABLE;
- }
/* If slot design supports 8 bit data, indicate this to MMC. */
if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
@@ -296,10 +285,6 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
}
}
- host->ops = &pxav3_sdhci_ops;
-
- sdhci_get_of_property(pdev);
-
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, PXAV3_RPM_DELAY_MS);
@@ -317,7 +302,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
- if (pdata->pm_caps & MMC_PM_KEEP_POWER) {
+ if (host->mmc->pm_caps & MMC_PM_KEEP_POWER) {
device_init_wakeup(&pdev->dev, 1);
host->mmc->pm_flags |= MMC_PM_WAKE_SDIO_IRQ;
} else {
diff --git a/drivers/mmc/host/sdhci-s3c-regs.h b/drivers/mmc/host/sdhci-s3c-regs.h
new file mode 100644
index 0000000..e34049ad
--- /dev/null
+++ b/drivers/mmc/host/sdhci-s3c-regs.h
@@ -0,0 +1,87 @@
+/* linux/arch/arm/plat-s3c/include/plat/regs-sdhci.h
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C Platform - SDHCI (HSMMC) register definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __PLAT_S3C_SDHCI_REGS_H
+#define __PLAT_S3C_SDHCI_REGS_H __FILE__
+
+#define S3C_SDHCI_CONTROL2 (0x80)
+#define S3C_SDHCI_CONTROL3 (0x84)
+#define S3C64XX_SDHCI_CONTROL4 (0x8C)
+
+#define S3C64XX_SDHCI_CTRL2_ENSTAASYNCCLR (1 << 31)
+#define S3C64XX_SDHCI_CTRL2_ENCMDCNFMSK (1 << 30)
+#define S3C_SDHCI_CTRL2_CDINVRXD3 (1 << 29)
+#define S3C_SDHCI_CTRL2_SLCARDOUT (1 << 28)
+
+#define S3C_SDHCI_CTRL2_FLTCLKSEL_MASK (0xf << 24)
+#define S3C_SDHCI_CTRL2_FLTCLKSEL_SHIFT (24)
+#define S3C_SDHCI_CTRL2_FLTCLKSEL(_x) ((_x) << 24)
+
+#define S3C_SDHCI_CTRL2_LVLDAT_MASK (0xff << 16)
+#define S3C_SDHCI_CTRL2_LVLDAT_SHIFT (16)
+#define S3C_SDHCI_CTRL2_LVLDAT(_x) ((_x) << 16)
+
+#define S3C_SDHCI_CTRL2_ENFBCLKTX (1 << 15)
+#define S3C_SDHCI_CTRL2_ENFBCLKRX (1 << 14)
+#define S3C_SDHCI_CTRL2_SDCDSEL (1 << 13)
+#define S3C_SDHCI_CTRL2_SDSIGPC (1 << 12)
+#define S3C_SDHCI_CTRL2_ENBUSYCHKTXSTART (1 << 11)
+
+#define S3C_SDHCI_CTRL2_DFCNT_MASK (0x3 << 9)
+#define S3C_SDHCI_CTRL2_DFCNT_SHIFT (9)
+#define S3C_SDHCI_CTRL2_DFCNT_NONE (0x0 << 9)
+#define S3C_SDHCI_CTRL2_DFCNT_4SDCLK (0x1 << 9)
+#define S3C_SDHCI_CTRL2_DFCNT_16SDCLK (0x2 << 9)
+#define S3C_SDHCI_CTRL2_DFCNT_64SDCLK (0x3 << 9)
+
+#define S3C_SDHCI_CTRL2_ENCLKOUTHOLD (1 << 8)
+#define S3C_SDHCI_CTRL2_RWAITMODE (1 << 7)
+#define S3C_SDHCI_CTRL2_DISBUFRD (1 << 6)
+#define S3C_SDHCI_CTRL2_SELBASECLK_MASK (0x3 << 4)
+#define S3C_SDHCI_CTRL2_SELBASECLK_SHIFT (4)
+#define S3C_SDHCI_CTRL2_PWRSYNC (1 << 3)
+#define S3C_SDHCI_CTRL2_ENCLKOUTMSKCON (1 << 1)
+#define S3C_SDHCI_CTRL2_HWINITFIN (1 << 0)
+
+#define S3C_SDHCI_CTRL3_FCSEL3 (1 << 31)
+#define S3C_SDHCI_CTRL3_FCSEL2 (1 << 23)
+#define S3C_SDHCI_CTRL3_FCSEL1 (1 << 15)
+#define S3C_SDHCI_CTRL3_FCSEL0 (1 << 7)
+
+#define S3C_SDHCI_CTRL3_FIA3_MASK (0x7f << 24)
+#define S3C_SDHCI_CTRL3_FIA3_SHIFT (24)
+#define S3C_SDHCI_CTRL3_FIA3(_x) ((_x) << 24)
+
+#define S3C_SDHCI_CTRL3_FIA2_MASK (0x7f << 16)
+#define S3C_SDHCI_CTRL3_FIA2_SHIFT (16)
+#define S3C_SDHCI_CTRL3_FIA2(_x) ((_x) << 16)
+
+#define S3C_SDHCI_CTRL3_FIA1_MASK (0x7f << 8)
+#define S3C_SDHCI_CTRL3_FIA1_SHIFT (8)
+#define S3C_SDHCI_CTRL3_FIA1(_x) ((_x) << 8)
+
+#define S3C_SDHCI_CTRL3_FIA0_MASK (0x7f << 0)
+#define S3C_SDHCI_CTRL3_FIA0_SHIFT (0)
+#define S3C_SDHCI_CTRL3_FIA0(_x) ((_x) << 0)
+
+#define S3C64XX_SDHCI_CONTROL4_DRIVE_MASK (0x3 << 16)
+#define S3C64XX_SDHCI_CONTROL4_DRIVE_SHIFT (16)
+#define S3C64XX_SDHCI_CONTROL4_DRIVE_2mA (0x0 << 16)
+#define S3C64XX_SDHCI_CONTROL4_DRIVE_4mA (0x1 << 16)
+#define S3C64XX_SDHCI_CONTROL4_DRIVE_7mA (0x2 << 16)
+#define S3C64XX_SDHCI_CONTROL4_DRIVE_9mA (0x3 << 16)
+
+#define S3C64XX_SDHCI_CONTROL4_BUSY (1)
+
+#endif /* __PLAT_S3C_SDHCI_REGS_H */
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 7363efe..c6f6246 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/mmc-sdhci-s3c.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io.h>
@@ -24,13 +25,10 @@
#include <linux/of_gpio.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/mmc/host.h>
-#include <plat/sdhci.h>
-#include <plat/regs-sdhci.h>
-
+#include "sdhci-s3c-regs.h"
#include "sdhci.h"
#define MAX_BUS_CLK (4)
@@ -45,7 +43,6 @@
* @ioarea: The resource created when we claimed the IO area.
* @pdata: The platform data for this controller.
* @cur_clk: The index of the current bus clock.
- * @gpios: List of gpio numbers parsed from device tree.
* @clk_io: The clock for the internal bus interface.
* @clk_bus: The clocks that are available for the SD/MMC bus clock.
*/
@@ -57,8 +54,6 @@ struct sdhci_s3c {
unsigned int cur_clk;
int ext_cd_irq;
int ext_cd_gpio;
- int *gpios;
- struct pinctrl *pctrl;
struct clk *clk_io;
struct clk *clk_bus[MAX_BUS_CLK];
@@ -447,88 +442,39 @@ static int sdhci_s3c_parse_dt(struct device *dev,
struct device_node *node = dev->of_node;
struct sdhci_s3c *ourhost = to_s3c(host);
u32 max_width;
- int gpio, cnt, ret;
+ int gpio;
/* if the bus-width property is not specified, assume width as 1 */
if (of_property_read_u32(node, "bus-width", &max_width))
max_width = 1;
pdata->max_width = max_width;
- ourhost->gpios = devm_kzalloc(dev, NUM_GPIOS(pdata->max_width) *
- sizeof(int), GFP_KERNEL);
- if (!ourhost->gpios)
- return -ENOMEM;
-
/* get the card detection method */
if (of_get_property(node, "broken-cd", NULL)) {
pdata->cd_type = S3C_SDHCI_CD_NONE;
- goto setup_bus;
+ return 0;
}
if (of_get_property(node, "non-removable", NULL)) {
pdata->cd_type = S3C_SDHCI_CD_PERMANENT;
- goto setup_bus;
+ return 0;
}
gpio = of_get_named_gpio(node, "cd-gpios", 0);
if (gpio_is_valid(gpio)) {
pdata->cd_type = S3C_SDHCI_CD_GPIO;
- goto found_cd;
- } else if (gpio != -ENOENT) {
- dev_err(dev, "invalid card detect gpio specified\n");
- return -EINVAL;
- }
-
- gpio = of_get_named_gpio(node, "samsung,cd-pinmux-gpio", 0);
- if (gpio_is_valid(gpio)) {
- pdata->cd_type = S3C_SDHCI_CD_INTERNAL;
- goto found_cd;
- } else if (gpio != -ENOENT) {
- dev_err(dev, "invalid card detect gpio specified\n");
- return -EINVAL;
- }
-
- /* assuming internal card detect that will be configured by pinctrl */
- pdata->cd_type = S3C_SDHCI_CD_INTERNAL;
- goto setup_bus;
-
- found_cd:
- if (pdata->cd_type == S3C_SDHCI_CD_GPIO) {
pdata->ext_cd_gpio = gpio;
ourhost->ext_cd_gpio = -1;
if (of_get_property(node, "cd-inverted", NULL))
pdata->ext_cd_gpio_invert = 1;
- } else if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) {
- ret = devm_gpio_request(dev, gpio, "sdhci-cd");
- if (ret) {
- dev_err(dev, "card detect gpio request failed\n");
- return -EINVAL;
- }
- ourhost->ext_cd_gpio = gpio;
- }
-
- setup_bus:
- if (!IS_ERR(ourhost->pctrl))
return 0;
-
- /* get the gpios for command, clock and data lines */
- for (cnt = 0; cnt < NUM_GPIOS(pdata->max_width); cnt++) {
- gpio = of_get_gpio(node, cnt);
- if (!gpio_is_valid(gpio)) {
- dev_err(dev, "invalid gpio[%d]\n", cnt);
- return -EINVAL;
- }
- ourhost->gpios[cnt] = gpio;
- }
-
- for (cnt = 0; cnt < NUM_GPIOS(pdata->max_width); cnt++) {
- ret = devm_gpio_request(dev, ourhost->gpios[cnt], "sdhci-gpio");
- if (ret) {
- dev_err(dev, "gpio[%d] request failed\n", cnt);
- return -EINVAL;
- }
+ } else if (gpio != -ENOENT) {
+ dev_err(dev, "invalid card detect gpio specified\n");
+ return -EINVAL;
}
+ /* assuming internal card detect that will be configured by pinctrl */
+ pdata->cd_type = S3C_SDHCI_CD_INTERNAL;
return 0;
}
#else
@@ -589,8 +535,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
goto err_pdata_io_clk;
}
- sc->pctrl = devm_pinctrl_get_select_default(&pdev->dev);
-
if (pdev->dev.of_node) {
ret = sdhci_s3c_parse_dt(&pdev->dev, host, pdata);
if (ret)
@@ -608,7 +552,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
- sc->clk_io = clk_get(dev, "hsmmc");
+ sc->clk_io = devm_clk_get(dev, "hsmmc");
if (IS_ERR(sc->clk_io)) {
dev_err(dev, "failed to get io clock\n");
ret = PTR_ERR(sc->clk_io);
@@ -623,7 +567,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
char name[14];
snprintf(name, 14, "mmc_busclk.%d", ptr);
- clk = clk_get(dev, name);
+ clk = devm_clk_get(dev, name);
if (IS_ERR(clk))
continue;
@@ -764,15 +708,9 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
#ifndef CONFIG_PM_RUNTIME
clk_disable_unprepare(sc->clk_bus[sc->cur_clk]);
#endif
- for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
- if (sc->clk_bus[ptr]) {
- clk_put(sc->clk_bus[ptr]);
- }
- }
err_no_busclks:
clk_disable_unprepare(sc->clk_io);
- clk_put(sc->clk_io);
err_pdata_io_clk:
sdhci_free_host(host);
@@ -785,7 +723,6 @@ static int sdhci_s3c_remove(struct platform_device *pdev)
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_s3c *sc = sdhci_priv(host);
struct s3c_sdhci_platdata *pdata = sc->pdata;
- int ptr;
if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_cleanup)
pdata->ext_cd_cleanup(&sdhci_s3c_notify_change);
@@ -805,13 +742,7 @@ static int sdhci_s3c_remove(struct platform_device *pdev)
#ifndef CONFIG_PM_RUNTIME
clk_disable_unprepare(sc->clk_bus[sc->cur_clk]);
#endif
- for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
- if (sc->clk_bus[ptr]) {
- clk_put(sc->clk_bus[ptr]);
- }
- }
clk_disable_unprepare(sc->clk_io);
- clk_put(sc->clk_io);
sdhci_free_host(host);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c
new file mode 100644
index 0000000..09805af
--- /dev/null
+++ b/drivers/mmc/host/sdhci-sirf.c
@@ -0,0 +1,193 @@
+/*
+ * SDHCI support for SiRF primaII and marco SoCs
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/pinctrl/consumer.h>
+#include "sdhci-pltfm.h"
+
+struct sdhci_sirf_priv {
+ struct clk *clk;
+ int gpio_cd;
+};
+
+static unsigned int sdhci_sirf_get_max_clk(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_sirf_priv *priv = pltfm_host->priv;
+ return clk_get_rate(priv->clk);
+}
+
+static struct sdhci_ops sdhci_sirf_ops = {
+ .get_max_clock = sdhci_sirf_get_max_clk,
+};
+
+static struct sdhci_pltfm_data sdhci_sirf_pdata = {
+ .ops = &sdhci_sirf_ops,
+ .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+ SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
+ SDHCI_QUIRK_DELAY_AFTER_POWER,
+};
+
+static int sdhci_sirf_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_sirf_priv *priv;
+ struct pinctrl *pinctrl;
+ int ret;
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl)) {
+ dev_err(&pdev->dev, "unable to get pinmux");
+ return PTR_ERR(pinctrl);
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_sirf_priv),
+ GFP_KERNEL);
+ if (!priv) {
+ dev_err(&pdev->dev, "unable to allocate private data");
+ return -ENOMEM;
+ }
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "unable to get clock");
+ return PTR_ERR(priv->clk);
+ }
+
+ if (pdev->dev.of_node) {
+ priv->gpio_cd = of_get_named_gpio(pdev->dev.of_node,
+ "cd-gpios", 0);
+ } else {
+ priv->gpio_cd = -EINVAL;
+ }
+
+ host = sdhci_pltfm_init(pdev, &sdhci_sirf_pdata);
+ if (IS_ERR(host)) {
+ ret = PTR_ERR(host);
+ goto err_sdhci_pltfm_init;
+ }
+
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->priv = priv;
+
+ sdhci_get_of_property(pdev);
+
+ clk_prepare_enable(priv->clk);
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_sdhci_add;
+
+ /*
+ * We must request the IRQ after sdhci_add_host(), as the tasklet only
+ * gets set up in sdhci_add_host(); requesting it earlier would oops.
+ */
+ if (gpio_is_valid(priv->gpio_cd)) {
+ ret = mmc_gpio_request_cd(host->mmc, priv->gpio_cd);
+ if (ret) {
+ dev_err(&pdev->dev, "card detect irq request failed: %d\n",
+ ret);
+ goto err_request_cd;
+ }
+ }
+
+ return 0;
+
+err_request_cd:
+ sdhci_remove_host(host, 0);
+err_sdhci_add:
+ clk_disable_unprepare(priv->clk);
+ sdhci_pltfm_free(pdev);
+err_sdhci_pltfm_init:
+ return ret;
+}
+
+static int sdhci_sirf_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_sirf_priv *priv = pltfm_host->priv;
+
+ sdhci_pltfm_unregister(pdev);
+
+ if (gpio_is_valid(priv->gpio_cd))
+ mmc_gpio_free_cd(host->mmc);
+
+ clk_disable_unprepare(priv->clk);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_sirf_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_sirf_priv *priv = pltfm_host->priv;
+ int ret;
+
+ ret = sdhci_suspend_host(host);
+ if (ret)
+ return ret;
+
+ clk_disable(priv->clk);
+
+ return 0;
+}
+
+static int sdhci_sirf_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_sirf_priv *priv = pltfm_host->priv;
+ int ret;
+
+ ret = clk_enable(priv->clk);
+ if (ret) {
+ dev_dbg(dev, "Resume: Error enabling clock\n");
+ return ret;
+ }
+
+ return sdhci_resume_host(host);
+}
+
+static SIMPLE_DEV_PM_OPS(sdhci_sirf_pm_ops, sdhci_sirf_suspend, sdhci_sirf_resume);
+#endif
+
+static const struct of_device_id sdhci_sirf_of_match[] = {
+ { .compatible = "sirf,prima2-sdhc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sdhci_sirf_of_match);
+
+static struct platform_driver sdhci_sirf_driver = {
+ .driver = {
+ .name = "sdhci-sirf",
+ .owner = THIS_MODULE,
+ .of_match_table = sdhci_sirf_of_match,
+#ifdef CONFIG_PM_SLEEP
+ .pm = &sdhci_sirf_pm_ops,
+#endif
+ },
+ .probe = sdhci_sirf_probe,
+ .remove = sdhci_sirf_remove,
+};
+
+module_platform_driver(sdhci_sirf_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for SiRFprimaII/SiRFmarco");
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index c6ece0b..7ae5b3a 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -36,7 +36,7 @@ struct spear_sdhci {
};
/* sdhci ops */
-static struct sdhci_ops sdhci_pltfm_ops = {
+static const struct sdhci_ops sdhci_pltfm_ops = {
/* Nothing to do for now. */
};
@@ -291,7 +291,7 @@ static int sdhci_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int sdhci_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 08b06e9..e0dba74 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -24,6 +24,7 @@
#include <linux/gpio.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
#include <asm/gpio.h>
@@ -38,16 +39,13 @@
#define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2)
struct sdhci_tegra_soc_data {
- struct sdhci_pltfm_data *pdata;
+ const struct sdhci_pltfm_data *pdata;
u32 nvquirks;
};
struct sdhci_tegra {
const struct sdhci_tegra_soc_data *soc_data;
- int cd_gpio;
- int wp_gpio;
int power_gpio;
- int is_8bit;
};
static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
@@ -107,23 +105,9 @@ static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_tegra *tegra_host = pltfm_host->priv;
-
- if (!gpio_is_valid(tegra_host->wp_gpio))
- return -1;
-
- return gpio_get_value(tegra_host->wp_gpio);
+ return mmc_gpio_get_ro(host->mmc);
}
-static irqreturn_t carddetect_irq(int irq, void *data)
-{
- struct sdhci_host *sdhost = (struct sdhci_host *)data;
-
- tasklet_schedule(&sdhost->card_tasklet);
- return IRQ_HANDLED;
-};
-
static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -145,12 +129,11 @@ static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask)
static int tegra_sdhci_buswidth(struct sdhci_host *host, int bus_width)
{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_tegra *tegra_host = pltfm_host->priv;
u32 ctrl;
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
- if (tegra_host->is_8bit && bus_width == MMC_BUS_WIDTH_8) {
+ if ((host->mmc->caps & MMC_CAP_8_BIT_DATA) &&
+ (bus_width == MMC_BUS_WIDTH_8)) {
ctrl &= ~SDHCI_CTRL_4BITBUS;
ctrl |= SDHCI_CTRL_8BITBUS;
} else {
@@ -164,7 +147,7 @@ static int tegra_sdhci_buswidth(struct sdhci_host *host, int bus_width)
return 0;
}
-static struct sdhci_ops tegra_sdhci_ops = {
+static const struct sdhci_ops tegra_sdhci_ops = {
.get_ro = tegra_sdhci_get_ro,
.read_l = tegra_sdhci_readl,
.read_w = tegra_sdhci_readw,
@@ -173,8 +156,7 @@ static struct sdhci_ops tegra_sdhci_ops = {
.platform_reset_exit = tegra_sdhci_reset_exit,
};
-#ifdef CONFIG_ARCH_TEGRA_2x_SOC
-static struct sdhci_pltfm_data sdhci_tegra20_pdata = {
+static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
SDHCI_QUIRK_SINGLE_POWER_WRITE |
SDHCI_QUIRK_NO_HISPD_BIT |
@@ -187,10 +169,8 @@ static struct sdhci_tegra_soc_data soc_data_tegra20 = {
.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
NVQUIRK_ENABLE_BLOCK_GAP_DET,
};
-#endif
-#ifdef CONFIG_ARCH_TEGRA_3x_SOC
-static struct sdhci_pltfm_data sdhci_tegra30_pdata = {
+static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
SDHCI_QUIRK_SINGLE_POWER_WRITE |
@@ -203,32 +183,37 @@ static struct sdhci_tegra_soc_data soc_data_tegra30 = {
.pdata = &sdhci_tegra30_pdata,
.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300,
};
-#endif
+
+static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
+ .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_SINGLE_POWER_WRITE |
+ SDHCI_QUIRK_NO_HISPD_BIT |
+ SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
+ .ops = &tegra_sdhci_ops,
+};
+
+static struct sdhci_tegra_soc_data soc_data_tegra114 = {
+ .pdata = &sdhci_tegra114_pdata,
+};
static const struct of_device_id sdhci_tegra_dt_match[] = {
-#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+ { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
-#endif
-#ifdef CONFIG_ARCH_TEGRA_2x_SOC
{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
-#endif
{}
};
-MODULE_DEVICE_TABLE(of, sdhci_dt_ids);
+MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
-static void sdhci_tegra_parse_dt(struct device *dev,
- struct sdhci_tegra *tegra_host)
+static void sdhci_tegra_parse_dt(struct device *dev)
{
struct device_node *np = dev->of_node;
- u32 bus_width;
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = pltfm_host->priv;
- tegra_host->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
- tegra_host->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
tegra_host->power_gpio = of_get_named_gpio(np, "power-gpios", 0);
-
- if (of_property_read_u32(np, "bus-width", &bus_width) == 0 &&
- bus_width == 8)
- tegra_host->is_8bit = 1;
+ mmc_of_parse(host->mmc);
}
static int sdhci_tegra_probe(struct platform_device *pdev)
@@ -260,7 +245,7 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
tegra_host->soc_data = soc_data;
pltfm_host->priv = tegra_host;
- sdhci_tegra_parse_dt(&pdev->dev, tegra_host);
+ sdhci_tegra_parse_dt(&pdev->dev);
if (gpio_is_valid(tegra_host->power_gpio)) {
rc = gpio_request(tegra_host->power_gpio, "sdhci_power");
@@ -272,37 +257,6 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
gpio_direction_output(tegra_host->power_gpio, 1);
}
- if (gpio_is_valid(tegra_host->cd_gpio)) {
- rc = gpio_request(tegra_host->cd_gpio, "sdhci_cd");
- if (rc) {
- dev_err(mmc_dev(host->mmc),
- "failed to allocate cd gpio\n");
- goto err_cd_req;
- }
- gpio_direction_input(tegra_host->cd_gpio);
-
- rc = request_irq(gpio_to_irq(tegra_host->cd_gpio),
- carddetect_irq,
- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
- mmc_hostname(host->mmc), host);
-
- if (rc) {
- dev_err(mmc_dev(host->mmc), "request irq error\n");
- goto err_cd_irq_req;
- }
-
- }
-
- if (gpio_is_valid(tegra_host->wp_gpio)) {
- rc = gpio_request(tegra_host->wp_gpio, "sdhci_wp");
- if (rc) {
- dev_err(mmc_dev(host->mmc),
- "failed to allocate wp gpio\n");
- goto err_wp_req;
- }
- gpio_direction_input(tegra_host->wp_gpio);
- }
-
clk = clk_get(mmc_dev(host->mmc), NULL);
if (IS_ERR(clk)) {
dev_err(mmc_dev(host->mmc), "clk err\n");
@@ -312,9 +266,6 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
clk_prepare_enable(clk);
pltfm_host->clk = clk;
- if (tegra_host->is_8bit)
- host->mmc->caps |= MMC_CAP_8_BIT_DATA;
-
rc = sdhci_add_host(host);
if (rc)
goto err_add_host;
@@ -325,15 +276,6 @@ err_add_host:
clk_disable_unprepare(pltfm_host->clk);
clk_put(pltfm_host->clk);
err_clk_get:
- if (gpio_is_valid(tegra_host->wp_gpio))
- gpio_free(tegra_host->wp_gpio);
-err_wp_req:
- if (gpio_is_valid(tegra_host->cd_gpio))
- free_irq(gpio_to_irq(tegra_host->cd_gpio), host);
-err_cd_irq_req:
- if (gpio_is_valid(tegra_host->cd_gpio))
- gpio_free(tegra_host->cd_gpio);
-err_cd_req:
if (gpio_is_valid(tegra_host->power_gpio))
gpio_free(tegra_host->power_gpio);
err_power_req:
@@ -351,14 +293,6 @@ static int sdhci_tegra_remove(struct platform_device *pdev)
sdhci_remove_host(host, dead);
- if (gpio_is_valid(tegra_host->wp_gpio))
- gpio_free(tegra_host->wp_gpio);
-
- if (gpio_is_valid(tegra_host->cd_gpio)) {
- free_irq(gpio_to_irq(tegra_host->cd_gpio), host);
- gpio_free(tegra_host->cd_gpio);
- }
-
if (gpio_is_valid(tegra_host->power_gpio))
gpio_free(tegra_host->power_gpio);
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 51bbba4..2ea429c 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1581,6 +1581,37 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
sdhci_runtime_pm_put(host);
}
+static int sdhci_do_get_cd(struct sdhci_host *host)
+{
+ int gpio_cd = mmc_gpio_get_cd(host->mmc);
+
+ if (host->flags & SDHCI_DEVICE_DEAD)
+ return 0;
+
+ /* If polling/nonremovable, assume that the card is always present. */
+ if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
+ (host->mmc->caps & MMC_CAP_NONREMOVABLE))
+ return 1;
+
+ /* Try slot gpio detect */
+ if (!IS_ERR_VALUE(gpio_cd))
+ return !!gpio_cd;
+
+ /* Host native card detect */
+ return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+}
+
+static int sdhci_get_cd(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ int ret;
+
+ sdhci_runtime_pm_get(host);
+ ret = sdhci_do_get_cd(host);
+ sdhci_runtime_pm_put(host);
+ return ret;
+}
+
static int sdhci_check_ro(struct sdhci_host *host)
{
unsigned long flags;
@@ -2038,6 +2069,7 @@ static void sdhci_card_event(struct mmc_host *mmc)
static const struct mmc_host_ops sdhci_ops = {
.request = sdhci_request,
.set_ios = sdhci_set_ios,
+ .get_cd = sdhci_get_cd,
.get_ro = sdhci_get_ro,
.hw_reset = sdhci_hw_reset,
.enable_sdio_irq = sdhci_enable_sdio_irq,
@@ -2907,12 +2939,17 @@ int sdhci_add_host(struct sdhci_host *host)
host->vqmmc = NULL;
}
} else {
- regulator_enable(host->vqmmc);
+ ret = regulator_enable(host->vqmmc);
if (!regulator_is_supported_voltage(host->vqmmc, 1700000,
1950000))
caps[1] &= ~(SDHCI_SUPPORT_SDR104 |
SDHCI_SUPPORT_SDR50 |
SDHCI_SUPPORT_DDR50);
+ if (ret) {
+ pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
+ mmc_hostname(mmc), ret);
+ host->vqmmc = NULL;
+ }
}
if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index c6d0015..442f576 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -348,13 +348,11 @@ static void wmt_complete_data_request(struct wmt_mci_priv *priv)
static irqreturn_t wmt_mci_dma_isr(int irq_num, void *data)
{
- struct mmc_host *mmc;
struct wmt_mci_priv *priv;
int status;
priv = (struct wmt_mci_priv *)data;
- mmc = priv->mmc;
status = readl(priv->sdmmc_base + SDDMA_CCR) & 0x0F;
@@ -925,7 +923,7 @@ static int wmt_mci_remove(struct platform_device *pdev)
clk_put(priv->clk_sdmmc);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
mmc_free_host(mmc);
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index 3b9a284..74dbb6b 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -204,14 +204,16 @@ static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map,
struct cfi_private *cfi = map->fldrv_priv;
__u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID;
#ifdef CONFIG_MODULES
- char probename[16+sizeof(MODULE_SYMBOL_PREFIX)];
+ char probename[sizeof(VMLINUX_SYMBOL_STR(cfi_cmdset_%4.4X))];
cfi_cmdset_fn_t *probe_function;
- sprintf(probename, MODULE_SYMBOL_PREFIX "cfi_cmdset_%4.4X", type);
+ sprintf(probename, VMLINUX_SYMBOL_STR(cfi_cmdset_%4.4X), type);
probe_function = __symbol_get(probename);
if (!probe_function) {
- request_module(probename + sizeof(MODULE_SYMBOL_PREFIX) - 1);
+ char modname[sizeof("cfi_cmdset_%4.4X")];
+ sprintf(modname, "cfi_cmdset_%4.4X", type);
+ request_module(modname);
probe_function = __symbol_get(probename);
}
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 5ad39bb..5073cbc 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -237,13 +237,12 @@ error_put:
return ret;
}
-static int blktrans_release(struct gendisk *disk, fmode_t mode)
+static void blktrans_release(struct gendisk *disk, fmode_t mode)
{
struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);
- int ret = 0;
if (!dev)
- return ret;
+ return;
mutex_lock(&dev->lock);
@@ -254,13 +253,13 @@ static int blktrans_release(struct gendisk *disk, fmode_t mode)
module_put(dev->tr->owner);
if (dev->mtd) {
- ret = dev->tr->release ? dev->tr->release(dev) : 0;
+ if (dev->tr->release)
+ dev->tr->release(dev);
__put_mtd_device(dev->mtd);
}
unlock:
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
- return ret;
}
static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
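
The blktrans_release() change above is part of a wider conversion of block release handlers from int to void; callers ignore the return value, so the per-driver release hooks (mtdblock_release() and sm_release() below) stop returning a status. A rough sketch of a converted hook, with hypothetical names:

static void example_blktrans_release(struct mtd_blktrans_dev *dev)
{
	/* cleanup that can fail must now be handled (or logged) here,
	 * since there is no way to return an error to the caller
	 */
	if (example_flush_cache(dev))	/* hypothetical helper */
		pr_warn("example: cache flush on release failed\n");
}
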
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 6c6d807..2aef5dd 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -308,7 +308,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
return 0;
}
-static int mtdblock_release(struct mtd_blktrans_dev *mbd)
+static void mtdblock_release(struct mtd_blktrans_dev *mbd)
{
struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
@@ -333,8 +333,6 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
mutex_unlock(&mtdblks_lock);
pr_debug("ok\n");
-
- return 0;
}
static int mtdblock_flush(struct mtd_blktrans_dev *dev)
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 61d5f56..322ca65 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -36,6 +36,7 @@
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
+#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 717881a..25ecfa1 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -36,7 +36,6 @@
#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand"
#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch"
#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch"
-#define GPMI_NAND_DMA_INTERRUPT_RES_NAME "gpmi-dma"
/* add our owner bbt descriptor */
static uint8_t scan_ff_pattern[] = { 0xff };
@@ -420,28 +419,6 @@ static void release_bch_irq(struct gpmi_nand_data *this)
free_irq(i, this);
}
-static bool gpmi_dma_filter(struct dma_chan *chan, void *param)
-{
- struct gpmi_nand_data *this = param;
- int dma_channel = (int)this->private;
-
- if (!mxs_dma_is_apbh(chan))
- return false;
- /*
- * only catch the GPMI dma channels :
- * for mx23 : MX23_DMA_GPMI0 ~ MX23_DMA_GPMI3
- * (These four channels share the same IRQ!)
- *
- * for mx28 : MX28_DMA_GPMI0 ~ MX28_DMA_GPMI7
- * (These eight channels share the same IRQ!)
- */
- if (dma_channel == chan->chan_id) {
- chan->private = &this->dma_data;
- return true;
- }
- return false;
-}
-
static void release_dma_channels(struct gpmi_nand_data *this)
{
unsigned int i;
@@ -455,36 +432,10 @@ static void release_dma_channels(struct gpmi_nand_data *this)
static int acquire_dma_channels(struct gpmi_nand_data *this)
{
struct platform_device *pdev = this->pdev;
- struct resource *r_dma;
- struct device_node *dn;
- u32 dma_channel;
- int ret;
struct dma_chan *dma_chan;
- dma_cap_mask_t mask;
-
- /* dma channel, we only use the first one. */
- dn = pdev->dev.of_node;
- ret = of_property_read_u32(dn, "fsl,gpmi-dma-channel", &dma_channel);
- if (ret) {
- pr_err("unable to get DMA channel from dt.\n");
- goto acquire_err;
- }
- this->private = (void *)dma_channel;
-
- /* gpmi dma interrupt */
- r_dma = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
- GPMI_NAND_DMA_INTERRUPT_RES_NAME);
- if (!r_dma) {
- pr_err("Can't get resource for DMA\n");
- goto acquire_err;
- }
- this->dma_data.chan_irq = r_dma->start;
/* request dma channel */
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
+ dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
if (!dma_chan) {
pr_err("Failed to request DMA channel.\n");
goto acquire_err;
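
The gpmi-nand hunk above drops the custom DMA filter and the explicit channel/IRQ lookup in favour of dma_request_slave_channel(), which resolves the channel through the device's DT dmas/dma-names properties. A sketch of the request path with error handling, written as a hypothetical helper rather than the driver's code:

static int example_acquire_dma(struct platform_device *pdev,
			       struct dma_chan **chan)
{
	/* "rx-tx" must match an entry in the node's dma-names property */
	*chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
	if (!*chan) {
		dev_err(&pdev->dev, "failed to request DMA channel\n");
		return -ENODEV;
	}
	return 0;
}
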
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index 0729477..a7685e3 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -20,7 +20,7 @@
#include <linux/mtd/nand.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
-#include <linux/fsl/mxs-dma.h>
+#include <linux/dmaengine.h>
#define GPMI_CLK_MAX 5 /* MX6Q needs five clocks */
struct resources {
@@ -180,7 +180,6 @@ struct gpmi_nand_data {
/* DMA channels */
#define DMA_CHANS 8
struct dma_chan *dma_chans[DMA_CHANS];
- struct mxs_dma_data dma_data;
enum dma_ops_type last_dma_type;
enum dma_ops_type dma_type;
struct completion dma_done;
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index 33f2a8f..2cf7408 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -23,11 +23,11 @@
#include <linux/mtd/partitions.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <asm/mach/flash.h>
-#include <plat/regs-onenand.h>
-#include <linux/io.h>
+#include "samsung.h"
enum soc_type {
TYPE_S3C6400,
diff --git a/drivers/mtd/onenand/samsung.h b/drivers/mtd/onenand/samsung.h
new file mode 100644
index 0000000..c4a80e6
--- /dev/null
+++ b/drivers/mtd/onenand/samsung.h
@@ -0,0 +1,61 @@
+/*
+ * linux/arch/arm/plat-s3c/include/plat/regs-onenand.h
+ *
+ * Copyright (C) 2008-2010 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SAMSUNG_ONENAND_H__
+#define __SAMSUNG_ONENAND_H__
+
+/*
+ * OneNAND Controller
+ */
+#define MEM_CFG_OFFSET 0x0000
+#define BURST_LEN_OFFSET 0x0010
+#define MEM_RESET_OFFSET 0x0020
+#define INT_ERR_STAT_OFFSET 0x0030
+#define INT_ERR_MASK_OFFSET 0x0040
+#define INT_ERR_ACK_OFFSET 0x0050
+#define ECC_ERR_STAT_OFFSET 0x0060
+#define MANUFACT_ID_OFFSET 0x0070
+#define DEVICE_ID_OFFSET 0x0080
+#define DATA_BUF_SIZE_OFFSET 0x0090
+#define BOOT_BUF_SIZE_OFFSET 0x00A0
+#define BUF_AMOUNT_OFFSET 0x00B0
+#define TECH_OFFSET 0x00C0
+#define FBA_WIDTH_OFFSET 0x00D0
+#define FPA_WIDTH_OFFSET 0x00E0
+#define FSA_WIDTH_OFFSET 0x00F0
+#define TRANS_SPARE_OFFSET 0x0140
+#define DBS_DFS_WIDTH_OFFSET 0x0160
+#define INT_PIN_ENABLE_OFFSET 0x01A0
+#define ACC_CLOCK_OFFSET 0x01C0
+#define FLASH_VER_ID_OFFSET 0x01F0
+#define FLASH_AUX_CNTRL_OFFSET 0x0300 /* s3c64xx only */
+
+#define ONENAND_MEM_RESET_HOT 0x3
+#define ONENAND_MEM_RESET_COLD 0x2
+#define ONENAND_MEM_RESET_WARM 0x1
+
+#define CACHE_OP_ERR (1 << 13)
+#define RST_CMP (1 << 12)
+#define RDY_ACT (1 << 11)
+#define INT_ACT (1 << 10)
+#define UNSUP_CMD (1 << 9)
+#define LOCKED_BLK (1 << 8)
+#define BLK_RW_CMP (1 << 7)
+#define ERS_CMP (1 << 6)
+#define PGM_CMP (1 << 5)
+#define LOAD_CMP (1 << 4)
+#define ERS_FAIL (1 << 3)
+#define PGM_FAIL (1 << 2)
+#define INT_TO (1 << 1)
+#define LD_FAIL_ECC_ERR (1 << 0)
+
+#define TSRF (1 << 0)
+
+#endif
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 8dd6ba5..f9d5615 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -1107,7 +1107,7 @@ static int sm_flush(struct mtd_blktrans_dev *dev)
}
/* outside interface: device is released */
-static int sm_release(struct mtd_blktrans_dev *dev)
+static void sm_release(struct mtd_blktrans_dev *dev)
{
struct sm_ftl *ftl = dev->priv;
@@ -1116,7 +1116,6 @@ static int sm_release(struct mtd_blktrans_dev *dev)
cancel_work_sync(&ftl->flush_work);
sm_cache_flush(ftl);
mutex_unlock(&ftl->mutex);
- return 0;
}
/* outside interface: get geometry */
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 3cea38d..94d06f1 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -218,15 +218,13 @@ static const struct seq_operations bond_info_seq_ops = {
static int bond_info_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
- struct proc_dir_entry *proc;
int res;
res = seq_open(file, &bond_info_seq_ops);
if (!res) {
/* recover the pointer buried in proc_dir_entry data */
seq = file->private_data;
- proc = PDE(inode);
- seq->private = proc->data;
+ seq->private = PDE_DATA(inode);
}
return res;
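
The bonding change above follows the removal of direct proc_dir_entry access: the private pointer that used to be read through PDE(inode)->data is now obtained with PDE_DATA(inode). For a simple seq_file the same conversion usually collapses to a one-liner; a hypothetical example:

static int example_proc_open(struct inode *inode, struct file *file)
{
	/* example_proc_show is assumed to be the seq_file show callback */
	return single_open(file, example_proc_show, PDE_DATA(inode));
}
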
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index a966128..7ffc756 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -40,3 +40,17 @@ config CAIF_HSI
The caif low level driver for CAIF over HSI.
Be aware that if you enable this then you also need to
enable a low-level HSI driver.
+
+config CAIF_VIRTIO
+ tristate "CAIF virtio transport driver"
+ depends on CAIF
+ select VHOST_RING
+ select VIRTIO
+ select GENERIC_ALLOCATOR
+ default n
+ ---help---
+ The caif driver for CAIF over Virtio.
+
+if CAIF_VIRTIO
+source "drivers/vhost/Kconfig"
+endif
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 15a9d2f..9bbd453 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -9,3 +9,6 @@ obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
# HSI interface
obj-$(CONFIG_CAIF_HSI) += caif_hsi.o
+
+# Virtio interface
+obj-$(CONFIG_CAIF_VIRTIO) += caif_virtio.o
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
new file mode 100644
index 0000000..b9ed128
--- /dev/null
+++ b/drivers/net/caif/caif_virtio.c
@@ -0,0 +1,790 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2013
+ * Authors: Vicram Arv
+ * Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ * Sjur Brendeland
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#include <linux/module.h>
+#include <linux/if_arp.h>
+#include <linux/virtio.h>
+#include <linux/vringh.h>
+#include <linux/debugfs.h>
+#include <linux/spinlock.h>
+#include <linux/genalloc.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_caif.h>
+#include <linux/virtio_ring.h>
+#include <linux/dma-mapping.h>
+#include <net/caif/caif_dev.h>
+#include <linux/virtio_config.h>
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Vicram Arv");
+MODULE_AUTHOR("Sjur Brendeland");
+MODULE_DESCRIPTION("Virtio CAIF Driver");
+
+/* NAPI schedule quota */
+#define CFV_DEFAULT_QUOTA 32
+
+/* Defaults used if virtio config space is unavailable */
+#define CFV_DEF_MTU_SIZE 4096
+#define CFV_DEF_HEADROOM 32
+#define CFV_DEF_TAILROOM 32
+
+/* Required IP header alignment */
+#define IP_HDR_ALIGN 4
+
+/* struct cfv_napi_context - NAPI context info
+ * @riov: IOV holding data read from the ring. Note that riov may
+ * still hold data when cfv_rx_poll() returns.
+ * @head: Last descriptor ID we received from vringh_getdesc_kern.
+ * We use this to put descriptor back on the used ring. USHRT_MAX is
+ * used to indicate invalid head-id.
+ */
+struct cfv_napi_context {
+ struct vringh_kiov riov;
+ unsigned short head;
+};
+
+/* struct cfv_stats - statistics for debugfs
+ * @rx_napi_complete: Number of NAPI completions (RX)
+ * @rx_napi_resched: Number of calls where the full quota was used (RX)
+ * @rx_nomem: Number of SKB alloc failures (RX)
+ * @rx_kicks: Number of RX kicks
+ * @tx_full_ring: Number times TX ring was full
+ * @tx_no_mem: Number of times TX went out of memory
+ * @tx_flow_on: Number of flow on (TX)
+ * @tx_kicks: Number of TX kicks
+ */
+struct cfv_stats {
+ u32 rx_napi_complete;
+ u32 rx_napi_resched;
+ u32 rx_nomem;
+ u32 rx_kicks;
+ u32 tx_full_ring;
+ u32 tx_no_mem;
+ u32 tx_flow_on;
+ u32 tx_kicks;
+};
+
+/* struct cfv_info - Caif Virtio control structure
+ * @cfdev: caif common header
+ * @vdev: Associated virtio device
+ * @vr_rx: rx/downlink host vring
+ * @vq_tx: tx/uplink virtqueue
+ * @ndev: CAIF link layer device
+ * @watermark_tx: indicates number of free descriptors we need
+ * to reopen the tx-queues after overload.
+ * @tx_lock: protects vq_tx from concurrent use
+ * @tx_release_tasklet: Tasklet for freeing consumed TX buffers
+ * @napi: Napi context used in cfv_rx_poll()
+ * @ctx: Context data used in cfv_rx_poll()
+ * @tx_hr: transmit headroom
+ * @rx_hr: receive headroom
+ * @tx_tr: transmit tail room
+ * @rx_tr: receive tail room
+ * @mtu: transmit max size
+ * @mru: receive max size
+ * @allocsz: size of dma memory reserved for TX buffers
+ * @alloc_addr: virtual address to dma memory for TX buffers
+ * @alloc_dma: dma address to dma memory for TX buffers
+ * @genpool: Gen Pool used for allocating TX buffers
+ * @reserved_mem: Address of the memory reserve allocated from genpool
+ * @reserved_size: Size of the memory reserve allocated from genpool
+ * @stats: Statistics exposed in debugfs
+ * @debugfs: Debugfs dentry for statistic counters
+ */
+struct cfv_info {
+ struct caif_dev_common cfdev;
+ struct virtio_device *vdev;
+ struct vringh *vr_rx;
+ struct virtqueue *vq_tx;
+ struct net_device *ndev;
+ unsigned int watermark_tx;
+ /* Protect access to vq_tx */
+ spinlock_t tx_lock;
+ struct tasklet_struct tx_release_tasklet;
+ struct napi_struct napi;
+ struct cfv_napi_context ctx;
+ u16 tx_hr;
+ u16 rx_hr;
+ u16 tx_tr;
+ u16 rx_tr;
+ u32 mtu;
+ u32 mru;
+ size_t allocsz;
+ void *alloc_addr;
+ dma_addr_t alloc_dma;
+ struct gen_pool *genpool;
+ unsigned long reserved_mem;
+ size_t reserved_size;
+ struct cfv_stats stats;
+ struct dentry *debugfs;
+};
+
+/* struct buf_info - maintains transmit buffer data handle
+ * @size: size of transmit buffer
+ * @dma_handle: handle to allocated dma device memory area
+ * @vaddr: virtual address mapping to allocated memory area
+ */
+struct buf_info {
+ size_t size;
+ u8 *vaddr;
+};
+
+/* Called from virtio device, in IRQ context */
+static void cfv_release_cb(struct virtqueue *vq_tx)
+{
+ struct cfv_info *cfv = vq_tx->vdev->priv;
+
+ ++cfv->stats.tx_kicks;
+ tasklet_schedule(&cfv->tx_release_tasklet);
+}
+
+static void free_buf_info(struct cfv_info *cfv, struct buf_info *buf_info)
+{
+ if (!buf_info)
+ return;
+ gen_pool_free(cfv->genpool, (unsigned long) buf_info->vaddr,
+ buf_info->size);
+ kfree(buf_info);
+}
+
+/* This is invoked whenever the remote processor has completed processing
+ * a TX msg we just sent and has put the buffer back on the used ring.
+ */
+static void cfv_release_used_buf(struct virtqueue *vq_tx)
+{
+ struct cfv_info *cfv = vq_tx->vdev->priv;
+ unsigned long flags;
+
+ BUG_ON(vq_tx != cfv->vq_tx);
+
+ for (;;) {
+ unsigned int len;
+ struct buf_info *buf_info;
+
+ /* Get used buffer from used ring to recycle used descriptors */
+ spin_lock_irqsave(&cfv->tx_lock, flags);
+ buf_info = virtqueue_get_buf(vq_tx, &len);
+ spin_unlock_irqrestore(&cfv->tx_lock, flags);
+
+ /* Stop looping if there are no more buffers to free */
+ if (!buf_info)
+ break;
+
+ free_buf_info(cfv, buf_info);
+
+ /* watermark_tx indicates if we previously stopped the tx
+ * queues. If we have enough free slots in the virtio ring,
+ * re-establish the memory reserve and open up the tx queues.
+ */
+ if (cfv->vq_tx->num_free <= cfv->watermark_tx)
+ continue;
+
+ /* Re-establish memory reserve */
+ if (cfv->reserved_mem == 0 && cfv->genpool)
+ cfv->reserved_mem =
+ gen_pool_alloc(cfv->genpool,
+ cfv->reserved_size);
+
+ /* Open up the tx queues */
+ if (cfv->reserved_mem) {
+ cfv->watermark_tx =
+ virtqueue_get_vring_size(cfv->vq_tx);
+ netif_tx_wake_all_queues(cfv->ndev);
+ /* Buffers are recycled in cfv_netdev_tx, so
+ * disable notifications when queues are opened.
+ */
+ virtqueue_disable_cb(cfv->vq_tx);
+ ++cfv->stats.tx_flow_on;
+ } else {
+ /* if no memory reserve, wait for more free slots */
+ WARN_ON(cfv->watermark_tx >
+ virtqueue_get_vring_size(cfv->vq_tx));
+ cfv->watermark_tx +=
+ virtqueue_get_vring_size(cfv->vq_tx) / 4;
+ }
+ }
+}
+
+/* Allocate a SKB and copy packet data to it */
+static struct sk_buff *cfv_alloc_and_copy_skb(int *err,
+ struct cfv_info *cfv,
+ u8 *frm, u32 frm_len)
+{
+ struct sk_buff *skb;
+ u32 cfpkt_len, pad_len;
+
+ *err = 0;
+ /* Verify the frame length against the RX head/tail room and the MRU */
+ if (frm_len > cfv->mru || frm_len <= cfv->rx_hr + cfv->rx_tr) {
+ netdev_err(cfv->ndev,
+ "Invalid frmlen:%u mtu:%u hr:%d tr:%d\n",
+ frm_len, cfv->mru, cfv->rx_hr,
+ cfv->rx_tr);
+ *err = -EPROTO;
+ return NULL;
+ }
+
+ cfpkt_len = frm_len - (cfv->rx_hr + cfv->rx_tr);
+ pad_len = (unsigned long)(frm + cfv->rx_hr) & (IP_HDR_ALIGN - 1);
+
+ skb = netdev_alloc_skb(cfv->ndev, frm_len + pad_len);
+ if (!skb) {
+ *err = -ENOMEM;
+ return NULL;
+ }
+
+ skb_reserve(skb, cfv->rx_hr + pad_len);
+
+ memcpy(skb_put(skb, cfpkt_len), frm + cfv->rx_hr, cfpkt_len);
+ return skb;
+}
+
+/* Get packets from the host vring */
+static int cfv_rx_poll(struct napi_struct *napi, int quota)
+{
+ struct cfv_info *cfv = container_of(napi, struct cfv_info, napi);
+ int rxcnt = 0;
+ int err = 0;
+ void *buf;
+ struct sk_buff *skb;
+ struct vringh_kiov *riov = &cfv->ctx.riov;
+ unsigned int skb_len;
+
+again:
+ do {
+ skb = NULL;
+
+ /* Put the previous iovec back on the used ring and
+ * fetch a new iovec if we have processed all elements.
+ */
+ if (riov->i == riov->used) {
+ if (cfv->ctx.head != USHRT_MAX) {
+ vringh_complete_kern(cfv->vr_rx,
+ cfv->ctx.head,
+ 0);
+ cfv->ctx.head = USHRT_MAX;
+ }
+
+ err = vringh_getdesc_kern(
+ cfv->vr_rx,
+ riov,
+ NULL,
+ &cfv->ctx.head,
+ GFP_ATOMIC);
+
+ if (err <= 0)
+ goto exit;
+ }
+
+ buf = phys_to_virt((unsigned long) riov->iov[riov->i].iov_base);
+ /* TODO: Add check on valid buffer address */
+
+ skb = cfv_alloc_and_copy_skb(&err, cfv, buf,
+ riov->iov[riov->i].iov_len);
+ if (unlikely(err))
+ goto exit;
+
+ /* Push received packet up the stack. */
+ skb_len = skb->len;
+ skb->protocol = htons(ETH_P_CAIF);
+ skb_reset_mac_header(skb);
+ skb->dev = cfv->ndev;
+ err = netif_receive_skb(skb);
+ if (unlikely(err)) {
+ ++cfv->ndev->stats.rx_dropped;
+ } else {
+ ++cfv->ndev->stats.rx_packets;
+ cfv->ndev->stats.rx_bytes += skb_len;
+ }
+
+ ++riov->i;
+ ++rxcnt;
+ } while (rxcnt < quota);
+
+ ++cfv->stats.rx_napi_resched;
+ goto out;
+
+exit:
+ switch (err) {
+ case 0:
+ ++cfv->stats.rx_napi_complete;
+
+ /* Really out of packets? (stolen from virtio_net) */
+ napi_complete(napi);
+ if (unlikely(!vringh_notify_enable_kern(cfv->vr_rx)) &&
+ napi_schedule_prep(napi)) {
+ vringh_notify_disable_kern(cfv->vr_rx);
+ __napi_schedule(napi);
+ goto again;
+ }
+ break;
+
+ case -ENOMEM:
+ ++cfv->stats.rx_nomem;
+ dev_kfree_skb(skb);
+ /* Stop NAPI poll on OOM, we hope to be polled later */
+ napi_complete(napi);
+ vringh_notify_enable_kern(cfv->vr_rx);
+ break;
+
+ default:
+ /* We're doomed, any modem fault is fatal */
+ netdev_warn(cfv->ndev, "Bad ring, disable device\n");
+ cfv->ndev->stats.rx_dropped = riov->used - riov->i;
+ napi_complete(napi);
+ vringh_notify_disable_kern(cfv->vr_rx);
+ netif_carrier_off(cfv->ndev);
+ break;
+ }
+out:
+ if (rxcnt && vringh_need_notify_kern(cfv->vr_rx) > 0)
+ vringh_notify(cfv->vr_rx);
+ return rxcnt;
+}
+
+static void cfv_recv(struct virtio_device *vdev, struct vringh *vr_rx)
+{
+ struct cfv_info *cfv = vdev->priv;
+
+ ++cfv->stats.rx_kicks;
+ vringh_notify_disable_kern(cfv->vr_rx);
+ napi_schedule(&cfv->napi);
+}
+
+static void cfv_destroy_genpool(struct cfv_info *cfv)
+{
+ if (cfv->alloc_addr)
+ dma_free_coherent(cfv->vdev->dev.parent->parent,
+ cfv->allocsz, cfv->alloc_addr,
+ cfv->alloc_dma);
+
+ if (!cfv->genpool)
+ return;
+ gen_pool_free(cfv->genpool, cfv->reserved_mem,
+ cfv->reserved_size);
+ gen_pool_destroy(cfv->genpool);
+ cfv->genpool = NULL;
+}
+
+static int cfv_create_genpool(struct cfv_info *cfv)
+{
+ int err;
+
+ /* dma_alloc can only allocate whole pages, and we need a more
+ * fine-grained allocation, so we use genpool. We ask for the space
+ * needed by IP and a full ring. If the dma allocation fails we retry
+ * with a smaller allocation size.
+ */
+ err = -ENOMEM;
+ cfv->allocsz = (virtqueue_get_vring_size(cfv->vq_tx) *
+ (ETH_DATA_LEN + cfv->tx_hr + cfv->tx_tr) * 11)/10;
+ if (cfv->allocsz <= (num_possible_cpus() + 1) * cfv->ndev->mtu)
+ return -EINVAL;
+
+ for (;;) {
+ if (cfv->allocsz <= num_possible_cpus() * cfv->ndev->mtu) {
+ netdev_info(cfv->ndev, "Not enough device memory\n");
+ return -ENOMEM;
+ }
+
+ cfv->alloc_addr = dma_alloc_coherent(
+ cfv->vdev->dev.parent->parent,
+ cfv->allocsz, &cfv->alloc_dma,
+ GFP_ATOMIC);
+ if (cfv->alloc_addr)
+ break;
+
+ cfv->allocsz = (cfv->allocsz * 3) >> 2;
+ }
+
+ netdev_dbg(cfv->ndev, "Allocated %zd bytes from dma-memory\n",
+ cfv->allocsz);
+
+ /* Allocate on 128 bytes boundaries (1 << 7)*/
+ cfv->genpool = gen_pool_create(7, -1);
+ if (!cfv->genpool)
+ goto err;
+
+ err = gen_pool_add_virt(cfv->genpool, (unsigned long)cfv->alloc_addr,
+ (phys_addr_t)virt_to_phys(cfv->alloc_addr),
+ cfv->allocsz, -1);
+ if (err)
+ goto err;
+
+ /* Reserve some memory for low memory situations. If we hit the roof
+ * in the memory pool, we stop TX flow and release the reserve.
+ */
+ cfv->reserved_size = num_possible_cpus() * cfv->ndev->mtu;
+ cfv->reserved_mem = gen_pool_alloc(cfv->genpool,
+ cfv->reserved_size);
+ if (!cfv->reserved_mem) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx);
+ return 0;
+err:
+ cfv_destroy_genpool(cfv);
+ return err;
+}
+
+/* Enable the CAIF interface and allocate the memory-pool */
+static int cfv_netdev_open(struct net_device *netdev)
+{
+ struct cfv_info *cfv = netdev_priv(netdev);
+
+ if (cfv_create_genpool(cfv))
+ return -ENOMEM;
+
+ netif_carrier_on(netdev);
+ napi_enable(&cfv->napi);
+
+ /* Schedule NAPI to read any pending packets */
+ napi_schedule(&cfv->napi);
+ return 0;
+}
+
+/* Disable the CAIF interface and free the memory-pool */
+static int cfv_netdev_close(struct net_device *netdev)
+{
+ struct cfv_info *cfv = netdev_priv(netdev);
+ unsigned long flags;
+ struct buf_info *buf_info;
+
+ /* Disable interrupts, queues and NAPI polling */
+ netif_carrier_off(netdev);
+ virtqueue_disable_cb(cfv->vq_tx);
+ vringh_notify_disable_kern(cfv->vr_rx);
+ napi_disable(&cfv->napi);
+
+ /* Release any TX buffers on both used and available rings */
+ cfv_release_used_buf(cfv->vq_tx);
+ spin_lock_irqsave(&cfv->tx_lock, flags);
+ while ((buf_info = virtqueue_detach_unused_buf(cfv->vq_tx)))
+ free_buf_info(cfv, buf_info);
+ spin_unlock_irqrestore(&cfv->tx_lock, flags);
+
+ /* Release all dma allocated memory and destroy the pool */
+ cfv_destroy_genpool(cfv);
+ return 0;
+}
+
+/* Allocate a buffer in dma-memory and copy skb to it */
+static struct buf_info *cfv_alloc_and_copy_to_shm(struct cfv_info *cfv,
+ struct sk_buff *skb,
+ struct scatterlist *sg)
+{
+ struct caif_payload_info *info = (void *)&skb->cb;
+ struct buf_info *buf_info = NULL;
+ u8 pad_len, hdr_ofs;
+
+ if (!cfv->genpool)
+ goto err;
+
+ if (unlikely(cfv->tx_hr + skb->len + cfv->tx_tr > cfv->mtu)) {
+ netdev_warn(cfv->ndev, "Invalid packet len (%d > %d)\n",
+ cfv->tx_hr + skb->len + cfv->tx_tr, cfv->mtu);
+ goto err;
+ }
+
+ buf_info = kmalloc(sizeof(struct buf_info), GFP_ATOMIC);
+ if (unlikely(!buf_info))
+ goto err;
+
+ /* Make the IP header aligned in the buffer */
+ hdr_ofs = cfv->tx_hr + info->hdr_len;
+ pad_len = hdr_ofs & (IP_HDR_ALIGN - 1);
+ buf_info->size = cfv->tx_hr + skb->len + cfv->tx_tr + pad_len;
+
+ /* allocate dma memory buffer */
+ buf_info->vaddr = (void *)gen_pool_alloc(cfv->genpool, buf_info->size);
+ if (unlikely(!buf_info->vaddr))
+ goto err;
+
+ /* copy skbuf contents to send buffer */
+ skb_copy_bits(skb, 0, buf_info->vaddr + cfv->tx_hr + pad_len, skb->len);
+ sg_init_one(sg, buf_info->vaddr + pad_len,
+ skb->len + cfv->tx_hr + cfv->rx_hr);
+
+ return buf_info;
+err:
+ kfree(buf_info);
+ return NULL;
+}
+
+/* Put the CAIF packet on the virtio ring and kick the receiver */
+static int cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct cfv_info *cfv = netdev_priv(netdev);
+ struct buf_info *buf_info;
+ struct scatterlist sg;
+ unsigned long flags;
+ bool flow_off = false;
+ int ret;
+
+ /* garbage collect released buffers */
+ cfv_release_used_buf(cfv->vq_tx);
+ spin_lock_irqsave(&cfv->tx_lock, flags);
+
+ /* The flow-off check takes the number of CPUs into account to make
+ * sure the virtqueue cannot be overfilled under any possible SMP
+ * condition.
+ *
+ * Flow-on is triggered again once sufficient buffers are freed.
+ */
+ if (unlikely(cfv->vq_tx->num_free <= num_present_cpus())) {
+ flow_off = true;
+ cfv->stats.tx_full_ring++;
+ }
+
+ /* If we run out of memory, we release the memory reserve and retry
+ * allocation.
+ */
+ buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
+ if (unlikely(!buf_info)) {
+ cfv->stats.tx_no_mem++;
+ flow_off = true;
+
+ if (cfv->reserved_mem && cfv->genpool) {
+ gen_pool_free(cfv->genpool, cfv->reserved_mem,
+ cfv->reserved_size);
+ cfv->reserved_mem = 0;
+ buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
+ }
+ }
+
+ if (unlikely(flow_off)) {
+ /* Turn flow on when a 1/4 of the descriptors are released */
+ cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx) / 4;
+ /* Enable notifications of recycled TX buffers */
+ virtqueue_enable_cb(cfv->vq_tx);
+ netif_tx_stop_all_queues(netdev);
+ }
+
+ if (unlikely(!buf_info)) {
+ /* If the memory reserve does its job, this shouldn't happen */
+ netdev_warn(cfv->ndev, "Out of gen_pool memory\n");
+ goto err;
+ }
+
+ ret = virtqueue_add_outbuf(cfv->vq_tx, &sg, 1, buf_info, GFP_ATOMIC);
+ if (unlikely((ret < 0))) {
+ /* If flow control works, this shouldn't happen */
+ netdev_warn(cfv->ndev, "Failed adding buffer to TX vring:%d\n",
+ ret);
+ goto err;
+ }
+
+ /* update netdev statistics */
+ cfv->ndev->stats.tx_packets++;
+ cfv->ndev->stats.tx_bytes += skb->len;
+ spin_unlock_irqrestore(&cfv->tx_lock, flags);
+
+ /* tell the remote processor it has a pending message to read */
+ virtqueue_kick(cfv->vq_tx);
+
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+err:
+ spin_unlock_irqrestore(&cfv->tx_lock, flags);
+ cfv->ndev->stats.tx_dropped++;
+ free_buf_info(cfv, buf_info);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static void cfv_tx_release_tasklet(unsigned long drv)
+{
+ struct cfv_info *cfv = (struct cfv_info *)drv;
+ cfv_release_used_buf(cfv->vq_tx);
+}
+
+static const struct net_device_ops cfv_netdev_ops = {
+ .ndo_open = cfv_netdev_open,
+ .ndo_stop = cfv_netdev_close,
+ .ndo_start_xmit = cfv_netdev_tx,
+};
+
+static void cfv_netdev_setup(struct net_device *netdev)
+{
+ netdev->netdev_ops = &cfv_netdev_ops;
+ netdev->type = ARPHRD_CAIF;
+ netdev->tx_queue_len = 100;
+ netdev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ netdev->mtu = CFV_DEF_MTU_SIZE;
+ netdev->destructor = free_netdev;
+}
+
+/* Create debugfs counters for the device */
+static inline void debugfs_init(struct cfv_info *cfv)
+{
+ cfv->debugfs =
+ debugfs_create_dir(netdev_name(cfv->ndev), NULL);
+
+ if (IS_ERR(cfv->debugfs))
+ return;
+
+ debugfs_create_u32("rx-napi-complete", S_IRUSR, cfv->debugfs,
+ &cfv->stats.rx_napi_complete);
+ debugfs_create_u32("rx-napi-resched", S_IRUSR, cfv->debugfs,
+ &cfv->stats.rx_napi_resched);
+ debugfs_create_u32("rx-nomem", S_IRUSR, cfv->debugfs,
+ &cfv->stats.rx_nomem);
+ debugfs_create_u32("rx-kicks", S_IRUSR, cfv->debugfs,
+ &cfv->stats.rx_kicks);
+ debugfs_create_u32("tx-full-ring", S_IRUSR, cfv->debugfs,
+ &cfv->stats.tx_full_ring);
+ debugfs_create_u32("tx-no-mem", S_IRUSR, cfv->debugfs,
+ &cfv->stats.tx_no_mem);
+ debugfs_create_u32("tx-kicks", S_IRUSR, cfv->debugfs,
+ &cfv->stats.tx_kicks);
+ debugfs_create_u32("tx-flow-on", S_IRUSR, cfv->debugfs,
+ &cfv->stats.tx_flow_on);
+}
+
+/* Set up CAIF for a virtio device */
+static int cfv_probe(struct virtio_device *vdev)
+{
+ vq_callback_t *vq_cbs = cfv_release_cb;
+ vrh_callback_t *vrh_cbs = cfv_recv;
+ const char *names = "output";
+ const char *cfv_netdev_name = "cfvrt";
+ struct net_device *netdev;
+ struct cfv_info *cfv;
+ int err = -EINVAL;
+
+ netdev = alloc_netdev(sizeof(struct cfv_info), cfv_netdev_name,
+ cfv_netdev_setup);
+ if (!netdev)
+ return -ENOMEM;
+
+ cfv = netdev_priv(netdev);
+ cfv->vdev = vdev;
+ cfv->ndev = netdev;
+
+ spin_lock_init(&cfv->tx_lock);
+
+ /* Get the RX virtio ring. This is a "host side vring". */
+ err = -ENODEV;
+ if (!vdev->vringh_config || !vdev->vringh_config->find_vrhs)
+ goto err;
+
+ err = vdev->vringh_config->find_vrhs(vdev, 1, &cfv->vr_rx, &vrh_cbs);
+ if (err)
+ goto err;
+
+ /* Get the TX virtio ring. This is a "guest side vring". */
+ err = vdev->config->find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names);
+ if (err)
+ goto err;
+
+ /* Get the CAIF configuration from virtio config space, if available */
+#define GET_VIRTIO_CONFIG_OPS(_v, _var, _f) \
+ ((_v)->config->get(_v, offsetof(struct virtio_caif_transf_config, _f), \
+ &_var, \
+ FIELD_SIZEOF(struct virtio_caif_transf_config, _f)))
+
+ if (vdev->config->get) {
+ GET_VIRTIO_CONFIG_OPS(vdev, cfv->tx_hr, headroom);
+ GET_VIRTIO_CONFIG_OPS(vdev, cfv->rx_hr, headroom);
+ GET_VIRTIO_CONFIG_OPS(vdev, cfv->tx_tr, tailroom);
+ GET_VIRTIO_CONFIG_OPS(vdev, cfv->rx_tr, tailroom);
+ GET_VIRTIO_CONFIG_OPS(vdev, cfv->mtu, mtu);
+ GET_VIRTIO_CONFIG_OPS(vdev, cfv->mru, mtu);
+ } else {
+ cfv->tx_hr = CFV_DEF_HEADROOM;
+ cfv->rx_hr = CFV_DEF_HEADROOM;
+ cfv->tx_tr = CFV_DEF_TAILROOM;
+ cfv->rx_tr = CFV_DEF_TAILROOM;
+ cfv->mtu = CFV_DEF_MTU_SIZE;
+ cfv->mru = CFV_DEF_MTU_SIZE;
+ }
+
+ netdev->needed_headroom = cfv->tx_hr;
+ netdev->needed_tailroom = cfv->tx_tr;
+
+ /* Disable buffer release interrupts unless we have stopped TX queues */
+ virtqueue_disable_cb(cfv->vq_tx);
+
+ netdev->mtu = cfv->mtu - cfv->tx_tr;
+ vdev->priv = cfv;
+
+ /* Initialize NAPI poll context data */
+ vringh_kiov_init(&cfv->ctx.riov, NULL, 0);
+ cfv->ctx.head = USHRT_MAX;
+ netif_napi_add(netdev, &cfv->napi, cfv_rx_poll, CFV_DEFAULT_QUOTA);
+
+ tasklet_init(&cfv->tx_release_tasklet,
+ cfv_tx_release_tasklet,
+ (unsigned long)cfv);
+
+ /* Carrier is off until netdevice is opened */
+ netif_carrier_off(netdev);
+
+ /* register Netdev */
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err);
+ goto err;
+ }
+
+ debugfs_init(cfv);
+
+ return 0;
+err:
+ netdev_warn(cfv->ndev, "CAIF Virtio probe failed:%d\n", err);
+
+ if (cfv->vr_rx)
+ vdev->vringh_config->del_vrhs(cfv->vdev);
+ if (cfv->vdev)
+ vdev->config->del_vqs(cfv->vdev);
+ free_netdev(netdev);
+ return err;
+}
+
+static void cfv_remove(struct virtio_device *vdev)
+{
+ struct cfv_info *cfv = vdev->priv;
+
+ rtnl_lock();
+ dev_close(cfv->ndev);
+ rtnl_unlock();
+
+ tasklet_kill(&cfv->tx_release_tasklet);
+ debugfs_remove_recursive(cfv->debugfs);
+
+ vringh_kiov_cleanup(&cfv->ctx.riov);
+ vdev->config->reset(vdev);
+ vdev->vringh_config->del_vrhs(cfv->vdev);
+ cfv->vr_rx = NULL;
+ vdev->config->del_vqs(cfv->vdev);
+ unregister_netdev(cfv->ndev);
+}
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_CAIF, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static unsigned int features[] = {
+};
+
+static struct virtio_driver caif_virtio_driver = {
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = cfv_probe,
+ .remove = cfv_remove,
+};
+
+module_virtio_driver(caif_virtio_driver);
+MODULE_DEVICE_TABLE(virtio, id_table);
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 1928e20..de570a8 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -951,7 +951,7 @@ static int vortex_eisa_remove(struct device *device)
unregister_netdev(dev);
iowrite16(TotalReset|0x14, ioaddr + EL3_CMD);
- release_region(dev->base_addr, VORTEX_TOTAL_SIZE);
+ release_region(edev->base_addr, VORTEX_TOTAL_SIZE);
free_netdev(dev);
return 0;
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 40649a8..6b0dc13 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -4085,7 +4085,7 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev)
if (!cp->csk_tbl)
return -ENOMEM;
- port_id = random32();
+ port_id = prandom_u32();
port_id %= CNIC_LOCAL_PORT_RANGE;
if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
CNIC_LOCAL_PORT_MIN, port_id)) {
@@ -4145,7 +4145,7 @@ static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
u32 seed;
- seed = random32();
+ seed = prandom_u32();
cnic_ctx_wr(dev, 45, 0, seed);
return 0;
}
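
The cnic hunks above are a straight rename from random32() to prandom_u32(); the value is still a non-cryptographic pseudo-random 32-bit number. A hypothetical helper showing the same bounded use as the port-id selection above:

static u16 example_pick_local_port(u16 base, u16 range)
{
	return base + (u16)(prandom_u32() % range);
}
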
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index c59ec3d..3cd397d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5204,7 +5204,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
if (t4_wait_dev_ready(adap) < 0)
return PCI_ERS_RESULT_DISCONNECT;
- if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
+ if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
return PCI_ERS_RESULT_DISCONNECT;
adap->flags |= FW_OK;
if (adap_init1(adap, &c))
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 234ce6f..f544b29 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -327,6 +327,7 @@ enum vf_state {
#define BE_FLAGS_LINK_STATUS_INIT 1
#define BE_FLAGS_WORKER_SCHEDULED (1 << 3)
+#define BE_FLAGS_NAPI_ENABLED (1 << 9)
#define BE_UC_PMAC_COUNT 30
#define BE_VF_UC_PMAC_COUNT 2
#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 25d3290..e1e5bb9 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -961,19 +961,8 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
- if (lancer_chip(adapter)) {
- req->hdr.version = 2;
- req->page_size = 1; /* 1 for 4K */
- AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
- no_delay);
- AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
- __ilog2_u32(cq->len/256));
- AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
- ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
- ctxt, eq->id);
- } else {
+
+ if (BEx_chip(adapter)) {
AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
coalesce_wm);
AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
@@ -983,6 +972,18 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
+ } else {
+ req->hdr.version = 2;
+ req->page_size = 1; /* 1 for 4K */
+ AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
+ no_delay);
+ AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
+ __ilog2_u32(cq->len/256));
+ AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_v2, eventable,
+ ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_v2, eqid,
+ ctxt, eq->id);
}
be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -1763,10 +1764,12 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
req->if_id = cpu_to_le32(adapter->if_handle);
if (flags & IFF_PROMISC) {
req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
- BE_IF_FLAGS_VLAN_PROMISCUOUS);
+ BE_IF_FLAGS_VLAN_PROMISCUOUS |
+ BE_IF_FLAGS_MCAST_PROMISCUOUS);
if (value == ON)
req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
- BE_IF_FLAGS_VLAN_PROMISCUOUS);
+ BE_IF_FLAGS_VLAN_PROMISCUOUS |
+ BE_IF_FLAGS_MCAST_PROMISCUOUS);
} else if (flags & IFF_ALLMULTI) {
req->if_flags_mask = req->if_flags =
cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
@@ -2084,7 +2087,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->flash_compl,
- msecs_to_jiffies(30000)))
+ msecs_to_jiffies(60000)))
status = -1;
else
status = adapter->flash_status;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index a855668..025bdb0 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -381,7 +381,7 @@ struct amap_cq_context_be {
u8 rsvd5[32]; /* dword 3*/
} __packed;
-struct amap_cq_context_lancer {
+struct amap_cq_context_v2 {
u8 rsvd0[12]; /* dword 0*/
u8 coalescwm[2]; /* dword 0*/
u8 nodelay; /* dword 0*/
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 5733cde..3d4461a 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -85,6 +85,7 @@ static const struct be_ethtool_stat et_stats[] = {
{DRVSTAT_INFO(tx_pauseframes)},
{DRVSTAT_INFO(tx_controlframes)},
{DRVSTAT_INFO(rx_priority_pause_frames)},
+ {DRVSTAT_INFO(tx_priority_pauseframes)},
/* Received packets dropped when an internal fifo going into
* main packet buffer tank (PMEM) overflows.
*/
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 4babc8a..6c52a60 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -410,6 +410,7 @@ static void populate_be_v1_stats(struct be_adapter *adapter)
drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
drvs->tx_pauseframes = port_stats->tx_pauseframes;
drvs->tx_controlframes = port_stats->tx_controlframes;
+ drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
drvs->jabber_events = port_stats->jabber_events;
drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
@@ -471,11 +472,26 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
ACCESS_ONCE(*acc) = newacc;
}
+void populate_erx_stats(struct be_adapter *adapter,
+ struct be_rx_obj *rxo,
+ u32 erx_stat)
+{
+ if (!BEx_chip(adapter))
+ rx_stats(rxo)->rx_drops_no_frags = erx_stat;
+ else
+ /* below erx HW counter can actually wrap around after
+ * 65535. Driver accumulates a 32-bit value
+ */
+ accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
+ (u16)erx_stat);
+}
+
void be_parse_stats(struct be_adapter *adapter)
{
struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
struct be_rx_obj *rxo;
int i;
+ u32 erx_stat;
if (lancer_chip(adapter)) {
populate_lancer_stats(adapter);
@@ -488,12 +504,8 @@ void be_parse_stats(struct be_adapter *adapter)
/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
for_all_rx_queues(adapter, rxo, i) {
- /* below erx HW counter can actually wrap around after
- * 65535. Driver accumulates a 32-bit value
- */
- accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
- (u16)erx->rx_drops_no_fragments \
- [rxo->q.id]);
+ erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
+ populate_erx_stats(adapter, rxo, erx_stat);
}
}
}
@@ -2378,7 +2390,7 @@ static uint be_num_rss_want(struct be_adapter *adapter)
return num;
}
-static void be_msix_enable(struct be_adapter *adapter)
+static int be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
int i, status, num_vec, num_roce_vec = 0;
@@ -2403,13 +2415,17 @@ static void be_msix_enable(struct be_adapter *adapter)
goto done;
} else if (status >= BE_MIN_MSIX_VECTORS) {
num_vec = status;
- if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
- num_vec) == 0)
+ status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+ num_vec);
+ if (!status)
goto done;
}
dev_warn(dev, "MSIx enable failed\n");
- return;
+ /* INTx is not supported in VFs, so fail probe if enable_msix fails */
+ if (!be_physfn(adapter))
+ return status;
+ return 0;
done:
if (be_roce_supported(adapter)) {
if (num_vec > num_roce_vec) {
@@ -2423,7 +2439,7 @@ done:
} else
adapter->num_msix_vec = num_vec;
dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
- return;
+ return 0;
}
static inline int be_msix_vec_get(struct be_adapter *adapter,
@@ -2536,8 +2552,11 @@ static int be_close(struct net_device *netdev)
be_roce_dev_close(adapter);
- for_all_evt_queues(adapter, eqo, i)
- napi_disable(&eqo->napi);
+ if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
+ for_all_evt_queues(adapter, eqo, i)
+ napi_disable(&eqo->napi);
+ adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
+ }
be_async_mcc_disable(adapter);
@@ -2631,7 +2650,9 @@ static int be_open(struct net_device *netdev)
if (status)
goto err;
- be_irq_register(adapter);
+ status = be_irq_register(adapter);
+ if (status)
+ goto err;
for_all_rx_queues(adapter, rxo, i)
be_cq_notify(adapter, rxo->cq.id, true, 0);
@@ -2645,6 +2666,7 @@ static int be_open(struct net_device *netdev)
napi_enable(&eqo->napi);
be_eq_notify(adapter, eqo->q.id, true, false, 0);
}
+ adapter->flags |= BE_FLAGS_NAPI_ENABLED;
status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
if (!status)
@@ -3100,7 +3122,9 @@ static int be_setup(struct be_adapter *adapter)
if (status)
goto err;
- be_msix_enable(adapter);
+ status = be_msix_enable(adapter);
+ if (status)
+ goto err;
status = be_evt_queues_create(adapter);
if (status)
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index d44f65b..ceb4d43 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -214,6 +214,7 @@ struct fec_enet_private {
struct clk *clk_ipg;
struct clk *clk_ahb;
+ struct clk *clk_enet_out;
struct clk *clk_ptp;
/* The saved address of a sent-in-place packet/buffer, for skfree(). */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index b9748f1..e25bf83 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1883,18 +1883,23 @@ fec_probe(struct platform_device *pdev)
goto failed_clk;
}
+ /* enet_out is optional, depends on board */
+ fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
+ if (IS_ERR(fep->clk_enet_out))
+ fep->clk_enet_out = NULL;
+
fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
fep->bufdesc_ex =
pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
if (IS_ERR(fep->clk_ptp)) {
- ret = PTR_ERR(fep->clk_ptp);
+ fep->clk_ptp = NULL;
fep->bufdesc_ex = 0;
}
clk_prepare_enable(fep->clk_ahb);
clk_prepare_enable(fep->clk_ipg);
- if (!IS_ERR(fep->clk_ptp))
- clk_prepare_enable(fep->clk_ptp);
+ clk_prepare_enable(fep->clk_enet_out);
+ clk_prepare_enable(fep->clk_ptp);
reg_phy = devm_regulator_get(&pdev->dev, "phy");
if (!IS_ERR(reg_phy)) {
@@ -1962,8 +1967,8 @@ failed_irq:
failed_regulator:
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
- if (!IS_ERR(fep->clk_ptp))
- clk_disable_unprepare(fep->clk_ptp);
+ clk_disable_unprepare(fep->clk_enet_out);
+ clk_disable_unprepare(fep->clk_ptp);
failed_pin:
failed_clk:
failed_ioremap:
@@ -1985,6 +1990,7 @@ fec_drv_remove(struct platform_device *pdev)
clk_disable_unprepare(fep->clk_ptp);
if (fep->ptp_clock)
ptp_clock_unregister(fep->ptp_clock);
+ clk_disable_unprepare(fep->clk_enet_out);
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
for (i = 0; i < FEC_IRQ_NUM; i++) {
@@ -2010,6 +2016,7 @@ fec_suspend(struct device *dev)
fec_stop(ndev);
netif_device_detach(ndev);
}
+ clk_disable_unprepare(fep->clk_enet_out);
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
@@ -2022,6 +2029,7 @@ fec_resume(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ clk_prepare_enable(fep->clk_enet_out);
clk_prepare_enable(fep->clk_ahb);
clk_prepare_enable(fep->clk_ipg);
if (netif_running(ndev)) {
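A brief aside on the pattern the FEC change uses for the optional enet_out clock: a failed lookup is collapsed to NULL, which is safe because the common clock consumer API accepts a NULL clk as a no-op in clk_prepare_enable() and clk_disable_unprepare(). A minimal sketch of that convention with a hypothetical helper:

/* Hypothetical illustration of the "optional resource" convention. */
struct clk;				/* opaque handle */

static int enable_optional(struct clk *clk)
{
	if (!clk)
		return 0;	/* resource not wired on this board: nothing to do */
	/* ... program the clock hardware here ... */
	return 0;
}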
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 302d594..70fd559 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1322,7 +1322,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
- int rc, i;
+ int rc, i, mac_len;
struct net_device *netdev;
struct ibmveth_adapter *adapter;
unsigned char *mac_addr_p;
@@ -1332,11 +1332,19 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
dev->unit_address);
mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
- NULL);
+ &mac_len);
if (!mac_addr_p) {
dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
return -EINVAL;
}
+ /* Workaround for old/broken pHyp */
+ if (mac_len == 8)
+ mac_addr_p += 2;
+ else if (mac_len != 6) {
+ dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
+ mac_len);
+ return -EINVAL;
+ }
mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
VETH_MCAST_FILTER_SIZE, NULL);
@@ -1361,17 +1369,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
- /*
- * Some older boxes running PHYP non-natively have an OF that returns
- * a 8-byte local-mac-address field (and the first 2 bytes have to be
- * ignored) while newer boxes' OF return a 6-byte field. Note that
- * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
- * The RPA doc specifies that the first byte must be 10b, so we'll
- * just look for it to solve this 8 vs. 6 byte field issue
- */
- if ((*mac_addr_p & 0x3) != 0x02)
- mac_addr_p += 2;
-
adapter->mac_addr = 0;
memcpy(&adapter->mac_addr, mac_addr_p, 6);
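For clarity, a small self-contained sketch of the length check that replaces the old first-byte heuristic in ibmveth_probe() (hypothetical function name, plain C):

#include <string.h>
#include <errno.h>

/* Hypothetical illustration: old firmware may return an 8-byte
 * VETH_MAC_ADDR whose first two bytes are padding; 6 bytes is the
 * normal case; anything else is rejected. */
static int extract_veth_mac(const unsigned char *attr, int attr_len,
			    unsigned char mac[6])
{
	if (attr_len == 8)
		attr += 2;		/* skip the two padding bytes */
	else if (attr_len != 6)
		return -EINVAL;		/* unexpected attribute length */
	memcpy(mac, attr, 6);
	return 0;
}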
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 82f1c84..ffbc08f 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -600,7 +600,7 @@ static inline s32 __ew32_prepare(struct e1000_hw *hw)
s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
- usleep_range(50, 100);
+ udelay(50);
return i;
}
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 256ae78..d175bbd 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2496,10 +2496,12 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
skb->ip_summed = re->skb->ip_summed;
skb->csum = re->skb->csum;
skb->rxhash = re->skb->rxhash;
+ skb->vlan_proto = re->skb->vlan_proto;
skb->vlan_tci = re->skb->vlan_tci;
pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
length, PCI_DMA_FROMDEVICE);
+ re->skb->vlan_proto = 0;
re->skb->vlan_tci = 0;
re->skb->rxhash = 0;
re->skb->ip_summed = CHECKSUM_NONE;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index bcf4d11..c9e6b62 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -889,7 +889,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
.queue_mode = MLX4_NET_TRANS_Q_FIFO,
.exclusive = 0,
.allow_loopback = 1,
- .promisc_mode = MLX4_FS_PROMISC_NONE,
+ .promisc_mode = MLX4_FS_REGULAR,
};
rule.port = priv->port;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index a69a908..b35f947 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -127,7 +127,7 @@ static void mlx4_en_filter_work(struct work_struct *work)
.queue_mode = MLX4_NET_TRANS_Q_LIFO,
.exclusive = 1,
.allow_loopback = 1,
- .promisc_mode = MLX4_FS_PROMISC_NONE,
+ .promisc_mode = MLX4_FS_REGULAR,
.port = priv->port,
.priority = MLX4_DOMAIN_RFS,
};
@@ -448,7 +448,7 @@ static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
.queue_mode = MLX4_NET_TRANS_Q_FIFO,
.exclusive = 0,
.allow_loopback = 1,
- .promisc_mode = MLX4_FS_PROMISC_NONE,
+ .promisc_mode = MLX4_FS_REGULAR,
.priority = MLX4_DOMAIN_NIC,
};
@@ -795,7 +795,7 @@ static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
err = mlx4_flow_steer_promisc_add(mdev->dev,
priv->port,
priv->base_qpn,
- MLX4_FS_PROMISC_UPLINK);
+ MLX4_FS_ALL_DEFAULT);
if (err)
en_err(priv, "Failed enabling promiscuous mode\n");
priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
@@ -858,7 +858,7 @@ static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
case MLX4_STEERING_MODE_DEVICE_MANAGED:
err = mlx4_flow_steer_promisc_remove(mdev->dev,
priv->port,
- MLX4_FS_PROMISC_UPLINK);
+ MLX4_FS_ALL_DEFAULT);
if (err)
en_err(priv, "Failed disabling promiscuous mode\n");
priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
@@ -919,7 +919,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
err = mlx4_flow_steer_promisc_add(mdev->dev,
priv->port,
priv->base_qpn,
- MLX4_FS_PROMISC_ALL_MULTI);
+ MLX4_FS_MC_DEFAULT);
break;
case MLX4_STEERING_MODE_B0:
@@ -942,7 +942,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
case MLX4_STEERING_MODE_DEVICE_MANAGED:
err = mlx4_flow_steer_promisc_remove(mdev->dev,
priv->port,
- MLX4_FS_PROMISC_ALL_MULTI);
+ MLX4_FS_MC_DEFAULT);
break;
case MLX4_STEERING_MODE_B0:
@@ -1621,10 +1621,10 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
MLX4_EN_FLAG_MC_PROMISC);
mlx4_flow_steer_promisc_remove(mdev->dev,
priv->port,
- MLX4_FS_PROMISC_UPLINK);
+ MLX4_FS_ALL_DEFAULT);
mlx4_flow_steer_promisc_remove(mdev->dev,
priv->port,
- MLX4_FS_PROMISC_ALL_MULTI);
+ MLX4_FS_MC_DEFAULT);
} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
priv->flags &= ~MLX4_EN_FLAG_PROMISC;
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 8e3123a..6000342 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -497,8 +497,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
break;
case MLX4_EVENT_TYPE_SRQ_LIMIT:
- mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
- __func__);
+ mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
+ __func__);
case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
if (mlx4_is_master(dev)) {
/* forward only to slave owning the SRQ */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index ffc78d2..f3e804f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -645,25 +645,37 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
return err;
}
+static const u8 __promisc_mode[] = {
+ [MLX4_FS_REGULAR] = 0x0,
+ [MLX4_FS_ALL_DEFAULT] = 0x1,
+ [MLX4_FS_MC_DEFAULT] = 0x3,
+ [MLX4_FS_UC_SNIFFER] = 0x4,
+ [MLX4_FS_MC_SNIFFER] = 0x5,
+};
+
+int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
+ enum mlx4_net_trans_promisc_mode flow_type)
+{
+ if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) {
+ mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
+ return -EINVAL;
+ }
+ return __promisc_mode[flow_type];
+}
+EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_mode);
+
static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
struct mlx4_net_trans_rule_hw_ctrl *hw)
{
- static const u8 __promisc_mode[] = {
- [MLX4_FS_PROMISC_NONE] = 0x0,
- [MLX4_FS_PROMISC_UPLINK] = 0x1,
- [MLX4_FS_PROMISC_FUNCTION_PORT] = 0x2,
- [MLX4_FS_PROMISC_ALL_MULTI] = 0x3,
- };
-
- u32 dw = 0;
-
- dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
- dw |= ctrl->exclusive ? (1 << 2) : 0;
- dw |= ctrl->allow_loopback ? (1 << 3) : 0;
- dw |= __promisc_mode[ctrl->promisc_mode] << 8;
- dw |= ctrl->priority << 16;
-
- hw->ctrl = cpu_to_be32(dw);
+ u8 flags = 0;
+
+ flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
+ flags |= ctrl->exclusive ? (1 << 2) : 0;
+ flags |= ctrl->allow_loopback ? (1 << 3) : 0;
+
+ hw->flags = flags;
+ hw->type = __promisc_mode[ctrl->promisc_mode];
+ hw->prio = cpu_to_be16(ctrl->priority);
hw->port = ctrl->port;
hw->qpn = cpu_to_be32(ctrl->qpn);
}
@@ -677,29 +689,51 @@ const u16 __sw_id_hw[] = {
[MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006
};
+int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
+ enum mlx4_net_trans_rule_id id)
+{
+ if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+ mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
+ return -EINVAL;
+ }
+ return __sw_id_hw[id];
+}
+EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_id);
+
+static const int __rule_hw_sz[] = {
+ [MLX4_NET_TRANS_RULE_ID_ETH] =
+ sizeof(struct mlx4_net_trans_rule_hw_eth),
+ [MLX4_NET_TRANS_RULE_ID_IB] =
+ sizeof(struct mlx4_net_trans_rule_hw_ib),
+ [MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
+ [MLX4_NET_TRANS_RULE_ID_IPV4] =
+ sizeof(struct mlx4_net_trans_rule_hw_ipv4),
+ [MLX4_NET_TRANS_RULE_ID_TCP] =
+ sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
+ [MLX4_NET_TRANS_RULE_ID_UDP] =
+ sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
+};
+
+int mlx4_hw_rule_sz(struct mlx4_dev *dev,
+ enum mlx4_net_trans_rule_id id)
+{
+ if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+ mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
+ return -EINVAL;
+ }
+
+ return __rule_hw_sz[id];
+}
+EXPORT_SYMBOL_GPL(mlx4_hw_rule_sz);
+
static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
struct _rule_hw *rule_hw)
{
- static const size_t __rule_hw_sz[] = {
- [MLX4_NET_TRANS_RULE_ID_ETH] =
- sizeof(struct mlx4_net_trans_rule_hw_eth),
- [MLX4_NET_TRANS_RULE_ID_IB] =
- sizeof(struct mlx4_net_trans_rule_hw_ib),
- [MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
- [MLX4_NET_TRANS_RULE_ID_IPV4] =
- sizeof(struct mlx4_net_trans_rule_hw_ipv4),
- [MLX4_NET_TRANS_RULE_ID_TCP] =
- sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
- [MLX4_NET_TRANS_RULE_ID_UDP] =
- sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
- };
- if (spec->id >= MLX4_NET_TRANS_RULE_NUM) {
- mlx4_err(dev, "Invalid network rule id. id = %d\n", spec->id);
+ if (mlx4_hw_rule_sz(dev, spec->id) < 0)
return -EINVAL;
- }
- memset(rule_hw, 0, __rule_hw_sz[spec->id]);
+ memset(rule_hw, 0, mlx4_hw_rule_sz(dev, spec->id));
rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
- rule_hw->size = __rule_hw_sz[spec->id] >> 2;
+ rule_hw->size = mlx4_hw_rule_sz(dev, spec->id) >> 2;
switch (spec->id) {
case MLX4_NET_TRANS_RULE_ID_ETH:
@@ -713,12 +747,12 @@ static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
rule_hw->eth.ether_type_enable = 1;
rule_hw->eth.ether_type = spec->eth.ether_type;
}
- rule_hw->eth.vlan_id = spec->eth.vlan_id;
- rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk;
+ rule_hw->eth.vlan_tag = spec->eth.vlan_id;
+ rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk;
break;
case MLX4_NET_TRANS_RULE_ID_IB:
- rule_hw->ib.qpn = spec->ib.r_qpn;
+ rule_hw->ib.l3_qpn = spec->ib.l3_qpn;
rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
@@ -1136,7 +1170,7 @@ int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
struct mlx4_net_trans_rule rule = {
.queue_mode = MLX4_NET_TRANS_Q_FIFO,
.exclusive = 0,
- .promisc_mode = MLX4_FS_PROMISC_NONE,
+ .promisc_mode = MLX4_FS_REGULAR,
.priority = MLX4_DOMAIN_NIC,
};
@@ -1229,11 +1263,10 @@ int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
u64 *regid_p;
switch (mode) {
- case MLX4_FS_PROMISC_UPLINK:
- case MLX4_FS_PROMISC_FUNCTION_PORT:
+ case MLX4_FS_ALL_DEFAULT:
regid_p = &dev->regid_promisc_array[port];
break;
- case MLX4_FS_PROMISC_ALL_MULTI:
+ case MLX4_FS_MC_DEFAULT:
regid_p = &dev->regid_allmulti_array[port];
break;
default:
@@ -1260,11 +1293,10 @@ int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
u64 *regid_p;
switch (mode) {
- case MLX4_FS_PROMISC_UPLINK:
- case MLX4_FS_PROMISC_FUNCTION_PORT:
+ case MLX4_FS_ALL_DEFAULT:
regid_p = &dev->regid_promisc_array[port];
break;
- case MLX4_FS_PROMISC_ALL_MULTI:
+ case MLX4_FS_MC_DEFAULT:
regid_p = &dev->regid_allmulti_array[port];
break;
default:
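The newly exported mlx4_map_sw_to_hw_steering_mode(), mlx4_map_sw_to_hw_steering_id() and mlx4_hw_rule_sz() all share the same bounds-checked table-lookup shape; a stripped-down sketch with a hypothetical name, plain C:

#include <stddef.h>

/* Hypothetical illustration of the exported lookup helpers: reject an
 * out-of-range index, otherwise return the table entry. */
static int checked_lookup(const int *table, size_t entries, int idx)
{
	if (idx < 0 || (size_t)idx >= entries)
		return -1;		/* caller maps this to -EINVAL */
	return table[idx];
}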
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index eac3dae..df15bb6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -730,85 +730,6 @@ struct mlx4_steer {
struct list_head steer_entries[MLX4_NUM_STEERS];
};
-struct mlx4_net_trans_rule_hw_ctrl {
- __be32 ctrl;
- u8 rsvd1;
- u8 funcid;
- u8 vep;
- u8 port;
- __be32 qpn;
- __be32 rsvd2;
-};
-
-struct mlx4_net_trans_rule_hw_ib {
- u8 size;
- u8 rsvd1;
- __be16 id;
- u32 rsvd2;
- __be32 qpn;
- __be32 qpn_mask;
- u8 dst_gid[16];
- u8 dst_gid_msk[16];
-} __packed;
-
-struct mlx4_net_trans_rule_hw_eth {
- u8 size;
- u8 rsvd;
- __be16 id;
- u8 rsvd1[6];
- u8 dst_mac[6];
- u16 rsvd2;
- u8 dst_mac_msk[6];
- u16 rsvd3;
- u8 src_mac[6];
- u16 rsvd4;
- u8 src_mac_msk[6];
- u8 rsvd5;
- u8 ether_type_enable;
- __be16 ether_type;
- __be16 vlan_id_msk;
- __be16 vlan_id;
-} __packed;
-
-struct mlx4_net_trans_rule_hw_tcp_udp {
- u8 size;
- u8 rsvd;
- __be16 id;
- __be16 rsvd1[3];
- __be16 dst_port;
- __be16 rsvd2;
- __be16 dst_port_msk;
- __be16 rsvd3;
- __be16 src_port;
- __be16 rsvd4;
- __be16 src_port_msk;
-} __packed;
-
-struct mlx4_net_trans_rule_hw_ipv4 {
- u8 size;
- u8 rsvd;
- __be16 id;
- __be32 rsvd1;
- __be32 dst_ip;
- __be32 dst_ip_msk;
- __be32 src_ip;
- __be32 src_ip_msk;
-} __packed;
-
-struct _rule_hw {
- union {
- struct {
- u8 size;
- u8 rsvd;
- __be16 id;
- };
- struct mlx4_net_trans_rule_hw_eth eth;
- struct mlx4_net_trans_rule_hw_ib ib;
- struct mlx4_net_trans_rule_hw_ipv4 ipv4;
- struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
- };
-};
-
enum {
MLX4_PCI_DEV_IS_VF = 1 << 0,
MLX4_PCI_DEV_FORCE_SENSE_PORT = 1 << 1,
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index e329fe1..79fd269 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -298,3 +298,18 @@ void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
return;
mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
}
+
+struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn)
+{
+ struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+ struct mlx4_srq *srq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&srq_table->lock, flags);
+ srq = radix_tree_lookup(&srq_table->tree,
+ srqn & (dev->caps.num_srqs - 1));
+ spin_unlock_irqrestore(&srq_table->lock, flags);
+
+ return srq;
+}
+EXPORT_SYMBOL_GPL(mlx4_srq_lookup);
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 4486102..71998e7 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -1528,7 +1528,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
return 0;
fail6:
- BUG_ON(i2c_del_adapter(&board->i2c_adap));
+ i2c_del_adapter(&board->i2c_adap);
memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
fail5:
efx_nic_free_buffer(efx, &efx->irq_status);
@@ -1666,13 +1666,11 @@ static void falcon_remove_nic(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
struct falcon_board *board = falcon_board(efx);
- int rc;
board->type->fini(efx);
/* Remove I2C adapter and clear it in preparation for a retry */
- rc = i2c_del_adapter(&board->i2c_adap);
- BUG_ON(rc);
+ i2c_del_adapter(&board->i2c_adap);
memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
efx_nic_free_buffer(efx, &efx->irq_status);
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 48e2b99..3663b9e 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -33,6 +33,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/crc32.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
@@ -144,6 +145,9 @@ struct smsc911x_data {
/* regulators */
struct regulator_bulk_data supplies[SMSC911X_NUM_SUPPLIES];
+
+ /* clock */
+ struct clk *clk;
};
/* Easy access to information */
@@ -369,7 +373,7 @@ out:
}
/*
- * enable resources, currently just regulators.
+ * enable regulator and clock resources.
*/
static int smsc911x_enable_resources(struct platform_device *pdev)
{
@@ -382,6 +386,13 @@ static int smsc911x_enable_resources(struct platform_device *pdev)
if (ret)
netdev_err(ndev, "failed to enable regulators %d\n",
ret);
+
+ if (!IS_ERR(pdata->clk)) {
+ ret = clk_prepare_enable(pdata->clk);
+ if (ret < 0)
+ netdev_err(ndev, "failed to enable clock %d\n", ret);
+ }
+
return ret;
}
@@ -396,6 +407,10 @@ static int smsc911x_disable_resources(struct platform_device *pdev)
ret = regulator_bulk_disable(ARRAY_SIZE(pdata->supplies),
pdata->supplies);
+
+ if (!IS_ERR(pdata->clk))
+ clk_disable_unprepare(pdata->clk);
+
return ret;
}
@@ -421,6 +436,12 @@ static int smsc911x_request_resources(struct platform_device *pdev)
if (ret)
netdev_err(ndev, "couldn't get regulators %d\n",
ret);
+
+ /* Request clock */
+ pdata->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pdata->clk))
+ netdev_warn(ndev, "couldn't get clock %li\n", PTR_ERR(pdata->clk));
+
return ret;
}
@@ -436,6 +457,12 @@ static void smsc911x_free_resources(struct platform_device *pdev)
/* Free regulators */
regulator_bulk_free(ARRAY_SIZE(pdata->supplies),
pdata->supplies);
+
+ /* Free clock */
+ if (!IS_ERR(pdata->clk)) {
+ clk_put(pdata->clk);
+ pdata->clk = NULL;
+ }
}
/* waits for MAC not busy, with timeout. Only called by smsc911x_mac_read
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 59c4391..21a5b29 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -555,8 +555,8 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
prim_cpsw = cpsw_get_slave_priv(priv, 0);
if (prim_cpsw->irq_enabled == false) {
- cpsw_enable_irq(priv);
prim_cpsw->irq_enabled = true;
+ cpsw_enable_irq(priv);
}
}
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 49b8b58..484f77e 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -449,7 +449,7 @@ static int transmit(struct baycom_state *bc, int cnt, unsigned char stat)
if ((--bc->hdlctx.slotcnt) > 0)
return 0;
bc->hdlctx.slotcnt = bc->ch_params.slottime;
- if ((random32() % 256) > bc->ch_params.ppersist)
+ if ((prandom_u32() % 256) > bc->ch_params.ppersist)
return 0;
}
}
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index a4a3516..3169252 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -389,7 +389,7 @@ void hdlcdrv_arbitrate(struct net_device *dev, struct hdlcdrv_state *s)
if ((--s->hdlctx.slotcnt) > 0)
return;
s->hdlctx.slotcnt = s->ch_params.slottime;
- if ((random32() % 256) > s->ch_params.ppersist)
+ if ((prandom_u32() % 256) > s->ch_params.ppersist)
return;
start_tx(dev, s);
}
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index b2d863f..0721e72 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -638,7 +638,7 @@ static void yam_arbitrate(struct net_device *dev)
yp->slotcnt = yp->slot / 10;
/* is random > persist ? */
- if ((random32() % 256) > yp->pers)
+ if ((prandom_u32() % 256) > yp->pers)
return;
yam_start_tx(dev, yp);
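All three hamradio drivers gate transmission on the same p-persistence test, now fed by prandom_u32(); a minimal sketch of the decision (hypothetical helper name, plain C):

#include <stdbool.h>

/* Hypothetical illustration: draw one byte of (pseudo-)randomness and
 * transmit only if it does not exceed the configured persistence value,
 * so a larger ppersist means a higher probability of keying up. */
static bool may_transmit(unsigned int rnd, unsigned int ppersist)
{
	return (rnd % 256) <= ppersist;
}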
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 2f99f88..5f47584 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -383,7 +383,7 @@ static int vlsi_seq_show(struct seq_file *seq, void *v)
static int vlsi_seq_open(struct inode *inode, struct file *file)
{
- return single_open(file, vlsi_seq_show, PDE(inode)->data);
+ return single_open(file, vlsi_seq_show, PDE_DATA(inode));
}
static const struct file_operations vlsi_proc_fops = {
@@ -1678,7 +1678,7 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
IRDA_WARNING("%s: failed to create proc entry\n",
__func__);
} else {
- ent->size = 0;
+ proc_set_size(ent, 0);
}
idev->proc_entry = ent;
}
diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c
index 9eabfaa..5ca14d4 100644
--- a/drivers/net/team/team_mode_random.c
+++ b/drivers/net/team/team_mode_random.c
@@ -18,7 +18,7 @@
static u32 random_N(unsigned int N)
{
- return reciprocal_divide(random32(), N);
+ return reciprocal_divide(prandom_u32(), N);
}
static bool rnd_transmit(struct team *team, struct sk_buff *skb)
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 7c769d8..287cc62 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -93,6 +93,17 @@ config USB_RTL8150
To compile this driver as a module, choose M here: the
module will be called rtl8150.
+config USB_RTL8152
+ tristate "Realtek RTL8152 Based USB 2.0 Ethernet Adapters"
+ select NET_CORE
+ select MII
+ help
+ This option adds support for Realtek RTL8152 based USB 2.0
+ 10/100 Ethernet adapters.
+
+ To compile this driver as a module, choose M here: the
+ module will be called r8152.
+
config USB_USBNET
tristate "Multi-purpose USB Networking Framework"
select NET_CORE
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index 119b06c..9ab5c9d 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_USB_CATC) += catc.o
obj-$(CONFIG_USB_KAWETH) += kaweth.o
obj-$(CONFIG_USB_PEGASUS) += pegasus.o
obj-$(CONFIG_USB_RTL8150) += rtl8150.o
+obj-$(CONFIG_USB_RTL8152) += r8152.o
obj-$(CONFIG_USB_HSO) += hso.o
obj-$(CONFIG_USB_NET_AX8817X) += asix.o
asix-y := asix_devices.o asix_common.o ax88172a.o
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index f7f623a..577c72d 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -100,6 +100,9 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
rx->size);
kfree_skb(rx->ax_skb);
+ rx->ax_skb = NULL;
+ rx->size = 0U;
+
return 0;
}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 4ff71d6..24fbec2 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -479,6 +479,7 @@ static const struct driver_info wwan_info = {
#define NOVATEL_VENDOR_ID 0x1410
#define ZTE_VENDOR_ID 0x19D2
#define DELL_VENDOR_ID 0x413C
+#define REALTEK_VENDOR_ID 0x0bda
static const struct usb_device_id products [] = {
/*
@@ -619,6 +620,15 @@ static const struct usb_device_id products [] = {
.driver_info = 0,
},
+/* Realtek RTL8152 Based USB 2.0 Ethernet Adapters */
+#if defined(CONFIG_USB_RTL8152) || defined(CONFIG_USB_RTL8152_MODULE)
+{
+ USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8152, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+#endif
+
/*
* WHITELIST!!!
*
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 0969905..03e8a15 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -256,8 +256,9 @@ static int mdio_read(struct net_device *dev, int phy_id, int loc)
static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
{
pegasus_t *pegasus = netdev_priv(dev);
+ u16 data = val;
- write_mii_word(pegasus, phy_id, loc, (__u16 *)&val);
+ write_mii_word(pegasus, phy_id, loc, &data);
}
static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 5a88e72..834e405 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -548,6 +548,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x0265, 4)}, /* ONDA MT8205 4G LTE */
{QMI_FIXED_INTF(0x19d2, 0x0284, 4)}, /* ZTE MF880 */
{QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */
+ {QMI_FIXED_INTF(0x19d2, 0x0412, 4)}, /* Telewell TW-LTE 4G */
{QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */
{QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */
{QMI_FIXED_INTF(0x19d2, 0x1012, 4)},
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
new file mode 100644
index 0000000..14e5198
--- /dev/null
+++ b/drivers/net/usb/r8152.c
@@ -0,0 +1,1767 @@
+/*
+ * Copyright (c) 2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/if_vlan.h>
+#include <linux/uaccess.h>
+
+/* Version Information */
+#define DRIVER_VERSION "v1.0.0 (2013/05/03)"
+#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
+#define DRIVER_DESC "Realtek RTL8152 Based USB 2.0 Ethernet Adapters"
+#define MODULENAME "r8152"
+
+#define R8152_PHY_ID 32
+
+#define PLA_IDR 0xc000
+#define PLA_RCR 0xc010
+#define PLA_RMS 0xc016
+#define PLA_RXFIFO_CTRL0 0xc0a0
+#define PLA_RXFIFO_CTRL1 0xc0a4
+#define PLA_RXFIFO_CTRL2 0xc0a8
+#define PLA_FMC 0xc0b4
+#define PLA_CFG_WOL 0xc0b6
+#define PLA_MAR 0xcd00
+#define PAL_BDC_CR 0xd1a0
+#define PLA_LEDSEL 0xdd90
+#define PLA_LED_FEATURE 0xdd92
+#define PLA_PHYAR 0xde00
+#define PLA_GPHY_INTR_IMR 0xe022
+#define PLA_EEE_CR 0xe040
+#define PLA_EEEP_CR 0xe080
+#define PLA_MAC_PWR_CTRL 0xe0c0
+#define PLA_TCR0 0xe610
+#define PLA_TCR1 0xe612
+#define PLA_TXFIFO_CTRL 0xe618
+#define PLA_RSTTELLY 0xe800
+#define PLA_CR 0xe813
+#define PLA_CRWECR 0xe81c
+#define PLA_CONFIG5 0xe822
+#define PLA_PHY_PWR 0xe84c
+#define PLA_OOB_CTRL 0xe84f
+#define PLA_CPCR 0xe854
+#define PLA_MISC_0 0xe858
+#define PLA_MISC_1 0xe85a
+#define PLA_OCP_GPHY_BASE 0xe86c
+#define PLA_TELLYCNT 0xe890
+#define PLA_SFF_STS_7 0xe8de
+#define PLA_PHYSTATUS 0xe908
+#define PLA_BP_BA 0xfc26
+#define PLA_BP_0 0xfc28
+#define PLA_BP_1 0xfc2a
+#define PLA_BP_2 0xfc2c
+#define PLA_BP_3 0xfc2e
+#define PLA_BP_4 0xfc30
+#define PLA_BP_5 0xfc32
+#define PLA_BP_6 0xfc34
+#define PLA_BP_7 0xfc36
+
+#define USB_DEV_STAT 0xb808
+#define USB_USB_CTRL 0xd406
+#define USB_PHY_CTRL 0xd408
+#define USB_TX_AGG 0xd40a
+#define USB_RX_BUF_TH 0xd40c
+#define USB_USB_TIMER 0xd428
+#define USB_PM_CTRL_STATUS 0xd432
+#define USB_TX_DMA 0xd434
+#define USB_UPS_CTRL 0xd800
+#define USB_BP_BA 0xfc26
+#define USB_BP_0 0xfc28
+#define USB_BP_1 0xfc2a
+#define USB_BP_2 0xfc2c
+#define USB_BP_3 0xfc2e
+#define USB_BP_4 0xfc30
+#define USB_BP_5 0xfc32
+#define USB_BP_6 0xfc34
+#define USB_BP_7 0xfc36
+
+/* OCP Registers */
+#define OCP_ALDPS_CONFIG 0x2010
+#define OCP_EEE_CONFIG1 0x2080
+#define OCP_EEE_CONFIG2 0x2092
+#define OCP_EEE_CONFIG3 0x2094
+#define OCP_EEE_AR 0xa41a
+#define OCP_EEE_DATA 0xa41c
+
+/* PLA_RCR */
+#define RCR_AAP 0x00000001
+#define RCR_APM 0x00000002
+#define RCR_AM 0x00000004
+#define RCR_AB 0x00000008
+#define RCR_ACPT_ALL (RCR_AAP | RCR_APM | RCR_AM | RCR_AB)
+
+/* PLA_RXFIFO_CTRL0 */
+#define RXFIFO_THR1_NORMAL 0x00080002
+#define RXFIFO_THR1_OOB 0x01800003
+
+/* PLA_RXFIFO_CTRL1 */
+#define RXFIFO_THR2_FULL 0x00000060
+#define RXFIFO_THR2_HIGH 0x00000038
+#define RXFIFO_THR2_OOB 0x0000004a
+
+/* PLA_RXFIFO_CTRL2 */
+#define RXFIFO_THR3_FULL 0x00000078
+#define RXFIFO_THR3_HIGH 0x00000048
+#define RXFIFO_THR3_OOB 0x0000005a
+
+/* PLA_TXFIFO_CTRL */
+#define TXFIFO_THR_NORMAL 0x00400008
+
+/* PLA_FMC */
+#define FMC_FCR_MCU_EN 0x0001
+
+/* PLA_EEEP_CR */
+#define EEEP_CR_EEEP_TX 0x0002
+
+/* PLA_TCR0 */
+#define TCR0_TX_EMPTY 0x0800
+#define TCR0_AUTO_FIFO 0x0080
+
+/* PLA_TCR1 */
+#define VERSION_MASK 0x7cf0
+
+/* PLA_CR */
+#define CR_RST 0x10
+#define CR_RE 0x08
+#define CR_TE 0x04
+
+/* PLA_CRWECR */
+#define CRWECR_NORAML 0x00
+#define CRWECR_CONFIG 0xc0
+
+/* PLA_OOB_CTRL */
+#define NOW_IS_OOB 0x80
+#define TXFIFO_EMPTY 0x20
+#define RXFIFO_EMPTY 0x10
+#define LINK_LIST_READY 0x02
+#define DIS_MCU_CLROOB 0x01
+#define FIFO_EMPTY (TXFIFO_EMPTY | RXFIFO_EMPTY)
+
+/* PLA_MISC_1 */
+#define RXDY_GATED_EN 0x0008
+
+/* PLA_SFF_STS_7 */
+#define RE_INIT_LL 0x8000
+#define MCU_BORW_EN 0x4000
+
+/* PLA_CPCR */
+#define CPCR_RX_VLAN 0x0040
+
+/* PLA_CFG_WOL */
+#define MAGIC_EN 0x0001
+
+/* PAL_BDC_CR */
+#define ALDPS_PROXY_MODE 0x0001
+
+/* PLA_CONFIG5 */
+#define LAN_WAKE_EN 0x0002
+
+/* PLA_LED_FEATURE */
+#define LED_MODE_MASK 0x0700
+
+/* PLA_PHY_PWR */
+#define TX_10M_IDLE_EN 0x0080
+#define PFM_PWM_SWITCH 0x0040
+
+/* PLA_MAC_PWR_CTRL */
+#define D3_CLK_GATED_EN 0x00004000
+#define MCU_CLK_RATIO 0x07010f07
+#define MCU_CLK_RATIO_MASK 0x0f0f0f0f
+
+/* PLA_GPHY_INTR_IMR */
+#define GPHY_STS_MSK 0x0001
+#define SPEED_DOWN_MSK 0x0002
+#define SPDWN_RXDV_MSK 0x0004
+#define SPDWN_LINKCHG_MSK 0x0008
+
+/* PLA_PHYAR */
+#define PHYAR_FLAG 0x80000000
+
+/* PLA_EEE_CR */
+#define EEE_RX_EN 0x0001
+#define EEE_TX_EN 0x0002
+
+/* USB_DEV_STAT */
+#define STAT_SPEED_MASK 0x0006
+#define STAT_SPEED_HIGH 0x0000
+#define STAT_SPEED_FULL 0x0001
+
+/* USB_TX_AGG */
+#define TX_AGG_MAX_THRESHOLD 0x03
+
+/* USB_RX_BUF_TH */
+#define RX_BUF_THR 0x7a120180
+
+/* USB_TX_DMA */
+#define TEST_MODE_DISABLE 0x00000001
+#define TX_SIZE_ADJUST1 0x00000100
+
+/* USB_UPS_CTRL */
+#define POWER_CUT 0x0100
+
+/* USB_PM_CTRL_STATUS */
+#define RWSUME_INDICATE 0x0001
+
+/* USB_USB_CTRL */
+#define RX_AGG_DISABLE 0x0010
+
+/* OCP_ALDPS_CONFIG */
+#define ENPWRSAVE 0x8000
+#define ENPDNPS 0x0200
+#define LINKENA 0x0100
+#define DIS_SDSAVE 0x0010
+
+/* OCP_EEE_CONFIG1 */
+#define RG_TXLPI_MSK_HFDUP 0x8000
+#define RG_MATCLR_EN 0x4000
+#define EEE_10_CAP 0x2000
+#define EEE_NWAY_EN 0x1000
+#define TX_QUIET_EN 0x0200
+#define RX_QUIET_EN 0x0100
+#define SDRISETIME 0x0010 /* bit 4 ~ 6 */
+#define RG_RXLPI_MSK_HFDUP 0x0008
+#define SDFALLTIME 0x0007 /* bit 0 ~ 2 */
+
+/* OCP_EEE_CONFIG2 */
+#define RG_LPIHYS_NUM 0x7000 /* bit 12 ~ 15 */
+#define RG_DACQUIET_EN 0x0400
+#define RG_LDVQUIET_EN 0x0200
+#define RG_CKRSEL 0x0020
+#define RG_EEEPRG_EN 0x0010
+
+/* OCP_EEE_CONFIG3 */
+#define FST_SNR_EYE_R 0x1500 /* bit 7 ~ 15 */
+#define RG_LFS_SEL 0x0060 /* bit 6 ~ 5 */
+#define MSK_PH 0x0006 /* bit 0 ~ 3 */
+
+/* OCP_EEE_AR */
+/* bit[15:14] function */
+#define FUN_ADDR 0x0000
+#define FUN_DATA 0x4000
+/* bit[4:0] device addr */
+#define DEVICE_ADDR 0x0007
+
+/* OCP_EEE_DATA */
+#define EEE_ADDR 0x003C
+#define EEE_DATA 0x0002
+
+enum rtl_register_content {
+ _100bps = 0x08,
+ _10bps = 0x04,
+ LINK_STATUS = 0x02,
+ FULL_DUP = 0x01,
+};
+
+#define RTL8152_REQT_READ 0xc0
+#define RTL8152_REQT_WRITE 0x40
+#define RTL8152_REQ_GET_REGS 0x05
+#define RTL8152_REQ_SET_REGS 0x05
+
+#define BYTE_EN_DWORD 0xff
+#define BYTE_EN_WORD 0x33
+#define BYTE_EN_BYTE 0x11
+#define BYTE_EN_SIX_BYTES 0x3f
+#define BYTE_EN_START_MASK 0x0f
+#define BYTE_EN_END_MASK 0xf0
+
+#define RTL8152_RMS (VLAN_ETH_FRAME_LEN + VLAN_HLEN)
+#define RTL8152_TX_TIMEOUT (HZ)
+
+/* rtl8152 flags */
+enum rtl8152_flags {
+ RTL8152_UNPLUG = 0,
+ RX_URB_FAIL,
+ RTL8152_SET_RX_MODE,
+ WORK_ENABLE
+};
+
+/* Define these values to match your device */
+#define VENDOR_ID_REALTEK 0x0bda
+#define PRODUCT_ID_RTL8152 0x8152
+
+#define MCU_TYPE_PLA 0x0100
+#define MCU_TYPE_USB 0x0000
+
+struct rx_desc {
+ u32 opts1;
+#define RX_LEN_MASK 0x7fff
+ u32 opts2;
+ u32 opts3;
+ u32 opts4;
+ u32 opts5;
+ u32 opts6;
+};
+
+struct tx_desc {
+ u32 opts1;
+#define TX_FS (1 << 31) /* First segment of a packet */
+#define TX_LS (1 << 30) /* Final segment of a packet */
+#define TX_LEN_MASK 0xffff
+ u32 opts2;
+};
+
+struct r8152 {
+ unsigned long flags;
+ struct usb_device *udev;
+ struct tasklet_struct tl;
+ struct net_device *netdev;
+ struct urb *rx_urb, *tx_urb;
+ struct sk_buff *tx_skb, *rx_skb;
+ struct delayed_work schedule;
+ struct mii_if_info mii;
+ u32 msg_enable;
+ u16 ocp_base;
+ u8 version;
+ u8 speed;
+};
+
+enum rtl_version {
+ RTL_VER_UNKNOWN = 0,
+ RTL_VER_01,
+ RTL_VER_02
+};
+
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ * The RTL chips use a 64-element hash table based on the Ethernet CRC.
+ */
+static const int multicast_filter_limit = 32;
+
+static
+int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
+{
+ return usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
+ RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
+ value, index, data, size, 500);
+}
+
+static
+int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
+{
+ return usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0),
+ RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
+ value, index, data, size, 500);
+}
+
+static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
+ void *data, u16 type)
+{
+ u16 limit = 64;
+ int ret = 0;
+
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return -ENODEV;
+
+ /* both size and index must be 4-byte aligned */
+ if ((size & 3) || !size || (index & 3) || !data)
+ return -EPERM;
+
+ if ((u32)index + (u32)size > 0xffff)
+ return -EPERM;
+
+ while (size) {
+ if (size > limit) {
+ ret = get_registers(tp, index, type, limit, data);
+ if (ret < 0)
+ break;
+
+ index += limit;
+ data += limit;
+ size -= limit;
+ } else {
+ ret = get_registers(tp, index, type, size, data);
+ if (ret < 0)
+ break;
+
+ index += size;
+ data += size;
+ size = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen,
+ u16 size, void *data, u16 type)
+{
+ int ret;
+ u16 byteen_start, byteen_end, byen;
+ u16 limit = 512;
+
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return -ENODEV;
+
+ /* both size and index must be 4-byte aligned */
+ if ((size & 3) || !size || (index & 3) || !data)
+ return -EPERM;
+
+ if ((u32)index + (u32)size > 0xffff)
+ return -EPERM;
+
+ byteen_start = byteen & BYTE_EN_START_MASK;
+ byteen_end = byteen & BYTE_EN_END_MASK;
+
+ byen = byteen_start | (byteen_start << 4);
+ ret = set_registers(tp, index, type | byen, 4, data);
+ if (ret < 0)
+ goto error1;
+
+ index += 4;
+ data += 4;
+ size -= 4;
+
+ if (size) {
+ size -= 4;
+
+ while (size) {
+ if (size > limit) {
+ ret = set_registers(tp, index,
+ type | BYTE_EN_DWORD,
+ limit, data);
+ if (ret < 0)
+ goto error1;
+
+ index += limit;
+ data += limit;
+ size -= limit;
+ } else {
+ ret = set_registers(tp, index,
+ type | BYTE_EN_DWORD,
+ size, data);
+ if (ret < 0)
+ goto error1;
+
+ index += size;
+ data += size;
+ size = 0;
+ break;
+ }
+ }
+
+ byen = byteen_end | (byteen_end >> 4);
+ ret = set_registers(tp, index, type | byen, 4, data);
+ if (ret < 0)
+ goto error1;
+ }
+
+error1:
+ return ret;
+}
+
+static inline
+int pla_ocp_read(struct r8152 *tp, u16 index, u16 size, void *data)
+{
+ return generic_ocp_read(tp, index, size, data, MCU_TYPE_PLA);
+}
+
+static inline
+int pla_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data)
+{
+ return generic_ocp_write(tp, index, byteen, size, data, MCU_TYPE_PLA);
+}
+
+static inline
+int usb_ocp_read(struct r8152 *tp, u16 index, u16 size, void *data)
+{
+ return generic_ocp_read(tp, index, size, data, MCU_TYPE_USB);
+}
+
+static inline
+int usb_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data)
+{
+ return generic_ocp_write(tp, index, byteen, size, data, MCU_TYPE_USB);
+}
+
+static u32 ocp_read_dword(struct r8152 *tp, u16 type, u16 index)
+{
+ u32 data;
+
+ if (type == MCU_TYPE_PLA)
+ pla_ocp_read(tp, index, sizeof(data), &data);
+ else
+ usb_ocp_read(tp, index, sizeof(data), &data);
+
+ return __le32_to_cpu(data);
+}
+
+static void ocp_write_dword(struct r8152 *tp, u16 type, u16 index, u32 data)
+{
+ if (type == MCU_TYPE_PLA)
+ pla_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(data), &data);
+ else
+ usb_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(data), &data);
+}
+
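+/* The word and byte accessors below operate on 32-bit aligned OCP
+ * registers: reads fetch the containing dword and shift/mask out the
+ * requested part, while writes do a read-modify-write and pass a
+ * BYTE_EN_* byte-enable mask so only the addressed bytes change.
+ */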
+static u16 ocp_read_word(struct r8152 *tp, u16 type, u16 index)
+{
+ u32 data;
+ u8 shift = index & 2;
+
+ index &= ~3;
+
+ if (type == MCU_TYPE_PLA)
+ pla_ocp_read(tp, index, sizeof(data), &data);
+ else
+ usb_ocp_read(tp, index, sizeof(data), &data);
+
+ data = __le32_to_cpu(data);
+ data >>= (shift * 8);
+ data &= 0xffff;
+
+ return (u16)data;
+}
+
+static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data)
+{
+ u32 tmp, mask = 0xffff;
+ u16 byen = BYTE_EN_WORD;
+ u8 shift = index & 2;
+
+ data &= mask;
+
+ if (index & 2) {
+ byen <<= shift;
+ mask <<= (shift * 8);
+ data <<= (shift * 8);
+ index &= ~3;
+ }
+
+ if (type == MCU_TYPE_PLA)
+ pla_ocp_read(tp, index, sizeof(tmp), &tmp);
+ else
+ usb_ocp_read(tp, index, sizeof(tmp), &tmp);
+
+ tmp = __le32_to_cpu(tmp) & ~mask;
+ tmp |= data;
+ tmp = __cpu_to_le32(tmp);
+
+ if (type == MCU_TYPE_PLA)
+ pla_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
+ else
+ usb_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
+}
+
+static u8 ocp_read_byte(struct r8152 *tp, u16 type, u16 index)
+{
+ u32 data;
+ u8 shift = index & 3;
+
+ index &= ~3;
+
+ if (type == MCU_TYPE_PLA)
+ pla_ocp_read(tp, index, sizeof(data), &data);
+ else
+ usb_ocp_read(tp, index, sizeof(data), &data);
+
+ data = __le32_to_cpu(data);
+ data >>= (shift * 8);
+ data &= 0xff;
+
+ return (u8)data;
+}
+
+static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data)
+{
+ u32 tmp, mask = 0xff;
+ u16 byen = BYTE_EN_BYTE;
+ u8 shift = index & 3;
+
+ data &= mask;
+
+ if (index & 3) {
+ byen <<= shift;
+ mask <<= (shift * 8);
+ data <<= (shift * 8);
+ index &= ~3;
+ }
+
+ if (type == MCU_TYPE_PLA)
+ pla_ocp_read(tp, index, sizeof(tmp), &tmp);
+ else
+ usb_ocp_read(tp, index, sizeof(tmp), &tmp);
+
+ tmp = __le32_to_cpu(tmp) & ~mask;
+ tmp |= data;
+ tmp = __cpu_to_le32(tmp);
+
+ if (type == MCU_TYPE_PLA)
+ pla_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
+ else
+ usb_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
+}
+
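+/* MDIO access is done through PLA_PHYAR: a write sets PHYAR_FLAG and
+ * polls (up to 20 times, 25us apart) for the hardware to clear it; a
+ * read issues the address with the flag clear and polls for the
+ * hardware to set it before picking up the data.
+ */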
+static void r8152_mdio_write(struct r8152 *tp, u32 reg_addr, u32 value)
+{
+ u32 ocp_data;
+ int i;
+
+ ocp_data = PHYAR_FLAG | ((reg_addr & 0x1f) << 16) |
+ (value & 0xffff);
+
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_PHYAR, ocp_data);
+
+ for (i = 20; i > 0; i--) {
+ udelay(25);
+ ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_PHYAR);
+ if (!(ocp_data & PHYAR_FLAG))
+ break;
+ }
+ udelay(20);
+}
+
+static int r8152_mdio_read(struct r8152 *tp, u32 reg_addr)
+{
+ u32 ocp_data;
+ int i;
+
+ ocp_data = (reg_addr & 0x1f) << 16;
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_PHYAR, ocp_data);
+
+ for (i = 20; i > 0; i--) {
+ udelay(25);
+ ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_PHYAR);
+ if (ocp_data & PHYAR_FLAG)
+ break;
+ }
+ udelay(20);
+
+ if (!(ocp_data & PHYAR_FLAG))
+ return -EAGAIN;
+
+ return (u16)(ocp_data & 0xffff);
+}
+
+static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+
+ if (phy_id != R8152_PHY_ID)
+ return -EINVAL;
+
+ return r8152_mdio_read(tp, reg);
+}
+
+static
+void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+
+ if (phy_id != R8152_PHY_ID)
+ return;
+
+ r8152_mdio_write(tp, reg, val);
+}
+
+static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data)
+{
+ u16 ocp_base, ocp_index;
+
+ ocp_base = addr & 0xf000;
+ if (ocp_base != tp->ocp_base) {
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, ocp_base);
+ tp->ocp_base = ocp_base;
+ }
+
+ ocp_index = (addr & 0x0fff) | 0xb000;
+ ocp_write_word(tp, MCU_TYPE_PLA, ocp_index, data);
+}
+
+static inline void set_ethernet_addr(struct r8152 *tp)
+{
+ struct net_device *dev = tp->netdev;
+ u8 *node_id;
+
+ node_id = kmalloc(sizeof(u8) * 8, GFP_KERNEL);
+ if (!node_id) {
+ netif_err(tp, probe, dev, "out of memory");
+ return;
+ }
+
+ if (pla_ocp_read(tp, PLA_IDR, sizeof(u8) * 8, node_id) < 0)
+ netif_notice(tp, probe, dev, "inet addr fail\n");
+ else {
+ memcpy(dev->dev_addr, node_id, dev->addr_len);
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+ }
+ kfree(node_id);
+}
+
+static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
+ pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, 8, addr->sa_data);
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
+
+ return 0;
+}
+
+static int alloc_all_urbs(struct r8152 *tp)
+{
+ tp->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!tp->rx_urb)
+ return 0;
+ tp->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!tp->tx_urb) {
+ usb_free_urb(tp->rx_urb);
+ return 0;
+ }
+
+ return 1;
+}
+
+static void free_all_urbs(struct r8152 *tp)
+{
+ usb_free_urb(tp->rx_urb);
+ usb_free_urb(tp->tx_urb);
+}
+
+static struct net_device_stats *rtl8152_get_stats(struct net_device *dev)
+{
+ return &dev->stats;
+}
+
+static void read_bulk_callback(struct urb *urb)
+{
+ struct r8152 *tp;
+ unsigned pkt_len;
+ struct sk_buff *skb;
+ struct net_device *netdev;
+ struct net_device_stats *stats;
+ int status = urb->status;
+ int result;
+ struct rx_desc *rx_desc;
+
+ tp = urb->context;
+ if (!tp)
+ return;
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+ netdev = tp->netdev;
+ if (!netif_device_present(netdev))
+ return;
+
+ stats = rtl8152_get_stats(netdev);
+ switch (status) {
+ case 0:
+ break;
+ case -ESHUTDOWN:
+ set_bit(RTL8152_UNPLUG, &tp->flags);
+ netif_device_detach(tp->netdev);
+ case -ENOENT:
+ return; /* the urb is in unlink state */
+ case -ETIME:
+ pr_warn_ratelimited("maybe a reset is needed?\n");
+ goto goon;
+ default:
+ pr_warn_ratelimited("Rx status %d\n", status);
+ goto goon;
+ }
+
+ /* protect against short packets (tell me why we got some?!?) */
+ if (urb->actual_length < sizeof(*rx_desc))
+ goto goon;
+
+
+ rx_desc = (struct rx_desc *)urb->transfer_buffer;
+ pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
+ if (urb->actual_length < sizeof(struct rx_desc) + pkt_len)
+ goto goon;
+
+ skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
+ if (!skb)
+ goto goon;
+
+ memcpy(skb->data, tp->rx_skb->data + sizeof(struct rx_desc), pkt_len);
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, netdev);
+ netif_rx(skb);
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+goon:
+ usb_fill_bulk_urb(tp->rx_urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
+ tp->rx_skb->data, RTL8152_RMS + sizeof(struct rx_desc),
+ (usb_complete_t)read_bulk_callback, tp);
+ result = usb_submit_urb(tp->rx_urb, GFP_ATOMIC);
+ if (result == -ENODEV) {
+ netif_device_detach(tp->netdev);
+ } else if (result) {
+ set_bit(RX_URB_FAIL, &tp->flags);
+ goto resched;
+ } else {
+ clear_bit(RX_URB_FAIL, &tp->flags);
+ }
+
+ return;
+resched:
+ tasklet_schedule(&tp->tl);
+}
+
+static void rx_fixup(unsigned long data)
+{
+ struct r8152 *tp;
+ int status;
+
+ tp = (struct r8152 *)data;
+ if (!test_bit(WORK_ENABLE, &tp->flags))
+ return;
+
+ status = usb_submit_urb(tp->rx_urb, GFP_ATOMIC);
+ if (status == -ENODEV) {
+ netif_device_detach(tp->netdev);
+ } else if (status) {
+ set_bit(RX_URB_FAIL, &tp->flags);
+ goto tlsched;
+ } else {
+ clear_bit(RX_URB_FAIL, &tp->flags);
+ }
+
+ return;
+tlsched:
+ tasklet_schedule(&tp->tl);
+}
+
+static void write_bulk_callback(struct urb *urb)
+{
+ struct r8152 *tp;
+ int status = urb->status;
+
+ tp = urb->context;
+ if (!tp)
+ return;
+ dev_kfree_skb_irq(tp->tx_skb);
+ if (!netif_device_present(tp->netdev))
+ return;
+ if (status)
+ dev_info(&urb->dev->dev, "%s: Tx status %d\n",
+ tp->netdev->name, status);
+ tp->netdev->trans_start = jiffies;
+ netif_wake_queue(tp->netdev);
+}
+
+static void rtl8152_tx_timeout(struct net_device *netdev)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+ struct net_device_stats *stats = rtl8152_get_stats(netdev);
+ netif_warn(tp, tx_err, netdev, "Tx timeout.\n");
+ usb_unlink_urb(tp->tx_urb);
+ stats->tx_errors++;
+}
+
+static void rtl8152_set_rx_mode(struct net_device *netdev)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+
+ if (tp->speed & LINK_STATUS)
+ set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+}
+
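+/* Builds the 64-bit multicast hash for PLA_MAR: each address sets the
+ * bit selected by the top six bits of its Ethernet CRC, and the two
+ * 32-bit halves are byte-swapped and exchanged before being written.
+ */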
+static void _rtl8152_set_rx_mode(struct net_device *netdev)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+ u32 tmp, *mc_filter; /* Multicast hash filter */
+ u32 ocp_data;
+
+ mc_filter = kmalloc(sizeof(u32) * 2, GFP_KERNEL);
+ if (!mc_filter) {
+ netif_err(tp, link, netdev, "out of memory");
+ return;
+ }
+
+ clear_bit(RTL8152_SET_RX_MODE, &tp->flags);
+ netif_stop_queue(netdev);
+ ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+ ocp_data &= ~RCR_ACPT_ALL;
+ ocp_data |= RCR_AB | RCR_APM;
+
+ if (netdev->flags & IFF_PROMISC) {
+ /* Unconditionally log net taps. */
+ netif_notice(tp, link, netdev, "Promiscuous mode enabled\n");
+ ocp_data |= RCR_AM | RCR_AAP;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else if ((netdev_mc_count(netdev) > multicast_filter_limit) ||
+ (netdev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ ocp_data |= RCR_AM;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else {
+ struct netdev_hw_addr *ha;
+
+ mc_filter[1] = mc_filter[0] = 0;
+ netdev_for_each_mc_addr(ha, netdev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ ocp_data |= RCR_AM;
+ }
+ }
+
+ tmp = mc_filter[0];
+ mc_filter[0] = __cpu_to_le32(swab32(mc_filter[1]));
+ mc_filter[1] = __cpu_to_le32(swab32(tmp));
+
+ pla_ocp_write(tp, PLA_MAR, BYTE_EN_DWORD, sizeof(u32) * 2, mc_filter);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+ netif_wake_queue(netdev);
+ kfree(mc_filter);
+}
+
+static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+ struct net_device_stats *stats = rtl8152_get_stats(netdev);
+ struct tx_desc *tx_desc;
+ int len, res;
+
+ netif_stop_queue(netdev);
+ len = skb->len;
+ if (skb_header_cloned(skb) || skb_headroom(skb) < sizeof(*tx_desc)) {
+ struct sk_buff *tx_skb;
+
+ tx_skb = skb_copy_expand(skb, sizeof(*tx_desc), 0, GFP_ATOMIC);
+ dev_kfree_skb_any(skb);
+ if (!tx_skb) {
+ stats->tx_dropped++;
+ netif_wake_queue(netdev);
+ return NETDEV_TX_OK;
+ }
+ skb = tx_skb;
+ }
+ tx_desc = (struct tx_desc *)skb_push(skb, sizeof(*tx_desc));
+ memset(tx_desc, 0, sizeof(*tx_desc));
+ tx_desc->opts1 = cpu_to_le32((len & TX_LEN_MASK) | TX_FS | TX_LS);
+ tp->tx_skb = skb;
+ skb_tx_timestamp(skb);
+ usb_fill_bulk_urb(tp->tx_urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
+ skb->data, skb->len,
+ (usb_complete_t)write_bulk_callback, tp);
+ res = usb_submit_urb(tp->tx_urb, GFP_ATOMIC);
+ if (res) {
+ /* Can we get/handle EPIPE here? */
+ if (res == -ENODEV) {
+ netif_device_detach(tp->netdev);
+ } else {
+ netif_warn(tp, tx_err, netdev,
+ "failed tx_urb %d\n", res);
+ stats->tx_errors++;
+ netif_start_queue(netdev);
+ }
+ } else {
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static void r8152b_reset_packet_filter(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_FMC);
+ ocp_data &= ~FMC_FCR_MCU_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_FMC, ocp_data);
+ ocp_data |= FMC_FCR_MCU_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_FMC, ocp_data);
+}
+
+static void rtl8152_nic_reset(struct r8152 *tp)
+{
+ int i;
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, CR_RST);
+
+ for (i = 0; i < 1000; i++) {
+ if (!(ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR) & CR_RST))
+ break;
+ udelay(100);
+ }
+}
+
+static inline u8 rtl8152_get_speed(struct r8152 *tp)
+{
+ return ocp_read_byte(tp, MCU_TYPE_PLA, PLA_PHYSTATUS);
+}
+
+static int rtl8152_enable(struct r8152 *tp)
+{
+ u32 ocp_data;
+ u8 speed;
+
+ speed = rtl8152_get_speed(tp);
+ if (speed & _100bps) {
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
+ ocp_data &= ~EEEP_CR_EEEP_TX;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
+ } else {
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
+ ocp_data |= EEEP_CR_EEEP_TX;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
+ }
+
+ r8152b_reset_packet_filter(tp);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR);
+ ocp_data |= CR_RE | CR_TE;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
+ ocp_data &= ~RXDY_GATED_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+
+ usb_fill_bulk_urb(tp->rx_urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
+ tp->rx_skb->data, RTL8152_RMS + sizeof(struct rx_desc),
+ (usb_complete_t)read_bulk_callback, tp);
+
+ return usb_submit_urb(tp->rx_urb, GFP_KERNEL);
+}
+
+static void rtl8152_disable(struct r8152 *tp)
+{
+ u32 ocp_data;
+ int i;
+
+ ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+ ocp_data &= ~RCR_ACPT_ALL;
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+
+ usb_kill_urb(tp->tx_urb);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
+ ocp_data |= RXDY_GATED_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+
+ for (i = 0; i < 1000; i++) {
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ if ((ocp_data & FIFO_EMPTY) == FIFO_EMPTY)
+ break;
+ mdelay(1);
+ }
+
+ for (i = 0; i < 1000; i++) {
+ if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0) & TCR0_TX_EMPTY)
+ break;
+ mdelay(1);
+ }
+
+ usb_kill_urb(tp->rx_urb);
+
+ rtl8152_nic_reset(tp);
+}
+
+static void r8152b_exit_oob(struct r8152 *tp)
+{
+ u32 ocp_data;
+ int i;
+
+ ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+ ocp_data &= ~RCR_ACPT_ALL;
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
+ ocp_data |= RXDY_GATED_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0x00);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ ocp_data &= ~NOW_IS_OOB;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ ocp_data &= ~MCU_BORW_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
+ for (i = 0; i < 1000; i++) {
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ if (ocp_data & LINK_LIST_READY)
+ break;
+ mdelay(1);
+ }
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ ocp_data |= RE_INIT_LL;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
+ for (i = 0; i < 1000; i++) {
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ if (ocp_data & LINK_LIST_READY)
+ break;
+ mdelay(1);
+ }
+
+ rtl8152_nic_reset(tp);
+
+ /* rx share fifo credit full threshold */
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_DEV_STAT);
+ ocp_data &= STAT_SPEED_MASK;
+ if (ocp_data == STAT_SPEED_FULL) {
+ /* rx share fifo credit near full threshold */
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1,
+ RXFIFO_THR2_FULL);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2,
+ RXFIFO_THR3_FULL);
+ } else {
+ /* rx share fifo credit near full threshold */
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1,
+ RXFIFO_THR2_HIGH);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2,
+ RXFIFO_THR3_HIGH);
+ }
+
+ /* TX share fifo free credit full threshold */
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL);
+
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_TX_AGG, TX_AGG_MAX_THRESHOLD);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_BUF_THR);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_TX_DMA,
+ TEST_MODE_DISABLE | TX_SIZE_ADJUST1);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
+ ocp_data &= ~CPCR_RX_VLAN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0);
+ ocp_data |= TCR0_AUTO_FIFO;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR0, ocp_data);
+}
+
+static void r8152b_enter_oob(struct r8152 *tp)
+{
+ u32 ocp_data;
+ int i;
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ ocp_data &= ~NOW_IS_OOB;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_OOB);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB);
+
+ rtl8152_disable(tp);
+
+ for (i = 0; i < 1000; i++) {
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ if (ocp_data & LINK_LIST_READY)
+ break;
+ mdelay(1);
+ }
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ ocp_data |= RE_INIT_LL;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
+ for (i = 0; i < 1000; i++) {
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ if (ocp_data & LINK_LIST_READY)
+ break;
+ mdelay(1);
+ }
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
+ ocp_data |= MAGIC_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
+ ocp_data |= CPCR_RX_VLAN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PAL_BDC_CR);
+ ocp_data |= ALDPS_PROXY_MODE;
+ ocp_write_word(tp, MCU_TYPE_PLA, PAL_BDC_CR, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5, LAN_WAKE_EN);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
+ ocp_data &= ~RXDY_GATED_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+
+ ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+ ocp_data |= RCR_APM | RCR_AM | RCR_AB;
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+}
+
+static void r8152b_disable_aldps(struct r8152 *tp)
+{
+ ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPDNPS | LINKENA | DIS_SDSAVE);
+ msleep(20);
+}
+
+static inline void r8152b_enable_aldps(struct r8152 *tp)
+{
+ ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPWRSAVE | ENPDNPS |
+ LINKENA | DIS_SDSAVE);
+}
+
+static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
+{
+ u16 bmcr, anar;
+ int ret = 0;
+
+ cancel_delayed_work_sync(&tp->schedule);
+ anar = r8152_mdio_read(tp, MII_ADVERTISE);
+ anar &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL);
+
+ if (autoneg == AUTONEG_DISABLE) {
+ if (speed == SPEED_10) {
+ bmcr = 0;
+ anar |= ADVERTISE_10HALF | ADVERTISE_10FULL;
+ } else if (speed == SPEED_100) {
+ bmcr = BMCR_SPEED100;
+ anar |= ADVERTISE_100HALF | ADVERTISE_100FULL;
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ bmcr |= BMCR_FULLDPLX;
+ } else {
+ if (speed == SPEED_10) {
+ if (duplex == DUPLEX_FULL)
+ anar |= ADVERTISE_10HALF | ADVERTISE_10FULL;
+ else
+ anar |= ADVERTISE_10HALF;
+ } else if (speed == SPEED_100) {
+ if (duplex == DUPLEX_FULL) {
+ anar |= ADVERTISE_10HALF | ADVERTISE_10FULL;
+ anar |= ADVERTISE_100HALF | ADVERTISE_100FULL;
+ } else {
+ anar |= ADVERTISE_10HALF;
+ anar |= ADVERTISE_100HALF;
+ }
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
+ }
+
+ r8152_mdio_write(tp, MII_ADVERTISE, anar);
+ r8152_mdio_write(tp, MII_BMCR, bmcr);
+
+out:
+ schedule_delayed_work(&tp->schedule, 5 * HZ);
+
+ return ret;
+}
+
+static void rtl8152_down(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
+ ocp_data &= ~POWER_CUT;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
+
+ r8152b_disable_aldps(tp);
+ r8152b_enter_oob(tp);
+ r8152b_enable_aldps(tp);
+}
+
+static void set_carrier(struct r8152 *tp)
+{
+ struct net_device *netdev = tp->netdev;
+ u8 speed;
+
+ speed = rtl8152_get_speed(tp);
+
+ if (speed & LINK_STATUS) {
+ if (!(tp->speed & LINK_STATUS)) {
+ rtl8152_enable(tp);
+ set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+ netif_carrier_on(netdev);
+ }
+ } else {
+ if (tp->speed & LINK_STATUS) {
+ netif_carrier_off(netdev);
+ rtl8152_disable(tp);
+ }
+ }
+ tp->speed = speed;
+}
+
+static void rtl_work_func_t(struct work_struct *work)
+{
+ struct r8152 *tp = container_of(work, struct r8152, schedule.work);
+
+ if (!test_bit(WORK_ENABLE, &tp->flags))
+ goto out1;
+
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ goto out1;
+
+ set_carrier(tp);
+
+ if (test_bit(RTL8152_SET_RX_MODE, &tp->flags))
+ _rtl8152_set_rx_mode(tp->netdev);
+
+ schedule_delayed_work(&tp->schedule, HZ);
+
+out1:
+ return;
+}
+
+static int rtl8152_open(struct net_device *netdev)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+ int res = 0;
+
+ tp->speed = rtl8152_get_speed(tp);
+ if (tp->speed & LINK_STATUS) {
+ res = rtl8152_enable(tp);
+ if (res) {
+ if (res == -ENODEV)
+ netif_device_detach(tp->netdev);
+
+ netif_err(tp, ifup, netdev,
+ "rtl8152_open failed: %d\n", res);
+ return res;
+ }
+
+ netif_carrier_on(netdev);
+ } else {
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+ }
+
+ rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL);
+ netif_start_queue(netdev);
+ set_bit(WORK_ENABLE, &tp->flags);
+ schedule_delayed_work(&tp->schedule, 0);
+
+ return res;
+}
+
+static int rtl8152_close(struct net_device *netdev)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+ int res = 0;
+
+ clear_bit(WORK_ENABLE, &tp->flags);
+ cancel_delayed_work_sync(&tp->schedule);
+ netif_stop_queue(netdev);
+ rtl8152_disable(tp);
+
+ return res;
+}
+
+static void rtl_clear_bp(struct r8152 *tp)
+{
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_0, 0);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_2, 0);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_4, 0);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_6, 0);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_0, 0);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0);
+ mdelay(3);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0);
+}
+
+static void r8152b_enable_eee(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
+ ocp_data |= EEE_RX_EN | EEE_TX_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
+ ocp_reg_write(tp, OCP_EEE_CONFIG1, RG_TXLPI_MSK_HFDUP | RG_MATCLR_EN |
+ EEE_10_CAP | EEE_NWAY_EN |
+ TX_QUIET_EN | RX_QUIET_EN |
+ SDRISETIME | RG_RXLPI_MSK_HFDUP |
+ SDFALLTIME);
+ ocp_reg_write(tp, OCP_EEE_CONFIG2, RG_LPIHYS_NUM | RG_DACQUIET_EN |
+ RG_LDVQUIET_EN | RG_CKRSEL |
+ RG_EEEPRG_EN);
+ ocp_reg_write(tp, OCP_EEE_CONFIG3, FST_SNR_EYE_R | RG_LFS_SEL | MSK_PH);
+ ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | DEVICE_ADDR);
+ ocp_reg_write(tp, OCP_EEE_DATA, EEE_ADDR);
+ ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | DEVICE_ADDR);
+ ocp_reg_write(tp, OCP_EEE_DATA, EEE_DATA);
+ ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
+}
+
+static void r8152b_enable_fc(struct r8152 *tp)
+{
+ u16 anar;
+
+ anar = r8152_mdio_read(tp, MII_ADVERTISE);
+ anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ r8152_mdio_write(tp, MII_ADVERTISE, anar);
+}
+
+static void r8152b_hw_phy_cfg(struct r8152 *tp)
+{
+ r8152_mdio_write(tp, MII_BMCR, BMCR_ANENABLE);
+ r8152b_disable_aldps(tp);
+}
+
+static void r8152b_init(struct r8152 *tp)
+{
+ u32 ocp_data;
+ int i;
+
+ rtl_clear_bp(tp);
+
+ if (tp->version == RTL_VER_01) {
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
+ ocp_data &= ~LED_MODE_MASK;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);
+ }
+
+ r8152b_hw_phy_cfg(tp);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
+ ocp_data &= ~POWER_CUT;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
+ ocp_data &= ~RWSUME_INDICATE;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
+
+ r8152b_exit_oob(tp);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
+ ocp_data |= TX_10M_IDLE_EN | PFM_PWM_SWITCH;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
+ ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL);
+ ocp_data &= ~MCU_CLK_RATIO_MASK;
+ ocp_data |= MCU_CLK_RATIO | D3_CLK_GATED_EN;
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ocp_data);
+ ocp_data = GPHY_STS_MSK | SPEED_DOWN_MSK |
+ SPDWN_RXDV_MSK | SPDWN_LINKCHG_MSK;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_GPHY_INTR_IMR, ocp_data);
+
+ r8152b_enable_eee(tp);
+ r8152b_enable_aldps(tp);
+ r8152b_enable_fc(tp);
+
+ r8152_mdio_write(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE |
+ BMCR_ANRESTART);
+ for (i = 0; i < 100; i++) {
+ udelay(100);
+ if (!(r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET))
+ break;
+ }
+
+ /* disable rx aggregation */
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
+ ocp_data |= RX_AGG_DISABLE;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
+}
+
+static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct r8152 *tp = usb_get_intfdata(intf);
+
+ netif_device_detach(tp->netdev);
+
+ if (netif_running(tp->netdev)) {
+ clear_bit(WORK_ENABLE, &tp->flags);
+ cancel_delayed_work_sync(&tp->schedule);
+ }
+
+ rtl8152_down(tp);
+
+ return 0;
+}
+
+static int rtl8152_resume(struct usb_interface *intf)
+{
+ struct r8152 *tp = usb_get_intfdata(intf);
+
+ r8152b_init(tp);
+ netif_device_attach(tp->netdev);
+ if (netif_running(tp->netdev)) {
+ rtl8152_enable(tp);
+ set_bit(WORK_ENABLE, &tp->flags);
+ set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+ schedule_delayed_work(&tp->schedule, 0);
+ }
+
+ return 0;
+}
+
+static void rtl8152_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+
+ strncpy(info->driver, MODULENAME, sizeof(info->driver));
+ strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ usb_make_path(tp->udev, info->bus_info, sizeof(info->bus_info));
+}
+
+static
+int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+
+ if (!tp->mii.mdio_read)
+ return -EOPNOTSUPP;
+
+ return mii_ethtool_gset(&tp->mii, cmd);
+}
+
+static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct r8152 *tp = netdev_priv(dev);
+
+ return rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex);
+}
+
+static const struct ethtool_ops ops = {
+ .get_drvinfo = rtl8152_get_drvinfo,
+ .get_settings = rtl8152_get_settings,
+ .set_settings = rtl8152_set_settings,
+ .get_link = ethtool_op_get_link,
+};
+
+static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+ struct mii_ioctl_data *data = if_mii(rq);
+ int res = 0;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = R8152_PHY_ID; /* Internal PHY */
+ break;
+
+ case SIOCGMIIREG:
+ data->val_out = r8152_mdio_read(tp, data->reg_num);
+ break;
+
+ case SIOCSMIIREG:
+ if (!capable(CAP_NET_ADMIN)) {
+ res = -EPERM;
+ break;
+ }
+ r8152_mdio_write(tp, data->reg_num, data->val_in);
+ break;
+
+ default:
+ res = -EOPNOTSUPP;
+ }
+
+ return res;
+}
+
+static const struct net_device_ops rtl8152_netdev_ops = {
+ .ndo_open = rtl8152_open,
+ .ndo_stop = rtl8152_close,
+ .ndo_do_ioctl = rtl8152_ioctl,
+ .ndo_start_xmit = rtl8152_start_xmit,
+ .ndo_tx_timeout = rtl8152_tx_timeout,
+ .ndo_set_rx_mode = rtl8152_set_rx_mode,
+ .ndo_set_mac_address = rtl8152_set_mac_address,
+
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static void r8152b_get_version(struct r8152 *tp)
+{
+ u32 ocp_data;
+ u16 version;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR1);
+ version = (u16)(ocp_data & VERSION_MASK);
+
+ switch (version) {
+ case 0x4c00:
+ tp->version = RTL_VER_01;
+ break;
+ case 0x4c10:
+ tp->version = RTL_VER_02;
+ break;
+ default:
+ netif_info(tp, probe, tp->netdev,
+ "Unknown version 0x%04x\n", version);
+ break;
+ }
+}
+
+static int rtl8152_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct r8152 *tp;
+ struct net_device *netdev;
+
+ if (udev->actconfig->desc.bConfigurationValue != 1) {
+ usb_driver_set_configuration(udev, 1);
+ return -ENODEV;
+ }
+
+ netdev = alloc_etherdev(sizeof(struct r8152));
+ if (!netdev) {
+ dev_err(&intf->dev, "Out of memory");
+ return -ENOMEM;
+ }
+
+ tp = netdev_priv(netdev);
+ tp->msg_enable = 0x7FFF;
+
+ tasklet_init(&tp->tl, rx_fixup, (unsigned long)tp);
+ INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
+
+ tp->udev = udev;
+ tp->netdev = netdev;
+ netdev->netdev_ops = &rtl8152_netdev_ops;
+ netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
+ netdev->features &= ~NETIF_F_IP_CSUM;
+ SET_ETHTOOL_OPS(netdev, &ops);
+ tp->speed = 0;
+
+ tp->mii.dev = netdev;
+ tp->mii.mdio_read = read_mii_word;
+ tp->mii.mdio_write = write_mii_word;
+ tp->mii.phy_id_mask = 0x3f;
+ tp->mii.reg_num_mask = 0x1f;
+ tp->mii.phy_id = R8152_PHY_ID;
+ tp->mii.supports_gmii = 0;
+
+ r8152b_get_version(tp);
+ r8152b_init(tp);
+ set_ethernet_addr(tp);
+
+ if (!alloc_all_urbs(tp)) {
+ netif_err(tp, probe, netdev, "out of memory");
+ goto out;
+ }
+
+ tp->rx_skb = netdev_alloc_skb(netdev,
+ RTL8152_RMS + sizeof(struct rx_desc));
+ if (!tp->rx_skb)
+ goto out1;
+
+ usb_set_intfdata(intf, tp);
+ SET_NETDEV_DEV(netdev, &intf->dev);
+
+ if (register_netdev(netdev) != 0) {
+ netif_err(tp, probe, netdev, "couldn't register the device");
+ goto out2;
+ }
+
+ netif_info(tp, probe, netdev, "%s", DRIVER_VERSION);
+
+ return 0;
+
+out2:
+ usb_set_intfdata(intf, NULL);
+ dev_kfree_skb(tp->rx_skb);
+out1:
+ free_all_urbs(tp);
+out:
+ free_netdev(netdev);
+ return -EIO;
+}
+
+static void rtl8152_unload(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ if (tp->version != RTL_VER_01) {
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
+ ocp_data |= POWER_CUT;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
+ }
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
+ ocp_data &= ~RWSUME_INDICATE;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
+}
+
+static void rtl8152_disconnect(struct usb_interface *intf)
+{
+ struct r8152 *tp = usb_get_intfdata(intf);
+
+ usb_set_intfdata(intf, NULL);
+ if (tp) {
+ set_bit(RTL8152_UNPLUG, &tp->flags);
+ tasklet_kill(&tp->tl);
+ unregister_netdev(tp->netdev);
+ rtl8152_unload(tp);
+ free_all_urbs(tp);
+ if (tp->rx_skb)
+ dev_kfree_skb(tp->rx_skb);
+ free_netdev(tp->netdev);
+ }
+}
+
+/* table of devices that work with this driver */
+static struct usb_device_id rtl8152_table[] = {
+ {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(usb, rtl8152_table);
+
+static struct usb_driver rtl8152_driver = {
+ .name = MODULENAME,
+ .probe = rtl8152_probe,
+ .disconnect = rtl8152_disconnect,
+ .id_table = rtl8152_table,
+ .suspend = rtl8152_suspend,
+ .resume = rtl8152_resume
+};
+
+static int __init usb_rtl8152_init(void)
+{
+ return usb_register(&rtl8152_driver);
+}
+
+static void __exit usb_rtl8152_exit(void)
+{
+ usb_deregister(&rtl8152_driver);
+}
+
+module_init(usb_rtl8152_init);
+module_exit(usb_rtl8152_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5007775..3c23fdc 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -39,7 +39,6 @@ module_param(gso, bool, 0444);
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN 128
-#define VIRTNET_SEND_COMMAND_SG_MAX 2
#define VIRTNET_DRIVER_VERSION "1.0.0"
struct virtnet_stats {
@@ -444,7 +443,7 @@ static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
- err = virtqueue_add_buf(rq->vq, rq->sg, 0, 2, skb, gfp);
+ err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
if (err < 0)
dev_kfree_skb(skb);
@@ -489,8 +488,8 @@ static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
/* chain first in list head */
first->private = (unsigned long)list;
- err = virtqueue_add_buf(rq->vq, rq->sg, 0, MAX_SKB_FRAGS + 2,
- first, gfp);
+ err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
+ first, gfp);
if (err < 0)
give_pages(rq, first);
@@ -508,7 +507,7 @@ static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
sg_init_one(rq->sg, page_address(page), PAGE_SIZE);
- err = virtqueue_add_buf(rq->vq, rq->sg, 0, 1, page, gfp);
+ err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, page, gfp);
if (err < 0)
give_pages(rq, page);
@@ -582,7 +581,7 @@ static void refill_work(struct work_struct *work)
bool still_empty;
int i;
- for (i = 0; i < vi->max_queue_pairs; i++) {
+ for (i = 0; i < vi->curr_queue_pairs; i++) {
struct receive_queue *rq = &vi->rq[i];
napi_disable(&rq->napi);
@@ -637,7 +636,7 @@ static int virtnet_open(struct net_device *dev)
struct virtnet_info *vi = netdev_priv(dev);
int i;
- for (i = 0; i < vi->max_queue_pairs; i++) {
+ for (i = 0; i < vi->curr_queue_pairs; i++) {
/* Make sure we have some buffers: if oom use wq. */
if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
@@ -711,8 +710,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr);
num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
- return virtqueue_add_buf(sq->vq, sq->sg, num_sg,
- 0, skb, GFP_ATOMIC);
+ return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -767,32 +765,35 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
* never fail unless improperly formated.
*/
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
- struct scatterlist *data, int out, int in)
+ struct scatterlist *out,
+ struct scatterlist *in)
{
- struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
+ struct scatterlist *sgs[4], hdr, stat;
struct virtio_net_ctrl_hdr ctrl;
virtio_net_ctrl_ack status = ~0;
- unsigned int tmp;
- int i;
+ unsigned out_num = 0, in_num = 0, tmp;
/* Caller should know better */
- BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
- (out + in > VIRTNET_SEND_COMMAND_SG_MAX));
-
- out++; /* Add header */
- in++; /* Add return status */
+ BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
ctrl.class = class;
ctrl.cmd = cmd;
+ /* Add header */
+ sg_init_one(&hdr, &ctrl, sizeof(ctrl));
+ sgs[out_num++] = &hdr;
- sg_init_table(sg, out + in);
+ if (out)
+ sgs[out_num++] = out;
+ if (in)
+ sgs[out_num + in_num++] = in;
- sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
- for_each_sg(data, s, out + in - 2, i)
- sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
- sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
+ /* Add return status. */
+ sg_init_one(&stat, &status, sizeof(status));
+ sgs[out_num + in_num++] = &stat;
- BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);
+ BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
+ BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
+ < 0);
virtqueue_kick(vi->cvq);
@@ -821,7 +822,7 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
sg_init_one(&sg, addr->sa_data, dev->addr_len);
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
VIRTIO_NET_CTRL_MAC_ADDR_SET,
- &sg, 1, 0)) {
+ &sg, NULL)) {
dev_warn(&vdev->dev,
"Failed to set mac address by vq command.\n");
return -EINVAL;
@@ -889,8 +890,7 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
rtnl_lock();
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
- VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL,
- 0, 0))
+ VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))
dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
rtnl_unlock();
}
@@ -900,6 +900,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
struct scatterlist sg;
struct virtio_net_ctrl_mq s;
struct net_device *dev = vi->dev;
+ int i;
if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
return 0;
@@ -908,12 +909,16 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
sg_init_one(&sg, &s, sizeof(s));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
- VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, 1, 0)){
+ VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) {
dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
queue_pairs);
return -EINVAL;
- } else
+ } else {
+ for (i = vi->curr_queue_pairs; i < queue_pairs; i++)
+ if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+ schedule_delayed_work(&vi->refill, 0);
vi->curr_queue_pairs = queue_pairs;
+ }
return 0;
}
@@ -955,7 +960,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
VIRTIO_NET_CTRL_RX_PROMISC,
- sg, 1, 0))
+ sg, NULL))
dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
promisc ? "en" : "dis");
@@ -963,7 +968,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
VIRTIO_NET_CTRL_RX_ALLMULTI,
- sg, 1, 0))
+ sg, NULL))
dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
allmulti ? "en" : "dis");
@@ -1000,7 +1005,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
VIRTIO_NET_CTRL_MAC_TABLE_SET,
- sg, 2, 0))
+ sg, NULL))
dev_warn(&dev->dev, "Failed to set MAC fitler table.\n");
kfree(buf);
@@ -1015,7 +1020,7 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
sg_init_one(&sg, &vid, sizeof(vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
- VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
+ VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL))
dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
return 0;
}
@@ -1029,7 +1034,7 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
sg_init_one(&sg, &vid, sizeof(vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
- VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
+ VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL))
dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
return 0;
}
@@ -1570,7 +1575,7 @@ static int virtnet_probe(struct virtio_device *vdev)
}
/* Last of all, set up some receive buffers. */
- for (i = 0; i < vi->max_queue_pairs; i++) {
+ for (i = 0; i < vi->curr_queue_pairs; i++) {
try_fill_recv(&vi->rq[i], GFP_KERNEL);
/* If we didn't even get one input buffer, we're useless. */
@@ -1694,7 +1699,7 @@ static int virtnet_restore(struct virtio_device *vdev)
netif_device_attach(vi->dev);
- for (i = 0; i < vi->max_queue_pairs; i++)
+ for (i = 0; i < vi->curr_queue_pairs; i++)
if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 5329541..6125adb 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -4506,108 +4506,75 @@ static int setup_proc_entry( struct net_device *dev,
apriv->proc_entry = proc_mkdir_mode(apriv->proc_name, airo_perm,
airo_entry);
if (!apriv->proc_entry)
- goto fail;
- apriv->proc_entry->uid = proc_kuid;
- apriv->proc_entry->gid = proc_kgid;
+ return -ENOMEM;
+ proc_set_user(apriv->proc_entry, proc_kuid, proc_kgid);
/* Setup the StatsDelta */
entry = proc_create_data("StatsDelta", S_IRUGO & proc_perm,
apriv->proc_entry, &proc_statsdelta_ops, dev);
if (!entry)
- goto fail_stats_delta;
- entry->uid = proc_kuid;
- entry->gid = proc_kgid;
+ goto fail;
+ proc_set_user(entry, proc_kuid, proc_kgid);
/* Setup the Stats */
entry = proc_create_data("Stats", S_IRUGO & proc_perm,
apriv->proc_entry, &proc_stats_ops, dev);
if (!entry)
- goto fail_stats;
- entry->uid = proc_kuid;
- entry->gid = proc_kgid;
+ goto fail;
+ proc_set_user(entry, proc_kuid, proc_kgid);
/* Setup the Status */
entry = proc_create_data("Status", S_IRUGO & proc_perm,
apriv->proc_entry, &proc_status_ops, dev);
if (!entry)
- goto fail_status;
- entry->uid = proc_kuid;
- entry->gid = proc_kgid;
+ goto fail;
+ proc_set_user(entry, proc_kuid, proc_kgid);
/* Setup the Config */
entry = proc_create_data("Config", proc_perm,
apriv->proc_entry, &proc_config_ops, dev);
if (!entry)
- goto fail_config;
- entry->uid = proc_kuid;
- entry->gid = proc_kgid;
+ goto fail;
+ proc_set_user(entry, proc_kuid, proc_kgid);
/* Setup the SSID */
entry = proc_create_data("SSID", proc_perm,
apriv->proc_entry, &proc_SSID_ops, dev);
if (!entry)
- goto fail_ssid;
- entry->uid = proc_kuid;
- entry->gid = proc_kgid;
+ goto fail;
+ proc_set_user(entry, proc_kuid, proc_kgid);
/* Setup the APList */
entry = proc_create_data("APList", proc_perm,
apriv->proc_entry, &proc_APList_ops, dev);
if (!entry)
- goto fail_aplist;
- entry->uid = proc_kuid;
- entry->gid = proc_kgid;
+ goto fail;
+ proc_set_user(entry, proc_kuid, proc_kgid);
/* Setup the BSSList */
entry = proc_create_data("BSSList", proc_perm,
apriv->proc_entry, &proc_BSSList_ops, dev);
if (!entry)
- goto fail_bsslist;
- entry->uid = proc_kuid;
- entry->gid = proc_kgid;
+ goto fail;
+ proc_set_user(entry, proc_kuid, proc_kgid);
/* Setup the WepKey */
entry = proc_create_data("WepKey", proc_perm,
apriv->proc_entry, &proc_wepkey_ops, dev);
if (!entry)
- goto fail_wepkey;
- entry->uid = proc_kuid;
- entry->gid = proc_kgid;
-
+ goto fail;
+ proc_set_user(entry, proc_kuid, proc_kgid);
return 0;
-fail_wepkey:
- remove_proc_entry("BSSList", apriv->proc_entry);
-fail_bsslist:
- remove_proc_entry("APList", apriv->proc_entry);
-fail_aplist:
- remove_proc_entry("SSID", apriv->proc_entry);
-fail_ssid:
- remove_proc_entry("Config", apriv->proc_entry);
-fail_config:
- remove_proc_entry("Status", apriv->proc_entry);
-fail_status:
- remove_proc_entry("Stats", apriv->proc_entry);
-fail_stats:
- remove_proc_entry("StatsDelta", apriv->proc_entry);
-fail_stats_delta:
- remove_proc_entry(apriv->proc_name, airo_entry);
fail:
+ remove_proc_subtree(apriv->proc_name, airo_entry);
return -ENOMEM;
}
static int takedown_proc_entry( struct net_device *dev,
- struct airo_info *apriv ) {
- if ( !apriv->proc_entry->namelen ) return 0;
- remove_proc_entry("Stats",apriv->proc_entry);
- remove_proc_entry("StatsDelta",apriv->proc_entry);
- remove_proc_entry("Status",apriv->proc_entry);
- remove_proc_entry("Config",apriv->proc_entry);
- remove_proc_entry("SSID",apriv->proc_entry);
- remove_proc_entry("APList",apriv->proc_entry);
- remove_proc_entry("BSSList",apriv->proc_entry);
- remove_proc_entry("WepKey",apriv->proc_entry);
- remove_proc_entry(apriv->proc_name,airo_entry);
+ struct airo_info *apriv )
+{
+ remove_proc_subtree(apriv->proc_name, airo_entry);
return 0;
}
@@ -4663,8 +4630,7 @@ static ssize_t proc_write( struct file *file,
static int proc_status_open(struct inode *inode, struct file *file)
{
struct proc_data *data;
- struct proc_dir_entry *dp = PDE(inode);
- struct net_device *dev = dp->data;
+ struct net_device *dev = PDE_DATA(inode);
struct airo_info *apriv = dev->ml_priv;
CapabilityRid cap_rid;
StatusRid status_rid;
@@ -4746,8 +4712,7 @@ static int proc_stats_rid_open( struct inode *inode,
u16 rid )
{
struct proc_data *data;
- struct proc_dir_entry *dp = PDE(inode);
- struct net_device *dev = dp->data;
+ struct net_device *dev = PDE_DATA(inode);
struct airo_info *apriv = dev->ml_priv;
StatsRid stats;
int i, j;
@@ -4809,8 +4774,7 @@ static inline int sniffing_mode(struct airo_info *ai)
static void proc_config_on_close(struct inode *inode, struct file *file)
{
struct proc_data *data = file->private_data;
- struct proc_dir_entry *dp = PDE(inode);
- struct net_device *dev = dp->data;
+ struct net_device *dev = PDE_DATA(inode);
struct airo_info *ai = dev->ml_priv;
char *line;
@@ -5021,8 +4985,7 @@ static const char *get_rmode(__le16 mode)
static int proc_config_open(struct inode *inode, struct file *file)
{
struct proc_data *data;
- struct proc_dir_entry *dp = PDE(inode);
- struct net_device *dev = dp->data;
+ struct net_device *dev = PDE_DATA(inode);
struct airo_info *ai = dev->ml_priv;
int i;
__le16 mode;
@@ -5112,8 +5075,7 @@ static int proc_config_open(struct inode *inode, struct file *file)
static void proc_SSID_on_close(struct inode *inode, struct file *file)
{
struct proc_data *data = file->private_data;
- struct proc_dir_entry *dp = PDE(inode);
- struct net_device *dev = dp->data;
+ struct net_device *dev = PDE_DATA(inode);
struct airo_info *ai = dev->ml_priv;
SsidRid SSID_rid;
int i;
@@ -5148,8 +5110,7 @@ static void proc_SSID_on_close(struct inode *inode, struct file *file)
static void proc_APList_on_close( struct inode *inode, struct file *file ) {
struct proc_data *data = file->private_data;
- struct proc_dir_entry *dp = PDE(inode);
- struct net_device *dev = dp->data;
+ struct net_device *dev = PDE_DATA(inode);
struct airo_info *ai = dev->ml_priv;
APListRid APList_rid;
int i;
@@ -5283,8 +5244,7 @@ static int set_wep_tx_idx(struct airo_info *ai, u16 index, int perm, int lock)
static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
struct proc_data *data;
- struct proc_dir_entry *dp = PDE(inode);
- struct net_device *dev = dp->data;
+ struct net_device *dev = PDE_DATA(inode);
struct airo_info *ai = dev->ml_priv;
int i, rc;
char key[16];
@@ -5335,8 +5295,7 @@ static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
static int proc_wepkey_open( struct inode *inode, struct file *file )
{
struct proc_data *data;
- struct proc_dir_entry *dp = PDE(inode);
- struct net_device *dev = dp->data;
+ struct net_device *dev = PDE_DATA(inode);
struct airo_info *ai = dev->ml_priv;
char *ptr;
WepKeyRid wkr;
@@ -5384,8 +5343,7 @@ static int proc_wepkey_open( struct inode *inode, struct file *file )
static int proc_SSID_open(struct inode *inode, struct file *file)
{
struct proc_data *data;
- struct proc_dir_entry *dp = PDE(inode);
- struct net_device *dev = dp->data;
+ struct net_device *dev = PDE_DATA(inode);
struct airo_info *ai = dev->ml_priv;
int i;
char *ptr;
@@ -5428,8 +5386,7 @@ static int proc_SSID_open(struct inode *inode, struct file *file)
static int proc_APList_open( struct inode *inode, struct file *file ) {
struct proc_data *data;
- struct proc_dir_entry *dp = PDE(inode);
- struct net_device *dev = dp->data;
+ struct net_device *dev = PDE_DATA(inode);
struct airo_info *ai = dev->ml_priv;
int i;
char *ptr;
@@ -5468,8 +5425,7 @@ static int proc_APList_open( struct inode *inode, struct file *file ) {
static int proc_BSSList_open( struct inode *inode, struct file *file ) {
struct proc_data *data;
- struct proc_dir_entry *dp = PDE(inode);
- struct net_device *dev = dp->data;
+ struct net_device *dev = PDE_DATA(inode);
struct airo_info *ai = dev->ml_priv;
char *ptr;
BSSListRid BSSList_rid;
@@ -5706,10 +5662,8 @@ static int __init airo_init_module( void )
airo_entry = proc_mkdir_mode("driver/aironet", airo_perm, NULL);
- if (airo_entry) {
- airo_entry->uid = proc_kuid;
- airo_entry->gid = proc_kgid;
- }
+ if (airo_entry)
+ proc_set_user(airo_entry, proc_kuid, proc_kgid);
for (i = 0; i < 4 && io[i] && irq[i]; i++) {
airo_print_info("", "Trying to configure ISA adapter at irq=%d "
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 4be07f5..727b1f5 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -216,7 +216,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get,
wil_debugfs_iomem_x32_set, "0x%08llx\n");
static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
- mode_t mode,
+ umode_t mode,
struct dentry *parent,
void __iomem *value)
{
@@ -359,7 +359,7 @@ static const struct file_operations fops_ioblob = {
static
struct dentry *wil_debugfs_create_ioblob(const char *name,
- mode_t mode,
+ umode_t mode,
struct dentry *parent,
struct debugfs_blob_wrapper *blob)
{
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 4374079..830bb1d 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -63,6 +63,7 @@
#include <net/iw_handler.h>
#include <linux/crc32.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
@@ -1409,30 +1410,28 @@ static int atmel_validate_channel(struct atmel_private *priv, int channel)
return 0;
}
-static int atmel_proc_output (char *buf, struct atmel_private *priv)
+static int atmel_proc_show(struct seq_file *m, void *v)
{
+ struct atmel_private *priv = m->private;
int i;
- char *p = buf;
char *s, *r, *c;
- p += sprintf(p, "Driver version:\t\t%d.%d\n",
- DRIVER_MAJOR, DRIVER_MINOR);
+ seq_printf(m, "Driver version:\t\t%d.%d\n", DRIVER_MAJOR, DRIVER_MINOR);
if (priv->station_state != STATION_STATE_DOWN) {
- p += sprintf(p, "Firmware version:\t%d.%d build %d\n"
- "Firmware location:\t",
- priv->host_info.major_version,
- priv->host_info.minor_version,
- priv->host_info.build_version);
+ seq_printf(m,
+ "Firmware version:\t%d.%d build %d\n"
+ "Firmware location:\t",
+ priv->host_info.major_version,
+ priv->host_info.minor_version,
+ priv->host_info.build_version);
if (priv->card_type != CARD_TYPE_EEPROM)
- p += sprintf(p, "on card\n");
+ seq_puts(m, "on card\n");
else if (priv->firmware)
- p += sprintf(p, "%s loaded by host\n",
- priv->firmware_id);
+ seq_printf(m, "%s loaded by host\n", priv->firmware_id);
else
- p += sprintf(p, "%s loaded by hotplug\n",
- priv->firmware_id);
+ seq_printf(m, "%s loaded by hotplug\n", priv->firmware_id);
switch (priv->card_type) {
case CARD_TYPE_PARALLEL_FLASH:
@@ -1453,12 +1452,12 @@ static int atmel_proc_output (char *buf, struct atmel_private *priv)
if (priv->reg_domain == channel_table[i].reg_domain)
r = channel_table[i].name;
- p += sprintf(p, "MAC memory type:\t%s\n", c);
- p += sprintf(p, "Regulatory domain:\t%s\n", r);
- p += sprintf(p, "Host CRC checking:\t%s\n",
- priv->do_rx_crc ? "On" : "Off");
- p += sprintf(p, "WPA-capable firmware:\t%s\n",
- priv->use_wpa ? "Yes" : "No");
+ seq_printf(m, "MAC memory type:\t%s\n", c);
+ seq_printf(m, "Regulatory domain:\t%s\n", r);
+ seq_printf(m, "Host CRC checking:\t%s\n",
+ priv->do_rx_crc ? "On" : "Off");
+ seq_printf(m, "WPA-capable firmware:\t%s\n",
+ priv->use_wpa ? "Yes" : "No");
}
switch (priv->station_state) {
@@ -1490,26 +1489,22 @@ static int atmel_proc_output (char *buf, struct atmel_private *priv)
s = "<unknown>";
}
- p += sprintf(p, "Current state:\t\t%s\n", s);
- return p - buf;
+ seq_printf(m, "Current state:\t\t%s\n", s);
+ return 0;
}
-static int atmel_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int atmel_proc_open(struct inode *inode, struct file *file)
{
- struct atmel_private *priv = data;
- int len = atmel_proc_output (page, priv);
- if (len <= off+count)
- *eof = 1;
- *start = page + off;
- len -= off;
- if (len > count)
- len = count;
- if (len < 0)
- len = 0;
- return len;
+ return single_open(file, atmel_proc_show, PDE_DATA(inode));
}
+static const struct file_operations atmel_proc_fops = {
+ .open = atmel_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static const struct net_device_ops atmel_netdev_ops = {
.ndo_open = atmel_open,
.ndo_stop = atmel_close,
@@ -1525,7 +1520,6 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
struct device *sys_dev,
int (*card_present)(void *), void *card)
{
- struct proc_dir_entry *ent;
struct net_device *dev;
struct atmel_private *priv;
int rc;
@@ -1630,8 +1624,7 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
netif_carrier_off(dev);
- ent = create_proc_read_entry ("driver/atmel", 0, NULL, atmel_read_proc, priv);
- if (!ent)
+ if (!proc_create_data("driver/atmel", 0, NULL, &atmel_proc_fops, priv))
printk(KERN_WARNING "atmel: unable to create /proc entry.\n");
printk(KERN_INFO "%s: Atmel at76c50x. Version %d.%d. MAC %pM\n",
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index 2b90da0..e7a1a47 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -1117,7 +1117,7 @@ static void brcmf_p2p_afx_handler(struct work_struct *work)
if (afx_hdl->is_listen && afx_hdl->my_listen_chan)
/* 100ms ~ 300ms */
err = brcmf_p2p_discover_listen(p2p, afx_hdl->my_listen_chan,
- 100 * (1 + (random32() % 3)));
+ 100 * (1 + prandom_u32() % 3));
else
err = brcmf_p2p_act_frm_search(p2p, afx_hdl->peer_listen_chan);
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index dd9a18f..d6033a8 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -17,6 +17,7 @@
*/
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/if_arp.h>
@@ -64,28 +65,32 @@ static void prism2_send_mgmt(struct net_device *dev,
#ifndef PRISM2_NO_PROCFS_DEBUG
-static int ap_debug_proc_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- char *p = page;
- struct ap_data *ap = (struct ap_data *) data;
-
- if (off != 0) {
- *eof = 1;
- return 0;
- }
-
- p += sprintf(p, "BridgedUnicastFrames=%u\n", ap->bridged_unicast);
- p += sprintf(p, "BridgedMulticastFrames=%u\n", ap->bridged_multicast);
- p += sprintf(p, "max_inactivity=%u\n", ap->max_inactivity / HZ);
- p += sprintf(p, "bridge_packets=%u\n", ap->bridge_packets);
- p += sprintf(p, "nullfunc_ack=%u\n", ap->nullfunc_ack);
- p += sprintf(p, "autom_ap_wds=%u\n", ap->autom_ap_wds);
- p += sprintf(p, "auth_algs=%u\n", ap->local->auth_algs);
- p += sprintf(p, "tx_drop_nonassoc=%u\n", ap->tx_drop_nonassoc);
+static int ap_debug_proc_show(struct seq_file *m, void *v)
+{
+ struct ap_data *ap = m->private;
+
+ seq_printf(m, "BridgedUnicastFrames=%u\n", ap->bridged_unicast);
+ seq_printf(m, "BridgedMulticastFrames=%u\n", ap->bridged_multicast);
+ seq_printf(m, "max_inactivity=%u\n", ap->max_inactivity / HZ);
+ seq_printf(m, "bridge_packets=%u\n", ap->bridge_packets);
+ seq_printf(m, "nullfunc_ack=%u\n", ap->nullfunc_ack);
+ seq_printf(m, "autom_ap_wds=%u\n", ap->autom_ap_wds);
+ seq_printf(m, "auth_algs=%u\n", ap->local->auth_algs);
+ seq_printf(m, "tx_drop_nonassoc=%u\n", ap->tx_drop_nonassoc);
+ return 0;
+}
- return (p - page);
+static int ap_debug_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ap_debug_proc_show, PDE_DATA(inode));
}
+
+static const struct file_operations ap_debug_proc_fops = {
+ .open = ap_debug_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#endif /* PRISM2_NO_PROCFS_DEBUG */
@@ -325,50 +330,81 @@ void hostap_deauth_all_stas(struct net_device *dev, struct ap_data *ap,
}
-static int ap_control_proc_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int ap_control_proc_show(struct seq_file *m, void *v)
{
- char *p = page;
- struct ap_data *ap = (struct ap_data *) data;
+ struct ap_data *ap = m->private;
char *policy_txt;
struct mac_entry *entry;
- if (off != 0) {
- *eof = 1;
+ if (v == SEQ_START_TOKEN) {
+ switch (ap->mac_restrictions.policy) {
+ case MAC_POLICY_OPEN:
+ policy_txt = "open";
+ break;
+ case MAC_POLICY_ALLOW:
+ policy_txt = "allow";
+ break;
+ case MAC_POLICY_DENY:
+ policy_txt = "deny";
+ break;
+ default:
+ policy_txt = "unknown";
+ break;
+ }
+ seq_printf(m, "MAC policy: %s\n", policy_txt);
+ seq_printf(m, "MAC entries: %u\n", ap->mac_restrictions.entries);
+ seq_puts(m, "MAC list:\n");
return 0;
}
- switch (ap->mac_restrictions.policy) {
- case MAC_POLICY_OPEN:
- policy_txt = "open";
- break;
- case MAC_POLICY_ALLOW:
- policy_txt = "allow";
- break;
- case MAC_POLICY_DENY:
- policy_txt = "deny";
- break;
- default:
- policy_txt = "unknown";
- break;
- }
- p += sprintf(p, "MAC policy: %s\n", policy_txt);
- p += sprintf(p, "MAC entries: %u\n", ap->mac_restrictions.entries);
- p += sprintf(p, "MAC list:\n");
+ entry = v;
+ seq_printf(m, "%pM\n", entry->addr);
+ return 0;
+}
+
+static void *ap_control_proc_start(struct seq_file *m, loff_t *_pos)
+{
+ struct ap_data *ap = m->private;
spin_lock_bh(&ap->mac_restrictions.lock);
- list_for_each_entry(entry, &ap->mac_restrictions.mac_list, list) {
- if (p - page > PAGE_SIZE - 80) {
- p += sprintf(p, "All entries did not fit one page.\n");
- break;
- }
+ return seq_list_start_head(&ap->mac_restrictions.mac_list, *_pos);
+}
- p += sprintf(p, "%pM\n", entry->addr);
- }
+static void *ap_control_proc_next(struct seq_file *m, void *v, loff_t *_pos)
+{
+ struct ap_data *ap = m->private;
+ return seq_list_next(v, &ap->mac_restrictions.mac_list, _pos);
+}
+
+static void ap_control_proc_stop(struct seq_file *m, void *v)
+{
+ struct ap_data *ap = m->private;
spin_unlock_bh(&ap->mac_restrictions.lock);
+}
- return (p - page);
+static const struct seq_operations ap_control_proc_seqops = {
+ .start = ap_control_proc_start,
+ .next = ap_control_proc_next,
+ .stop = ap_control_proc_stop,
+ .show = ap_control_proc_show,
+};
+
+static int ap_control_proc_open(struct inode *inode, struct file *file)
+{
+ int ret = seq_open(file, &ap_control_proc_seqops);
+ if (ret == 0) {
+ struct seq_file *m = file->private_data;
+ m->private = PDE_DATA(inode);
+ }
+ return ret;
}
+static const struct file_operations ap_control_proc_fops = {
+ .open = ap_control_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
{
@@ -510,61 +546,84 @@ void ap_control_kickall(struct ap_data *ap)
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-#define PROC_LIMIT (PAGE_SIZE - 80)
-
-static int prism2_ap_proc_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int prism2_ap_proc_show(struct seq_file *m, void *v)
{
- char *p = page;
- struct ap_data *ap = (struct ap_data *) data;
- struct sta_info *sta;
+ struct sta_info *sta = v;
int i;
- if (off > PROC_LIMIT) {
- *eof = 1;
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(m, "# BSSID CHAN SIGNAL NOISE RATE SSID FLAGS\n");
return 0;
}
- p += sprintf(p, "# BSSID CHAN SIGNAL NOISE RATE SSID FLAGS\n");
- spin_lock_bh(&ap->sta_table_lock);
- list_for_each_entry(sta, &ap->sta_list, list) {
- if (!sta->ap)
- continue;
+ if (!sta->ap)
+ return 0;
- p += sprintf(p, "%pM %d %d %d %d '",
- sta->addr,
- sta->u.ap.channel, sta->last_rx_signal,
- sta->last_rx_silence, sta->last_rx_rate);
- for (i = 0; i < sta->u.ap.ssid_len; i++)
- p += sprintf(p, ((sta->u.ap.ssid[i] >= 32 &&
- sta->u.ap.ssid[i] < 127) ?
- "%c" : "<%02x>"),
- sta->u.ap.ssid[i]);
- p += sprintf(p, "'");
- if (sta->capability & WLAN_CAPABILITY_ESS)
- p += sprintf(p, " [ESS]");
- if (sta->capability & WLAN_CAPABILITY_IBSS)
- p += sprintf(p, " [IBSS]");
- if (sta->capability & WLAN_CAPABILITY_PRIVACY)
- p += sprintf(p, " [WEP]");
- p += sprintf(p, "\n");
-
- if ((p - page) > PROC_LIMIT) {
- printk(KERN_DEBUG "hostap: ap proc did not fit\n");
- break;
- }
- }
- spin_unlock_bh(&ap->sta_table_lock);
+ seq_printf(m, "%pM %d %d %d %d '",
+ sta->addr,
+ sta->u.ap.channel, sta->last_rx_signal,
+ sta->last_rx_silence, sta->last_rx_rate);
- if ((p - page) <= off) {
- *eof = 1;
- return 0;
+ for (i = 0; i < sta->u.ap.ssid_len; i++) {
+ if (sta->u.ap.ssid[i] >= 32 && sta->u.ap.ssid[i] < 127)
+ seq_putc(m, sta->u.ap.ssid[i]);
+ else
+ seq_printf(m, "<%02x>", sta->u.ap.ssid[i]);
}
- *start = page + off;
+ seq_putc(m, '\'');
+ if (sta->capability & WLAN_CAPABILITY_ESS)
+ seq_puts(m, " [ESS]");
+ if (sta->capability & WLAN_CAPABILITY_IBSS)
+ seq_puts(m, " [IBSS]");
+ if (sta->capability & WLAN_CAPABILITY_PRIVACY)
+ seq_puts(m, " [WEP]");
+ seq_putc(m, '\n');
+ return 0;
+}
+
+static void *prism2_ap_proc_start(struct seq_file *m, loff_t *_pos)
+{
+ struct ap_data *ap = m->private;
+ spin_lock_bh(&ap->sta_table_lock);
+ return seq_list_start_head(&ap->sta_list, *_pos);
+}
- return (p - page - off);
+static void *prism2_ap_proc_next(struct seq_file *m, void *v, loff_t *_pos)
+{
+ struct ap_data *ap = m->private;
+ return seq_list_next(v, &ap->sta_list, _pos);
}
+
+static void prism2_ap_proc_stop(struct seq_file *m, void *v)
+{
+ struct ap_data *ap = m->private;
+ spin_unlock_bh(&ap->sta_table_lock);
+}
+
+static const struct seq_operations prism2_ap_proc_seqops = {
+ .start = prism2_ap_proc_start,
+ .next = prism2_ap_proc_next,
+ .stop = prism2_ap_proc_stop,
+ .show = prism2_ap_proc_show,
+};
+
+static int prism2_ap_proc_open(struct inode *inode, struct file *file)
+{
+ int ret = seq_open(file, &prism2_ap_proc_seqops);
+ if (ret == 0) {
+ struct seq_file *m = file->private_data;
+ m->private = PDE_DATA(inode);
+ }
+ return ret;
+}
+
+static const struct file_operations prism2_ap_proc_fops = {
+ .open = prism2_ap_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
@@ -836,15 +895,12 @@ void hostap_init_ap_proc(local_info_t *local)
return;
#ifndef PRISM2_NO_PROCFS_DEBUG
- create_proc_read_entry("ap_debug", 0, ap->proc,
- ap_debug_proc_read, ap);
+ proc_create_data("ap_debug", 0, ap->proc, &ap_debug_proc_fops, ap);
#endif /* PRISM2_NO_PROCFS_DEBUG */
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- create_proc_read_entry("ap_control", 0, ap->proc,
- ap_control_proc_read, ap);
- create_proc_read_entry("ap", 0, ap->proc,
- prism2_ap_proc_read, ap);
+ proc_create_data("ap_control", 0, ap->proc, &ap_control_proc_fops, ap);
+ proc_create_data("ap", 0, ap->proc, &prism2_ap_proc_fops, ap);
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
}
@@ -982,79 +1038,86 @@ static void prism2_send_mgmt(struct net_device *dev,
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-static int prism2_sta_proc_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int prism2_sta_proc_show(struct seq_file *m, void *v)
{
- char *p = page;
- struct sta_info *sta = (struct sta_info *) data;
+ struct sta_info *sta = m->private;
int i;
/* FIX: possible race condition.. the STA data could have just expired,
* but proc entry was still here so that the read could have started;
* some locking should be done here.. */
- if (off != 0) {
- *eof = 1;
- return 0;
- }
-
- p += sprintf(p, "%s=%pM\nusers=%d\naid=%d\n"
- "flags=0x%04x%s%s%s%s%s%s%s\n"
- "capability=0x%02x\nlisten_interval=%d\nsupported_rates=",
- sta->ap ? "AP" : "STA",
- sta->addr, atomic_read(&sta->users), sta->aid,
- sta->flags,
- sta->flags & WLAN_STA_AUTH ? " AUTH" : "",
- sta->flags & WLAN_STA_ASSOC ? " ASSOC" : "",
- sta->flags & WLAN_STA_PS ? " PS" : "",
- sta->flags & WLAN_STA_TIM ? " TIM" : "",
- sta->flags & WLAN_STA_PERM ? " PERM" : "",
- sta->flags & WLAN_STA_AUTHORIZED ? " AUTHORIZED" : "",
- sta->flags & WLAN_STA_PENDING_POLL ? " POLL" : "",
- sta->capability, sta->listen_interval);
+ seq_printf(m,
+ "%s=%pM\nusers=%d\naid=%d\n"
+ "flags=0x%04x%s%s%s%s%s%s%s\n"
+ "capability=0x%02x\nlisten_interval=%d\nsupported_rates=",
+ sta->ap ? "AP" : "STA",
+ sta->addr, atomic_read(&sta->users), sta->aid,
+ sta->flags,
+ sta->flags & WLAN_STA_AUTH ? " AUTH" : "",
+ sta->flags & WLAN_STA_ASSOC ? " ASSOC" : "",
+ sta->flags & WLAN_STA_PS ? " PS" : "",
+ sta->flags & WLAN_STA_TIM ? " TIM" : "",
+ sta->flags & WLAN_STA_PERM ? " PERM" : "",
+ sta->flags & WLAN_STA_AUTHORIZED ? " AUTHORIZED" : "",
+ sta->flags & WLAN_STA_PENDING_POLL ? " POLL" : "",
+ sta->capability, sta->listen_interval);
/* supported_rates: 500 kbit/s units with msb ignored */
for (i = 0; i < sizeof(sta->supported_rates); i++)
if (sta->supported_rates[i] != 0)
- p += sprintf(p, "%d%sMbps ",
- (sta->supported_rates[i] & 0x7f) / 2,
- sta->supported_rates[i] & 1 ? ".5" : "");
- p += sprintf(p, "\njiffies=%lu\nlast_auth=%lu\nlast_assoc=%lu\n"
- "last_rx=%lu\nlast_tx=%lu\nrx_packets=%lu\n"
- "tx_packets=%lu\n"
- "rx_bytes=%lu\ntx_bytes=%lu\nbuffer_count=%d\n"
- "last_rx: silence=%d dBm signal=%d dBm rate=%d%s Mbps\n"
- "tx_rate=%d\ntx[1M]=%d\ntx[2M]=%d\ntx[5.5M]=%d\n"
- "tx[11M]=%d\n"
- "rx[1M]=%d\nrx[2M]=%d\nrx[5.5M]=%d\nrx[11M]=%d\n",
- jiffies, sta->last_auth, sta->last_assoc, sta->last_rx,
- sta->last_tx,
- sta->rx_packets, sta->tx_packets, sta->rx_bytes,
- sta->tx_bytes, skb_queue_len(&sta->tx_buf),
- sta->last_rx_silence,
- sta->last_rx_signal, sta->last_rx_rate / 10,
- sta->last_rx_rate % 10 ? ".5" : "",
- sta->tx_rate, sta->tx_count[0], sta->tx_count[1],
- sta->tx_count[2], sta->tx_count[3], sta->rx_count[0],
- sta->rx_count[1], sta->rx_count[2], sta->rx_count[3]);
+ seq_printf(m, "%d%sMbps ",
+ (sta->supported_rates[i] & 0x7f) / 2,
+ sta->supported_rates[i] & 1 ? ".5" : "");
+ seq_printf(m,
+ "\njiffies=%lu\nlast_auth=%lu\nlast_assoc=%lu\n"
+ "last_rx=%lu\nlast_tx=%lu\nrx_packets=%lu\n"
+ "tx_packets=%lu\n"
+ "rx_bytes=%lu\ntx_bytes=%lu\nbuffer_count=%d\n"
+ "last_rx: silence=%d dBm signal=%d dBm rate=%d%s Mbps\n"
+ "tx_rate=%d\ntx[1M]=%d\ntx[2M]=%d\ntx[5.5M]=%d\n"
+ "tx[11M]=%d\n"
+ "rx[1M]=%d\nrx[2M]=%d\nrx[5.5M]=%d\nrx[11M]=%d\n",
+ jiffies, sta->last_auth, sta->last_assoc, sta->last_rx,
+ sta->last_tx,
+ sta->rx_packets, sta->tx_packets, sta->rx_bytes,
+ sta->tx_bytes, skb_queue_len(&sta->tx_buf),
+ sta->last_rx_silence,
+ sta->last_rx_signal, sta->last_rx_rate / 10,
+ sta->last_rx_rate % 10 ? ".5" : "",
+ sta->tx_rate, sta->tx_count[0], sta->tx_count[1],
+ sta->tx_count[2], sta->tx_count[3], sta->rx_count[0],
+ sta->rx_count[1], sta->rx_count[2], sta->rx_count[3]);
if (sta->crypt && sta->crypt->ops && sta->crypt->ops->print_stats)
- p = sta->crypt->ops->print_stats(p, sta->crypt->priv);
+ sta->crypt->ops->print_stats(m, sta->crypt->priv);
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
if (sta->ap) {
if (sta->u.ap.channel >= 0)
- p += sprintf(p, "channel=%d\n", sta->u.ap.channel);
- p += sprintf(p, "ssid=");
- for (i = 0; i < sta->u.ap.ssid_len; i++)
- p += sprintf(p, ((sta->u.ap.ssid[i] >= 32 &&
- sta->u.ap.ssid[i] < 127) ?
- "%c" : "<%02x>"),
- sta->u.ap.ssid[i]);
- p += sprintf(p, "\n");
+ seq_printf(m, "channel=%d\n", sta->u.ap.channel);
+ seq_puts(m, "ssid=");
+ for (i = 0; i < sta->u.ap.ssid_len; i++) {
+ if (sta->u.ap.ssid[i] >= 32 && sta->u.ap.ssid[i] < 127)
+ seq_putc(m, sta->u.ap.ssid[i]);
+ else
+ seq_printf(m, "<%02x>", sta->u.ap.ssid[i]);
+ }
+ seq_putc(m, '\n');
}
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
- return (p - page);
+ return 0;
+}
+
+static int prism2_sta_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, prism2_sta_proc_show, PDE_DATA(inode));
}
+static const struct file_operations prism2_sta_proc_fops = {
+ .open = prism2_sta_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
static void handle_add_proc_queue(struct work_struct *work)
{
@@ -1076,9 +1139,9 @@ static void handle_add_proc_queue(struct work_struct *work)
if (sta) {
sprintf(name, "%pM", sta->addr);
- sta->proc = create_proc_read_entry(
+ sta->proc = proc_create_data(
name, 0, ap->proc,
- prism2_sta_proc_read, sta);
+ &prism2_sta_proc_fops, sta);
atomic_dec(&sta->users);
}
diff --git a/drivers/net/wireless/hostap/hostap_download.c b/drivers/net/wireless/hostap/hostap_download.c
index e73bf73..705fe66 100644
--- a/drivers/net/wireless/hostap/hostap_download.c
+++ b/drivers/net/wireless/hostap/hostap_download.c
@@ -174,20 +174,70 @@ static int prism2_pda_ok(u8 *buf)
}
-static int prism2_download_aux_dump(struct net_device *dev,
- unsigned int addr, int len, u8 *buf)
-{
- int res;
+#define prism2_download_aux_dump_npages 65536
- prism2_enable_aux_port(dev, 1);
- res = hfa384x_from_aux(dev, addr, len, buf);
- prism2_enable_aux_port(dev, 0);
- if (res)
- return -1;
+struct prism2_download_aux_dump {
+ local_info_t *local;
+ u16 page[0x80];
+};
+
+static int prism2_download_aux_dump_proc_show(struct seq_file *m, void *v)
+{
+ struct prism2_download_aux_dump *ctx = m->private;
+ hfa384x_from_aux(ctx->local->dev, (unsigned long)v - 1, 0x80, ctx->page);
+ seq_write(m, ctx->page, 0x80);
return 0;
}
+static void *prism2_download_aux_dump_proc_start(struct seq_file *m, loff_t *_pos)
+{
+ struct prism2_download_aux_dump *ctx = m->private;
+ prism2_enable_aux_port(ctx->local->dev, 1);
+ if (*_pos >= prism2_download_aux_dump_npages)
+ return NULL;
+ return (void *)((unsigned long)*_pos + 1);
+}
+
+static void *prism2_download_aux_dump_proc_next(struct seq_file *m, void *v, loff_t *_pos)
+{
+ ++*_pos;
+ if (*_pos >= prism2_download_aux_dump_npages)
+ return NULL;
+ return (void *)((unsigned long)*_pos + 1);
+}
+
+static void prism2_download_aux_dump_proc_stop(struct seq_file *m, void *v)
+{
+ struct prism2_download_aux_dump *ctx = m->private;
+ prism2_enable_aux_port(ctx->local->dev, 0);
+}
+
+static const struct seq_operations prism2_download_aux_dump_proc_seqops = {
+ .start = prism2_download_aux_dump_proc_start,
+ .next = prism2_download_aux_dump_proc_next,
+ .stop = prism2_download_aux_dump_proc_stop,
+ .show = prism2_download_aux_dump_proc_show,
+};
+
+static int prism2_download_aux_dump_proc_open(struct inode *inode, struct file *file)
+{
+ int ret = seq_open_private(file, &prism2_download_aux_dump_proc_seqops,
+ sizeof(struct prism2_download_aux_dump));
+ if (ret == 0) {
+ struct seq_file *m = file->private_data;
+ struct prism2_download_aux_dump *ctx = m->private;
+ ctx->local = PDE_DATA(inode);
+ }
+ return ret;
+}
+
+static const struct file_operations prism2_download_aux_dump_proc_fops = {
+ .open = prism2_download_aux_dump_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
static u8 * prism2_read_pda(struct net_device *dev)
{
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index 8e7000f..6307a4e 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -38,6 +38,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/random.h>
@@ -129,8 +130,7 @@ static void prism2_check_sta_fw_version(local_info_t *local);
#ifdef PRISM2_DOWNLOAD_SUPPORT
/* hostap_download.c */
-static int prism2_download_aux_dump(struct net_device *dev,
- unsigned int addr, int len, u8 *buf);
+static const struct file_operations prism2_download_aux_dump_proc_fops;
static u8 * prism2_read_pda(struct net_device *dev);
static int prism2_download(local_info_t *local,
struct prism2_download_param *param);
@@ -2894,19 +2894,12 @@ static void hostap_tick_timer(unsigned long data)
#ifndef PRISM2_NO_PROCFS_DEBUG
-static int prism2_registers_proc_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int prism2_registers_proc_show(struct seq_file *m, void *v)
{
- char *p = page;
- local_info_t *local = (local_info_t *) data;
-
- if (off != 0) {
- *eof = 1;
- return 0;
- }
+ local_info_t *local = m->private;
#define SHOW_REG(n) \
-p += sprintf(p, #n "=%04x\n", hfa384x_read_reg(local->dev, HFA384X_##n##_OFF))
+ seq_printf(m, #n "=%04x\n", hfa384x_read_reg(local->dev, HFA384X_##n##_OFF))
SHOW_REG(CMD);
SHOW_REG(PARAM0);
@@ -2952,8 +2945,21 @@ p += sprintf(p, #n "=%04x\n", hfa384x_read_reg(local->dev, HFA384X_##n##_OFF))
SHOW_REG(PCI_M1_CTL);
#endif /* PRISM2_PCI */
- return (p - page);
+ return 0;
}
+
+static int prism2_registers_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, prism2_registers_proc_show, PDE_DATA(inode));
+}
+
+static const struct file_operations prism2_registers_proc_fops = {
+ .open = prism2_registers_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
#endif /* PRISM2_NO_PROCFS_DEBUG */
@@ -3128,7 +3134,7 @@ prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
local->func->reset_port = prism2_reset_port;
local->func->schedule_reset = prism2_schedule_reset;
#ifdef PRISM2_DOWNLOAD_SUPPORT
- local->func->read_aux = prism2_download_aux_dump;
+ local->func->read_aux_fops = &prism2_download_aux_dump_proc_fops;
local->func->download = prism2_download;
#endif /* PRISM2_DOWNLOAD_SUPPORT */
local->func->tx = prism2_tx_80211;
@@ -3274,8 +3280,8 @@ static int hostap_hw_ready(struct net_device *dev)
}
hostap_init_proc(local);
#ifndef PRISM2_NO_PROCFS_DEBUG
- create_proc_read_entry("registers", 0, local->proc,
- prism2_registers_proc_read, local);
+ proc_create_data("registers", 0, local->proc,
+ &prism2_registers_proc_fops, local);
#endif /* PRISM2_NO_PROCFS_DEBUG */
hostap_init_ap_proc(local);
return 0;
diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c
index dc447c1..aa7ad3a 100644
--- a/drivers/net/wireless/hostap/hostap_proc.c
+++ b/drivers/net/wireless/hostap/hostap_proc.c
@@ -12,259 +12,297 @@
#ifndef PRISM2_NO_PROCFS_DEBUG
-static int prism2_debug_proc_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int prism2_debug_proc_show(struct seq_file *m, void *v)
{
- char *p = page;
- local_info_t *local = (local_info_t *) data;
+ local_info_t *local = m->private;
int i;
- if (off != 0) {
- *eof = 1;
- return 0;
- }
-
- p += sprintf(p, "next_txfid=%d next_alloc=%d\n",
- local->next_txfid, local->next_alloc);
+ seq_printf(m, "next_txfid=%d next_alloc=%d\n",
+ local->next_txfid, local->next_alloc);
for (i = 0; i < PRISM2_TXFID_COUNT; i++)
- p += sprintf(p, "FID: tx=%04X intransmit=%04X\n",
- local->txfid[i], local->intransmitfid[i]);
- p += sprintf(p, "FW TX rate control: %d\n", local->fw_tx_rate_control);
- p += sprintf(p, "beacon_int=%d\n", local->beacon_int);
- p += sprintf(p, "dtim_period=%d\n", local->dtim_period);
- p += sprintf(p, "wds_max_connections=%d\n",
- local->wds_max_connections);
- p += sprintf(p, "dev_enabled=%d\n", local->dev_enabled);
- p += sprintf(p, "sw_tick_stuck=%d\n", local->sw_tick_stuck);
+ seq_printf(m, "FID: tx=%04X intransmit=%04X\n",
+ local->txfid[i], local->intransmitfid[i]);
+ seq_printf(m, "FW TX rate control: %d\n", local->fw_tx_rate_control);
+ seq_printf(m, "beacon_int=%d\n", local->beacon_int);
+ seq_printf(m, "dtim_period=%d\n", local->dtim_period);
+ seq_printf(m, "wds_max_connections=%d\n", local->wds_max_connections);
+ seq_printf(m, "dev_enabled=%d\n", local->dev_enabled);
+ seq_printf(m, "sw_tick_stuck=%d\n", local->sw_tick_stuck);
for (i = 0; i < WEP_KEYS; i++) {
if (local->crypt_info.crypt[i] &&
local->crypt_info.crypt[i]->ops) {
- p += sprintf(p, "crypt[%d]=%s\n", i,
- local->crypt_info.crypt[i]->ops->name);
+ seq_printf(m, "crypt[%d]=%s\n", i,
+ local->crypt_info.crypt[i]->ops->name);
}
}
- p += sprintf(p, "pri_only=%d\n", local->pri_only);
- p += sprintf(p, "pci=%d\n", local->func->hw_type == HOSTAP_HW_PCI);
- p += sprintf(p, "sram_type=%d\n", local->sram_type);
- p += sprintf(p, "no_pri=%d\n", local->no_pri);
+ seq_printf(m, "pri_only=%d\n", local->pri_only);
+ seq_printf(m, "pci=%d\n", local->func->hw_type == HOSTAP_HW_PCI);
+ seq_printf(m, "sram_type=%d\n", local->sram_type);
+ seq_printf(m, "no_pri=%d\n", local->no_pri);
- return (p - page);
+ return 0;
}
+
+static int prism2_debug_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, prism2_debug_proc_show, PDE_DATA(inode));
+}
+
+static const struct file_operations prism2_debug_proc_fops = {
+ .open = prism2_debug_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#endif /* PRISM2_NO_PROCFS_DEBUG */
-static int prism2_stats_proc_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int prism2_stats_proc_show(struct seq_file *m, void *v)
{
- char *p = page;
- local_info_t *local = (local_info_t *) data;
+ local_info_t *local = m->private;
struct comm_tallies_sums *sums = &local->comm_tallies;
- if (off != 0) {
- *eof = 1;
- return 0;
- }
-
- p += sprintf(p, "TxUnicastFrames=%u\n", sums->tx_unicast_frames);
- p += sprintf(p, "TxMulticastframes=%u\n", sums->tx_multicast_frames);
- p += sprintf(p, "TxFragments=%u\n", sums->tx_fragments);
- p += sprintf(p, "TxUnicastOctets=%u\n", sums->tx_unicast_octets);
- p += sprintf(p, "TxMulticastOctets=%u\n", sums->tx_multicast_octets);
- p += sprintf(p, "TxDeferredTransmissions=%u\n",
- sums->tx_deferred_transmissions);
- p += sprintf(p, "TxSingleRetryFrames=%u\n",
- sums->tx_single_retry_frames);
- p += sprintf(p, "TxMultipleRetryFrames=%u\n",
- sums->tx_multiple_retry_frames);
- p += sprintf(p, "TxRetryLimitExceeded=%u\n",
- sums->tx_retry_limit_exceeded);
- p += sprintf(p, "TxDiscards=%u\n", sums->tx_discards);
- p += sprintf(p, "RxUnicastFrames=%u\n", sums->rx_unicast_frames);
- p += sprintf(p, "RxMulticastFrames=%u\n", sums->rx_multicast_frames);
- p += sprintf(p, "RxFragments=%u\n", sums->rx_fragments);
- p += sprintf(p, "RxUnicastOctets=%u\n", sums->rx_unicast_octets);
- p += sprintf(p, "RxMulticastOctets=%u\n", sums->rx_multicast_octets);
- p += sprintf(p, "RxFCSErrors=%u\n", sums->rx_fcs_errors);
- p += sprintf(p, "RxDiscardsNoBuffer=%u\n",
- sums->rx_discards_no_buffer);
- p += sprintf(p, "TxDiscardsWrongSA=%u\n", sums->tx_discards_wrong_sa);
- p += sprintf(p, "RxDiscardsWEPUndecryptable=%u\n",
- sums->rx_discards_wep_undecryptable);
- p += sprintf(p, "RxMessageInMsgFragments=%u\n",
- sums->rx_message_in_msg_fragments);
- p += sprintf(p, "RxMessageInBadMsgFragments=%u\n",
- sums->rx_message_in_bad_msg_fragments);
+ seq_printf(m, "TxUnicastFrames=%u\n", sums->tx_unicast_frames);
+ seq_printf(m, "TxMulticastframes=%u\n", sums->tx_multicast_frames);
+ seq_printf(m, "TxFragments=%u\n", sums->tx_fragments);
+ seq_printf(m, "TxUnicastOctets=%u\n", sums->tx_unicast_octets);
+ seq_printf(m, "TxMulticastOctets=%u\n", sums->tx_multicast_octets);
+ seq_printf(m, "TxDeferredTransmissions=%u\n",
+ sums->tx_deferred_transmissions);
+ seq_printf(m, "TxSingleRetryFrames=%u\n", sums->tx_single_retry_frames);
+ seq_printf(m, "TxMultipleRetryFrames=%u\n",
+ sums->tx_multiple_retry_frames);
+ seq_printf(m, "TxRetryLimitExceeded=%u\n",
+ sums->tx_retry_limit_exceeded);
+ seq_printf(m, "TxDiscards=%u\n", sums->tx_discards);
+ seq_printf(m, "RxUnicastFrames=%u\n", sums->rx_unicast_frames);
+ seq_printf(m, "RxMulticastFrames=%u\n", sums->rx_multicast_frames);
+ seq_printf(m, "RxFragments=%u\n", sums->rx_fragments);
+ seq_printf(m, "RxUnicastOctets=%u\n", sums->rx_unicast_octets);
+ seq_printf(m, "RxMulticastOctets=%u\n", sums->rx_multicast_octets);
+ seq_printf(m, "RxFCSErrors=%u\n", sums->rx_fcs_errors);
+ seq_printf(m, "RxDiscardsNoBuffer=%u\n", sums->rx_discards_no_buffer);
+ seq_printf(m, "TxDiscardsWrongSA=%u\n", sums->tx_discards_wrong_sa);
+ seq_printf(m, "RxDiscardsWEPUndecryptable=%u\n",
+ sums->rx_discards_wep_undecryptable);
+ seq_printf(m, "RxMessageInMsgFragments=%u\n",
+ sums->rx_message_in_msg_fragments);
+ seq_printf(m, "RxMessageInBadMsgFragments=%u\n",
+ sums->rx_message_in_bad_msg_fragments);
/* FIX: this may grow too long for one page(?) */
- return (p - page);
+ return 0;
+}
+
+static int prism2_stats_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, prism2_stats_proc_show, PDE_DATA(inode));
}
+static const struct file_operations prism2_stats_proc_fops = {
+ .open = prism2_stats_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
-static int prism2_wds_proc_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+
+static int prism2_wds_proc_show(struct seq_file *m, void *v)
{
- char *p = page;
- local_info_t *local = (local_info_t *) data;
- struct list_head *ptr;
+ struct list_head *ptr = v;
struct hostap_interface *iface;
- if (off > PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
+ iface = list_entry(ptr, struct hostap_interface, list);
+ if (iface->type == HOSTAP_INTERFACE_WDS)
+ seq_printf(m, "%s\t%pM\n",
+ iface->dev->name, iface->u.wds.remote_addr);
+ return 0;
+}
+static void *prism2_wds_proc_start(struct seq_file *m, loff_t *_pos)
+{
+ local_info_t *local = m->private;
read_lock_bh(&local->iface_lock);
- list_for_each(ptr, &local->hostap_interfaces) {
- iface = list_entry(ptr, struct hostap_interface, list);
- if (iface->type != HOSTAP_INTERFACE_WDS)
- continue;
- p += sprintf(p, "%s\t%pM\n",
- iface->dev->name,
- iface->u.wds.remote_addr);
- if ((p - page) > PROC_LIMIT) {
- printk(KERN_DEBUG "%s: wds proc did not fit\n",
- local->dev->name);
- break;
- }
- }
- read_unlock_bh(&local->iface_lock);
+ return seq_list_start(&local->hostap_interfaces, *_pos);
+}
- if ((p - page) <= off) {
- *eof = 1;
- return 0;
- }
+static void *prism2_wds_proc_next(struct seq_file *m, void *v, loff_t *_pos)
+{
+ local_info_t *local = m->private;
+ return seq_list_next(v, &local->hostap_interfaces, _pos);
+}
- *start = page + off;
+static void prism2_wds_proc_stop(struct seq_file *m, void *v)
+{
+ local_info_t *local = m->private;
+ read_unlock_bh(&local->iface_lock);
+}
- return (p - page - off);
+static const struct seq_operations prism2_wds_proc_seqops = {
+ .start = prism2_wds_proc_start,
+ .next = prism2_wds_proc_next,
+ .stop = prism2_wds_proc_stop,
+ .show = prism2_wds_proc_show,
+};
+
+static int prism2_wds_proc_open(struct inode *inode, struct file *file)
+{
+ int ret = seq_open(file, &prism2_wds_proc_seqops);
+ if (ret == 0) {
+ struct seq_file *m = file->private_data;
+ m->private = PDE_DATA(inode);
+ }
+ return ret;
}
+static const struct file_operations prism2_wds_proc_fops = {
+ .open = prism2_wds_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
-static int prism2_bss_list_proc_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+
+static int prism2_bss_list_proc_show(struct seq_file *m, void *v)
{
- char *p = page;
- local_info_t *local = (local_info_t *) data;
- struct list_head *ptr;
+ local_info_t *local = m->private;
+ struct list_head *ptr = v;
struct hostap_bss_info *bss;
int i;
- if (off > PROC_LIMIT) {
- *eof = 1;
+ if (ptr == &local->bss_list) {
+ seq_printf(m, "#BSSID\tlast_update\tcount\tcapab_info\tSSID(txt)\t"
+ "SSID(hex)\tWPA IE\n");
return 0;
}
- p += sprintf(p, "#BSSID\tlast_update\tcount\tcapab_info\tSSID(txt)\t"
- "SSID(hex)\tWPA IE\n");
+ bss = list_entry(ptr, struct hostap_bss_info, list);
+ seq_printf(m, "%pM\t%lu\t%u\t0x%x\t",
+ bss->bssid, bss->last_update,
+ bss->count, bss->capab_info);
+
+ for (i = 0; i < bss->ssid_len; i++)
+ seq_putc(m, bss->ssid[i] >= 32 && bss->ssid[i] < 127 ?
+ bss->ssid[i] : '_');
+
+ seq_putc(m, '\t');
+ for (i = 0; i < bss->ssid_len; i++)
+ seq_printf(m, "%02x", bss->ssid[i]);
+ seq_putc(m, '\t');
+ for (i = 0; i < bss->wpa_ie_len; i++)
+ seq_printf(m, "%02x", bss->wpa_ie[i]);
+ seq_putc(m, '\n');
+ return 0;
+}
+
+static void *prism2_bss_list_proc_start(struct seq_file *m, loff_t *_pos)
+{
+ local_info_t *local = m->private;
spin_lock_bh(&local->lock);
- list_for_each(ptr, &local->bss_list) {
- bss = list_entry(ptr, struct hostap_bss_info, list);
- p += sprintf(p, "%pM\t%lu\t%u\t0x%x\t",
- bss->bssid, bss->last_update,
- bss->count, bss->capab_info);
- for (i = 0; i < bss->ssid_len; i++) {
- p += sprintf(p, "%c",
- bss->ssid[i] >= 32 && bss->ssid[i] < 127 ?
- bss->ssid[i] : '_');
- }
- p += sprintf(p, "\t");
- for (i = 0; i < bss->ssid_len; i++) {
- p += sprintf(p, "%02x", bss->ssid[i]);
- }
- p += sprintf(p, "\t");
- for (i = 0; i < bss->wpa_ie_len; i++) {
- p += sprintf(p, "%02x", bss->wpa_ie[i]);
- }
- p += sprintf(p, "\n");
- if ((p - page) > PROC_LIMIT) {
- printk(KERN_DEBUG "%s: BSS proc did not fit\n",
- local->dev->name);
- break;
- }
- }
- spin_unlock_bh(&local->lock);
+ return seq_list_start_head(&local->bss_list, *_pos);
+}
- if ((p - page) <= off) {
- *eof = 1;
- return 0;
- }
+static void *prism2_bss_list_proc_next(struct seq_file *m, void *v, loff_t *_pos)
+{
+ local_info_t *local = m->private;
+ return seq_list_next(v, &local->bss_list, _pos);
+}
- *start = page + off;
+static void prism2_bss_list_proc_stop(struct seq_file *m, void *v)
+{
+ local_info_t *local = m->private;
+ spin_unlock_bh(&local->lock);
+}
+
+static const struct seq_operations prism2_bss_list_proc_seqops = {
+ .start = prism2_bss_list_proc_start,
+ .next = prism2_bss_list_proc_next,
+ .stop = prism2_bss_list_proc_stop,
+ .show = prism2_bss_list_proc_show,
+};
- return (p - page - off);
+static int prism2_bss_list_proc_open(struct inode *inode, struct file *file)
+{
+ int ret = seq_open(file, &prism2_bss_list_proc_seqops);
+ if (ret == 0) {
+ struct seq_file *m = file->private_data;
+ m->private = PDE_DATA(inode);
+ }
+ return ret;
}
+static const struct file_operations prism2_bss_list_proc_fops = {
+ .open = prism2_bss_list_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
-static int prism2_crypt_proc_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+
+static int prism2_crypt_proc_show(struct seq_file *m, void *v)
{
- char *p = page;
- local_info_t *local = (local_info_t *) data;
+ local_info_t *local = m->private;
int i;
- if (off > PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- p += sprintf(p, "tx_keyidx=%d\n", local->crypt_info.tx_keyidx);
+ seq_printf(m, "tx_keyidx=%d\n", local->crypt_info.tx_keyidx);
for (i = 0; i < WEP_KEYS; i++) {
if (local->crypt_info.crypt[i] &&
local->crypt_info.crypt[i]->ops &&
local->crypt_info.crypt[i]->ops->print_stats) {
- p = local->crypt_info.crypt[i]->ops->print_stats(
- p, local->crypt_info.crypt[i]->priv);
+ local->crypt_info.crypt[i]->ops->print_stats(
+ m, local->crypt_info.crypt[i]->priv);
}
}
+ return 0;
+}
- if ((p - page) <= off) {
- *eof = 1;
- return 0;
- }
-
- *start = page + off;
-
- return (p - page - off);
+static int prism2_crypt_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, prism2_crypt_proc_show, PDE_DATA(inode));
}
+static const struct file_operations prism2_crypt_proc_fops = {
+ .open = prism2_crypt_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
-static int prism2_pda_proc_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+
+static ssize_t prism2_pda_proc_read(struct file *file, char __user *buf,
+ size_t count, loff_t *_pos)
{
- local_info_t *local = (local_info_t *) data;
+ local_info_t *local = PDE_DATA(file_inode(file));
+ size_t off;
- if (local->pda == NULL || off >= PRISM2_PDA_SIZE) {
- *eof = 1;
+ if (local->pda == NULL || *_pos >= PRISM2_PDA_SIZE)
return 0;
- }
- if (off + count > PRISM2_PDA_SIZE)
+ off = *_pos;
+ if (count > PRISM2_PDA_SIZE - off)
count = PRISM2_PDA_SIZE - off;
-
- memcpy(page, local->pda + off, count);
+ if (copy_to_user(buf, local->pda + off, count) != 0)
+ return -EFAULT;
+ *_pos += count;
return count;
}
+static const struct file_operations prism2_pda_proc_fops = {
+ .read = prism2_pda_proc_read,
+ .llseek = generic_file_llseek,
+};
-static int prism2_aux_dump_proc_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- local_info_t *local = (local_info_t *) data;
-
- if (local->func->read_aux == NULL) {
- *eof = 1;
- return 0;
- }
- if (local->func->read_aux(local->dev, off, count, page)) {
- *eof = 1;
- return 0;
- }
- *start = page;
-
- return count;
+static ssize_t prism2_aux_dump_proc_no_read(struct file *file, char __user *buf,
+ size_t bufsize, loff_t *_pos)
+{
+ return 0;
}
+static const struct file_operations prism2_aux_dump_proc_fops = {
+ .read = prism2_aux_dump_proc_no_read,
+};
+
#ifdef PRISM2_IO_DEBUG
static int prism2_io_debug_proc_read(char *page, char **start, off_t off,
@@ -306,82 +344,108 @@ static int prism2_io_debug_proc_read(char *page, char **start, off_t off,
#ifndef PRISM2_NO_STATION_MODES
-static int prism2_scan_results_proc_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int prism2_scan_results_proc_show(struct seq_file *m, void *v)
{
- char *p = page;
- local_info_t *local = (local_info_t *) data;
- int entry, i, len, total = 0;
+ local_info_t *local = m->private;
+ unsigned long entry;
+ int i, len;
struct hfa384x_hostscan_result *scanres;
- u8 *pos;
+ u8 *p;
- p += sprintf(p, "CHID ANL SL BcnInt Capab Rate BSSID ATIM SupRates "
- "SSID\n");
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(m,
+ "CHID ANL SL BcnInt Capab Rate BSSID ATIM SupRates SSID\n");
+ return 0;
+ }
+ entry = (unsigned long)v - 2;
+ scanres = &local->last_scan_results[entry];
+
+ seq_printf(m, "%d %d %d %d 0x%02x %d %pM %d ",
+ le16_to_cpu(scanres->chid),
+ (s16) le16_to_cpu(scanres->anl),
+ (s16) le16_to_cpu(scanres->sl),
+ le16_to_cpu(scanres->beacon_interval),
+ le16_to_cpu(scanres->capability),
+ le16_to_cpu(scanres->rate),
+ scanres->bssid,
+ le16_to_cpu(scanres->atim));
+
+ p = scanres->sup_rates;
+ for (i = 0; i < sizeof(scanres->sup_rates); i++) {
+ if (p[i] == 0)
+ break;
+ seq_printf(m, "<%02x>", p[i]);
+ }
+ seq_putc(m, ' ');
+
+ p = scanres->ssid;
+ len = le16_to_cpu(scanres->ssid_len);
+ if (len > 32)
+ len = 32;
+ for (i = 0; i < len; i++) {
+ unsigned char c = p[i];
+ if (c >= 32 && c < 127)
+ seq_putc(m, c);
+ else
+ seq_printf(m, "<%02x>", c);
+ }
+ seq_putc(m, '\n');
+ return 0;
+}
+
+static void *prism2_scan_results_proc_start(struct seq_file *m, loff_t *_pos)
+{
+ local_info_t *local = m->private;
spin_lock_bh(&local->lock);
- for (entry = 0; entry < local->last_scan_results_count; entry++) {
- scanres = &local->last_scan_results[entry];
- if (total + (p - page) <= off) {
- total += p - page;
- p = page;
- }
- if (total + (p - page) > off + count)
- break;
- if ((p - page) > (PAGE_SIZE - 200))
- break;
+ /* We have a header (pos 0) + N results to show (pos 1...N) */
+ if (*_pos > local->last_scan_results_count)
+ return NULL;
+ return (void *)(unsigned long)(*_pos + 1); /* 0 would be EOF */
+}
- p += sprintf(p, "%d %d %d %d 0x%02x %d %pM %d ",
- le16_to_cpu(scanres->chid),
- (s16) le16_to_cpu(scanres->anl),
- (s16) le16_to_cpu(scanres->sl),
- le16_to_cpu(scanres->beacon_interval),
- le16_to_cpu(scanres->capability),
- le16_to_cpu(scanres->rate),
- scanres->bssid,
- le16_to_cpu(scanres->atim));
-
- pos = scanres->sup_rates;
- for (i = 0; i < sizeof(scanres->sup_rates); i++) {
- if (pos[i] == 0)
- break;
- p += sprintf(p, "<%02x>", pos[i]);
- }
- p += sprintf(p, " ");
-
- pos = scanres->ssid;
- len = le16_to_cpu(scanres->ssid_len);
- if (len > 32)
- len = 32;
- for (i = 0; i < len; i++) {
- unsigned char c = pos[i];
- if (c >= 32 && c < 127)
- p += sprintf(p, "%c", c);
- else
- p += sprintf(p, "<%02x>", c);
- }
- p += sprintf(p, "\n");
- }
+static void *prism2_scan_results_proc_next(struct seq_file *m, void *v, loff_t *_pos)
+{
+ local_info_t *local = m->private;
+
+ ++*_pos;
+ if (*_pos > local->last_scan_results_count)
+ return NULL;
+ return (void *)(unsigned long)(*_pos + 1); /* 0 would be EOF */
+}
+
+static void prism2_scan_results_proc_stop(struct seq_file *m, void *v)
+{
+ local_info_t *local = m->private;
spin_unlock_bh(&local->lock);
+}
- total += (p - page);
- if (total >= off + count)
- *eof = 1;
+static const struct seq_operations prism2_scan_results_proc_seqops = {
+ .start = prism2_scan_results_proc_start,
+ .next = prism2_scan_results_proc_next,
+ .stop = prism2_scan_results_proc_stop,
+ .show = prism2_scan_results_proc_show,
+};
- if (total < off) {
- *eof = 1;
- return 0;
+static int prism2_scan_results_proc_open(struct inode *inode, struct file *file)
+{
+ int ret = seq_open(file, &prism2_scan_results_proc_seqops);
+ if (ret == 0) {
+ struct seq_file *m = file->private_data;
+ m->private = PDE_DATA(inode);
}
+ return ret;
+}
+
+static const struct file_operations prism2_scan_results_proc_fops = {
+ .open = prism2_scan_results_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
- len = total - off;
- if (len > (p - page))
- len = p - page;
- *start = p - len;
- if (len > count)
- len = count;
- return len;
-}
#endif /* PRISM2_NO_STATION_MODES */
@@ -403,53 +467,36 @@ void hostap_init_proc(local_info_t *local)
}
#ifndef PRISM2_NO_PROCFS_DEBUG
- create_proc_read_entry("debug", 0, local->proc,
- prism2_debug_proc_read, local);
+ proc_create_data("debug", 0, local->proc,
+ &prism2_debug_proc_fops, local);
#endif /* PRISM2_NO_PROCFS_DEBUG */
- create_proc_read_entry("stats", 0, local->proc,
- prism2_stats_proc_read, local);
- create_proc_read_entry("wds", 0, local->proc,
- prism2_wds_proc_read, local);
- create_proc_read_entry("pda", 0, local->proc,
- prism2_pda_proc_read, local);
- create_proc_read_entry("aux_dump", 0, local->proc,
- prism2_aux_dump_proc_read, local);
- create_proc_read_entry("bss_list", 0, local->proc,
- prism2_bss_list_proc_read, local);
- create_proc_read_entry("crypt", 0, local->proc,
- prism2_crypt_proc_read, local);
+ proc_create_data("stats", 0, local->proc,
+ &prism2_stats_proc_fops, local);
+ proc_create_data("wds", 0, local->proc,
+ &prism2_wds_proc_fops, local);
+ proc_create_data("pda", 0, local->proc,
+ &prism2_pda_proc_fops, local);
+ proc_create_data("aux_dump", 0, local->proc,
+ local->func->read_aux_fops ?: &prism2_aux_dump_proc_fops,
+ local);
+ proc_create_data("bss_list", 0, local->proc,
+ &prism2_bss_list_proc_fops, local);
+ proc_create_data("crypt", 0, local->proc,
+ &prism2_crypt_proc_fops, local);
#ifdef PRISM2_IO_DEBUG
- create_proc_read_entry("io_debug", 0, local->proc,
- prism2_io_debug_proc_read, local);
+ proc_create_data("io_debug", 0, local->proc,
+ &prism2_io_debug_proc_fops, local);
#endif /* PRISM2_IO_DEBUG */
#ifndef PRISM2_NO_STATION_MODES
- create_proc_read_entry("scan_results", 0, local->proc,
- prism2_scan_results_proc_read, local);
+ proc_create_data("scan_results", 0, local->proc,
+ &prism2_scan_results_proc_fops, local);
#endif /* PRISM2_NO_STATION_MODES */
}
void hostap_remove_proc(local_info_t *local)
{
- if (local->proc != NULL) {
-#ifndef PRISM2_NO_STATION_MODES
- remove_proc_entry("scan_results", local->proc);
-#endif /* PRISM2_NO_STATION_MODES */
-#ifdef PRISM2_IO_DEBUG
- remove_proc_entry("io_debug", local->proc);
-#endif /* PRISM2_IO_DEBUG */
- remove_proc_entry("pda", local->proc);
- remove_proc_entry("aux_dump", local->proc);
- remove_proc_entry("wds", local->proc);
- remove_proc_entry("stats", local->proc);
- remove_proc_entry("bss_list", local->proc);
- remove_proc_entry("crypt", local->proc);
-#ifndef PRISM2_NO_PROCFS_DEBUG
- remove_proc_entry("debug", local->proc);
-#endif /* PRISM2_NO_PROCFS_DEBUG */
- if (hostap_proc != NULL)
- remove_proc_entry(local->proc->name, hostap_proc);
- }
+ remove_proc_subtree(local->ddev->name, hostap_proc);
}
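
The hostap proc files above now follow the stock seq_file idiom: one-shot dumps (debug, stats, crypt) go through single_open() with the per-device pointer recovered via PDE_DATA(), while list-backed files (wds, bss_list, scan_results) supply a seq_operations table whose start/stop callbacks take and drop the protecting lock. Below is a minimal sketch of the single_open() variant; every example_* name is hypothetical and none of this code is part of the patch.

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_proc_show(struct seq_file *m, void *v)
{
	int *value = m->private;	/* pointer handed to single_open() below */

	seq_printf(m, "value=%d\n", *value);
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	/* PDE_DATA() yields the data passed to proc_create_data() */
	return single_open(file, example_proc_show, PDE_DATA(inode));
}

static const struct file_operations example_proc_fops = {
	.open		= example_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* registration, e.g.:
 *	proc_create_data("example", 0, parent_dir, &example_proc_fops, &example_value);
 */
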
diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h
index 7bb0b4b..5790401 100644
--- a/drivers/net/wireless/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/hostap/hostap_wlan.h
@@ -596,8 +596,7 @@ struct prism2_helper_functions {
struct prism2_download_param *param);
int (*tx)(struct sk_buff *skb, struct net_device *dev);
int (*set_tim)(struct net_device *dev, int aid, int set);
- int (*read_aux)(struct net_device *dev, unsigned addr, int len,
- u8 *buf);
+ const struct file_operations *read_aux_fops;
int need_tx_headroom; /* number of bytes of headroom needed before
* IEEE 802.11 header */
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index a0cb077..d3c8ece 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -216,7 +216,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
mwifiex_form_mgmt_frame(skb, buf, len);
mwifiex_queue_tx_pkt(priv, skb);
- *cookie = random32() | 1;
+ *cookie = prandom_u32() | 1;
cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true, GFP_ATOMIC);
wiphy_dbg(wiphy, "info: management frame transmitted\n");
@@ -271,7 +271,7 @@ mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy,
duration);
if (!ret) {
- *cookie = random32() | 1;
+ *cookie = prandom_u32() | 1;
priv->roc_cfg.cookie = *cookie;
priv->roc_cfg.chan = *chan;
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index ebada81..9b557a1 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2778,7 +2778,7 @@ static ssize_t int_proc_write(struct file *file, const char __user *buffer,
nr = nr * 10 + c;
p++;
} while (--len);
- *(int *)PDE(file_inode(file))->data = nr;
+ *(int *)PDE_DATA(file_inode(file)) = nr;
return count;
}
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index a2865f1..37984e6 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -51,9 +51,17 @@
* This is the maximum slots a skb can have. If a guest sends a skb
* which exceeds this limit it is considered malicious.
*/
-#define MAX_SKB_SLOTS_DEFAULT 20
-static unsigned int max_skb_slots = MAX_SKB_SLOTS_DEFAULT;
-module_param(max_skb_slots, uint, 0444);
+#define FATAL_SKB_SLOTS_DEFAULT 20
+static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
+module_param(fatal_skb_slots, uint, 0444);
+
+/*
+ * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
+ * the maximum slots a valid packet can use. Now this value is defined
+ * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
+ * all backends.
+ */
+#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
typedef unsigned int pending_ring_idx_t;
#define INVALID_PENDING_RING_IDX (~0U)
@@ -928,18 +936,20 @@ static void netbk_fatal_tx_err(struct xenvif *vif)
static int netbk_count_requests(struct xenvif *vif,
struct xen_netif_tx_request *first,
- RING_IDX first_idx,
struct xen_netif_tx_request *txp,
int work_to_do)
{
RING_IDX cons = vif->tx.req_cons;
int slots = 0;
int drop_err = 0;
+ int more_data;
if (!(first->flags & XEN_NETTXF_more_data))
return 0;
do {
+ struct xen_netif_tx_request dropped_tx = { 0 };
+
if (slots >= work_to_do) {
netdev_err(vif->dev,
"Asked for %d slots but exceeds this limit\n",
@@ -951,28 +961,32 @@ static int netbk_count_requests(struct xenvif *vif,
/* This guest is really using too many slots and
* considered malicious.
*/
- if (unlikely(slots >= max_skb_slots)) {
+ if (unlikely(slots >= fatal_skb_slots)) {
netdev_err(vif->dev,
"Malicious frontend using %d slots, threshold %u\n",
- slots, max_skb_slots);
+ slots, fatal_skb_slots);
netbk_fatal_tx_err(vif);
return -E2BIG;
}
/* Xen network protocol had implicit dependency on
- * MAX_SKB_FRAGS. XEN_NETIF_NR_SLOTS_MIN is set to the
- * historical MAX_SKB_FRAGS value 18 to honor the same
- * behavior as before. Any packet using more than 18
- * slots but less than max_skb_slots slots is dropped
+ * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
+ * the historical MAX_SKB_FRAGS value 18 to honor the
+ * same behavior as before. Any packet using more than
+ * 18 slots but less than fatal_skb_slots slots is
+ * dropped
*/
- if (!drop_err && slots >= XEN_NETIF_NR_SLOTS_MIN) {
+ if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
if (net_ratelimit())
netdev_dbg(vif->dev,
"Too many slots (%d) exceeding limit (%d), dropping packet\n",
- slots, XEN_NETIF_NR_SLOTS_MIN);
+ slots, XEN_NETBK_LEGACY_SLOTS_MAX);
drop_err = -E2BIG;
}
+ if (drop_err)
+ txp = &dropped_tx;
+
memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
sizeof(*txp));
@@ -1002,10 +1016,16 @@ static int netbk_count_requests(struct xenvif *vif,
netbk_fatal_tx_err(vif);
return -EINVAL;
}
- } while ((txp++)->flags & XEN_NETTXF_more_data);
+
+ more_data = txp->flags & XEN_NETTXF_more_data;
+
+ if (!drop_err)
+ txp++;
+
+ } while (more_data);
if (drop_err) {
- netbk_tx_err(vif, first, first_idx + slots);
+ netbk_tx_err(vif, first, cons + slots);
return drop_err;
}
@@ -1042,7 +1062,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
struct pending_tx_info *first = NULL;
/* At this point shinfo->nr_frags is in fact the number of
- * slots, which can be as large as XEN_NETIF_NR_SLOTS_MIN.
+ * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
*/
nr_slots = shinfo->nr_frags;
@@ -1404,12 +1424,12 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
struct sk_buff *skb;
int ret;
- while ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
+ while ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
< MAX_PENDING_REQS) &&
!list_empty(&netbk->net_schedule_list)) {
struct xenvif *vif;
struct xen_netif_tx_request txreq;
- struct xen_netif_tx_request txfrags[max_skb_slots];
+ struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
struct page *page;
struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
u16 pending_idx;
@@ -1470,8 +1490,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
continue;
}
- ret = netbk_count_requests(vif, &txreq, idx,
- txfrags, work_to_do);
+ ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
if (unlikely(ret < 0))
continue;
@@ -1498,7 +1517,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
pending_idx = netbk->pending_ring[index];
data_len = (txreq.size > PKT_PROT_LEN &&
- ret < XEN_NETIF_NR_SLOTS_MIN) ?
+ ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
PKT_PROT_LEN : txreq.size;
skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
@@ -1777,7 +1796,7 @@ static inline int rx_work_todo(struct xen_netbk *netbk)
static inline int tx_work_todo(struct xen_netbk *netbk)
{
- if ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
+ if ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
< MAX_PENDING_REQS) &&
!list_empty(&netbk->net_schedule_list))
return 1;
@@ -1862,11 +1881,11 @@ static int __init netback_init(void)
if (!xen_domain())
return -ENODEV;
- if (max_skb_slots < XEN_NETIF_NR_SLOTS_MIN) {
+ if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
printk(KERN_INFO
- "xen-netback: max_skb_slots too small (%d), bump it to XEN_NETIF_NR_SLOTS_MIN (%d)\n",
- max_skb_slots, XEN_NETIF_NR_SLOTS_MIN);
- max_skb_slots = XEN_NETIF_NR_SLOTS_MIN;
+ "xen-netback: fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
+ fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
+ fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
}
xen_netbk_group_nr = num_online_cpus();
diff --git a/drivers/nubus/nubus.c b/drivers/nubus/nubus.c
index 44d01af..43926cd 100644
--- a/drivers/nubus/nubus.c
+++ b/drivers/nubus/nubus.c
@@ -19,7 +19,6 @@
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/hwtest.h>
-#include <linux/proc_fs.h>
#include <asm/mac_via.h>
#include <asm/mac_oss.h>
@@ -954,56 +953,6 @@ void __init nubus_probe_slot(int slot)
}
}
-#if defined(CONFIG_PROC_FS)
-
-/* /proc/nubus stuff */
-
-static int sprint_nubus_board(struct nubus_board* board, char* ptr, int len)
-{
- if(len < 100)
- return -1;
-
- sprintf(ptr, "Slot %X: %s\n",
- board->slot, board->name);
-
- return strlen(ptr);
-}
-
-static int nubus_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- int nprinted, len, begin = 0;
- int size = PAGE_SIZE;
- struct nubus_board* board;
-
- len = sprintf(page, "Nubus devices found:\n");
- /* Walk the list of NuBus boards */
- for (board = nubus_boards; board != NULL; board = board->next)
- {
- nprinted = sprint_nubus_board(board, page + len, size - len);
- if (nprinted < 0)
- break;
- len += nprinted;
- if (len+begin < off) {
- begin += len;
- len = 0;
- }
- if (len+begin >= off+count)
- break;
- }
- if (len+begin < off)
- *eof = 1;
- off -= begin;
- *start = page + off;
- len -= off;
- if (len>count)
- len = count;
- if (len<0)
- len = 0;
- return len;
-}
-#endif
-
void __init nubus_scan_bus(void)
{
int slot;
@@ -1041,11 +990,7 @@ static int __init nubus_init(void)
nubus_devices = NULL;
nubus_boards = NULL;
nubus_scan_bus();
-
-#ifdef CONFIG_PROC_FS
- create_proc_read_entry("nubus", 0, NULL, nubus_read_proc, NULL);
nubus_proc_init();
-#endif
return 0;
}
diff --git a/drivers/nubus/proc.c b/drivers/nubus/proc.c
index 208dd12..5371b37 100644
--- a/drivers/nubus/proc.c
+++ b/drivers/nubus/proc.c
@@ -52,7 +52,6 @@ static int nubus_devices_proc_open(struct inode *inode, struct file *file)
}
static const struct file_operations nubus_devices_proc_fops = {
- .owner = THIS_MODULE,
.open = nubus_devices_proc_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -61,6 +60,10 @@ static const struct file_operations nubus_devices_proc_fops = {
static struct proc_dir_entry *proc_bus_nubus_dir;
+static const struct file_operations nubus_proc_subdir_fops = {
+#warning Need to set some I/O handlers here
+};
+
static void nubus_proc_subdir(struct nubus_dev* dev,
struct proc_dir_entry* parent,
struct nubus_dir* dir)
@@ -73,9 +76,10 @@ static void nubus_proc_subdir(struct nubus_dev* dev,
struct proc_dir_entry* e;
sprintf(name, "%x", ent.type);
- e = create_proc_entry(name, S_IFREG | S_IRUGO |
- S_IWUSR, parent);
- if (!e) return;
+ e = proc_create(name, S_IFREG | S_IRUGO | S_IWUSR, parent,
+ &nubus_proc_subdir_fops);
+ if (!e)
+ return;
}
}
@@ -143,20 +147,72 @@ int nubus_proc_attach_device(struct nubus_dev *dev)
}
EXPORT_SYMBOL(nubus_proc_attach_device);
-/* FIXME: this is certainly broken! */
-int nubus_proc_detach_device(struct nubus_dev *dev)
+/*
+ * /proc/nubus stuff
+ */
+static int nubus_proc_show(struct seq_file *m, void *v)
{
- struct proc_dir_entry *e;
+ const struct nubus_board *board = v;
- if ((e = dev->procdir)) {
- if (atomic_read(&e->count))
- return -EBUSY;
- remove_proc_entry(e->name, proc_bus_nubus_dir);
- dev->procdir = NULL;
- }
+ /* Display header on line 1 */
+ if (v == SEQ_START_TOKEN)
+ seq_puts(m, "Nubus devices found:\n");
+ else
+ seq_printf(m, "Slot %X: %s\n", board->slot, board->name);
return 0;
}
-EXPORT_SYMBOL(nubus_proc_detach_device);
+
+static void *nubus_proc_start(struct seq_file *m, loff_t *_pos)
+{
+ struct nubus_board *board;
+ unsigned pos;
+
+ if (*_pos > LONG_MAX)
+ return NULL;
+ pos = *_pos;
+ if (pos == 0)
+ return SEQ_START_TOKEN;
+ for (board = nubus_boards; board; board = board->next)
+ if (--pos == 0)
+ break;
+ return board;
+}
+
+static void *nubus_proc_next(struct seq_file *p, void *v, loff_t *_pos)
+{
+ /* Walk the list of NuBus boards */
+ struct nubus_board *board = v;
+
+ ++*_pos;
+ if (v == SEQ_START_TOKEN)
+ board = nubus_boards;
+ else if (board)
+ board = board->next;
+ return board;
+}
+
+static void nubus_proc_stop(struct seq_file *p, void *v)
+{
+}
+
+static const struct seq_operations nubus_proc_seqops = {
+ .start = nubus_proc_start,
+ .next = nubus_proc_next,
+ .stop = nubus_proc_stop,
+ .show = nubus_proc_show,
+};
+
+static int nubus_proc_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &nubus_proc_seqops);
+}
+
+static const struct file_operations nubus_proc_fops = {
+ .open = nubus_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
void __init proc_bus_nubus_add_devices(void)
{
@@ -168,6 +224,7 @@ void __init proc_bus_nubus_add_devices(void)
void __init nubus_proc_init(void)
{
+ proc_create("nubus", 0, NULL, &nubus_proc_fops);
if (!MACH_IS_MAC)
return;
proc_bus_nubus_dir = proc_mkdir("bus/nubus", NULL);
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 1733081..c76d16c 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -747,6 +747,64 @@ struct device_node *of_find_node_by_phandle(phandle handle)
EXPORT_SYMBOL(of_find_node_by_phandle);
/**
+ * of_find_property_value_of_size
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @len: requested length of property value
+ *
+ * Search for a property in a device node and validate the requested size.
+ * Returns the property value on success, or an ERR_PTR()-encoded error:
+ * -EINVAL if the property does not exist, -ENODATA if the property has no
+ * value, and -EOVERFLOW if the property data isn't large enough.
+ *
+ */
+static void *of_find_property_value_of_size(const struct device_node *np,
+ const char *propname, u32 len)
+{
+ struct property *prop = of_find_property(np, propname, NULL);
+
+ if (!prop)
+ return ERR_PTR(-EINVAL);
+ if (!prop->value)
+ return ERR_PTR(-ENODATA);
+ if (len > prop->length)
+ return ERR_PTR(-EOVERFLOW);
+
+ return prop->value;
+}
+
+/**
+ * of_property_read_u32_index - Find and read a u32 from a multi-value property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @index: index of the u32 in the list of values
+ * @out_value: pointer to return value, modified only if no error.
+ *
+ * Search for a property in a device node and read the nth 32-bit value from
+ * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_value is modified only if a valid u32 value can be decoded.
+ */
+int of_property_read_u32_index(const struct device_node *np,
+ const char *propname,
+ u32 index, u32 *out_value)
+{
+ const u32 *val = of_find_property_value_of_size(np, propname,
+ ((index + 1) * sizeof(*out_value)));
+
+ if (IS_ERR(val))
+ return PTR_ERR(val);
+
+ *out_value = be32_to_cpup(((__be32 *)val) + index);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_property_read_u32_index);
+
+/**
* of_property_read_u8_array - Find and read an array of u8 from a property.
*
* @np: device node from which the property value is to be read.
@@ -767,17 +825,12 @@ EXPORT_SYMBOL(of_find_node_by_phandle);
int of_property_read_u8_array(const struct device_node *np,
const char *propname, u8 *out_values, size_t sz)
{
- struct property *prop = of_find_property(np, propname, NULL);
- const u8 *val;
+ const u8 *val = of_find_property_value_of_size(np, propname,
+ (sz * sizeof(*out_values)));
- if (!prop)
- return -EINVAL;
- if (!prop->value)
- return -ENODATA;
- if ((sz * sizeof(*out_values)) > prop->length)
- return -EOVERFLOW;
+ if (IS_ERR(val))
+ return PTR_ERR(val);
- val = prop->value;
while (sz--)
*out_values++ = *val++;
return 0;
@@ -805,17 +858,12 @@ EXPORT_SYMBOL_GPL(of_property_read_u8_array);
int of_property_read_u16_array(const struct device_node *np,
const char *propname, u16 *out_values, size_t sz)
{
- struct property *prop = of_find_property(np, propname, NULL);
- const __be16 *val;
+ const __be16 *val = of_find_property_value_of_size(np, propname,
+ (sz * sizeof(*out_values)));
- if (!prop)
- return -EINVAL;
- if (!prop->value)
- return -ENODATA;
- if ((sz * sizeof(*out_values)) > prop->length)
- return -EOVERFLOW;
+ if (IS_ERR(val))
+ return PTR_ERR(val);
- val = prop->value;
while (sz--)
*out_values++ = be16_to_cpup(val++);
return 0;
@@ -842,17 +890,12 @@ int of_property_read_u32_array(const struct device_node *np,
const char *propname, u32 *out_values,
size_t sz)
{
- struct property *prop = of_find_property(np, propname, NULL);
- const __be32 *val;
+ const __be32 *val = of_find_property_value_of_size(np, propname,
+ (sz * sizeof(*out_values)));
- if (!prop)
- return -EINVAL;
- if (!prop->value)
- return -ENODATA;
- if ((sz * sizeof(*out_values)) > prop->length)
- return -EOVERFLOW;
+ if (IS_ERR(val))
+ return PTR_ERR(val);
- val = prop->value;
while (sz--)
*out_values++ = be32_to_cpup(val++);
return 0;
@@ -875,15 +918,13 @@ EXPORT_SYMBOL_GPL(of_property_read_u32_array);
int of_property_read_u64(const struct device_node *np, const char *propname,
u64 *out_value)
{
- struct property *prop = of_find_property(np, propname, NULL);
+ const __be32 *val = of_find_property_value_of_size(np, propname,
+ sizeof(*out_value));
- if (!prop)
- return -EINVAL;
- if (!prop->value)
- return -ENODATA;
- if (sizeof(*out_value) > prop->length)
- return -EOVERFLOW;
- *out_value = of_read_number(prop->value, 2);
+ if (IS_ERR(val))
+ return PTR_ERR(val);
+
+ *out_value = of_read_number(val, 2);
return 0;
}
EXPORT_SYMBOL_GPL(of_property_read_u64);
@@ -1453,16 +1494,7 @@ int of_attach_node(struct device_node *np)
#ifdef CONFIG_PROC_DEVICETREE
static void of_remove_proc_dt_entry(struct device_node *dn)
{
- struct device_node *parent = dn->parent;
- struct property *prop = dn->properties;
-
- while (prop) {
- remove_proc_entry(prop->name, dn->pde);
- prop = prop->next;
- }
-
- if (dn->pde)
- remove_proc_entry(dn->pde->name, parent->pde);
+ proc_remove(dn->pde);
}
#else
static void of_remove_proc_dt_entry(struct device_node *dn)
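
of_property_read_u32_index(), introduced above, reads the index-th cell of a multi-cell u32 property; the bounds check ((index + 1) * sizeof(u32) against the property length) is centralised in of_find_property_value_of_size(). A minimal usage sketch follows; the "vendor,example-values" property name is invented purely for illustration.

#include <linux/of.h>

/* Read the third cell (index 2) of a hypothetical "vendor,example-values"
 * property. Returns 0 and fills *out, or -EINVAL/-ENODATA/-EOVERFLOW. */
static int example_read_third_cell(const struct device_node *np, u32 *out)
{
	return of_property_read_u32_index(np, "vendor,example-values", 2, out);
}
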
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index d4d800c..b482431 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -172,14 +172,14 @@ static int led_proc_show(struct seq_file *m, void *v)
static int led_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, led_proc_show, PDE(inode)->data);
+ return single_open(file, led_proc_show, PDE_DATA(inode));
}
static ssize_t led_proc_write(struct file *file, const char *buf,
size_t count, loff_t *pos)
{
- void *data = PDE(file_inode(file))->data;
+ void *data = PDE_DATA(file_inode(file));
char *cur, lbuf[32];
int d;
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 42cfcd9..1ff1b67 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -575,7 +575,7 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
mtsp(sid,1);
asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
- pa |= (ci >> 12) & 0xff; /* move CI (8 bits) into lowest byte */
+ pa |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
*pdir_ptr = cpu_to_le64(pa); /* swap and store into I/O Pdir */
@@ -1376,7 +1376,7 @@ static void
sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
u32 iova_space_size, iova_space_mask;
- unsigned int pdir_size, iov_order;
+ unsigned int pdir_size, iov_order, tcnfg;
/*
** Determine IOVA Space size from memory size.
@@ -1468,8 +1468,19 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);
- /* Set I/O PDIR Page size to 4K */
- WRITE_REG(0, ioc->ioc_hpa+IOC_TCNFG);
+ /* Set I/O PDIR Page size to system page size */
+ switch (PAGE_SHIFT) {
+ case 12: tcnfg = 0; break; /* 4K */
+ case 13: tcnfg = 1; break; /* 8K */
+ case 14: tcnfg = 2; break; /* 16K */
+ case 16: tcnfg = 3; break; /* 64K */
+ default:
+ panic(__FILE__ "Unsupported system page size %d",
+ 1 << PAGE_SHIFT);
+ break;
+ }
+ /* Set I/O PDIR Page size to PAGE_SIZE (4k/16k/...) */
+ WRITE_REG(tcnfg, ioc->ioc_hpa+IOC_TCNFG);
/*
** Clear I/O TLB of any possible entries.
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 43ece5d..631aeb7 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -990,7 +990,6 @@ int pci_setup_device(struct pci_dev *dev)
dev->sysdata = dev->bus->sysdata;
dev->dev.parent = dev->bus->bridge;
dev->dev.bus = &pci_bus_type;
- dev->dev.type = &pci_dev_type;
dev->hdr_type = hdr_type & 0x7f;
dev->multifunction = !!(hdr_type & 0x80);
dev->error_state = pci_channel_io_normal;
@@ -1210,6 +1209,7 @@ struct pci_dev *alloc_pci_dev(void)
return NULL;
INIT_LIST_HEAD(&dev->bus_list);
+ dev->dev.type = &pci_dev_type;
return dev;
}
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 0b00947..0812608 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -46,9 +46,7 @@ proc_bus_pci_lseek(struct file *file, loff_t off, int whence)
static ssize_t
proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
- const struct inode *ino = file_inode(file);
- const struct proc_dir_entry *dp = PDE(ino);
- struct pci_dev *dev = dp->data;
+ struct pci_dev *dev = PDE_DATA(file_inode(file));
unsigned int pos = *ppos;
unsigned int cnt, size;
@@ -59,7 +57,7 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp
*/
if (capable(CAP_SYS_ADMIN))
- size = dp->size;
+ size = dev->cfg_size;
else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
size = 128;
else
@@ -133,10 +131,9 @@ static ssize_t
proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos)
{
struct inode *ino = file_inode(file);
- const struct proc_dir_entry *dp = PDE(ino);
- struct pci_dev *dev = dp->data;
+ struct pci_dev *dev = PDE_DATA(ino);
int pos = *ppos;
- int size = dp->size;
+ int size = dev->cfg_size;
int cnt;
if (pos >= size)
@@ -200,7 +197,7 @@ proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, lof
pci_config_pm_runtime_put(dev);
*ppos = pos;
- i_size_write(ino, dp->size);
+ i_size_write(ino, dev->cfg_size);
return nbytes;
}
@@ -212,8 +209,7 @@ struct pci_filp_private {
static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- const struct proc_dir_entry *dp = PDE(file_inode(file));
- struct pci_dev *dev = dp->data;
+ struct pci_dev *dev = PDE_DATA(file_inode(file));
#ifdef HAVE_PCI_MMAP
struct pci_filp_private *fpriv = file->private_data;
#endif /* HAVE_PCI_MMAP */
@@ -253,9 +249,7 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
#ifdef HAVE_PCI_MMAP
static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct inode *inode = file_inode(file);
- const struct proc_dir_entry *dp = PDE(inode);
- struct pci_dev *dev = dp->data;
+ struct pci_dev *dev = PDE_DATA(file_inode(file));
struct pci_filp_private *fpriv = file->private_data;
int i, ret;
@@ -425,7 +419,7 @@ int pci_proc_attach_device(struct pci_dev *dev)
&proc_bus_pci_operations, dev);
if (!e)
return -ENOMEM;
- e->size = dev->cfg_size;
+ proc_set_size(e, dev->cfg_size);
dev->procent = e;
return 0;
@@ -433,20 +427,14 @@ int pci_proc_attach_device(struct pci_dev *dev)
int pci_proc_detach_device(struct pci_dev *dev)
{
- struct proc_dir_entry *e;
-
- if ((e = dev->procent)) {
- remove_proc_entry(e->name, dev->bus->procdir);
- dev->procent = NULL;
- }
+ proc_remove(dev->procent);
+ dev->procent = NULL;
return 0;
}
int pci_proc_detach_bus(struct pci_bus* bus)
{
- struct proc_dir_entry *de = bus->procdir;
- if (de)
- remove_proc_entry(de->name, proc_bus_pci_dir);
+ proc_remove(bus->procdir);
return 0;
}
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index f910962..8f66924 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -93,12 +93,20 @@ config PINCTRL_IMX53
Say Y here to enable the imx53 pinctrl driver
config PINCTRL_IMX6Q
- bool "IMX6Q pinctrl driver"
+ bool "IMX6Q/DL pinctrl driver"
depends on OF
depends on SOC_IMX6Q
select PINCTRL_IMX
help
- Say Y here to enable the imx6q pinctrl driver
+ Say Y here to enable the imx6q/dl pinctrl driver
+
+config PINCTRL_IMX6SL
+ bool "IMX6SL pinctrl driver"
+ depends on OF
+ depends on SOC_IMX6SL
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx6sl pinctrl driver
config PINCTRL_LANTIQ
bool
@@ -216,6 +224,7 @@ config PINCTRL_S3C64XX
source "drivers/pinctrl/mvebu/Kconfig"
source "drivers/pinctrl/sh-pfc/Kconfig"
source "drivers/pinctrl/spear/Kconfig"
+source "drivers/pinctrl/vt8500/Kconfig"
config PINCTRL_XWAY
bool
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 988279a..9bdaeb8 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_PINCTRL_IMX35) += pinctrl-imx35.o
obj-$(CONFIG_PINCTRL_IMX51) += pinctrl-imx51.o
obj-$(CONFIG_PINCTRL_IMX53) += pinctrl-imx53.o
obj-$(CONFIG_PINCTRL_IMX6Q) += pinctrl-imx6q.o
+obj-$(CONFIG_PINCTRL_IMX6Q) += pinctrl-imx6dl.o
obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
obj-$(CONFIG_PINCTRL_MXS) += pinctrl-mxs.o
obj-$(CONFIG_PINCTRL_IMX23) += pinctrl-imx23.o
@@ -49,3 +50,4 @@ obj-$(CONFIG_PLAT_ORION) += mvebu/
obj-$(CONFIG_ARCH_SHMOBILE) += sh-pfc/
obj-$(CONFIG_SUPERH) += sh-pfc/
obj-$(CONFIG_PLAT_SPEAR) += spear/
+obj-$(CONFIG_ARCH_VT8500) += vt8500/
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index c3d222e..5327f35 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -129,7 +129,7 @@ struct pinctrl_dev *get_pinctrl_dev_from_of_node(struct device_node *np)
return pctldev;
}
- mutex_lock(&pinctrldev_list_mutex);
+ mutex_unlock(&pinctrldev_list_mutex);
return NULL;
}
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 4d7f531..5d7529e 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/pinctrl/machine.h>
@@ -27,8 +28,6 @@
/* Since we request GPIOs from ourself */
#include <linux/pinctrl/consumer.h>
-#include <asm/mach/irq.h>
-
#include <mach/hardware.h>
#include <mach/at91_pio.h>
diff --git a/drivers/pinctrl/pinctrl-bcm2835.c b/drivers/pinctrl/pinctrl-bcm2835.c
index f28d4b0..c8f20a3 100644
--- a/drivers/pinctrl/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/pinctrl-bcm2835.c
@@ -699,11 +699,6 @@ static int bcm2835_pctl_dt_node_to_map_pull(struct bcm2835_pinctrl *pc,
return 0;
}
-static inline u32 prop_u32(struct property *p, int i)
-{
- return be32_to_cpup(((__be32 *)p->value) + i);
-}
-
static int bcm2835_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
struct device_node *np,
struct pinctrl_map **map, unsigned *num_maps)
@@ -761,7 +756,9 @@ static int bcm2835_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
return -ENOMEM;
for (i = 0; i < num_pins; i++) {
- pin = prop_u32(pins, i);
+ err = of_property_read_u32_index(np, "brcm,pins", i, &pin);
+ if (err)
+ goto out;
if (pin >= ARRAY_SIZE(bcm2835_gpio_pins)) {
dev_err(pc->dev, "%s: invalid brcm,pins value %d\n",
of_node_full_name(np), pin);
@@ -770,14 +767,20 @@ static int bcm2835_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
}
if (num_funcs) {
- func = prop_u32(funcs, (num_funcs > 1) ? i : 0);
+ err = of_property_read_u32_index(np, "brcm,function",
+ (num_funcs > 1) ? i : 0, &func);
+ if (err)
+ goto out;
err = bcm2835_pctl_dt_node_to_map_func(pc, np, pin,
func, &cur_map);
if (err)
goto out;
}
if (num_pulls) {
- pull = prop_u32(pulls, (num_pulls > 1) ? i : 0);
+ err = of_property_read_u32_index(np, "brcm,pull",
+ (num_pulls > 1) ? i : 0, &pull);
+ if (err)
+ goto out;
err = bcm2835_pctl_dt_node_to_map_pull(pc, np, pin,
pull, &cur_map);
if (err)
diff --git a/drivers/pinctrl/pinctrl-exynos.c b/drivers/pinctrl/pinctrl-exynos.c
index 8b10b1a..ac74281 100644
--- a/drivers/pinctrl/pinctrl-exynos.c
+++ b/drivers/pinctrl/pinctrl-exynos.c
@@ -23,14 +23,13 @@
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/of_irq.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/err.h>
-#include <asm/mach/irq.h>
-
#include "pinctrl-samsung.h"
#include "pinctrl-exynos.h"
@@ -701,3 +700,111 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = {
.label = "exynos4x12-gpio-ctrl3",
},
};
+
+/* pin banks of exynos5250 pin-controller 0 */
+static struct samsung_pin_bank exynos5250_pin_banks0[] = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
+ EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpb0", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpb1", 0x10),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0A0, "gpb2", 0x14),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0C0, "gpb3", 0x18),
+ EXYNOS_PIN_BANK_EINTG(7, 0x0E0, "gpc0", 0x1c),
+ EXYNOS_PIN_BANK_EINTG(4, 0x100, "gpc1", 0x20),
+ EXYNOS_PIN_BANK_EINTG(7, 0x120, "gpc2", 0x24),
+ EXYNOS_PIN_BANK_EINTG(7, 0x140, "gpc3", 0x28),
+ EXYNOS_PIN_BANK_EINTG(4, 0x160, "gpd0", 0x2c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpd1", 0x30),
+ EXYNOS_PIN_BANK_EINTG(7, 0x2E0, "gpc4", 0x34),
+ EXYNOS_PIN_BANK_EINTN(6, 0x1A0, "gpy0"),
+ EXYNOS_PIN_BANK_EINTN(4, 0x1C0, "gpy1"),
+ EXYNOS_PIN_BANK_EINTN(6, 0x1E0, "gpy2"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x200, "gpy3"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x220, "gpy4"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x240, "gpy5"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x260, "gpy6"),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC00, "gpx0", 0x00),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC20, "gpx1", 0x04),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC40, "gpx2", 0x08),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC60, "gpx3", 0x0c),
+};
+
+/* pin banks of exynos5250 pin-controller 1 */
+static struct samsung_pin_bank exynos5250_pin_banks1[] = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpe0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(2, 0x020, "gpe1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(4, 0x040, "gpf0", 0x08),
+ EXYNOS_PIN_BANK_EINTG(4, 0x060, "gpf1", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x080, "gpg0", 0x10),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0A0, "gpg1", 0x14),
+ EXYNOS_PIN_BANK_EINTG(2, 0x0C0, "gpg2", 0x18),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0E0, "gph0", 0x1c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x100, "gph1", 0x20),
+};
+
+/* pin banks of exynos5250 pin-controller 2 */
+static struct samsung_pin_bank exynos5250_pin_banks2[] = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpv0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpv1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpv2", 0x08),
+ EXYNOS_PIN_BANK_EINTG(8, 0x080, "gpv3", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(2, 0x0C0, "gpv4", 0x10),
+};
+
+/* pin banks of exynos5250 pin-controller 3 */
+static struct samsung_pin_bank exynos5250_pin_banks3[] = {
+ EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
+};
+
+/*
+ * Samsung pinctrl driver data for Exynos5250 SoC. Exynos5250 SoC includes
+ * four gpio/pin-mux/pinconfig controllers.
+ */
+struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
+ {
+ /* pin-controller instance 0 data */
+ .pin_banks = exynos5250_pin_banks0,
+ .nr_banks = ARRAY_SIZE(exynos5250_pin_banks0),
+ .geint_con = EXYNOS_GPIO_ECON_OFFSET,
+ .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
+ .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
+ .weint_con = EXYNOS_WKUP_ECON_OFFSET,
+ .weint_mask = EXYNOS_WKUP_EMASK_OFFSET,
+ .weint_pend = EXYNOS_WKUP_EPEND_OFFSET,
+ .svc = EXYNOS_SVC_OFFSET,
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .eint_wkup_init = exynos_eint_wkup_init,
+ .label = "exynos5250-gpio-ctrl0",
+ }, {
+ /* pin-controller instance 1 data */
+ .pin_banks = exynos5250_pin_banks1,
+ .nr_banks = ARRAY_SIZE(exynos5250_pin_banks1),
+ .geint_con = EXYNOS_GPIO_ECON_OFFSET,
+ .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
+ .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
+ .svc = EXYNOS_SVC_OFFSET,
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .label = "exynos5250-gpio-ctrl1",
+ }, {
+ /* pin-controller instance 2 data */
+ .pin_banks = exynos5250_pin_banks2,
+ .nr_banks = ARRAY_SIZE(exynos5250_pin_banks2),
+ .geint_con = EXYNOS_GPIO_ECON_OFFSET,
+ .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
+ .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
+ .svc = EXYNOS_SVC_OFFSET,
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .label = "exynos5250-gpio-ctrl2",
+ }, {
+ /* pin-controller instance 3 data */
+ .pin_banks = exynos5250_pin_banks3,
+ .nr_banks = ARRAY_SIZE(exynos5250_pin_banks3),
+ .geint_con = EXYNOS_GPIO_ECON_OFFSET,
+ .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
+ .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
+ .svc = EXYNOS_SVC_OFFSET,
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .label = "exynos5250-gpio-ctrl3",
+ },
+};
diff --git a/drivers/pinctrl/pinctrl-imx.c b/drivers/pinctrl/pinctrl-imx.c
index 0ef1904..4fcfff92 100644
--- a/drivers/pinctrl/pinctrl-imx.c
+++ b/drivers/pinctrl/pinctrl-imx.c
@@ -54,32 +54,6 @@ struct imx_pinctrl {
const struct imx_pinctrl_soc_info *info;
};
-static const struct imx_pin_reg *imx_find_pin_reg(
- const struct imx_pinctrl_soc_info *info,
- unsigned pin, bool is_mux, unsigned mux)
-{
- const struct imx_pin_reg *pin_reg = NULL;
- int i;
-
- for (i = 0; i < info->npin_regs; i++) {
- pin_reg = &info->pin_regs[i];
- if (pin_reg->pid != pin)
- continue;
- if (!is_mux)
- break;
- else if (pin_reg->mux_mode == (mux & IMX_MUX_MASK))
- break;
- }
-
- if (i == info->npin_regs) {
- dev_err(info->dev, "Pin(%s): unable to find pin reg map\n",
- info->pins[pin].name);
- return NULL;
- }
-
- return pin_reg;
-}
-
static const inline struct imx_pin_group *imx_pinctrl_find_group_by_name(
const struct imx_pinctrl_soc_info *info,
const char *name)
@@ -223,7 +197,8 @@ static int imx_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
const struct imx_pinctrl_soc_info *info = ipctl->info;
const struct imx_pin_reg *pin_reg;
- const unsigned *pins, *mux;
+ const unsigned *pins, *mux, *input_val;
+ u16 *input_reg;
unsigned int npins, pin_id;
int i;
@@ -234,18 +209,17 @@ static int imx_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
pins = info->groups[group].pins;
npins = info->groups[group].npins;
mux = info->groups[group].mux_mode;
+ input_val = info->groups[group].input_val;
+ input_reg = info->groups[group].input_reg;
- WARN_ON(!pins || !npins || !mux);
+ WARN_ON(!pins || !npins || !mux || !input_val || !input_reg);
dev_dbg(ipctl->dev, "enable function %s group %s\n",
info->functions[selector].name, info->groups[group].name);
for (i = 0; i < npins; i++) {
pin_id = pins[i];
-
- pin_reg = imx_find_pin_reg(info, pin_id, 1, mux[i]);
- if (!pin_reg)
- return -EINVAL;
+ pin_reg = &info->pin_regs[pin_id];
if (!pin_reg->mux_reg) {
dev_err(ipctl->dev, "Pin(%s) does not support mux function\n",
@@ -258,11 +232,11 @@ static int imx_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
pin_reg->mux_reg, mux[i]);
/* some pins also need select input setting, set it if found */
- if (pin_reg->input_reg) {
- writel(pin_reg->input_val, ipctl->base + pin_reg->input_reg);
+ if (input_reg[i]) {
+ writel(input_val[i], ipctl->base + input_reg[i]);
dev_dbg(ipctl->dev,
"==>select_input: offset 0x%x val 0x%x\n",
- pin_reg->input_reg, pin_reg->input_val);
+ input_reg[i], input_val[i]);
}
}
@@ -311,11 +285,7 @@ static int imx_pinconf_get(struct pinctrl_dev *pctldev,
{
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
const struct imx_pinctrl_soc_info *info = ipctl->info;
- const struct imx_pin_reg *pin_reg;
-
- pin_reg = imx_find_pin_reg(info, pin_id, 0, 0);
- if (!pin_reg)
- return -EINVAL;
+ const struct imx_pin_reg *pin_reg = &info->pin_regs[pin_id];
if (!pin_reg->conf_reg) {
dev_err(info->dev, "Pin(%s) does not support config function\n",
@@ -333,11 +303,7 @@ static int imx_pinconf_set(struct pinctrl_dev *pctldev,
{
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
const struct imx_pinctrl_soc_info *info = ipctl->info;
- const struct imx_pin_reg *pin_reg;
-
- pin_reg = imx_find_pin_reg(info, pin_id, 0, 0);
- if (!pin_reg)
- return -EINVAL;
+ const struct imx_pin_reg *pin_reg = &info->pin_regs[pin_id];
if (!pin_reg->conf_reg) {
dev_err(info->dev, "Pin(%s) does not support config function\n",
@@ -360,10 +326,9 @@ static void imx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
{
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
const struct imx_pinctrl_soc_info *info = ipctl->info;
- const struct imx_pin_reg *pin_reg;
+ const struct imx_pin_reg *pin_reg = &info->pin_regs[pin_id];
unsigned long config;
- pin_reg = imx_find_pin_reg(info, pin_id, 0, 0);
if (!pin_reg || !pin_reg->conf_reg) {
seq_printf(s, "N/A");
return;
@@ -411,29 +376,20 @@ static struct pinctrl_desc imx_pinctrl_desc = {
.owner = THIS_MODULE,
};
-/* decode pin id and mux from pin function id got from device tree*/
-static int imx_pinctrl_get_pin_id_and_mux(const struct imx_pinctrl_soc_info *info,
- unsigned int pin_func_id, unsigned int *pin_id,
- unsigned int *mux)
-{
- if (pin_func_id > info->npin_regs)
- return -EINVAL;
-
- *pin_id = info->pin_regs[pin_func_id].pid;
- *mux = info->pin_regs[pin_func_id].mux_mode;
-
- return 0;
-}
+/*
+ * Each pin represented in fsl,pins consists of 5 u32 PIN_FUNC_ID and
+ * 1 u32 CONFIG, so 24 bytes in total for each pin.
+ */
+#define FSL_PIN_SIZE 24
static int imx_pinctrl_parse_groups(struct device_node *np,
struct imx_pin_group *grp,
struct imx_pinctrl_soc_info *info,
u32 index)
{
- unsigned int pin_func_id;
- int ret, size;
+ int size;
const __be32 *list;
- int i, j;
+ int i;
u32 config;
dev_dbg(info->dev, "group(%d): %s\n", index, np->name);
@@ -447,32 +403,40 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
*/
list = of_get_property(np, "fsl,pins", &size);
/* we do not check return since it's safe node passed down */
- size /= sizeof(*list);
- if (!size || size % 2) {
- dev_err(info->dev, "wrong pins number or pins and configs should be pairs\n");
+ if (!size || size % FSL_PIN_SIZE) {
+ dev_err(info->dev, "Invalid fsl,pins property\n");
return -EINVAL;
}
- grp->npins = size / 2;
+ grp->npins = size / FSL_PIN_SIZE;
grp->pins = devm_kzalloc(info->dev, grp->npins * sizeof(unsigned int),
GFP_KERNEL);
grp->mux_mode = devm_kzalloc(info->dev, grp->npins * sizeof(unsigned int),
GFP_KERNEL);
+ grp->input_reg = devm_kzalloc(info->dev, grp->npins * sizeof(u16),
+ GFP_KERNEL);
+ grp->input_val = devm_kzalloc(info->dev, grp->npins * sizeof(unsigned int),
+ GFP_KERNEL);
grp->configs = devm_kzalloc(info->dev, grp->npins * sizeof(unsigned long),
GFP_KERNEL);
- for (i = 0, j = 0; i < size; i += 2, j++) {
- pin_func_id = be32_to_cpu(*list++);
- ret = imx_pinctrl_get_pin_id_and_mux(info, pin_func_id,
- &grp->pins[j], &grp->mux_mode[j]);
- if (ret) {
- dev_err(info->dev, "get invalid pin function id\n");
- return -EINVAL;
- }
+ for (i = 0; i < grp->npins; i++) {
+ u32 mux_reg = be32_to_cpu(*list++);
+ u32 conf_reg = be32_to_cpu(*list++);
+ unsigned int pin_id = mux_reg ? mux_reg / 4 : conf_reg / 4;
+ struct imx_pin_reg *pin_reg = &info->pin_regs[pin_id];
+
+ grp->pins[i] = pin_id;
+ pin_reg->mux_reg = mux_reg;
+ pin_reg->conf_reg = conf_reg;
+ grp->input_reg[i] = be32_to_cpu(*list++);
+ grp->mux_mode[i] = be32_to_cpu(*list++);
+ grp->input_val[i] = be32_to_cpu(*list++);
+
/* SION bit is in mux register */
config = be32_to_cpu(*list++);
if (config & IMX_PAD_SION)
- grp->mux_mode[j] |= IOMUXC_CONFIG_SION;
- grp->configs[j] = config & ~IMX_PAD_SION;
+ grp->mux_mode[i] |= IOMUXC_CONFIG_SION;
+ grp->configs[i] = config & ~IMX_PAD_SION;
}
#ifdef DEBUG
@@ -568,8 +532,7 @@ int imx_pinctrl_probe(struct platform_device *pdev,
struct resource *res;
int ret;
- if (!info || !info->pins || !info->npins
- || !info->pin_regs || !info->npin_regs) {
+ if (!info || !info->pins || !info->npins) {
dev_err(&pdev->dev, "wrong pinctrl info\n");
return -EINVAL;
}
@@ -580,6 +543,11 @@ int imx_pinctrl_probe(struct platform_device *pdev,
if (!ipctl)
return -ENOMEM;
+ info->pin_regs = devm_kzalloc(&pdev->dev, sizeof(*info->pin_regs) *
+ info->npins, GFP_KERNEL);
+ if (!info->pin_regs)
+ return -ENOMEM;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENOENT;
diff --git a/drivers/pinctrl/pinctrl-imx.h b/drivers/pinctrl/pinctrl-imx.h
index 9b65e78..607ef54 100644
--- a/drivers/pinctrl/pinctrl-imx.h
+++ b/drivers/pinctrl/pinctrl-imx.h
@@ -26,6 +26,10 @@ struct platform_device;
* elements in .pins so we can iterate over that array
* @mux_mode: the mux mode for each pin in this group. The size of this
* array is the same as pins.
+ * @input_reg: select input register offset for each pin in this group,
+ * 0 if no select input setting needed. The size of this array is
+ * the same as pins.
+ * @input_val: the select input value for each pin in this group. The size of
+ * this array is the same as pins.
* @configs: the config for each pin in this group. The size of this
* array is the same as pins.
*/
@@ -34,6 +38,8 @@ struct imx_pin_group {
unsigned int *pins;
unsigned npins;
unsigned int *mux_mode;
+ u16 *input_reg;
+ unsigned int *input_val;
unsigned long *configs;
};
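[Note: illustrative sketch only, not part of this patch. It shows how the new per-group input_reg/input_val arrays could be applied when a group is muxed; the helper name is hypothetical and the ioremapped IOMUXC base is passed in explicitly rather than taken from any driver structure.]
#include <linux/io.h>

/* Hypothetical helper: program the select-input registers for a pin group. */
static void imx_apply_select_input(void __iomem *base,
				   const struct imx_pin_group *grp)
{
	unsigned int i;

	for (i = 0; i < grp->npins; i++)
		/* input_reg == 0 means the pin needs no select input setting */
		if (grp->input_reg[i])
			writel(grp->input_val[i], base + grp->input_reg[i]);
}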
@@ -51,30 +57,19 @@ struct imx_pmx_func {
/**
* struct imx_pin_reg - describe a pin reg map
- * The last 3 members are used for select input setting
- * @pid: pin id
* @mux_reg: mux register offset
* @conf_reg: config register offset
- * @mux_mode: mux mode
- * @input_reg: select input register offset for this mux if any
- * 0 if no select input setting needed.
- * @input_val: the value set to select input register
*/
struct imx_pin_reg {
- u16 pid;
u16 mux_reg;
u16 conf_reg;
- u8 mux_mode;
- u16 input_reg;
- u8 input_val;
};
struct imx_pinctrl_soc_info {
struct device *dev;
const struct pinctrl_pin_desc *pins;
unsigned int npins;
- const struct imx_pin_reg *pin_regs;
- unsigned int npin_regs;
+ struct imx_pin_reg *pin_regs;
struct imx_pin_group *groups;
unsigned int ngroups;
struct imx_pmx_func *functions;
@@ -84,16 +79,6 @@ struct imx_pinctrl_soc_info {
#define NO_MUX 0x0
#define NO_PAD 0x0
-#define IMX_PIN_REG(id, conf, mux, mode, input, val) \
- { \
- .pid = id, \
- .conf_reg = conf, \
- .mux_reg = mux, \
- .mux_mode = mode, \
- .input_reg = input, \
- .input_val = val, \
- }
-
#define IMX_PINCTRL_PIN(pin) PINCTRL_PIN(pin, #pin)
#define PAD_CTL_MASK(len) ((1 << len) - 1)
diff --git a/drivers/pinctrl/pinctrl-imx35.c b/drivers/pinctrl/pinctrl-imx35.c
index 6e21411..c454982 100644
--- a/drivers/pinctrl/pinctrl-imx35.c
+++ b/drivers/pinctrl/pinctrl-imx35.c
@@ -24,1249 +24,496 @@
#include "pinctrl-imx.h"
enum imx35_pads {
- MX35_PAD_CAPTURE = 0,
- MX35_PAD_COMPARE = 1,
- MX35_PAD_WDOG_RST = 2,
- MX35_PAD_GPIO1_0 = 3,
- MX35_PAD_GPIO1_1 = 4,
- MX35_PAD_GPIO2_0 = 5,
- MX35_PAD_GPIO3_0 = 6,
- MX35_PAD_RESET_IN_B = 7,
- MX35_PAD_POR_B = 8,
- MX35_PAD_CLKO = 9,
- MX35_PAD_BOOT_MODE0 = 10,
- MX35_PAD_BOOT_MODE1 = 11,
- MX35_PAD_CLK_MODE0 = 12,
- MX35_PAD_CLK_MODE1 = 13,
- MX35_PAD_POWER_FAIL = 14,
- MX35_PAD_VSTBY = 15,
- MX35_PAD_A0 = 16,
- MX35_PAD_A1 = 17,
- MX35_PAD_A2 = 18,
- MX35_PAD_A3 = 19,
- MX35_PAD_A4 = 20,
- MX35_PAD_A5 = 21,
- MX35_PAD_A6 = 22,
- MX35_PAD_A7 = 23,
- MX35_PAD_A8 = 24,
- MX35_PAD_A9 = 25,
- MX35_PAD_A10 = 26,
- MX35_PAD_MA10 = 27,
- MX35_PAD_A11 = 28,
- MX35_PAD_A12 = 29,
- MX35_PAD_A13 = 30,
- MX35_PAD_A14 = 31,
- MX35_PAD_A15 = 32,
- MX35_PAD_A16 = 33,
- MX35_PAD_A17 = 34,
- MX35_PAD_A18 = 35,
- MX35_PAD_A19 = 36,
- MX35_PAD_A20 = 37,
- MX35_PAD_A21 = 38,
- MX35_PAD_A22 = 39,
- MX35_PAD_A23 = 40,
- MX35_PAD_A24 = 41,
- MX35_PAD_A25 = 42,
- MX35_PAD_SDBA1 = 43,
- MX35_PAD_SDBA0 = 44,
- MX35_PAD_SD0 = 45,
- MX35_PAD_SD1 = 46,
- MX35_PAD_SD2 = 47,
- MX35_PAD_SD3 = 48,
- MX35_PAD_SD4 = 49,
- MX35_PAD_SD5 = 50,
- MX35_PAD_SD6 = 51,
- MX35_PAD_SD7 = 52,
- MX35_PAD_SD8 = 53,
- MX35_PAD_SD9 = 54,
- MX35_PAD_SD10 = 55,
- MX35_PAD_SD11 = 56,
- MX35_PAD_SD12 = 57,
- MX35_PAD_SD13 = 58,
- MX35_PAD_SD14 = 59,
- MX35_PAD_SD15 = 60,
- MX35_PAD_SD16 = 61,
- MX35_PAD_SD17 = 62,
- MX35_PAD_SD18 = 63,
- MX35_PAD_SD19 = 64,
- MX35_PAD_SD20 = 65,
- MX35_PAD_SD21 = 66,
- MX35_PAD_SD22 = 67,
- MX35_PAD_SD23 = 68,
- MX35_PAD_SD24 = 69,
- MX35_PAD_SD25 = 70,
- MX35_PAD_SD26 = 71,
- MX35_PAD_SD27 = 72,
- MX35_PAD_SD28 = 73,
- MX35_PAD_SD29 = 74,
- MX35_PAD_SD30 = 75,
- MX35_PAD_SD31 = 76,
- MX35_PAD_DQM0 = 77,
- MX35_PAD_DQM1 = 78,
- MX35_PAD_DQM2 = 79,
- MX35_PAD_DQM3 = 80,
- MX35_PAD_EB0 = 81,
- MX35_PAD_EB1 = 82,
- MX35_PAD_OE = 83,
- MX35_PAD_CS0 = 84,
- MX35_PAD_CS1 = 85,
- MX35_PAD_CS2 = 86,
- MX35_PAD_CS3 = 87,
- MX35_PAD_CS4 = 88,
- MX35_PAD_CS5 = 89,
- MX35_PAD_NF_CE0 = 90,
- MX35_PAD_ECB = 91,
- MX35_PAD_LBA = 92,
- MX35_PAD_BCLK = 93,
- MX35_PAD_RW = 94,
- MX35_PAD_RAS = 95,
- MX35_PAD_CAS = 96,
- MX35_PAD_SDWE = 97,
- MX35_PAD_SDCKE0 = 98,
- MX35_PAD_SDCKE1 = 99,
- MX35_PAD_SDCLK = 100,
- MX35_PAD_SDQS0 = 101,
- MX35_PAD_SDQS1 = 102,
- MX35_PAD_SDQS2 = 103,
- MX35_PAD_SDQS3 = 104,
- MX35_PAD_NFWE_B = 105,
- MX35_PAD_NFRE_B = 106,
- MX35_PAD_NFALE = 107,
- MX35_PAD_NFCLE = 108,
- MX35_PAD_NFWP_B = 109,
- MX35_PAD_NFRB = 110,
- MX35_PAD_D15 = 111,
- MX35_PAD_D14 = 112,
- MX35_PAD_D13 = 113,
- MX35_PAD_D12 = 114,
- MX35_PAD_D11 = 115,
- MX35_PAD_D10 = 116,
- MX35_PAD_D9 = 117,
- MX35_PAD_D8 = 118,
- MX35_PAD_D7 = 119,
- MX35_PAD_D6 = 120,
- MX35_PAD_D5 = 121,
- MX35_PAD_D4 = 122,
- MX35_PAD_D3 = 123,
- MX35_PAD_D2 = 124,
- MX35_PAD_D1 = 125,
- MX35_PAD_D0 = 126,
- MX35_PAD_CSI_D8 = 127,
- MX35_PAD_CSI_D9 = 128,
- MX35_PAD_CSI_D10 = 129,
- MX35_PAD_CSI_D11 = 130,
- MX35_PAD_CSI_D12 = 131,
- MX35_PAD_CSI_D13 = 132,
- MX35_PAD_CSI_D14 = 133,
- MX35_PAD_CSI_D15 = 134,
- MX35_PAD_CSI_MCLK = 135,
- MX35_PAD_CSI_VSYNC = 136,
- MX35_PAD_CSI_HSYNC = 137,
- MX35_PAD_CSI_PIXCLK = 138,
- MX35_PAD_I2C1_CLK = 139,
- MX35_PAD_I2C1_DAT = 140,
- MX35_PAD_I2C2_CLK = 141,
- MX35_PAD_I2C2_DAT = 142,
- MX35_PAD_STXD4 = 143,
- MX35_PAD_SRXD4 = 144,
- MX35_PAD_SCK4 = 145,
- MX35_PAD_STXFS4 = 146,
- MX35_PAD_STXD5 = 147,
- MX35_PAD_SRXD5 = 148,
- MX35_PAD_SCK5 = 149,
- MX35_PAD_STXFS5 = 150,
- MX35_PAD_SCKR = 151,
- MX35_PAD_FSR = 152,
- MX35_PAD_HCKR = 153,
- MX35_PAD_SCKT = 154,
- MX35_PAD_FST = 155,
- MX35_PAD_HCKT = 156,
- MX35_PAD_TX5_RX0 = 157,
- MX35_PAD_TX4_RX1 = 158,
- MX35_PAD_TX3_RX2 = 159,
- MX35_PAD_TX2_RX3 = 160,
- MX35_PAD_TX1 = 161,
- MX35_PAD_TX0 = 162,
- MX35_PAD_CSPI1_MOSI = 163,
- MX35_PAD_CSPI1_MISO = 164,
- MX35_PAD_CSPI1_SS0 = 165,
- MX35_PAD_CSPI1_SS1 = 166,
- MX35_PAD_CSPI1_SCLK = 167,
- MX35_PAD_CSPI1_SPI_RDY = 168,
- MX35_PAD_RXD1 = 169,
- MX35_PAD_TXD1 = 170,
- MX35_PAD_RTS1 = 171,
- MX35_PAD_CTS1 = 172,
- MX35_PAD_RXD2 = 173,
- MX35_PAD_TXD2 = 174,
- MX35_PAD_RTS2 = 175,
- MX35_PAD_CTS2 = 176,
- MX35_PAD_RTCK = 177,
- MX35_PAD_TCK = 178,
- MX35_PAD_TMS = 179,
- MX35_PAD_TDI = 180,
- MX35_PAD_TDO = 181,
- MX35_PAD_TRSTB = 182,
- MX35_PAD_DE_B = 183,
- MX35_PAD_SJC_MOD = 184,
- MX35_PAD_USBOTG_PWR = 185,
- MX35_PAD_USBOTG_OC = 186,
- MX35_PAD_LD0 = 187,
- MX35_PAD_LD1 = 188,
- MX35_PAD_LD2 = 189,
- MX35_PAD_LD3 = 190,
- MX35_PAD_LD4 = 191,
- MX35_PAD_LD5 = 192,
- MX35_PAD_LD6 = 193,
- MX35_PAD_LD7 = 194,
- MX35_PAD_LD8 = 195,
- MX35_PAD_LD9 = 196,
- MX35_PAD_LD10 = 197,
- MX35_PAD_LD11 = 198,
- MX35_PAD_LD12 = 199,
- MX35_PAD_LD13 = 200,
- MX35_PAD_LD14 = 201,
- MX35_PAD_LD15 = 202,
- MX35_PAD_LD16 = 203,
- MX35_PAD_LD17 = 204,
- MX35_PAD_LD18 = 205,
- MX35_PAD_LD19 = 206,
- MX35_PAD_LD20 = 207,
- MX35_PAD_LD21 = 208,
- MX35_PAD_LD22 = 209,
- MX35_PAD_LD23 = 210,
- MX35_PAD_D3_HSYNC = 211,
- MX35_PAD_D3_FPSHIFT = 212,
- MX35_PAD_D3_DRDY = 213,
- MX35_PAD_CONTRAST = 214,
- MX35_PAD_D3_VSYNC = 215,
- MX35_PAD_D3_REV = 216,
- MX35_PAD_D3_CLS = 217,
- MX35_PAD_D3_SPL = 218,
- MX35_PAD_SD1_CMD = 219,
- MX35_PAD_SD1_CLK = 220,
- MX35_PAD_SD1_DATA0 = 221,
- MX35_PAD_SD1_DATA1 = 222,
- MX35_PAD_SD1_DATA2 = 223,
- MX35_PAD_SD1_DATA3 = 224,
- MX35_PAD_SD2_CMD = 225,
- MX35_PAD_SD2_CLK = 226,
- MX35_PAD_SD2_DATA0 = 227,
- MX35_PAD_SD2_DATA1 = 228,
- MX35_PAD_SD2_DATA2 = 229,
- MX35_PAD_SD2_DATA3 = 230,
- MX35_PAD_ATA_CS0 = 231,
- MX35_PAD_ATA_CS1 = 232,
- MX35_PAD_ATA_DIOR = 233,
- MX35_PAD_ATA_DIOW = 234,
- MX35_PAD_ATA_DMACK = 235,
- MX35_PAD_ATA_RESET_B = 236,
- MX35_PAD_ATA_IORDY = 237,
- MX35_PAD_ATA_DATA0 = 238,
- MX35_PAD_ATA_DATA1 = 239,
- MX35_PAD_ATA_DATA2 = 240,
- MX35_PAD_ATA_DATA3 = 241,
- MX35_PAD_ATA_DATA4 = 242,
- MX35_PAD_ATA_DATA5 = 243,
- MX35_PAD_ATA_DATA6 = 244,
- MX35_PAD_ATA_DATA7 = 245,
- MX35_PAD_ATA_DATA8 = 246,
- MX35_PAD_ATA_DATA9 = 247,
- MX35_PAD_ATA_DATA10 = 248,
- MX35_PAD_ATA_DATA11 = 249,
- MX35_PAD_ATA_DATA12 = 250,
- MX35_PAD_ATA_DATA13 = 251,
- MX35_PAD_ATA_DATA14 = 252,
- MX35_PAD_ATA_DATA15 = 253,
- MX35_PAD_ATA_INTRQ = 254,
- MX35_PAD_ATA_BUFF_EN = 255,
- MX35_PAD_ATA_DMARQ = 256,
- MX35_PAD_ATA_DA0 = 257,
- MX35_PAD_ATA_DA1 = 258,
- MX35_PAD_ATA_DA2 = 259,
- MX35_PAD_MLB_CLK = 260,
- MX35_PAD_MLB_DAT = 261,
- MX35_PAD_MLB_SIG = 262,
- MX35_PAD_FEC_TX_CLK = 263,
- MX35_PAD_FEC_RX_CLK = 264,
- MX35_PAD_FEC_RX_DV = 265,
- MX35_PAD_FEC_COL = 266,
- MX35_PAD_FEC_RDATA0 = 267,
- MX35_PAD_FEC_TDATA0 = 268,
- MX35_PAD_FEC_TX_EN = 269,
- MX35_PAD_FEC_MDC = 270,
- MX35_PAD_FEC_MDIO = 271,
- MX35_PAD_FEC_TX_ERR = 272,
- MX35_PAD_FEC_RX_ERR = 273,
- MX35_PAD_FEC_CRS = 274,
- MX35_PAD_FEC_RDATA1 = 275,
- MX35_PAD_FEC_TDATA1 = 276,
- MX35_PAD_FEC_RDATA2 = 277,
- MX35_PAD_FEC_TDATA2 = 278,
- MX35_PAD_FEC_RDATA3 = 279,
- MX35_PAD_FEC_TDATA3 = 280,
- MX35_PAD_EXT_ARMCLK = 281,
- MX35_PAD_TEST_MODE = 282,
-};
-
-/* imx35 register maps */
-static struct imx_pin_reg imx35_pin_regs[] = {
- [0] = IMX_PIN_REG(MX35_PAD_CAPTURE, 0x328, 0x004, 0, 0x0, 0), /* MX35_PAD_CAPTURE__GPT_CAPIN1 */
- [1] = IMX_PIN_REG(MX35_PAD_CAPTURE, 0x328, 0x004, 1, 0x0, 0), /* MX35_PAD_CAPTURE__GPT_CMPOUT2 */
- [2] = IMX_PIN_REG(MX35_PAD_CAPTURE, 0x328, 0x004, 2, 0x7f4, 0), /* MX35_PAD_CAPTURE__CSPI2_SS1 */
- [3] = IMX_PIN_REG(MX35_PAD_CAPTURE, 0x328, 0x004, 3, 0x0, 0), /* MX35_PAD_CAPTURE__EPIT1_EPITO */
- [4] = IMX_PIN_REG(MX35_PAD_CAPTURE, 0x328, 0x004, 4, 0x7d0, 0), /* MX35_PAD_CAPTURE__CCM_CLK32K */
- [5] = IMX_PIN_REG(MX35_PAD_CAPTURE, 0x328, 0x004, 5, 0x850, 0), /* MX35_PAD_CAPTURE__GPIO1_4 */
- [6] = IMX_PIN_REG(MX35_PAD_COMPARE, 0x32c, 0x008, 0, 0x0, 0), /* MX35_PAD_COMPARE__GPT_CMPOUT1 */
- [7] = IMX_PIN_REG(MX35_PAD_COMPARE, 0x32c, 0x008, 1, 0x0, 0), /* MX35_PAD_COMPARE__GPT_CAPIN2 */
- [8] = IMX_PIN_REG(MX35_PAD_COMPARE, 0x32c, 0x008, 2, 0x0, 0), /* MX35_PAD_COMPARE__GPT_CMPOUT3 */
- [9] = IMX_PIN_REG(MX35_PAD_COMPARE, 0x32c, 0x008, 3, 0x0, 0), /* MX35_PAD_COMPARE__EPIT2_EPITO */
- [10] = IMX_PIN_REG(MX35_PAD_COMPARE, 0x32c, 0x008, 5, 0x854, 0), /* MX35_PAD_COMPARE__GPIO1_5 */
- [11] = IMX_PIN_REG(MX35_PAD_COMPARE, 0x32c, 0x008, 7, 0x0, 0), /* MX35_PAD_COMPARE__SDMA_EXTDMA_2 */
- [12] = IMX_PIN_REG(MX35_PAD_WDOG_RST, 0x330, 0x00c, 0, 0x0, 0), /* MX35_PAD_WDOG_RST__WDOG_WDOG_B */
- [13] = IMX_PIN_REG(MX35_PAD_WDOG_RST, 0x330, 0x00c, 3, 0x0, 0), /* MX35_PAD_WDOG_RST__IPU_FLASH_STROBE */
- [14] = IMX_PIN_REG(MX35_PAD_WDOG_RST, 0x330, 0x00c, 5, 0x858, 0), /* MX35_PAD_WDOG_RST__GPIO1_6 */
- [15] = IMX_PIN_REG(MX35_PAD_GPIO1_0, 0x334, 0x010, 0, 0x82c, 0), /* MX35_PAD_GPIO1_0__GPIO1_0 */
- [16] = IMX_PIN_REG(MX35_PAD_GPIO1_0, 0x334, 0x010, 1, 0x7d4, 0), /* MX35_PAD_GPIO1_0__CCM_PMIC_RDY */
- [17] = IMX_PIN_REG(MX35_PAD_GPIO1_0, 0x334, 0x010, 2, 0x990, 0), /* MX35_PAD_GPIO1_0__OWIRE_LINE */
- [18] = IMX_PIN_REG(MX35_PAD_GPIO1_0, 0x334, 0x010, 7, 0x0, 0), /* MX35_PAD_GPIO1_0__SDMA_EXTDMA_0 */
- [19] = IMX_PIN_REG(MX35_PAD_GPIO1_1, 0x338, 0x014, 0, 0x838, 0), /* MX35_PAD_GPIO1_1__GPIO1_1 */
- [20] = IMX_PIN_REG(MX35_PAD_GPIO1_1, 0x338, 0x014, 2, 0x0, 0), /* MX35_PAD_GPIO1_1__PWM_PWMO */
- [21] = IMX_PIN_REG(MX35_PAD_GPIO1_1, 0x338, 0x014, 3, 0x7d8, 0), /* MX35_PAD_GPIO1_1__CSPI1_SS2 */
- [22] = IMX_PIN_REG(MX35_PAD_GPIO1_1, 0x338, 0x014, 6, 0x0, 0), /* MX35_PAD_GPIO1_1__SCC_TAMPER_DETECT */
- [23] = IMX_PIN_REG(MX35_PAD_GPIO1_1, 0x338, 0x014, 7, 0x0, 0), /* MX35_PAD_GPIO1_1__SDMA_EXTDMA_1 */
- [24] = IMX_PIN_REG(MX35_PAD_GPIO2_0, 0x33c, 0x018, 0, 0x868, 0), /* MX35_PAD_GPIO2_0__GPIO2_0 */
- [25] = IMX_PIN_REG(MX35_PAD_GPIO2_0, 0x33c, 0x018, 1, 0x0, 0), /* MX35_PAD_GPIO2_0__USB_TOP_USBOTG_CLK */
- [26] = IMX_PIN_REG(MX35_PAD_GPIO3_0, 0x340, 0x01c, 0, 0x8e8, 0), /* MX35_PAD_GPIO3_0__GPIO3_0 */
- [27] = IMX_PIN_REG(MX35_PAD_GPIO3_0, 0x340, 0x01c, 1, 0x0, 0), /* MX35_PAD_GPIO3_0__USB_TOP_USBH2_CLK */
- [28] = IMX_PIN_REG(MX35_PAD_RESET_IN_B, 0x344, 0x0, 0, 0x0, 0), /* MX35_PAD_RESET_IN_B__CCM_RESET_IN_B */
- [29] = IMX_PIN_REG(MX35_PAD_POR_B, 0x348, 0x0, 0, 0x0, 0), /* MX35_PAD_POR_B__CCM_POR_B */
- [30] = IMX_PIN_REG(MX35_PAD_CLKO, 0x34c, 0x020, 0, 0x0, 0), /* MX35_PAD_CLKO__CCM_CLKO */
- [31] = IMX_PIN_REG(MX35_PAD_CLKO, 0x34c, 0x020, 5, 0x860, 0), /* MX35_PAD_CLKO__GPIO1_8 */
- [32] = IMX_PIN_REG(MX35_PAD_BOOT_MODE0, 0x350, 0x0, 0, 0x0, 0), /* MX35_PAD_BOOT_MODE0__CCM_BOOT_MODE_0 */
- [33] = IMX_PIN_REG(MX35_PAD_BOOT_MODE1, 0x354, 0x0, 0, 0x0, 0), /* MX35_PAD_BOOT_MODE1__CCM_BOOT_MODE_1 */
- [34] = IMX_PIN_REG(MX35_PAD_CLK_MODE0, 0x358, 0x0, 0, 0x0, 0), /* MX35_PAD_CLK_MODE0__CCM_CLK_MODE_0 */
- [35] = IMX_PIN_REG(MX35_PAD_CLK_MODE1, 0x35c, 0x0, 0, 0x0, 0), /* MX35_PAD_CLK_MODE1__CCM_CLK_MODE_1 */
- [36] = IMX_PIN_REG(MX35_PAD_POWER_FAIL, 0x360, 0x0, 0, 0x0, 0), /* MX35_PAD_POWER_FAIL__CCM_DSM_WAKEUP_INT_26 */
- [37] = IMX_PIN_REG(MX35_PAD_VSTBY, 0x364, 0x024, 0, 0x0, 0), /* MX35_PAD_VSTBY__CCM_VSTBY */
- [38] = IMX_PIN_REG(MX35_PAD_VSTBY, 0x364, 0x024, 5, 0x85c, 0), /* MX35_PAD_VSTBY__GPIO1_7 */
- [39] = IMX_PIN_REG(MX35_PAD_A0, 0x368, 0x028, 0, 0x0, 0), /* MX35_PAD_A0__EMI_EIM_DA_L_0 */
- [40] = IMX_PIN_REG(MX35_PAD_A1, 0x36c, 0x02c, 0, 0x0, 0), /* MX35_PAD_A1__EMI_EIM_DA_L_1 */
- [41] = IMX_PIN_REG(MX35_PAD_A2, 0x370, 0x030, 0, 0x0, 0), /* MX35_PAD_A2__EMI_EIM_DA_L_2 */
- [42] = IMX_PIN_REG(MX35_PAD_A3, 0x374, 0x034, 0, 0x0, 0), /* MX35_PAD_A3__EMI_EIM_DA_L_3 */
- [43] = IMX_PIN_REG(MX35_PAD_A4, 0x378, 0x038, 0, 0x0, 0), /* MX35_PAD_A4__EMI_EIM_DA_L_4 */
- [44] = IMX_PIN_REG(MX35_PAD_A5, 0x37c, 0x03c, 0, 0x0, 0), /* MX35_PAD_A5__EMI_EIM_DA_L_5 */
- [45] = IMX_PIN_REG(MX35_PAD_A6, 0x380, 0x040, 0, 0x0, 0), /* MX35_PAD_A6__EMI_EIM_DA_L_6 */
- [46] = IMX_PIN_REG(MX35_PAD_A7, 0x384, 0x044, 0, 0x0, 0), /* MX35_PAD_A7__EMI_EIM_DA_L_7 */
- [47] = IMX_PIN_REG(MX35_PAD_A8, 0x388, 0x048, 0, 0x0, 0), /* MX35_PAD_A8__EMI_EIM_DA_H_8 */
- [48] = IMX_PIN_REG(MX35_PAD_A9, 0x38c, 0x04c, 0, 0x0, 0), /* MX35_PAD_A9__EMI_EIM_DA_H_9 */
- [49] = IMX_PIN_REG(MX35_PAD_A10, 0x390, 0x050, 0, 0x0, 0), /* MX35_PAD_A10__EMI_EIM_DA_H_10 */
- [50] = IMX_PIN_REG(MX35_PAD_MA10, 0x394, 0x054, 0, 0x0, 0), /* MX35_PAD_MA10__EMI_MA10 */
- [51] = IMX_PIN_REG(MX35_PAD_A11, 0x398, 0x058, 0, 0x0, 0), /* MX35_PAD_A11__EMI_EIM_DA_H_11 */
- [52] = IMX_PIN_REG(MX35_PAD_A12, 0x39c, 0x05c, 0, 0x0, 0), /* MX35_PAD_A12__EMI_EIM_DA_H_12 */
- [53] = IMX_PIN_REG(MX35_PAD_A13, 0x3a0, 0x060, 0, 0x0, 0), /* MX35_PAD_A13__EMI_EIM_DA_H_13 */
- [54] = IMX_PIN_REG(MX35_PAD_A14, 0x3a4, 0x064, 0, 0x0, 0), /* MX35_PAD_A14__EMI_EIM_DA_H2_14 */
- [55] = IMX_PIN_REG(MX35_PAD_A15, 0x3a8, 0x068, 0, 0x0, 0), /* MX35_PAD_A15__EMI_EIM_DA_H2_15 */
- [56] = IMX_PIN_REG(MX35_PAD_A16, 0x3ac, 0x06c, 0, 0x0, 0), /* MX35_PAD_A16__EMI_EIM_A_16 */
- [57] = IMX_PIN_REG(MX35_PAD_A17, 0x3b0, 0x070, 0, 0x0, 0), /* MX35_PAD_A17__EMI_EIM_A_17 */
- [58] = IMX_PIN_REG(MX35_PAD_A18, 0x3b4, 0x074, 0, 0x0, 0), /* MX35_PAD_A18__EMI_EIM_A_18 */
- [59] = IMX_PIN_REG(MX35_PAD_A19, 0x3b8, 0x078, 0, 0x0, 0), /* MX35_PAD_A19__EMI_EIM_A_19 */
- [60] = IMX_PIN_REG(MX35_PAD_A20, 0x3bc, 0x07c, 0, 0x0, 0), /* MX35_PAD_A20__EMI_EIM_A_20 */
- [61] = IMX_PIN_REG(MX35_PAD_A21, 0x3c0, 0x080, 0, 0x0, 0), /* MX35_PAD_A21__EMI_EIM_A_21 */
- [62] = IMX_PIN_REG(MX35_PAD_A22, 0x3c4, 0x084, 0, 0x0, 0), /* MX35_PAD_A22__EMI_EIM_A_22 */
- [63] = IMX_PIN_REG(MX35_PAD_A23, 0x3c8, 0x088, 0, 0x0, 0), /* MX35_PAD_A23__EMI_EIM_A_23 */
- [64] = IMX_PIN_REG(MX35_PAD_A24, 0x3cc, 0x08c, 0, 0x0, 0), /* MX35_PAD_A24__EMI_EIM_A_24 */
- [65] = IMX_PIN_REG(MX35_PAD_A25, 0x3d0, 0x090, 0, 0x0, 0), /* MX35_PAD_A25__EMI_EIM_A_25 */
- [66] = IMX_PIN_REG(MX35_PAD_SDBA1, 0x3d4, 0x0, 0, 0x0, 0), /* MX35_PAD_SDBA1__EMI_EIM_SDBA1 */
- [67] = IMX_PIN_REG(MX35_PAD_SDBA0, 0x3d8, 0x0, 0, 0x0, 0), /* MX35_PAD_SDBA0__EMI_EIM_SDBA0 */
- [68] = IMX_PIN_REG(MX35_PAD_SD0, 0x3dc, 0x0, 0, 0x0, 0), /* MX35_PAD_SD0__EMI_DRAM_D_0 */
- [69] = IMX_PIN_REG(MX35_PAD_SD1, 0x3e0, 0x0, 0, 0x0, 0), /* MX35_PAD_SD1__EMI_DRAM_D_1 */
- [70] = IMX_PIN_REG(MX35_PAD_SD2, 0x3e4, 0x0, 0, 0x0, 0), /* MX35_PAD_SD2__EMI_DRAM_D_2 */
- [71] = IMX_PIN_REG(MX35_PAD_SD3, 0x3e8, 0x0, 0, 0x0, 0), /* MX35_PAD_SD3__EMI_DRAM_D_3 */
- [72] = IMX_PIN_REG(MX35_PAD_SD4, 0x3ec, 0x0, 0, 0x0, 0), /* MX35_PAD_SD4__EMI_DRAM_D_4 */
- [73] = IMX_PIN_REG(MX35_PAD_SD5, 0x3f0, 0x0, 0, 0x0, 0), /* MX35_PAD_SD5__EMI_DRAM_D_5 */
- [74] = IMX_PIN_REG(MX35_PAD_SD6, 0x3f4, 0x0, 0, 0x0, 0), /* MX35_PAD_SD6__EMI_DRAM_D_6 */
- [75] = IMX_PIN_REG(MX35_PAD_SD7, 0x3f8, 0x0, 0, 0x0, 0), /* MX35_PAD_SD7__EMI_DRAM_D_7 */
- [76] = IMX_PIN_REG(MX35_PAD_SD8, 0x3fc, 0x0, 0, 0x0, 0), /* MX35_PAD_SD8__EMI_DRAM_D_8 */
- [77] = IMX_PIN_REG(MX35_PAD_SD9, 0x400, 0x0, 0, 0x0, 0), /* MX35_PAD_SD9__EMI_DRAM_D_9 */
- [78] = IMX_PIN_REG(MX35_PAD_SD10, 0x404, 0x0, 0, 0x0, 0), /* MX35_PAD_SD10__EMI_DRAM_D_10 */
- [79] = IMX_PIN_REG(MX35_PAD_SD11, 0x408, 0x0, 0, 0x0, 0), /* MX35_PAD_SD11__EMI_DRAM_D_11 */
- [80] = IMX_PIN_REG(MX35_PAD_SD12, 0x40c, 0x0, 0, 0x0, 0), /* MX35_PAD_SD12__EMI_DRAM_D_12 */
- [81] = IMX_PIN_REG(MX35_PAD_SD13, 0x410, 0x0, 0, 0x0, 0), /* MX35_PAD_SD13__EMI_DRAM_D_13 */
- [82] = IMX_PIN_REG(MX35_PAD_SD14, 0x414, 0x0, 0, 0x0, 0), /* MX35_PAD_SD14__EMI_DRAM_D_14 */
- [83] = IMX_PIN_REG(MX35_PAD_SD15, 0x418, 0x0, 0, 0x0, 0), /* MX35_PAD_SD15__EMI_DRAM_D_15 */
- [84] = IMX_PIN_REG(MX35_PAD_SD16, 0x41c, 0x0, 0, 0x0, 0), /* MX35_PAD_SD16__EMI_DRAM_D_16 */
- [85] = IMX_PIN_REG(MX35_PAD_SD17, 0x420, 0x0, 0, 0x0, 0), /* MX35_PAD_SD17__EMI_DRAM_D_17 */
- [86] = IMX_PIN_REG(MX35_PAD_SD18, 0x424, 0x0, 0, 0x0, 0), /* MX35_PAD_SD18__EMI_DRAM_D_18 */
- [87] = IMX_PIN_REG(MX35_PAD_SD19, 0x428, 0x0, 0, 0x0, 0), /* MX35_PAD_SD19__EMI_DRAM_D_19 */
- [88] = IMX_PIN_REG(MX35_PAD_SD20, 0x42c, 0x0, 0, 0x0, 0), /* MX35_PAD_SD20__EMI_DRAM_D_20 */
- [89] = IMX_PIN_REG(MX35_PAD_SD21, 0x430, 0x0, 0, 0x0, 0), /* MX35_PAD_SD21__EMI_DRAM_D_21 */
- [90] = IMX_PIN_REG(MX35_PAD_SD22, 0x434, 0x0, 0, 0x0, 0), /* MX35_PAD_SD22__EMI_DRAM_D_22 */
- [91] = IMX_PIN_REG(MX35_PAD_SD23, 0x438, 0x0, 0, 0x0, 0), /* MX35_PAD_SD23__EMI_DRAM_D_23 */
- [92] = IMX_PIN_REG(MX35_PAD_SD24, 0x43c, 0x0, 0, 0x0, 0), /* MX35_PAD_SD24__EMI_DRAM_D_24 */
- [93] = IMX_PIN_REG(MX35_PAD_SD25, 0x440, 0x0, 0, 0x0, 0), /* MX35_PAD_SD25__EMI_DRAM_D_25 */
- [94] = IMX_PIN_REG(MX35_PAD_SD26, 0x444, 0x0, 0, 0x0, 0), /* MX35_PAD_SD26__EMI_DRAM_D_26 */
- [95] = IMX_PIN_REG(MX35_PAD_SD27, 0x448, 0x0, 0, 0x0, 0), /* MX35_PAD_SD27__EMI_DRAM_D_27 */
- [96] = IMX_PIN_REG(MX35_PAD_SD28, 0x44c, 0x0, 0, 0x0, 0), /* MX35_PAD_SD28__EMI_DRAM_D_28 */
- [97] = IMX_PIN_REG(MX35_PAD_SD29, 0x450, 0x0, 0, 0x0, 0), /* MX35_PAD_SD29__EMI_DRAM_D_29 */
- [98] = IMX_PIN_REG(MX35_PAD_SD30, 0x454, 0x0, 0, 0x0, 0), /* MX35_PAD_SD30__EMI_DRAM_D_30 */
- [99] = IMX_PIN_REG(MX35_PAD_SD31, 0x458, 0x0, 0, 0x0, 0), /* MX35_PAD_SD31__EMI_DRAM_D_31 */
- [100] = IMX_PIN_REG(MX35_PAD_DQM0, 0x45c, 0x0, 0, 0x0, 0), /* MX35_PAD_DQM0__EMI_DRAM_DQM_0 */
- [101] = IMX_PIN_REG(MX35_PAD_DQM1, 0x460, 0x0, 0, 0x0, 0), /* MX35_PAD_DQM1__EMI_DRAM_DQM_1 */
- [102] = IMX_PIN_REG(MX35_PAD_DQM2, 0x464, 0x0, 0, 0x0, 0), /* MX35_PAD_DQM2__EMI_DRAM_DQM_2 */
- [103] = IMX_PIN_REG(MX35_PAD_DQM3, 0x468, 0x0, 0, 0x0, 0), /* MX35_PAD_DQM3__EMI_DRAM_DQM_3 */
- [104] = IMX_PIN_REG(MX35_PAD_EB0, 0x46c, 0x094, 0, 0x0, 0), /* MX35_PAD_EB0__EMI_EIM_EB0_B */
- [105] = IMX_PIN_REG(MX35_PAD_EB1, 0x470, 0x098, 0, 0x0, 0), /* MX35_PAD_EB1__EMI_EIM_EB1_B */
- [106] = IMX_PIN_REG(MX35_PAD_OE, 0x474, 0x09c, 0, 0x0, 0), /* MX35_PAD_OE__EMI_EIM_OE */
- [107] = IMX_PIN_REG(MX35_PAD_CS0, 0x478, 0x0a0, 0, 0x0, 0), /* MX35_PAD_CS0__EMI_EIM_CS0 */
- [108] = IMX_PIN_REG(MX35_PAD_CS1, 0x47c, 0x0a4, 0, 0x0, 0), /* MX35_PAD_CS1__EMI_EIM_CS1 */
- [109] = IMX_PIN_REG(MX35_PAD_CS1, 0x47c, 0x0a4, 3, 0x0, 0), /* MX35_PAD_CS1__EMI_NANDF_CE3 */
- [110] = IMX_PIN_REG(MX35_PAD_CS2, 0x480, 0x0a8, 0, 0x0, 0), /* MX35_PAD_CS2__EMI_EIM_CS2 */
- [111] = IMX_PIN_REG(MX35_PAD_CS3, 0x484, 0x0ac, 0, 0x0, 0), /* MX35_PAD_CS3__EMI_EIM_CS3 */
- [112] = IMX_PIN_REG(MX35_PAD_CS4, 0x488, 0x0b0, 0, 0x0, 0), /* MX35_PAD_CS4__EMI_EIM_CS4 */
- [113] = IMX_PIN_REG(MX35_PAD_CS4, 0x488, 0x0b0, 1, 0x800, 0), /* MX35_PAD_CS4__EMI_DTACK_B */
- [114] = IMX_PIN_REG(MX35_PAD_CS4, 0x488, 0x0b0, 3, 0x0, 0), /* MX35_PAD_CS4__EMI_NANDF_CE1 */
- [115] = IMX_PIN_REG(MX35_PAD_CS4, 0x488, 0x0b0, 5, 0x83c, 0), /* MX35_PAD_CS4__GPIO1_20 */
- [116] = IMX_PIN_REG(MX35_PAD_CS5, 0x48c, 0x0b4, 0, 0x0, 0), /* MX35_PAD_CS5__EMI_EIM_CS5 */
- [117] = IMX_PIN_REG(MX35_PAD_CS5, 0x48c, 0x0b4, 1, 0x7f8, 0), /* MX35_PAD_CS5__CSPI2_SS2 */
- [118] = IMX_PIN_REG(MX35_PAD_CS5, 0x48c, 0x0b4, 2, 0x7d8, 1), /* MX35_PAD_CS5__CSPI1_SS2 */
- [119] = IMX_PIN_REG(MX35_PAD_CS5, 0x48c, 0x0b4, 3, 0x0, 0), /* MX35_PAD_CS5__EMI_NANDF_CE2 */
- [120] = IMX_PIN_REG(MX35_PAD_CS5, 0x48c, 0x0b4, 5, 0x840, 0), /* MX35_PAD_CS5__GPIO1_21 */
- [121] = IMX_PIN_REG(MX35_PAD_NF_CE0, 0x490, 0x0b8, 0, 0x0, 0), /* MX35_PAD_NF_CE0__EMI_NANDF_CE0 */
- [122] = IMX_PIN_REG(MX35_PAD_NF_CE0, 0x490, 0x0b8, 5, 0x844, 0), /* MX35_PAD_NF_CE0__GPIO1_22 */
- [123] = IMX_PIN_REG(MX35_PAD_ECB, 0x494, 0x0, 0, 0x0, 0), /* MX35_PAD_ECB__EMI_EIM_ECB */
- [124] = IMX_PIN_REG(MX35_PAD_LBA, 0x498, 0x0bc, 0, 0x0, 0), /* MX35_PAD_LBA__EMI_EIM_LBA */
- [125] = IMX_PIN_REG(MX35_PAD_BCLK, 0x49c, 0x0c0, 0, 0x0, 0), /* MX35_PAD_BCLK__EMI_EIM_BCLK */
- [126] = IMX_PIN_REG(MX35_PAD_RW, 0x4a0, 0x0c4, 0, 0x0, 0), /* MX35_PAD_RW__EMI_EIM_RW */
- [127] = IMX_PIN_REG(MX35_PAD_RAS, 0x4a4, 0x0, 0, 0x0, 0), /* MX35_PAD_RAS__EMI_DRAM_RAS */
- [128] = IMX_PIN_REG(MX35_PAD_CAS, 0x4a8, 0x0, 0, 0x0, 0), /* MX35_PAD_CAS__EMI_DRAM_CAS */
- [129] = IMX_PIN_REG(MX35_PAD_SDWE, 0x4ac, 0x0, 0, 0x0, 0), /* MX35_PAD_SDWE__EMI_DRAM_SDWE */
- [130] = IMX_PIN_REG(MX35_PAD_SDCKE0, 0x4b0, 0x0, 0, 0x0, 0), /* MX35_PAD_SDCKE0__EMI_DRAM_SDCKE_0 */
- [131] = IMX_PIN_REG(MX35_PAD_SDCKE1, 0x4b4, 0x0, 0, 0x0, 0), /* MX35_PAD_SDCKE1__EMI_DRAM_SDCKE_1 */
- [132] = IMX_PIN_REG(MX35_PAD_SDCLK, 0x4b8, 0x0, 0, 0x0, 0), /* MX35_PAD_SDCLK__EMI_DRAM_SDCLK */
- [133] = IMX_PIN_REG(MX35_PAD_SDQS0, 0x4bc, 0x0, 0, 0x0, 0), /* MX35_PAD_SDQS0__EMI_DRAM_SDQS_0 */
- [134] = IMX_PIN_REG(MX35_PAD_SDQS1, 0x4c0, 0x0, 0, 0x0, 0), /* MX35_PAD_SDQS1__EMI_DRAM_SDQS_1 */
- [135] = IMX_PIN_REG(MX35_PAD_SDQS2, 0x4c4, 0x0, 0, 0x0, 0), /* MX35_PAD_SDQS2__EMI_DRAM_SDQS_2 */
- [136] = IMX_PIN_REG(MX35_PAD_SDQS3, 0x4c8, 0x0, 0, 0x0, 0), /* MX35_PAD_SDQS3__EMI_DRAM_SDQS_3 */
- [137] = IMX_PIN_REG(MX35_PAD_NFWE_B, 0x4cc, 0x0c8, 0, 0x0, 0), /* MX35_PAD_NFWE_B__EMI_NANDF_WE_B */
- [138] = IMX_PIN_REG(MX35_PAD_NFWE_B, 0x4cc, 0x0c8, 1, 0x9d8, 0), /* MX35_PAD_NFWE_B__USB_TOP_USBH2_DATA_3 */
- [139] = IMX_PIN_REG(MX35_PAD_NFWE_B, 0x4cc, 0x0c8, 2, 0x924, 0), /* MX35_PAD_NFWE_B__IPU_DISPB_D0_VSYNC */
- [140] = IMX_PIN_REG(MX35_PAD_NFWE_B, 0x4cc, 0x0c8, 5, 0x88c, 0), /* MX35_PAD_NFWE_B__GPIO2_18 */
- [141] = IMX_PIN_REG(MX35_PAD_NFWE_B, 0x4cc, 0x0c8, 7, 0x0, 0), /* MX35_PAD_NFWE_B__ARM11P_TOP_TRACE_0 */
- [142] = IMX_PIN_REG(MX35_PAD_NFRE_B, 0x4d0, 0x0cc, 0, 0x0, 0), /* MX35_PAD_NFRE_B__EMI_NANDF_RE_B */
- [143] = IMX_PIN_REG(MX35_PAD_NFRE_B, 0x4d0, 0x0cc, 1, 0x9ec, 0), /* MX35_PAD_NFRE_B__USB_TOP_USBH2_DIR */
- [144] = IMX_PIN_REG(MX35_PAD_NFRE_B, 0x4d0, 0x0cc, 2, 0x0, 0), /* MX35_PAD_NFRE_B__IPU_DISPB_BCLK */
- [145] = IMX_PIN_REG(MX35_PAD_NFRE_B, 0x4d0, 0x0cc, 5, 0x890, 0), /* MX35_PAD_NFRE_B__GPIO2_19 */
- [146] = IMX_PIN_REG(MX35_PAD_NFRE_B, 0x4d0, 0x0cc, 7, 0x0, 0), /* MX35_PAD_NFRE_B__ARM11P_TOP_TRACE_1 */
- [147] = IMX_PIN_REG(MX35_PAD_NFALE, 0x4d4, 0x0d0, 0, 0x0, 0), /* MX35_PAD_NFALE__EMI_NANDF_ALE */
- [148] = IMX_PIN_REG(MX35_PAD_NFALE, 0x4d4, 0x0d0, 1, 0x0, 0), /* MX35_PAD_NFALE__USB_TOP_USBH2_STP */
- [149] = IMX_PIN_REG(MX35_PAD_NFALE, 0x4d4, 0x0d0, 2, 0x0, 0), /* MX35_PAD_NFALE__IPU_DISPB_CS0 */
- [150] = IMX_PIN_REG(MX35_PAD_NFALE, 0x4d4, 0x0d0, 5, 0x898, 0), /* MX35_PAD_NFALE__GPIO2_20 */
- [151] = IMX_PIN_REG(MX35_PAD_NFALE, 0x4d4, 0x0d0, 7, 0x0, 0), /* MX35_PAD_NFALE__ARM11P_TOP_TRACE_2 */
- [152] = IMX_PIN_REG(MX35_PAD_NFCLE, 0x4d8, 0x0d4, 0, 0x0, 0), /* MX35_PAD_NFCLE__EMI_NANDF_CLE */
- [153] = IMX_PIN_REG(MX35_PAD_NFCLE, 0x4d8, 0x0d4, 1, 0x9f0, 0), /* MX35_PAD_NFCLE__USB_TOP_USBH2_NXT */
- [154] = IMX_PIN_REG(MX35_PAD_NFCLE, 0x4d8, 0x0d4, 2, 0x0, 0), /* MX35_PAD_NFCLE__IPU_DISPB_PAR_RS */
- [155] = IMX_PIN_REG(MX35_PAD_NFCLE, 0x4d8, 0x0d4, 5, 0x89c, 0), /* MX35_PAD_NFCLE__GPIO2_21 */
- [156] = IMX_PIN_REG(MX35_PAD_NFCLE, 0x4d8, 0x0d4, 7, 0x0, 0), /* MX35_PAD_NFCLE__ARM11P_TOP_TRACE_3 */
- [157] = IMX_PIN_REG(MX35_PAD_NFWP_B, 0x4dc, 0x0d8, 0, 0x0, 0), /* MX35_PAD_NFWP_B__EMI_NANDF_WP_B */
- [158] = IMX_PIN_REG(MX35_PAD_NFWP_B, 0x4dc, 0x0d8, 1, 0x9e8, 0), /* MX35_PAD_NFWP_B__USB_TOP_USBH2_DATA_7 */
- [159] = IMX_PIN_REG(MX35_PAD_NFWP_B, 0x4dc, 0x0d8, 2, 0x0, 0), /* MX35_PAD_NFWP_B__IPU_DISPB_WR */
- [160] = IMX_PIN_REG(MX35_PAD_NFWP_B, 0x4dc, 0x0d8, 5, 0x8a0, 0), /* MX35_PAD_NFWP_B__GPIO2_22 */
- [161] = IMX_PIN_REG(MX35_PAD_NFWP_B, 0x4dc, 0x0d8, 7, 0x0, 0), /* MX35_PAD_NFWP_B__ARM11P_TOP_TRCTL */
- [162] = IMX_PIN_REG(MX35_PAD_NFRB, 0x4e0, 0x0dc, 0, 0x0, 0), /* MX35_PAD_NFRB__EMI_NANDF_RB */
- [163] = IMX_PIN_REG(MX35_PAD_NFRB, 0x4e0, 0x0dc, 2, 0x0, 0), /* MX35_PAD_NFRB__IPU_DISPB_RD */
- [164] = IMX_PIN_REG(MX35_PAD_NFRB, 0x4e0, 0x0dc, 5, 0x8a4, 0), /* MX35_PAD_NFRB__GPIO2_23 */
- [165] = IMX_PIN_REG(MX35_PAD_NFRB, 0x4e0, 0x0dc, 7, 0x0, 0), /* MX35_PAD_NFRB__ARM11P_TOP_TRCLK */
- [166] = IMX_PIN_REG(MX35_PAD_D15, 0x4e4, 0x0, 0, 0x0, 0), /* MX35_PAD_D15__EMI_EIM_D_15 */
- [167] = IMX_PIN_REG(MX35_PAD_D14, 0x4e8, 0x0, 0, 0x0, 0), /* MX35_PAD_D14__EMI_EIM_D_14 */
- [168] = IMX_PIN_REG(MX35_PAD_D13, 0x4ec, 0x0, 0, 0x0, 0), /* MX35_PAD_D13__EMI_EIM_D_13 */
- [169] = IMX_PIN_REG(MX35_PAD_D12, 0x4f0, 0x0, 0, 0x0, 0), /* MX35_PAD_D12__EMI_EIM_D_12 */
- [170] = IMX_PIN_REG(MX35_PAD_D11, 0x4f4, 0x0, 0, 0x0, 0), /* MX35_PAD_D11__EMI_EIM_D_11 */
- [171] = IMX_PIN_REG(MX35_PAD_D10, 0x4f8, 0x0, 0, 0x0, 0), /* MX35_PAD_D10__EMI_EIM_D_10 */
- [172] = IMX_PIN_REG(MX35_PAD_D9, 0x4fc, 0x0, 0, 0x0, 0), /* MX35_PAD_D9__EMI_EIM_D_9 */
- [173] = IMX_PIN_REG(MX35_PAD_D8, 0x500, 0x0, 0, 0x0, 0), /* MX35_PAD_D8__EMI_EIM_D_8 */
- [174] = IMX_PIN_REG(MX35_PAD_D7, 0x504, 0x0, 0, 0x0, 0), /* MX35_PAD_D7__EMI_EIM_D_7 */
- [175] = IMX_PIN_REG(MX35_PAD_D6, 0x508, 0x0, 0, 0x0, 0), /* MX35_PAD_D6__EMI_EIM_D_6 */
- [176] = IMX_PIN_REG(MX35_PAD_D5, 0x50c, 0x0, 0, 0x0, 0), /* MX35_PAD_D5__EMI_EIM_D_5 */
- [177] = IMX_PIN_REG(MX35_PAD_D4, 0x510, 0x0, 0, 0x0, 0), /* MX35_PAD_D4__EMI_EIM_D_4 */
- [178] = IMX_PIN_REG(MX35_PAD_D3, 0x514, 0x0, 0, 0x0, 0), /* MX35_PAD_D3__EMI_EIM_D_3 */
- [179] = IMX_PIN_REG(MX35_PAD_D2, 0x518, 0x0, 0, 0x0, 0), /* MX35_PAD_D2__EMI_EIM_D_2 */
- [180] = IMX_PIN_REG(MX35_PAD_D1, 0x51c, 0x0, 0, 0x0, 0), /* MX35_PAD_D1__EMI_EIM_D_1 */
- [181] = IMX_PIN_REG(MX35_PAD_D0, 0x520, 0x0, 0, 0x0, 0), /* MX35_PAD_D0__EMI_EIM_D_0 */
- [182] = IMX_PIN_REG(MX35_PAD_CSI_D8, 0x524, 0x0e0, 0, 0x0, 0), /* MX35_PAD_CSI_D8__IPU_CSI_D_8 */
- [183] = IMX_PIN_REG(MX35_PAD_CSI_D8, 0x524, 0x0e0, 1, 0x950, 0), /* MX35_PAD_CSI_D8__KPP_COL_0 */
- [184] = IMX_PIN_REG(MX35_PAD_CSI_D8, 0x524, 0x0e0, 5, 0x83c, 1), /* MX35_PAD_CSI_D8__GPIO1_20 */
- [185] = IMX_PIN_REG(MX35_PAD_CSI_D8, 0x524, 0x0e0, 7, 0x0, 0), /* MX35_PAD_CSI_D8__ARM11P_TOP_EVNTBUS_13 */
- [186] = IMX_PIN_REG(MX35_PAD_CSI_D9, 0x528, 0x0e4, 0, 0x0, 0), /* MX35_PAD_CSI_D9__IPU_CSI_D_9 */
- [187] = IMX_PIN_REG(MX35_PAD_CSI_D9, 0x528, 0x0e4, 1, 0x954, 0), /* MX35_PAD_CSI_D9__KPP_COL_1 */
- [188] = IMX_PIN_REG(MX35_PAD_CSI_D9, 0x528, 0x0e4, 5, 0x840, 1), /* MX35_PAD_CSI_D9__GPIO1_21 */
- [189] = IMX_PIN_REG(MX35_PAD_CSI_D9, 0x528, 0x0e4, 7, 0x0, 0), /* MX35_PAD_CSI_D9__ARM11P_TOP_EVNTBUS_14 */
- [190] = IMX_PIN_REG(MX35_PAD_CSI_D10, 0x52c, 0x0e8, 0, 0x0, 0), /* MX35_PAD_CSI_D10__IPU_CSI_D_10 */
- [191] = IMX_PIN_REG(MX35_PAD_CSI_D10, 0x52c, 0x0e8, 1, 0x958, 0), /* MX35_PAD_CSI_D10__KPP_COL_2 */
- [192] = IMX_PIN_REG(MX35_PAD_CSI_D10, 0x52c, 0x0e8, 5, 0x844, 1), /* MX35_PAD_CSI_D10__GPIO1_22 */
- [193] = IMX_PIN_REG(MX35_PAD_CSI_D10, 0x52c, 0x0e8, 7, 0x0, 0), /* MX35_PAD_CSI_D10__ARM11P_TOP_EVNTBUS_15 */
- [194] = IMX_PIN_REG(MX35_PAD_CSI_D11, 0x530, 0x0ec, 0, 0x0, 0), /* MX35_PAD_CSI_D11__IPU_CSI_D_11 */
- [195] = IMX_PIN_REG(MX35_PAD_CSI_D11, 0x530, 0x0ec, 1, 0x95c, 0), /* MX35_PAD_CSI_D11__KPP_COL_3 */
- [196] = IMX_PIN_REG(MX35_PAD_CSI_D11, 0x530, 0x0ec, 5, 0x0, 0), /* MX35_PAD_CSI_D11__GPIO1_23 */
- [197] = IMX_PIN_REG(MX35_PAD_CSI_D12, 0x534, 0x0f0, 0, 0x0, 0), /* MX35_PAD_CSI_D12__IPU_CSI_D_12 */
- [198] = IMX_PIN_REG(MX35_PAD_CSI_D12, 0x534, 0x0f0, 1, 0x970, 0), /* MX35_PAD_CSI_D12__KPP_ROW_0 */
- [199] = IMX_PIN_REG(MX35_PAD_CSI_D12, 0x534, 0x0f0, 5, 0x0, 0), /* MX35_PAD_CSI_D12__GPIO1_24 */
- [200] = IMX_PIN_REG(MX35_PAD_CSI_D13, 0x538, 0x0f4, 0, 0x0, 0), /* MX35_PAD_CSI_D13__IPU_CSI_D_13 */
- [201] = IMX_PIN_REG(MX35_PAD_CSI_D13, 0x538, 0x0f4, 1, 0x974, 0), /* MX35_PAD_CSI_D13__KPP_ROW_1 */
- [202] = IMX_PIN_REG(MX35_PAD_CSI_D13, 0x538, 0x0f4, 5, 0x0, 0), /* MX35_PAD_CSI_D13__GPIO1_25 */
- [203] = IMX_PIN_REG(MX35_PAD_CSI_D14, 0x53c, 0x0f8, 0, 0x0, 0), /* MX35_PAD_CSI_D14__IPU_CSI_D_14 */
- [204] = IMX_PIN_REG(MX35_PAD_CSI_D14, 0x53c, 0x0f8, 1, 0x978, 0), /* MX35_PAD_CSI_D14__KPP_ROW_2 */
- [205] = IMX_PIN_REG(MX35_PAD_CSI_D14, 0x53c, 0x0f8, 5, 0x0, 0), /* MX35_PAD_CSI_D14__GPIO1_26 */
- [206] = IMX_PIN_REG(MX35_PAD_CSI_D15, 0x540, 0x0fc, 0, 0x97c, 0), /* MX35_PAD_CSI_D15__IPU_CSI_D_15 */
- [207] = IMX_PIN_REG(MX35_PAD_CSI_D15, 0x540, 0x0fc, 1, 0x0, 0), /* MX35_PAD_CSI_D15__KPP_ROW_3 */
- [208] = IMX_PIN_REG(MX35_PAD_CSI_D15, 0x540, 0x0fc, 5, 0x0, 0), /* MX35_PAD_CSI_D15__GPIO1_27 */
- [209] = IMX_PIN_REG(MX35_PAD_CSI_MCLK, 0x544, 0x100, 0, 0x0, 0), /* MX35_PAD_CSI_MCLK__IPU_CSI_MCLK */
- [210] = IMX_PIN_REG(MX35_PAD_CSI_MCLK, 0x544, 0x100, 5, 0x0, 0), /* MX35_PAD_CSI_MCLK__GPIO1_28 */
- [211] = IMX_PIN_REG(MX35_PAD_CSI_VSYNC, 0x548, 0x104, 0, 0x0, 0), /* MX35_PAD_CSI_VSYNC__IPU_CSI_VSYNC */
- [212] = IMX_PIN_REG(MX35_PAD_CSI_VSYNC, 0x548, 0x104, 5, 0x0, 0), /* MX35_PAD_CSI_VSYNC__GPIO1_29 */
- [213] = IMX_PIN_REG(MX35_PAD_CSI_HSYNC, 0x54c, 0x108, 0, 0x0, 0), /* MX35_PAD_CSI_HSYNC__IPU_CSI_HSYNC */
- [214] = IMX_PIN_REG(MX35_PAD_CSI_HSYNC, 0x54c, 0x108, 5, 0x0, 0), /* MX35_PAD_CSI_HSYNC__GPIO1_30 */
- [215] = IMX_PIN_REG(MX35_PAD_CSI_PIXCLK, 0x550, 0x10c, 0, 0x0, 0), /* MX35_PAD_CSI_PIXCLK__IPU_CSI_PIXCLK */
- [216] = IMX_PIN_REG(MX35_PAD_CSI_PIXCLK, 0x550, 0x10c, 5, 0x0, 0), /* MX35_PAD_CSI_PIXCLK__GPIO1_31 */
- [217] = IMX_PIN_REG(MX35_PAD_I2C1_CLK, 0x554, 0x110, 0, 0x0, 0), /* MX35_PAD_I2C1_CLK__I2C1_SCL */
- [218] = IMX_PIN_REG(MX35_PAD_I2C1_CLK, 0x554, 0x110, 5, 0x8a8, 0), /* MX35_PAD_I2C1_CLK__GPIO2_24 */
- [219] = IMX_PIN_REG(MX35_PAD_I2C1_CLK, 0x554, 0x110, 6, 0x0, 0), /* MX35_PAD_I2C1_CLK__CCM_USB_BYP_CLK */
- [220] = IMX_PIN_REG(MX35_PAD_I2C1_DAT, 0x558, 0x114, 0, 0x0, 0), /* MX35_PAD_I2C1_DAT__I2C1_SDA */
- [221] = IMX_PIN_REG(MX35_PAD_I2C1_DAT, 0x558, 0x114, 5, 0x8ac, 0), /* MX35_PAD_I2C1_DAT__GPIO2_25 */
- [222] = IMX_PIN_REG(MX35_PAD_I2C2_CLK, 0x55c, 0x118, 0, 0x0, 0), /* MX35_PAD_I2C2_CLK__I2C2_SCL */
- [223] = IMX_PIN_REG(MX35_PAD_I2C2_CLK, 0x55c, 0x118, 1, 0x0, 0), /* MX35_PAD_I2C2_CLK__CAN1_TXCAN */
- [224] = IMX_PIN_REG(MX35_PAD_I2C2_CLK, 0x55c, 0x118, 2, 0x0, 0), /* MX35_PAD_I2C2_CLK__USB_TOP_USBH2_PWR */
- [225] = IMX_PIN_REG(MX35_PAD_I2C2_CLK, 0x55c, 0x118, 5, 0x8b0, 0), /* MX35_PAD_I2C2_CLK__GPIO2_26 */
- [226] = IMX_PIN_REG(MX35_PAD_I2C2_CLK, 0x55c, 0x118, 6, 0x0, 0), /* MX35_PAD_I2C2_CLK__SDMA_DEBUG_BUS_DEVICE_2 */
- [227] = IMX_PIN_REG(MX35_PAD_I2C2_DAT, 0x560, 0x11c, 0, 0x0, 0), /* MX35_PAD_I2C2_DAT__I2C2_SDA */
- [228] = IMX_PIN_REG(MX35_PAD_I2C2_DAT, 0x560, 0x11c, 1, 0x7c8, 0), /* MX35_PAD_I2C2_DAT__CAN1_RXCAN */
- [229] = IMX_PIN_REG(MX35_PAD_I2C2_DAT, 0x560, 0x11c, 2, 0x9f4, 0), /* MX35_PAD_I2C2_DAT__USB_TOP_USBH2_OC */
- [230] = IMX_PIN_REG(MX35_PAD_I2C2_DAT, 0x560, 0x11c, 5, 0x8b4, 0), /* MX35_PAD_I2C2_DAT__GPIO2_27 */
- [231] = IMX_PIN_REG(MX35_PAD_I2C2_DAT, 0x560, 0x11c, 6, 0x0, 0), /* MX35_PAD_I2C2_DAT__SDMA_DEBUG_BUS_DEVICE_3 */
- [232] = IMX_PIN_REG(MX35_PAD_STXD4, 0x564, 0x120, 0, 0x0, 0), /* MX35_PAD_STXD4__AUDMUX_AUD4_TXD */
- [233] = IMX_PIN_REG(MX35_PAD_STXD4, 0x564, 0x120, 5, 0x8b8, 0), /* MX35_PAD_STXD4__GPIO2_28 */
- [234] = IMX_PIN_REG(MX35_PAD_STXD4, 0x564, 0x120, 7, 0x0, 0), /* MX35_PAD_STXD4__ARM11P_TOP_ARM_COREASID0 */
- [235] = IMX_PIN_REG(MX35_PAD_SRXD4, 0x568, 0x124, 0, 0x0, 0), /* MX35_PAD_SRXD4__AUDMUX_AUD4_RXD */
- [236] = IMX_PIN_REG(MX35_PAD_SRXD4, 0x568, 0x124, 5, 0x8bc, 0), /* MX35_PAD_SRXD4__GPIO2_29 */
- [237] = IMX_PIN_REG(MX35_PAD_SRXD4, 0x568, 0x124, 7, 0x0, 0), /* MX35_PAD_SRXD4__ARM11P_TOP_ARM_COREASID1 */
- [238] = IMX_PIN_REG(MX35_PAD_SCK4, 0x56c, 0x128, 0, 0x0, 0), /* MX35_PAD_SCK4__AUDMUX_AUD4_TXC */
- [239] = IMX_PIN_REG(MX35_PAD_SCK4, 0x56c, 0x128, 5, 0x8c4, 0), /* MX35_PAD_SCK4__GPIO2_30 */
- [240] = IMX_PIN_REG(MX35_PAD_SCK4, 0x56c, 0x128, 7, 0x0, 0), /* MX35_PAD_SCK4__ARM11P_TOP_ARM_COREASID2 */
- [241] = IMX_PIN_REG(MX35_PAD_STXFS4, 0x570, 0x12c, 0, 0x0, 0), /* MX35_PAD_STXFS4__AUDMUX_AUD4_TXFS */
- [242] = IMX_PIN_REG(MX35_PAD_STXFS4, 0x570, 0x12c, 5, 0x8c8, 0), /* MX35_PAD_STXFS4__GPIO2_31 */
- [243] = IMX_PIN_REG(MX35_PAD_STXFS4, 0x570, 0x12c, 7, 0x0, 0), /* MX35_PAD_STXFS4__ARM11P_TOP_ARM_COREASID3 */
- [244] = IMX_PIN_REG(MX35_PAD_STXD5, 0x574, 0x130, 0, 0x0, 0), /* MX35_PAD_STXD5__AUDMUX_AUD5_TXD */
- [245] = IMX_PIN_REG(MX35_PAD_STXD5, 0x574, 0x130, 1, 0x0, 0), /* MX35_PAD_STXD5__SPDIF_SPDIF_OUT1 */
- [246] = IMX_PIN_REG(MX35_PAD_STXD5, 0x574, 0x130, 2, 0x7ec, 0), /* MX35_PAD_STXD5__CSPI2_MOSI */
- [247] = IMX_PIN_REG(MX35_PAD_STXD5, 0x574, 0x130, 5, 0x82c, 1), /* MX35_PAD_STXD5__GPIO1_0 */
- [248] = IMX_PIN_REG(MX35_PAD_STXD5, 0x574, 0x130, 7, 0x0, 0), /* MX35_PAD_STXD5__ARM11P_TOP_ARM_COREASID4 */
- [249] = IMX_PIN_REG(MX35_PAD_SRXD5, 0x578, 0x134, 0, 0x0, 0), /* MX35_PAD_SRXD5__AUDMUX_AUD5_RXD */
- [250] = IMX_PIN_REG(MX35_PAD_SRXD5, 0x578, 0x134, 1, 0x998, 0), /* MX35_PAD_SRXD5__SPDIF_SPDIF_IN1 */
- [251] = IMX_PIN_REG(MX35_PAD_SRXD5, 0x578, 0x134, 2, 0x7e8, 0), /* MX35_PAD_SRXD5__CSPI2_MISO */
- [252] = IMX_PIN_REG(MX35_PAD_SRXD5, 0x578, 0x134, 5, 0x838, 1), /* MX35_PAD_SRXD5__GPIO1_1 */
- [253] = IMX_PIN_REG(MX35_PAD_SRXD5, 0x578, 0x134, 7, 0x0, 0), /* MX35_PAD_SRXD5__ARM11P_TOP_ARM_COREASID5 */
- [254] = IMX_PIN_REG(MX35_PAD_SCK5, 0x57c, 0x138, 0, 0x0, 0), /* MX35_PAD_SCK5__AUDMUX_AUD5_TXC */
- [255] = IMX_PIN_REG(MX35_PAD_SCK5, 0x57c, 0x138, 1, 0x994, 0), /* MX35_PAD_SCK5__SPDIF_SPDIF_EXTCLK */
- [256] = IMX_PIN_REG(MX35_PAD_SCK5, 0x57c, 0x138, 2, 0x7e0, 0), /* MX35_PAD_SCK5__CSPI2_SCLK */
- [257] = IMX_PIN_REG(MX35_PAD_SCK5, 0x57c, 0x138, 5, 0x848, 0), /* MX35_PAD_SCK5__GPIO1_2 */
- [258] = IMX_PIN_REG(MX35_PAD_SCK5, 0x57c, 0x138, 7, 0x0, 0), /* MX35_PAD_SCK5__ARM11P_TOP_ARM_COREASID6 */
- [259] = IMX_PIN_REG(MX35_PAD_STXFS5, 0x580, 0x13c, 0, 0x0, 0), /* MX35_PAD_STXFS5__AUDMUX_AUD5_TXFS */
- [260] = IMX_PIN_REG(MX35_PAD_STXFS5, 0x580, 0x13c, 2, 0x7e4, 0), /* MX35_PAD_STXFS5__CSPI2_RDY */
- [261] = IMX_PIN_REG(MX35_PAD_STXFS5, 0x580, 0x13c, 5, 0x84c, 0), /* MX35_PAD_STXFS5__GPIO1_3 */
- [262] = IMX_PIN_REG(MX35_PAD_STXFS5, 0x580, 0x13c, 7, 0x0, 0), /* MX35_PAD_STXFS5__ARM11P_TOP_ARM_COREASID7 */
- [263] = IMX_PIN_REG(MX35_PAD_SCKR, 0x584, 0x140, 0, 0x0, 0), /* MX35_PAD_SCKR__ESAI_SCKR */
- [264] = IMX_PIN_REG(MX35_PAD_SCKR, 0x584, 0x140, 5, 0x850, 1), /* MX35_PAD_SCKR__GPIO1_4 */
- [265] = IMX_PIN_REG(MX35_PAD_SCKR, 0x584, 0x140, 7, 0x0, 0), /* MX35_PAD_SCKR__ARM11P_TOP_EVNTBUS_10 */
- [266] = IMX_PIN_REG(MX35_PAD_FSR, 0x588, 0x144, 0, 0x0, 0), /* MX35_PAD_FSR__ESAI_FSR */
- [267] = IMX_PIN_REG(MX35_PAD_FSR, 0x588, 0x144, 5, 0x854, 1), /* MX35_PAD_FSR__GPIO1_5 */
- [268] = IMX_PIN_REG(MX35_PAD_FSR, 0x588, 0x144, 7, 0x0, 0), /* MX35_PAD_FSR__ARM11P_TOP_EVNTBUS_11 */
- [269] = IMX_PIN_REG(MX35_PAD_HCKR, 0x58c, 0x148, 0, 0x0, 0), /* MX35_PAD_HCKR__ESAI_HCKR */
- [270] = IMX_PIN_REG(MX35_PAD_HCKR, 0x58c, 0x148, 1, 0x0, 0), /* MX35_PAD_HCKR__AUDMUX_AUD5_RXFS */
- [271] = IMX_PIN_REG(MX35_PAD_HCKR, 0x58c, 0x148, 2, 0x7f0, 0), /* MX35_PAD_HCKR__CSPI2_SS0 */
- [272] = IMX_PIN_REG(MX35_PAD_HCKR, 0x58c, 0x148, 3, 0x0, 0), /* MX35_PAD_HCKR__IPU_FLASH_STROBE */
- [273] = IMX_PIN_REG(MX35_PAD_HCKR, 0x58c, 0x148, 5, 0x858, 1), /* MX35_PAD_HCKR__GPIO1_6 */
- [274] = IMX_PIN_REG(MX35_PAD_HCKR, 0x58c, 0x148, 7, 0x0, 0), /* MX35_PAD_HCKR__ARM11P_TOP_EVNTBUS_12 */
- [275] = IMX_PIN_REG(MX35_PAD_SCKT, 0x590, 0x14c, 0, 0x0, 0), /* MX35_PAD_SCKT__ESAI_SCKT */
- [276] = IMX_PIN_REG(MX35_PAD_SCKT, 0x590, 0x14c, 5, 0x85c, 1), /* MX35_PAD_SCKT__GPIO1_7 */
- [277] = IMX_PIN_REG(MX35_PAD_SCKT, 0x590, 0x14c, 6, 0x930, 0), /* MX35_PAD_SCKT__IPU_CSI_D_0 */
- [278] = IMX_PIN_REG(MX35_PAD_SCKT, 0x590, 0x14c, 7, 0x978, 1), /* MX35_PAD_SCKT__KPP_ROW_2 */
- [279] = IMX_PIN_REG(MX35_PAD_FST, 0x594, 0x150, 0, 0x0, 0), /* MX35_PAD_FST__ESAI_FST */
- [280] = IMX_PIN_REG(MX35_PAD_FST, 0x594, 0x150, 5, 0x860, 1), /* MX35_PAD_FST__GPIO1_8 */
- [281] = IMX_PIN_REG(MX35_PAD_FST, 0x594, 0x150, 6, 0x934, 0), /* MX35_PAD_FST__IPU_CSI_D_1 */
- [282] = IMX_PIN_REG(MX35_PAD_FST, 0x594, 0x150, 7, 0x97c, 1), /* MX35_PAD_FST__KPP_ROW_3 */
- [283] = IMX_PIN_REG(MX35_PAD_HCKT, 0x598, 0x154, 0, 0x0, 0), /* MX35_PAD_HCKT__ESAI_HCKT */
- [284] = IMX_PIN_REG(MX35_PAD_HCKT, 0x598, 0x154, 1, 0x7a8, 0), /* MX35_PAD_HCKT__AUDMUX_AUD5_RXC */
- [285] = IMX_PIN_REG(MX35_PAD_HCKT, 0x598, 0x154, 5, 0x864, 0), /* MX35_PAD_HCKT__GPIO1_9 */
- [286] = IMX_PIN_REG(MX35_PAD_HCKT, 0x598, 0x154, 6, 0x938, 0), /* MX35_PAD_HCKT__IPU_CSI_D_2 */
- [287] = IMX_PIN_REG(MX35_PAD_HCKT, 0x598, 0x154, 7, 0x95c, 1), /* MX35_PAD_HCKT__KPP_COL_3 */
- [288] = IMX_PIN_REG(MX35_PAD_TX5_RX0, 0x59c, 0x158, 0, 0x0, 0), /* MX35_PAD_TX5_RX0__ESAI_TX5_RX0 */
- [289] = IMX_PIN_REG(MX35_PAD_TX5_RX0, 0x59c, 0x158, 1, 0x0, 0), /* MX35_PAD_TX5_RX0__AUDMUX_AUD4_RXC */
- [290] = IMX_PIN_REG(MX35_PAD_TX5_RX0, 0x59c, 0x158, 2, 0x7f8, 1), /* MX35_PAD_TX5_RX0__CSPI2_SS2 */
- [291] = IMX_PIN_REG(MX35_PAD_TX5_RX0, 0x59c, 0x158, 3, 0x0, 0), /* MX35_PAD_TX5_RX0__CAN2_TXCAN */
- [292] = IMX_PIN_REG(MX35_PAD_TX5_RX0, 0x59c, 0x158, 4, 0x0, 0), /* MX35_PAD_TX5_RX0__UART2_DTR */
- [293] = IMX_PIN_REG(MX35_PAD_TX5_RX0, 0x59c, 0x158, 5, 0x830, 0), /* MX35_PAD_TX5_RX0__GPIO1_10 */
- [294] = IMX_PIN_REG(MX35_PAD_TX5_RX0, 0x59c, 0x158, 7, 0x0, 0), /* MX35_PAD_TX5_RX0__EMI_M3IF_CHOSEN_MASTER_0 */
- [295] = IMX_PIN_REG(MX35_PAD_TX4_RX1, 0x5a0, 0x15c, 0, 0x0, 0), /* MX35_PAD_TX4_RX1__ESAI_TX4_RX1 */
- [296] = IMX_PIN_REG(MX35_PAD_TX4_RX1, 0x5a0, 0x15c, 1, 0x0, 0), /* MX35_PAD_TX4_RX1__AUDMUX_AUD4_RXFS */
- [297] = IMX_PIN_REG(MX35_PAD_TX4_RX1, 0x5a0, 0x15c, 2, 0x7fc, 0), /* MX35_PAD_TX4_RX1__CSPI2_SS3 */
- [298] = IMX_PIN_REG(MX35_PAD_TX4_RX1, 0x5a0, 0x15c, 3, 0x7cc, 0), /* MX35_PAD_TX4_RX1__CAN2_RXCAN */
- [299] = IMX_PIN_REG(MX35_PAD_TX4_RX1, 0x5a0, 0x15c, 4, 0x0, 0), /* MX35_PAD_TX4_RX1__UART2_DSR */
- [300] = IMX_PIN_REG(MX35_PAD_TX4_RX1, 0x5a0, 0x15c, 5, 0x834, 0), /* MX35_PAD_TX4_RX1__GPIO1_11 */
- [301] = IMX_PIN_REG(MX35_PAD_TX4_RX1, 0x5a0, 0x15c, 6, 0x93c, 0), /* MX35_PAD_TX4_RX1__IPU_CSI_D_3 */
- [302] = IMX_PIN_REG(MX35_PAD_TX4_RX1, 0x5a0, 0x15c, 7, 0x970, 1), /* MX35_PAD_TX4_RX1__KPP_ROW_0 */
- [303] = IMX_PIN_REG(MX35_PAD_TX3_RX2, 0x5a4, 0x160, 0, 0x0, 0), /* MX35_PAD_TX3_RX2__ESAI_TX3_RX2 */
- [304] = IMX_PIN_REG(MX35_PAD_TX3_RX2, 0x5a4, 0x160, 1, 0x91c, 0), /* MX35_PAD_TX3_RX2__I2C3_SCL */
- [305] = IMX_PIN_REG(MX35_PAD_TX3_RX2, 0x5a4, 0x160, 3, 0x0, 0), /* MX35_PAD_TX3_RX2__EMI_NANDF_CE1 */
- [306] = IMX_PIN_REG(MX35_PAD_TX3_RX2, 0x5a4, 0x160, 5, 0x0, 0), /* MX35_PAD_TX3_RX2__GPIO1_12 */
- [307] = IMX_PIN_REG(MX35_PAD_TX3_RX2, 0x5a4, 0x160, 6, 0x940, 0), /* MX35_PAD_TX3_RX2__IPU_CSI_D_4 */
- [308] = IMX_PIN_REG(MX35_PAD_TX3_RX2, 0x5a4, 0x160, 7, 0x974, 1), /* MX35_PAD_TX3_RX2__KPP_ROW_1 */
- [309] = IMX_PIN_REG(MX35_PAD_TX2_RX3, 0x5a8, 0x164, 0, 0x0, 0), /* MX35_PAD_TX2_RX3__ESAI_TX2_RX3 */
- [310] = IMX_PIN_REG(MX35_PAD_TX2_RX3, 0x5a8, 0x164, 1, 0x920, 0), /* MX35_PAD_TX2_RX3__I2C3_SDA */
- [311] = IMX_PIN_REG(MX35_PAD_TX2_RX3, 0x5a8, 0x164, 3, 0x0, 0), /* MX35_PAD_TX2_RX3__EMI_NANDF_CE2 */
- [312] = IMX_PIN_REG(MX35_PAD_TX2_RX3, 0x5a8, 0x164, 5, 0x0, 0), /* MX35_PAD_TX2_RX3__GPIO1_13 */
- [313] = IMX_PIN_REG(MX35_PAD_TX2_RX3, 0x5a8, 0x164, 6, 0x944, 0), /* MX35_PAD_TX2_RX3__IPU_CSI_D_5 */
- [314] = IMX_PIN_REG(MX35_PAD_TX2_RX3, 0x5a8, 0x164, 7, 0x950, 1), /* MX35_PAD_TX2_RX3__KPP_COL_0 */
- [315] = IMX_PIN_REG(MX35_PAD_TX1, 0x5ac, 0x168, 0, 0x0, 0), /* MX35_PAD_TX1__ESAI_TX1 */
- [316] = IMX_PIN_REG(MX35_PAD_TX1, 0x5ac, 0x168, 1, 0x7d4, 1), /* MX35_PAD_TX1__CCM_PMIC_RDY */
- [317] = IMX_PIN_REG(MX35_PAD_TX1, 0x5ac, 0x168, 2, 0x7d8, 2), /* MX35_PAD_TX1__CSPI1_SS2 */
- [318] = IMX_PIN_REG(MX35_PAD_TX1, 0x5ac, 0x168, 3, 0x0, 0), /* MX35_PAD_TX1__EMI_NANDF_CE3 */
- [319] = IMX_PIN_REG(MX35_PAD_TX1, 0x5ac, 0x168, 4, 0x0, 0), /* MX35_PAD_TX1__UART2_RI */
- [320] = IMX_PIN_REG(MX35_PAD_TX1, 0x5ac, 0x168, 5, 0x0, 0), /* MX35_PAD_TX1__GPIO1_14 */
- [321] = IMX_PIN_REG(MX35_PAD_TX1, 0x5ac, 0x168, 6, 0x948, 0), /* MX35_PAD_TX1__IPU_CSI_D_6 */
- [322] = IMX_PIN_REG(MX35_PAD_TX1, 0x5ac, 0x168, 7, 0x954, 1), /* MX35_PAD_TX1__KPP_COL_1 */
- [323] = IMX_PIN_REG(MX35_PAD_TX0, 0x5b0, 0x16c, 0, 0x0, 0), /* MX35_PAD_TX0__ESAI_TX0 */
- [324] = IMX_PIN_REG(MX35_PAD_TX0, 0x5b0, 0x16c, 1, 0x994, 1), /* MX35_PAD_TX0__SPDIF_SPDIF_EXTCLK */
- [325] = IMX_PIN_REG(MX35_PAD_TX0, 0x5b0, 0x16c, 2, 0x7dc, 0), /* MX35_PAD_TX0__CSPI1_SS3 */
- [326] = IMX_PIN_REG(MX35_PAD_TX0, 0x5b0, 0x16c, 3, 0x800, 1), /* MX35_PAD_TX0__EMI_DTACK_B */
- [327] = IMX_PIN_REG(MX35_PAD_TX0, 0x5b0, 0x16c, 4, 0x0, 0), /* MX35_PAD_TX0__UART2_DCD */
- [328] = IMX_PIN_REG(MX35_PAD_TX0, 0x5b0, 0x16c, 5, 0x0, 0), /* MX35_PAD_TX0__GPIO1_15 */
- [329] = IMX_PIN_REG(MX35_PAD_TX0, 0x5b0, 0x16c, 6, 0x94c, 0), /* MX35_PAD_TX0__IPU_CSI_D_7 */
- [330] = IMX_PIN_REG(MX35_PAD_TX0, 0x5b0, 0x16c, 7, 0x958, 1), /* MX35_PAD_TX0__KPP_COL_2 */
- [331] = IMX_PIN_REG(MX35_PAD_CSPI1_MOSI, 0x5b4, 0x170, 0, 0x0, 0), /* MX35_PAD_CSPI1_MOSI__CSPI1_MOSI */
- [332] = IMX_PIN_REG(MX35_PAD_CSPI1_MOSI, 0x5b4, 0x170, 5, 0x0, 0), /* MX35_PAD_CSPI1_MOSI__GPIO1_16 */
- [333] = IMX_PIN_REG(MX35_PAD_CSPI1_MOSI, 0x5b4, 0x170, 7, 0x0, 0), /* MX35_PAD_CSPI1_MOSI__ECT_CTI_TRIG_OUT1_2 */
- [334] = IMX_PIN_REG(MX35_PAD_CSPI1_MISO, 0x5b8, 0x174, 0, 0x0, 0), /* MX35_PAD_CSPI1_MISO__CSPI1_MISO */
- [335] = IMX_PIN_REG(MX35_PAD_CSPI1_MISO, 0x5b8, 0x174, 5, 0x0, 0), /* MX35_PAD_CSPI1_MISO__GPIO1_17 */
- [336] = IMX_PIN_REG(MX35_PAD_CSPI1_MISO, 0x5b8, 0x174, 7, 0x0, 0), /* MX35_PAD_CSPI1_MISO__ECT_CTI_TRIG_OUT1_3 */
- [337] = IMX_PIN_REG(MX35_PAD_CSPI1_SS0, 0x5bc, 0x178, 0, 0x0, 0), /* MX35_PAD_CSPI1_SS0__CSPI1_SS0 */
- [338] = IMX_PIN_REG(MX35_PAD_CSPI1_SS0, 0x5bc, 0x178, 1, 0x990, 1), /* MX35_PAD_CSPI1_SS0__OWIRE_LINE */
- [339] = IMX_PIN_REG(MX35_PAD_CSPI1_SS0, 0x5bc, 0x178, 2, 0x7fc, 1), /* MX35_PAD_CSPI1_SS0__CSPI2_SS3 */
- [340] = IMX_PIN_REG(MX35_PAD_CSPI1_SS0, 0x5bc, 0x178, 5, 0x0, 0), /* MX35_PAD_CSPI1_SS0__GPIO1_18 */
- [341] = IMX_PIN_REG(MX35_PAD_CSPI1_SS0, 0x5bc, 0x178, 7, 0x0, 0), /* MX35_PAD_CSPI1_SS0__ECT_CTI_TRIG_OUT1_4 */
- [342] = IMX_PIN_REG(MX35_PAD_CSPI1_SS1, 0x5c0, 0x17c, 0, 0x0, 0), /* MX35_PAD_CSPI1_SS1__CSPI1_SS1 */
- [343] = IMX_PIN_REG(MX35_PAD_CSPI1_SS1, 0x5c0, 0x17c, 1, 0x0, 0), /* MX35_PAD_CSPI1_SS1__PWM_PWMO */
- [344] = IMX_PIN_REG(MX35_PAD_CSPI1_SS1, 0x5c0, 0x17c, 2, 0x7d0, 1), /* MX35_PAD_CSPI1_SS1__CCM_CLK32K */
- [345] = IMX_PIN_REG(MX35_PAD_CSPI1_SS1, 0x5c0, 0x17c, 5, 0x0, 0), /* MX35_PAD_CSPI1_SS1__GPIO1_19 */
- [346] = IMX_PIN_REG(MX35_PAD_CSPI1_SS1, 0x5c0, 0x17c, 6, 0x0, 0), /* MX35_PAD_CSPI1_SS1__IPU_DIAGB_29 */
- [347] = IMX_PIN_REG(MX35_PAD_CSPI1_SS1, 0x5c0, 0x17c, 7, 0x0, 0), /* MX35_PAD_CSPI1_SS1__ECT_CTI_TRIG_OUT1_5 */
- [348] = IMX_PIN_REG(MX35_PAD_CSPI1_SCLK, 0x5c4, 0x180, 0, 0x0, 0), /* MX35_PAD_CSPI1_SCLK__CSPI1_SCLK */
- [349] = IMX_PIN_REG(MX35_PAD_CSPI1_SCLK, 0x5c4, 0x180, 5, 0x904, 0), /* MX35_PAD_CSPI1_SCLK__GPIO3_4 */
- [350] = IMX_PIN_REG(MX35_PAD_CSPI1_SCLK, 0x5c4, 0x180, 6, 0x0, 0), /* MX35_PAD_CSPI1_SCLK__IPU_DIAGB_30 */
- [351] = IMX_PIN_REG(MX35_PAD_CSPI1_SCLK, 0x5c4, 0x180, 7, 0x0, 0), /* MX35_PAD_CSPI1_SCLK__EMI_M3IF_CHOSEN_MASTER_1 */
- [352] = IMX_PIN_REG(MX35_PAD_CSPI1_SPI_RDY, 0x5c8, 0x184, 0, 0x0, 0), /* MX35_PAD_CSPI1_SPI_RDY__CSPI1_RDY */
- [353] = IMX_PIN_REG(MX35_PAD_CSPI1_SPI_RDY, 0x5c8, 0x184, 5, 0x908, 0), /* MX35_PAD_CSPI1_SPI_RDY__GPIO3_5 */
- [354] = IMX_PIN_REG(MX35_PAD_CSPI1_SPI_RDY, 0x5c8, 0x184, 6, 0x0, 0), /* MX35_PAD_CSPI1_SPI_RDY__IPU_DIAGB_31 */
- [355] = IMX_PIN_REG(MX35_PAD_CSPI1_SPI_RDY, 0x5c8, 0x184, 7, 0x0, 0), /* MX35_PAD_CSPI1_SPI_RDY__EMI_M3IF_CHOSEN_MASTER_2 */
- [356] = IMX_PIN_REG(MX35_PAD_RXD1, 0x5cc, 0x188, 0, 0x0, 0), /* MX35_PAD_RXD1__UART1_RXD_MUX */
- [357] = IMX_PIN_REG(MX35_PAD_RXD1, 0x5cc, 0x188, 1, 0x7ec, 1), /* MX35_PAD_RXD1__CSPI2_MOSI */
- [358] = IMX_PIN_REG(MX35_PAD_RXD1, 0x5cc, 0x188, 4, 0x960, 0), /* MX35_PAD_RXD1__KPP_COL_4 */
- [359] = IMX_PIN_REG(MX35_PAD_RXD1, 0x5cc, 0x188, 5, 0x90c, 0), /* MX35_PAD_RXD1__GPIO3_6 */
- [360] = IMX_PIN_REG(MX35_PAD_RXD1, 0x5cc, 0x188, 7, 0x0, 0), /* MX35_PAD_RXD1__ARM11P_TOP_EVNTBUS_16 */
- [361] = IMX_PIN_REG(MX35_PAD_TXD1, 0x5d0, 0x18c, 0, 0x0, 0), /* MX35_PAD_TXD1__UART1_TXD_MUX */
- [362] = IMX_PIN_REG(MX35_PAD_TXD1, 0x5d0, 0x18c, 1, 0x7e8, 1), /* MX35_PAD_TXD1__CSPI2_MISO */
- [363] = IMX_PIN_REG(MX35_PAD_TXD1, 0x5d0, 0x18c, 4, 0x964, 0), /* MX35_PAD_TXD1__KPP_COL_5 */
- [364] = IMX_PIN_REG(MX35_PAD_TXD1, 0x5d0, 0x18c, 5, 0x910, 0), /* MX35_PAD_TXD1__GPIO3_7 */
- [365] = IMX_PIN_REG(MX35_PAD_TXD1, 0x5d0, 0x18c, 7, 0x0, 0), /* MX35_PAD_TXD1__ARM11P_TOP_EVNTBUS_17 */
- [366] = IMX_PIN_REG(MX35_PAD_RTS1, 0x5d4, 0x190, 0, 0x0, 0), /* MX35_PAD_RTS1__UART1_RTS */
- [367] = IMX_PIN_REG(MX35_PAD_RTS1, 0x5d4, 0x190, 1, 0x7e0, 1), /* MX35_PAD_RTS1__CSPI2_SCLK */
- [368] = IMX_PIN_REG(MX35_PAD_RTS1, 0x5d4, 0x190, 2, 0x91c, 1), /* MX35_PAD_RTS1__I2C3_SCL */
- [369] = IMX_PIN_REG(MX35_PAD_RTS1, 0x5d4, 0x190, 3, 0x930, 1), /* MX35_PAD_RTS1__IPU_CSI_D_0 */
- [370] = IMX_PIN_REG(MX35_PAD_RTS1, 0x5d4, 0x190, 4, 0x968, 0), /* MX35_PAD_RTS1__KPP_COL_6 */
- [371] = IMX_PIN_REG(MX35_PAD_RTS1, 0x5d4, 0x190, 5, 0x914, 0), /* MX35_PAD_RTS1__GPIO3_8 */
- [372] = IMX_PIN_REG(MX35_PAD_RTS1, 0x5d4, 0x190, 6, 0x0, 0), /* MX35_PAD_RTS1__EMI_NANDF_CE1 */
- [373] = IMX_PIN_REG(MX35_PAD_RTS1, 0x5d4, 0x190, 7, 0x0, 0), /* MX35_PAD_RTS1__ARM11P_TOP_EVNTBUS_18 */
- [374] = IMX_PIN_REG(MX35_PAD_CTS1, 0x5d8, 0x194, 0, 0x0, 0), /* MX35_PAD_CTS1__UART1_CTS */
- [375] = IMX_PIN_REG(MX35_PAD_CTS1, 0x5d8, 0x194, 1, 0x7e4, 1), /* MX35_PAD_CTS1__CSPI2_RDY */
- [376] = IMX_PIN_REG(MX35_PAD_CTS1, 0x5d8, 0x194, 2, 0x920, 1), /* MX35_PAD_CTS1__I2C3_SDA */
- [377] = IMX_PIN_REG(MX35_PAD_CTS1, 0x5d8, 0x194, 3, 0x934, 1), /* MX35_PAD_CTS1__IPU_CSI_D_1 */
- [378] = IMX_PIN_REG(MX35_PAD_CTS1, 0x5d8, 0x194, 4, 0x96c, 0), /* MX35_PAD_CTS1__KPP_COL_7 */
- [379] = IMX_PIN_REG(MX35_PAD_CTS1, 0x5d8, 0x194, 5, 0x918, 0), /* MX35_PAD_CTS1__GPIO3_9 */
- [380] = IMX_PIN_REG(MX35_PAD_CTS1, 0x5d8, 0x194, 6, 0x0, 0), /* MX35_PAD_CTS1__EMI_NANDF_CE2 */
- [381] = IMX_PIN_REG(MX35_PAD_CTS1, 0x5d8, 0x194, 7, 0x0, 0), /* MX35_PAD_CTS1__ARM11P_TOP_EVNTBUS_19 */
- [382] = IMX_PIN_REG(MX35_PAD_RXD2, 0x5dc, 0x198, 0, 0x0, 0), /* MX35_PAD_RXD2__UART2_RXD_MUX */
- [383] = IMX_PIN_REG(MX35_PAD_RXD2, 0x5dc, 0x198, 4, 0x980, 0), /* MX35_PAD_RXD2__KPP_ROW_4 */
- [384] = IMX_PIN_REG(MX35_PAD_RXD2, 0x5dc, 0x198, 5, 0x8ec, 0), /* MX35_PAD_RXD2__GPIO3_10 */
- [385] = IMX_PIN_REG(MX35_PAD_TXD2, 0x5e0, 0x19c, 0, 0x0, 0), /* MX35_PAD_TXD2__UART2_TXD_MUX */
- [386] = IMX_PIN_REG(MX35_PAD_TXD2, 0x5e0, 0x19c, 1, 0x994, 2), /* MX35_PAD_TXD2__SPDIF_SPDIF_EXTCLK */
- [387] = IMX_PIN_REG(MX35_PAD_TXD2, 0x5e0, 0x19c, 4, 0x984, 0), /* MX35_PAD_TXD2__KPP_ROW_5 */
- [388] = IMX_PIN_REG(MX35_PAD_TXD2, 0x5e0, 0x19c, 5, 0x8f0, 0), /* MX35_PAD_TXD2__GPIO3_11 */
- [389] = IMX_PIN_REG(MX35_PAD_RTS2, 0x5e4, 0x1a0, 0, 0x0, 0), /* MX35_PAD_RTS2__UART2_RTS */
- [390] = IMX_PIN_REG(MX35_PAD_RTS2, 0x5e4, 0x1a0, 1, 0x998, 1), /* MX35_PAD_RTS2__SPDIF_SPDIF_IN1 */
- [391] = IMX_PIN_REG(MX35_PAD_RTS2, 0x5e4, 0x1a0, 2, 0x7cc, 1), /* MX35_PAD_RTS2__CAN2_RXCAN */
- [392] = IMX_PIN_REG(MX35_PAD_RTS2, 0x5e4, 0x1a0, 3, 0x938, 1), /* MX35_PAD_RTS2__IPU_CSI_D_2 */
- [393] = IMX_PIN_REG(MX35_PAD_RTS2, 0x5e4, 0x1a0, 4, 0x988, 0), /* MX35_PAD_RTS2__KPP_ROW_6 */
- [394] = IMX_PIN_REG(MX35_PAD_RTS2, 0x5e4, 0x1a0, 5, 0x8f4, 0), /* MX35_PAD_RTS2__GPIO3_12 */
- [395] = IMX_PIN_REG(MX35_PAD_RTS2, 0x5e4, 0x1a0, 6, 0x0, 0), /* MX35_PAD_RTS2__AUDMUX_AUD5_RXC */
- [396] = IMX_PIN_REG(MX35_PAD_RTS2, 0x5e4, 0x1a0, 7, 0x9a0, 0), /* MX35_PAD_RTS2__UART3_RXD_MUX */
- [397] = IMX_PIN_REG(MX35_PAD_CTS2, 0x5e8, 0x1a4, 0, 0x0, 0), /* MX35_PAD_CTS2__UART2_CTS */
- [398] = IMX_PIN_REG(MX35_PAD_CTS2, 0x5e8, 0x1a4, 1, 0x0, 0), /* MX35_PAD_CTS2__SPDIF_SPDIF_OUT1 */
- [399] = IMX_PIN_REG(MX35_PAD_CTS2, 0x5e8, 0x1a4, 2, 0x0, 0), /* MX35_PAD_CTS2__CAN2_TXCAN */
- [400] = IMX_PIN_REG(MX35_PAD_CTS2, 0x5e8, 0x1a4, 3, 0x93c, 1), /* MX35_PAD_CTS2__IPU_CSI_D_3 */
- [401] = IMX_PIN_REG(MX35_PAD_CTS2, 0x5e8, 0x1a4, 4, 0x98c, 0), /* MX35_PAD_CTS2__KPP_ROW_7 */
- [402] = IMX_PIN_REG(MX35_PAD_CTS2, 0x5e8, 0x1a4, 5, 0x8f8, 0), /* MX35_PAD_CTS2__GPIO3_13 */
- [403] = IMX_PIN_REG(MX35_PAD_CTS2, 0x5e8, 0x1a4, 6, 0x0, 0), /* MX35_PAD_CTS2__AUDMUX_AUD5_RXFS */
- [404] = IMX_PIN_REG(MX35_PAD_CTS2, 0x5e8, 0x1a4, 7, 0x0, 0), /* MX35_PAD_CTS2__UART3_TXD_MUX */
- [405] = IMX_PIN_REG(MX35_PAD_RTCK, 0x5ec, 0x0, 0, 0x0, 0), /* MX35_PAD_RTCK__ARM11P_TOP_RTCK */
- [406] = IMX_PIN_REG(MX35_PAD_TCK, 0x5f0, 0x0, 0, 0x0, 0), /* MX35_PAD_TCK__SJC_TCK */
- [407] = IMX_PIN_REG(MX35_PAD_TMS, 0x5f4, 0x0, 0, 0x0, 0), /* MX35_PAD_TMS__SJC_TMS */
- [408] = IMX_PIN_REG(MX35_PAD_TDI, 0x5f8, 0x0, 0, 0x0, 0), /* MX35_PAD_TDI__SJC_TDI */
- [409] = IMX_PIN_REG(MX35_PAD_TDO, 0x5fc, 0x0, 0, 0x0, 0), /* MX35_PAD_TDO__SJC_TDO */
- [410] = IMX_PIN_REG(MX35_PAD_TRSTB, 0x600, 0x0, 0, 0x0, 0), /* MX35_PAD_TRSTB__SJC_TRSTB */
- [411] = IMX_PIN_REG(MX35_PAD_DE_B, 0x604, 0x0, 0, 0x0, 0), /* MX35_PAD_DE_B__SJC_DE_B */
- [412] = IMX_PIN_REG(MX35_PAD_SJC_MOD, 0x608, 0x0, 0, 0x0, 0), /* MX35_PAD_SJC_MOD__SJC_MOD */
- [413] = IMX_PIN_REG(MX35_PAD_USBOTG_PWR, 0x60c, 0x1a8, 0, 0x0, 0), /* MX35_PAD_USBOTG_PWR__USB_TOP_USBOTG_PWR */
- [414] = IMX_PIN_REG(MX35_PAD_USBOTG_PWR, 0x60c, 0x1a8, 1, 0x0, 0), /* MX35_PAD_USBOTG_PWR__USB_TOP_USBH2_PWR */
- [415] = IMX_PIN_REG(MX35_PAD_USBOTG_PWR, 0x60c, 0x1a8, 5, 0x8fc, 0), /* MX35_PAD_USBOTG_PWR__GPIO3_14 */
- [416] = IMX_PIN_REG(MX35_PAD_USBOTG_OC, 0x610, 0x1ac, 0, 0x0, 0), /* MX35_PAD_USBOTG_OC__USB_TOP_USBOTG_OC */
- [417] = IMX_PIN_REG(MX35_PAD_USBOTG_OC, 0x610, 0x1ac, 1, 0x9f4, 1), /* MX35_PAD_USBOTG_OC__USB_TOP_USBH2_OC */
- [418] = IMX_PIN_REG(MX35_PAD_USBOTG_OC, 0x610, 0x1ac, 5, 0x900, 0), /* MX35_PAD_USBOTG_OC__GPIO3_15 */
- [419] = IMX_PIN_REG(MX35_PAD_LD0, 0x614, 0x1b0, 0, 0x0, 0), /* MX35_PAD_LD0__IPU_DISPB_DAT_0 */
- [420] = IMX_PIN_REG(MX35_PAD_LD0, 0x614, 0x1b0, 5, 0x868, 1), /* MX35_PAD_LD0__GPIO2_0 */
- [421] = IMX_PIN_REG(MX35_PAD_LD0, 0x614, 0x1b0, 6, 0x0, 0), /* MX35_PAD_LD0__SDMA_SDMA_DEBUG_PC_0 */
- [422] = IMX_PIN_REG(MX35_PAD_LD1, 0x618, 0x1b4, 0, 0x0, 0), /* MX35_PAD_LD1__IPU_DISPB_DAT_1 */
- [423] = IMX_PIN_REG(MX35_PAD_LD1, 0x618, 0x1b4, 5, 0x894, 0), /* MX35_PAD_LD1__GPIO2_1 */
- [424] = IMX_PIN_REG(MX35_PAD_LD1, 0x618, 0x1b4, 6, 0x0, 0), /* MX35_PAD_LD1__SDMA_SDMA_DEBUG_PC_1 */
- [425] = IMX_PIN_REG(MX35_PAD_LD2, 0x61c, 0x1b8, 0, 0x0, 0), /* MX35_PAD_LD2__IPU_DISPB_DAT_2 */
- [426] = IMX_PIN_REG(MX35_PAD_LD2, 0x61c, 0x1b8, 5, 0x8c0, 0), /* MX35_PAD_LD2__GPIO2_2 */
- [427] = IMX_PIN_REG(MX35_PAD_LD2, 0x61c, 0x1b8, 6, 0x0, 0), /* MX35_PAD_LD2__SDMA_SDMA_DEBUG_PC_2 */
- [428] = IMX_PIN_REG(MX35_PAD_LD3, 0x620, 0x1bc, 0, 0x0, 0), /* MX35_PAD_LD3__IPU_DISPB_DAT_3 */
- [429] = IMX_PIN_REG(MX35_PAD_LD3, 0x620, 0x1bc, 5, 0x8cc, 0), /* MX35_PAD_LD3__GPIO2_3 */
- [430] = IMX_PIN_REG(MX35_PAD_LD3, 0x620, 0x1bc, 6, 0x0, 0), /* MX35_PAD_LD3__SDMA_SDMA_DEBUG_PC_3 */
- [431] = IMX_PIN_REG(MX35_PAD_LD4, 0x624, 0x1c0, 0, 0x0, 0), /* MX35_PAD_LD4__IPU_DISPB_DAT_4 */
- [432] = IMX_PIN_REG(MX35_PAD_LD4, 0x624, 0x1c0, 5, 0x8d0, 0), /* MX35_PAD_LD4__GPIO2_4 */
- [433] = IMX_PIN_REG(MX35_PAD_LD4, 0x624, 0x1c0, 6, 0x0, 0), /* MX35_PAD_LD4__SDMA_SDMA_DEBUG_PC_4 */
- [434] = IMX_PIN_REG(MX35_PAD_LD5, 0x628, 0x1c4, 0, 0x0, 0), /* MX35_PAD_LD5__IPU_DISPB_DAT_5 */
- [435] = IMX_PIN_REG(MX35_PAD_LD5, 0x628, 0x1c4, 5, 0x8d4, 0), /* MX35_PAD_LD5__GPIO2_5 */
- [436] = IMX_PIN_REG(MX35_PAD_LD5, 0x628, 0x1c4, 6, 0x0, 0), /* MX35_PAD_LD5__SDMA_SDMA_DEBUG_PC_5 */
- [437] = IMX_PIN_REG(MX35_PAD_LD6, 0x62c, 0x1c8, 0, 0x0, 0), /* MX35_PAD_LD6__IPU_DISPB_DAT_6 */
- [438] = IMX_PIN_REG(MX35_PAD_LD6, 0x62c, 0x1c8, 5, 0x8d8, 0), /* MX35_PAD_LD6__GPIO2_6 */
- [439] = IMX_PIN_REG(MX35_PAD_LD6, 0x62c, 0x1c8, 6, 0x0, 0), /* MX35_PAD_LD6__SDMA_SDMA_DEBUG_PC_6 */
- [440] = IMX_PIN_REG(MX35_PAD_LD7, 0x630, 0x1cc, 0, 0x0, 0), /* MX35_PAD_LD7__IPU_DISPB_DAT_7 */
- [441] = IMX_PIN_REG(MX35_PAD_LD7, 0x630, 0x1cc, 5, 0x8dc, 0), /* MX35_PAD_LD7__GPIO2_7 */
- [442] = IMX_PIN_REG(MX35_PAD_LD7, 0x630, 0x1cc, 6, 0x0, 0), /* MX35_PAD_LD7__SDMA_SDMA_DEBUG_PC_7 */
- [443] = IMX_PIN_REG(MX35_PAD_LD8, 0x634, 0x1d0, 0, 0x0, 0), /* MX35_PAD_LD8__IPU_DISPB_DAT_8 */
- [444] = IMX_PIN_REG(MX35_PAD_LD8, 0x634, 0x1d0, 5, 0x8e0, 0), /* MX35_PAD_LD8__GPIO2_8 */
- [445] = IMX_PIN_REG(MX35_PAD_LD8, 0x634, 0x1d0, 6, 0x0, 0), /* MX35_PAD_LD8__SDMA_SDMA_DEBUG_PC_8 */
- [446] = IMX_PIN_REG(MX35_PAD_LD9, 0x638, 0x1d4, 0, 0x0, 0), /* MX35_PAD_LD9__IPU_DISPB_DAT_9 */
- [447] = IMX_PIN_REG(MX35_PAD_LD9, 0x638, 0x1d4, 5, 0x8e4, 0), /* MX35_PAD_LD9__GPIO2_9 */
- [448] = IMX_PIN_REG(MX35_PAD_LD9, 0x638, 0x1d4, 6, 0x0, 0), /* MX35_PAD_LD9__SDMA_SDMA_DEBUG_PC_9 */
- [449] = IMX_PIN_REG(MX35_PAD_LD10, 0x63c, 0x1d8, 0, 0x0, 0), /* MX35_PAD_LD10__IPU_DISPB_DAT_10 */
- [450] = IMX_PIN_REG(MX35_PAD_LD10, 0x63c, 0x1d8, 5, 0x86c, 0), /* MX35_PAD_LD10__GPIO2_10 */
- [451] = IMX_PIN_REG(MX35_PAD_LD10, 0x63c, 0x1d8, 6, 0x0, 0), /* MX35_PAD_LD10__SDMA_SDMA_DEBUG_PC_10 */
- [452] = IMX_PIN_REG(MX35_PAD_LD11, 0x640, 0x1dc, 0, 0x0, 0), /* MX35_PAD_LD11__IPU_DISPB_DAT_11 */
- [453] = IMX_PIN_REG(MX35_PAD_LD11, 0x640, 0x1dc, 5, 0x870, 0), /* MX35_PAD_LD11__GPIO2_11 */
- [454] = IMX_PIN_REG(MX35_PAD_LD11, 0x640, 0x1dc, 6, 0x0, 0), /* MX35_PAD_LD11__SDMA_SDMA_DEBUG_PC_11 */
- [455] = IMX_PIN_REG(MX35_PAD_LD11, 0x640, 0x1dc, 7, 0x0, 0), /* MX35_PAD_LD11__ARM11P_TOP_TRACE_4 */
- [456] = IMX_PIN_REG(MX35_PAD_LD12, 0x644, 0x1e0, 0, 0x0, 0), /* MX35_PAD_LD12__IPU_DISPB_DAT_12 */
- [457] = IMX_PIN_REG(MX35_PAD_LD12, 0x644, 0x1e0, 5, 0x874, 0), /* MX35_PAD_LD12__GPIO2_12 */
- [458] = IMX_PIN_REG(MX35_PAD_LD12, 0x644, 0x1e0, 6, 0x0, 0), /* MX35_PAD_LD12__SDMA_SDMA_DEBUG_PC_12 */
- [459] = IMX_PIN_REG(MX35_PAD_LD12, 0x644, 0x1e0, 7, 0x0, 0), /* MX35_PAD_LD12__ARM11P_TOP_TRACE_5 */
- [460] = IMX_PIN_REG(MX35_PAD_LD13, 0x648, 0x1e4, 0, 0x0, 0), /* MX35_PAD_LD13__IPU_DISPB_DAT_13 */
- [461] = IMX_PIN_REG(MX35_PAD_LD13, 0x648, 0x1e4, 5, 0x878, 0), /* MX35_PAD_LD13__GPIO2_13 */
- [462] = IMX_PIN_REG(MX35_PAD_LD13, 0x648, 0x1e4, 6, 0x0, 0), /* MX35_PAD_LD13__SDMA_SDMA_DEBUG_PC_13 */
- [463] = IMX_PIN_REG(MX35_PAD_LD13, 0x648, 0x1e4, 7, 0x0, 0), /* MX35_PAD_LD13__ARM11P_TOP_TRACE_6 */
- [464] = IMX_PIN_REG(MX35_PAD_LD14, 0x64c, 0x1e8, 0, 0x0, 0), /* MX35_PAD_LD14__IPU_DISPB_DAT_14 */
- [465] = IMX_PIN_REG(MX35_PAD_LD14, 0x64c, 0x1e8, 5, 0x87c, 0), /* MX35_PAD_LD14__GPIO2_14 */
- [466] = IMX_PIN_REG(MX35_PAD_LD14, 0x64c, 0x1e8, 6, 0x0, 0), /* MX35_PAD_LD14__SDMA_SDMA_DEBUG_EVENT_CHANNEL_0 */
- [467] = IMX_PIN_REG(MX35_PAD_LD14, 0x64c, 0x1e8, 7, 0x0, 0), /* MX35_PAD_LD14__ARM11P_TOP_TRACE_7 */
- [468] = IMX_PIN_REG(MX35_PAD_LD15, 0x650, 0x1ec, 0, 0x0, 0), /* MX35_PAD_LD15__IPU_DISPB_DAT_15 */
- [469] = IMX_PIN_REG(MX35_PAD_LD15, 0x650, 0x1ec, 5, 0x880, 0), /* MX35_PAD_LD15__GPIO2_15 */
- [470] = IMX_PIN_REG(MX35_PAD_LD15, 0x650, 0x1ec, 6, 0x0, 0), /* MX35_PAD_LD15__SDMA_SDMA_DEBUG_EVENT_CHANNEL_1 */
- [471] = IMX_PIN_REG(MX35_PAD_LD15, 0x650, 0x1ec, 7, 0x0, 0), /* MX35_PAD_LD15__ARM11P_TOP_TRACE_8 */
- [472] = IMX_PIN_REG(MX35_PAD_LD16, 0x654, 0x1f0, 0, 0x0, 0), /* MX35_PAD_LD16__IPU_DISPB_DAT_16 */
- [473] = IMX_PIN_REG(MX35_PAD_LD16, 0x654, 0x1f0, 2, 0x928, 0), /* MX35_PAD_LD16__IPU_DISPB_D12_VSYNC */
- [474] = IMX_PIN_REG(MX35_PAD_LD16, 0x654, 0x1f0, 5, 0x884, 0), /* MX35_PAD_LD16__GPIO2_16 */
- [475] = IMX_PIN_REG(MX35_PAD_LD16, 0x654, 0x1f0, 6, 0x0, 0), /* MX35_PAD_LD16__SDMA_SDMA_DEBUG_EVENT_CHANNEL_2 */
- [476] = IMX_PIN_REG(MX35_PAD_LD16, 0x654, 0x1f0, 7, 0x0, 0), /* MX35_PAD_LD16__ARM11P_TOP_TRACE_9 */
- [477] = IMX_PIN_REG(MX35_PAD_LD17, 0x658, 0x1f4, 0, 0x0, 0), /* MX35_PAD_LD17__IPU_DISPB_DAT_17 */
- [478] = IMX_PIN_REG(MX35_PAD_LD17, 0x658, 0x1f4, 2, 0x0, 0), /* MX35_PAD_LD17__IPU_DISPB_CS2 */
- [479] = IMX_PIN_REG(MX35_PAD_LD17, 0x658, 0x1f4, 5, 0x888, 0), /* MX35_PAD_LD17__GPIO2_17 */
- [480] = IMX_PIN_REG(MX35_PAD_LD17, 0x658, 0x1f4, 6, 0x0, 0), /* MX35_PAD_LD17__SDMA_SDMA_DEBUG_EVENT_CHANNEL_3 */
- [481] = IMX_PIN_REG(MX35_PAD_LD17, 0x658, 0x1f4, 7, 0x0, 0), /* MX35_PAD_LD17__ARM11P_TOP_TRACE_10 */
- [482] = IMX_PIN_REG(MX35_PAD_LD18, 0x65c, 0x1f8, 0, 0x0, 0), /* MX35_PAD_LD18__IPU_DISPB_DAT_18 */
- [483] = IMX_PIN_REG(MX35_PAD_LD18, 0x65c, 0x1f8, 1, 0x924, 1), /* MX35_PAD_LD18__IPU_DISPB_D0_VSYNC */
- [484] = IMX_PIN_REG(MX35_PAD_LD18, 0x65c, 0x1f8, 2, 0x928, 1), /* MX35_PAD_LD18__IPU_DISPB_D12_VSYNC */
- [485] = IMX_PIN_REG(MX35_PAD_LD18, 0x65c, 0x1f8, 3, 0x818, 0), /* MX35_PAD_LD18__ESDHC3_CMD */
- [486] = IMX_PIN_REG(MX35_PAD_LD18, 0x65c, 0x1f8, 4, 0x9b0, 0), /* MX35_PAD_LD18__USB_TOP_USBOTG_DATA_3 */
- [487] = IMX_PIN_REG(MX35_PAD_LD18, 0x65c, 0x1f8, 5, 0x0, 0), /* MX35_PAD_LD18__GPIO3_24 */
- [488] = IMX_PIN_REG(MX35_PAD_LD18, 0x65c, 0x1f8, 6, 0x0, 0), /* MX35_PAD_LD18__SDMA_SDMA_DEBUG_EVENT_CHANNEL_4 */
- [489] = IMX_PIN_REG(MX35_PAD_LD18, 0x65c, 0x1f8, 7, 0x0, 0), /* MX35_PAD_LD18__ARM11P_TOP_TRACE_11 */
- [490] = IMX_PIN_REG(MX35_PAD_LD19, 0x660, 0x1fc, 0, 0x0, 0), /* MX35_PAD_LD19__IPU_DISPB_DAT_19 */
- [491] = IMX_PIN_REG(MX35_PAD_LD19, 0x660, 0x1fc, 1, 0x0, 0), /* MX35_PAD_LD19__IPU_DISPB_BCLK */
- [492] = IMX_PIN_REG(MX35_PAD_LD19, 0x660, 0x1fc, 2, 0x0, 0), /* MX35_PAD_LD19__IPU_DISPB_CS1 */
- [493] = IMX_PIN_REG(MX35_PAD_LD19, 0x660, 0x1fc, 3, 0x814, 0), /* MX35_PAD_LD19__ESDHC3_CLK */
- [494] = IMX_PIN_REG(MX35_PAD_LD19, 0x660, 0x1fc, 4, 0x9c4, 0), /* MX35_PAD_LD19__USB_TOP_USBOTG_DIR */
- [495] = IMX_PIN_REG(MX35_PAD_LD19, 0x660, 0x1fc, 5, 0x0, 0), /* MX35_PAD_LD19__GPIO3_25 */
- [496] = IMX_PIN_REG(MX35_PAD_LD19, 0x660, 0x1fc, 6, 0x0, 0), /* MX35_PAD_LD19__SDMA_SDMA_DEBUG_EVENT_CHANNEL_5 */
- [497] = IMX_PIN_REG(MX35_PAD_LD19, 0x660, 0x1fc, 7, 0x0, 0), /* MX35_PAD_LD19__ARM11P_TOP_TRACE_12 */
- [498] = IMX_PIN_REG(MX35_PAD_LD20, 0x664, 0x200, 0, 0x0, 0), /* MX35_PAD_LD20__IPU_DISPB_DAT_20 */
- [499] = IMX_PIN_REG(MX35_PAD_LD20, 0x664, 0x200, 1, 0x0, 0), /* MX35_PAD_LD20__IPU_DISPB_CS0 */
- [500] = IMX_PIN_REG(MX35_PAD_LD20, 0x664, 0x200, 2, 0x0, 0), /* MX35_PAD_LD20__IPU_DISPB_SD_CLK */
- [501] = IMX_PIN_REG(MX35_PAD_LD20, 0x664, 0x200, 3, 0x81c, 0), /* MX35_PAD_LD20__ESDHC3_DAT0 */
- [502] = IMX_PIN_REG(MX35_PAD_LD20, 0x664, 0x200, 5, 0x0, 0), /* MX35_PAD_LD20__GPIO3_26 */
- [503] = IMX_PIN_REG(MX35_PAD_LD20, 0x664, 0x200, 6, 0x0, 0), /* MX35_PAD_LD20__SDMA_SDMA_DEBUG_CORE_STATUS_3 */
- [504] = IMX_PIN_REG(MX35_PAD_LD20, 0x664, 0x200, 7, 0x0, 0), /* MX35_PAD_LD20__ARM11P_TOP_TRACE_13 */
- [505] = IMX_PIN_REG(MX35_PAD_LD21, 0x668, 0x204, 0, 0x0, 0), /* MX35_PAD_LD21__IPU_DISPB_DAT_21 */
- [506] = IMX_PIN_REG(MX35_PAD_LD21, 0x668, 0x204, 1, 0x0, 0), /* MX35_PAD_LD21__IPU_DISPB_PAR_RS */
- [507] = IMX_PIN_REG(MX35_PAD_LD21, 0x668, 0x204, 2, 0x0, 0), /* MX35_PAD_LD21__IPU_DISPB_SER_RS */
- [508] = IMX_PIN_REG(MX35_PAD_LD21, 0x668, 0x204, 3, 0x820, 0), /* MX35_PAD_LD21__ESDHC3_DAT1 */
- [509] = IMX_PIN_REG(MX35_PAD_LD21, 0x668, 0x204, 4, 0x0, 0), /* MX35_PAD_LD21__USB_TOP_USBOTG_STP */
- [510] = IMX_PIN_REG(MX35_PAD_LD21, 0x668, 0x204, 5, 0x0, 0), /* MX35_PAD_LD21__GPIO3_27 */
- [511] = IMX_PIN_REG(MX35_PAD_LD21, 0x668, 0x204, 6, 0x0, 0), /* MX35_PAD_LD21__SDMA_DEBUG_EVENT_CHANNEL_SEL */
- [512] = IMX_PIN_REG(MX35_PAD_LD21, 0x668, 0x204, 7, 0x0, 0), /* MX35_PAD_LD21__ARM11P_TOP_TRACE_14 */
- [513] = IMX_PIN_REG(MX35_PAD_LD22, 0x66c, 0x208, 0, 0x0, 0), /* MX35_PAD_LD22__IPU_DISPB_DAT_22 */
- [514] = IMX_PIN_REG(MX35_PAD_LD22, 0x66c, 0x208, 1, 0x0, 0), /* MX35_PAD_LD22__IPU_DISPB_WR */
- [515] = IMX_PIN_REG(MX35_PAD_LD22, 0x66c, 0x208, 2, 0x92c, 0), /* MX35_PAD_LD22__IPU_DISPB_SD_D_I */
- [516] = IMX_PIN_REG(MX35_PAD_LD22, 0x66c, 0x208, 3, 0x824, 0), /* MX35_PAD_LD22__ESDHC3_DAT2 */
- [517] = IMX_PIN_REG(MX35_PAD_LD22, 0x66c, 0x208, 4, 0x9c8, 0), /* MX35_PAD_LD22__USB_TOP_USBOTG_NXT */
- [518] = IMX_PIN_REG(MX35_PAD_LD22, 0x66c, 0x208, 5, 0x0, 0), /* MX35_PAD_LD22__GPIO3_28 */
- [519] = IMX_PIN_REG(MX35_PAD_LD22, 0x66c, 0x208, 6, 0x0, 0), /* MX35_PAD_LD22__SDMA_DEBUG_BUS_ERROR */
- [520] = IMX_PIN_REG(MX35_PAD_LD22, 0x66c, 0x208, 7, 0x0, 0), /* MX35_PAD_LD22__ARM11P_TOP_TRCTL */
- [521] = IMX_PIN_REG(MX35_PAD_LD23, 0x670, 0x20c, 0, 0x0, 0), /* MX35_PAD_LD23__IPU_DISPB_DAT_23 */
- [522] = IMX_PIN_REG(MX35_PAD_LD23, 0x670, 0x20c, 1, 0x0, 0), /* MX35_PAD_LD23__IPU_DISPB_RD */
- [523] = IMX_PIN_REG(MX35_PAD_LD23, 0x670, 0x20c, 2, 0x92c, 1), /* MX35_PAD_LD23__IPU_DISPB_SD_D_IO */
- [524] = IMX_PIN_REG(MX35_PAD_LD23, 0x670, 0x20c, 3, 0x828, 0), /* MX35_PAD_LD23__ESDHC3_DAT3 */
- [525] = IMX_PIN_REG(MX35_PAD_LD23, 0x670, 0x20c, 4, 0x9c0, 0), /* MX35_PAD_LD23__USB_TOP_USBOTG_DATA_7 */
- [526] = IMX_PIN_REG(MX35_PAD_LD23, 0x670, 0x20c, 5, 0x0, 0), /* MX35_PAD_LD23__GPIO3_29 */
- [527] = IMX_PIN_REG(MX35_PAD_LD23, 0x670, 0x20c, 6, 0x0, 0), /* MX35_PAD_LD23__SDMA_DEBUG_MATCHED_DMBUS */
- [528] = IMX_PIN_REG(MX35_PAD_LD23, 0x670, 0x20c, 7, 0x0, 0), /* MX35_PAD_LD23__ARM11P_TOP_TRCLK */
- [529] = IMX_PIN_REG(MX35_PAD_D3_HSYNC, 0x674, 0x210, 0, 0x0, 0), /* MX35_PAD_D3_HSYNC__IPU_DISPB_D3_HSYNC */
- [530] = IMX_PIN_REG(MX35_PAD_D3_HSYNC, 0x674, 0x210, 2, 0x92c, 2), /* MX35_PAD_D3_HSYNC__IPU_DISPB_SD_D_IO */
- [531] = IMX_PIN_REG(MX35_PAD_D3_HSYNC, 0x674, 0x210, 5, 0x0, 0), /* MX35_PAD_D3_HSYNC__GPIO3_30 */
- [532] = IMX_PIN_REG(MX35_PAD_D3_HSYNC, 0x674, 0x210, 6, 0x0, 0), /* MX35_PAD_D3_HSYNC__SDMA_DEBUG_RTBUFFER_WRITE */
- [533] = IMX_PIN_REG(MX35_PAD_D3_HSYNC, 0x674, 0x210, 7, 0x0, 0), /* MX35_PAD_D3_HSYNC__ARM11P_TOP_TRACE_15 */
- [534] = IMX_PIN_REG(MX35_PAD_D3_FPSHIFT, 0x678, 0x214, 0, 0x0, 0), /* MX35_PAD_D3_FPSHIFT__IPU_DISPB_D3_CLK */
- [535] = IMX_PIN_REG(MX35_PAD_D3_FPSHIFT, 0x678, 0x214, 2, 0x0, 0), /* MX35_PAD_D3_FPSHIFT__IPU_DISPB_SD_CLK */
- [536] = IMX_PIN_REG(MX35_PAD_D3_FPSHIFT, 0x678, 0x214, 5, 0x0, 0), /* MX35_PAD_D3_FPSHIFT__GPIO3_31 */
- [537] = IMX_PIN_REG(MX35_PAD_D3_FPSHIFT, 0x678, 0x214, 6, 0x0, 0), /* MX35_PAD_D3_FPSHIFT__SDMA_SDMA_DEBUG_CORE_STATUS_0 */
- [538] = IMX_PIN_REG(MX35_PAD_D3_FPSHIFT, 0x678, 0x214, 7, 0x0, 0), /* MX35_PAD_D3_FPSHIFT__ARM11P_TOP_TRACE_16 */
- [539] = IMX_PIN_REG(MX35_PAD_D3_DRDY, 0x67c, 0x218, 0, 0x0, 0), /* MX35_PAD_D3_DRDY__IPU_DISPB_D3_DRDY */
- [540] = IMX_PIN_REG(MX35_PAD_D3_DRDY, 0x67c, 0x218, 2, 0x0, 0), /* MX35_PAD_D3_DRDY__IPU_DISPB_SD_D_O */
- [541] = IMX_PIN_REG(MX35_PAD_D3_DRDY, 0x67c, 0x218, 5, 0x82c, 2), /* MX35_PAD_D3_DRDY__GPIO1_0 */
- [542] = IMX_PIN_REG(MX35_PAD_D3_DRDY, 0x67c, 0x218, 6, 0x0, 0), /* MX35_PAD_D3_DRDY__SDMA_SDMA_DEBUG_CORE_STATUS_1 */
- [543] = IMX_PIN_REG(MX35_PAD_D3_DRDY, 0x67c, 0x218, 7, 0x0, 0), /* MX35_PAD_D3_DRDY__ARM11P_TOP_TRACE_17 */
- [544] = IMX_PIN_REG(MX35_PAD_CONTRAST, 0x680, 0x21c, 0, 0x0, 0), /* MX35_PAD_CONTRAST__IPU_DISPB_CONTR */
- [545] = IMX_PIN_REG(MX35_PAD_CONTRAST, 0x680, 0x21c, 5, 0x838, 2), /* MX35_PAD_CONTRAST__GPIO1_1 */
- [546] = IMX_PIN_REG(MX35_PAD_CONTRAST, 0x680, 0x21c, 6, 0x0, 0), /* MX35_PAD_CONTRAST__SDMA_SDMA_DEBUG_CORE_STATUS_2 */
- [547] = IMX_PIN_REG(MX35_PAD_CONTRAST, 0x680, 0x21c, 7, 0x0, 0), /* MX35_PAD_CONTRAST__ARM11P_TOP_TRACE_18 */
- [548] = IMX_PIN_REG(MX35_PAD_D3_VSYNC, 0x684, 0x220, 0, 0x0, 0), /* MX35_PAD_D3_VSYNC__IPU_DISPB_D3_VSYNC */
- [549] = IMX_PIN_REG(MX35_PAD_D3_VSYNC, 0x684, 0x220, 2, 0x0, 0), /* MX35_PAD_D3_VSYNC__IPU_DISPB_CS1 */
- [550] = IMX_PIN_REG(MX35_PAD_D3_VSYNC, 0x684, 0x220, 5, 0x848, 1), /* MX35_PAD_D3_VSYNC__GPIO1_2 */
- [551] = IMX_PIN_REG(MX35_PAD_D3_VSYNC, 0x684, 0x220, 6, 0x0, 0), /* MX35_PAD_D3_VSYNC__SDMA_DEBUG_YIELD */
- [552] = IMX_PIN_REG(MX35_PAD_D3_VSYNC, 0x684, 0x220, 7, 0x0, 0), /* MX35_PAD_D3_VSYNC__ARM11P_TOP_TRACE_19 */
- [553] = IMX_PIN_REG(MX35_PAD_D3_REV, 0x688, 0x224, 0, 0x0, 0), /* MX35_PAD_D3_REV__IPU_DISPB_D3_REV */
- [554] = IMX_PIN_REG(MX35_PAD_D3_REV, 0x688, 0x224, 2, 0x0, 0), /* MX35_PAD_D3_REV__IPU_DISPB_SER_RS */
- [555] = IMX_PIN_REG(MX35_PAD_D3_REV, 0x688, 0x224, 5, 0x84c, 1), /* MX35_PAD_D3_REV__GPIO1_3 */
- [556] = IMX_PIN_REG(MX35_PAD_D3_REV, 0x688, 0x224, 6, 0x0, 0), /* MX35_PAD_D3_REV__SDMA_DEBUG_BUS_RWB */
- [557] = IMX_PIN_REG(MX35_PAD_D3_REV, 0x688, 0x224, 7, 0x0, 0), /* MX35_PAD_D3_REV__ARM11P_TOP_TRACE_20 */
- [558] = IMX_PIN_REG(MX35_PAD_D3_CLS, 0x68c, 0x228, 0, 0x0, 0), /* MX35_PAD_D3_CLS__IPU_DISPB_D3_CLS */
- [559] = IMX_PIN_REG(MX35_PAD_D3_CLS, 0x68c, 0x228, 2, 0x0, 0), /* MX35_PAD_D3_CLS__IPU_DISPB_CS2 */
- [560] = IMX_PIN_REG(MX35_PAD_D3_CLS, 0x68c, 0x228, 5, 0x850, 2), /* MX35_PAD_D3_CLS__GPIO1_4 */
- [561] = IMX_PIN_REG(MX35_PAD_D3_CLS, 0x68c, 0x228, 6, 0x0, 0), /* MX35_PAD_D3_CLS__SDMA_DEBUG_BUS_DEVICE_0 */
- [562] = IMX_PIN_REG(MX35_PAD_D3_CLS, 0x68c, 0x228, 7, 0x0, 0), /* MX35_PAD_D3_CLS__ARM11P_TOP_TRACE_21 */
- [563] = IMX_PIN_REG(MX35_PAD_D3_SPL, 0x690, 0x22c, 0, 0x0, 0), /* MX35_PAD_D3_SPL__IPU_DISPB_D3_SPL */
- [564] = IMX_PIN_REG(MX35_PAD_D3_SPL, 0x690, 0x22c, 2, 0x928, 2), /* MX35_PAD_D3_SPL__IPU_DISPB_D12_VSYNC */
- [565] = IMX_PIN_REG(MX35_PAD_D3_SPL, 0x690, 0x22c, 5, 0x854, 2), /* MX35_PAD_D3_SPL__GPIO1_5 */
- [566] = IMX_PIN_REG(MX35_PAD_D3_SPL, 0x690, 0x22c, 6, 0x0, 0), /* MX35_PAD_D3_SPL__SDMA_DEBUG_BUS_DEVICE_1 */
- [567] = IMX_PIN_REG(MX35_PAD_D3_SPL, 0x690, 0x22c, 7, 0x0, 0), /* MX35_PAD_D3_SPL__ARM11P_TOP_TRACE_22 */
- [568] = IMX_PIN_REG(MX35_PAD_SD1_CMD, 0x694, 0x230, 0, 0x0, 0), /* MX35_PAD_SD1_CMD__ESDHC1_CMD */
- [569] = IMX_PIN_REG(MX35_PAD_SD1_CMD, 0x694, 0x230, 1, 0x0, 0), /* MX35_PAD_SD1_CMD__MSHC_SCLK */
- [570] = IMX_PIN_REG(MX35_PAD_SD1_CMD, 0x694, 0x230, 3, 0x924, 2), /* MX35_PAD_SD1_CMD__IPU_DISPB_D0_VSYNC */
- [571] = IMX_PIN_REG(MX35_PAD_SD1_CMD, 0x694, 0x230, 4, 0x9b4, 0), /* MX35_PAD_SD1_CMD__USB_TOP_USBOTG_DATA_4 */
- [572] = IMX_PIN_REG(MX35_PAD_SD1_CMD, 0x694, 0x230, 5, 0x858, 2), /* MX35_PAD_SD1_CMD__GPIO1_6 */
- [573] = IMX_PIN_REG(MX35_PAD_SD1_CMD, 0x694, 0x230, 7, 0x0, 0), /* MX35_PAD_SD1_CMD__ARM11P_TOP_TRCTL */
- [574] = IMX_PIN_REG(MX35_PAD_SD1_CLK, 0x698, 0x234, 0, 0x0, 0), /* MX35_PAD_SD1_CLK__ESDHC1_CLK */
- [575] = IMX_PIN_REG(MX35_PAD_SD1_CLK, 0x698, 0x234, 1, 0x0, 0), /* MX35_PAD_SD1_CLK__MSHC_BS */
- [576] = IMX_PIN_REG(MX35_PAD_SD1_CLK, 0x698, 0x234, 3, 0x0, 0), /* MX35_PAD_SD1_CLK__IPU_DISPB_BCLK */
- [577] = IMX_PIN_REG(MX35_PAD_SD1_CLK, 0x698, 0x234, 4, 0x9b8, 0), /* MX35_PAD_SD1_CLK__USB_TOP_USBOTG_DATA_5 */
- [578] = IMX_PIN_REG(MX35_PAD_SD1_CLK, 0x698, 0x234, 5, 0x85c, 2), /* MX35_PAD_SD1_CLK__GPIO1_7 */
- [579] = IMX_PIN_REG(MX35_PAD_SD1_CLK, 0x698, 0x234, 7, 0x0, 0), /* MX35_PAD_SD1_CLK__ARM11P_TOP_TRCLK */
- [580] = IMX_PIN_REG(MX35_PAD_SD1_DATA0, 0x69c, 0x238, 0, 0x0, 0), /* MX35_PAD_SD1_DATA0__ESDHC1_DAT0 */
- [581] = IMX_PIN_REG(MX35_PAD_SD1_DATA0, 0x69c, 0x238, 1, 0x0, 0), /* MX35_PAD_SD1_DATA0__MSHC_DATA_0 */
- [582] = IMX_PIN_REG(MX35_PAD_SD1_DATA0, 0x69c, 0x238, 3, 0x0, 0), /* MX35_PAD_SD1_DATA0__IPU_DISPB_CS0 */
- [583] = IMX_PIN_REG(MX35_PAD_SD1_DATA0, 0x69c, 0x238, 4, 0x9bc, 0), /* MX35_PAD_SD1_DATA0__USB_TOP_USBOTG_DATA_6 */
- [584] = IMX_PIN_REG(MX35_PAD_SD1_DATA0, 0x69c, 0x238, 5, 0x860, 2), /* MX35_PAD_SD1_DATA0__GPIO1_8 */
- [585] = IMX_PIN_REG(MX35_PAD_SD1_DATA0, 0x69c, 0x238, 7, 0x0, 0), /* MX35_PAD_SD1_DATA0__ARM11P_TOP_TRACE_23 */
- [586] = IMX_PIN_REG(MX35_PAD_SD1_DATA1, 0x6a0, 0x23c, 0, 0x0, 0), /* MX35_PAD_SD1_DATA1__ESDHC1_DAT1 */
- [587] = IMX_PIN_REG(MX35_PAD_SD1_DATA1, 0x6a0, 0x23c, 1, 0x0, 0), /* MX35_PAD_SD1_DATA1__MSHC_DATA_1 */
- [588] = IMX_PIN_REG(MX35_PAD_SD1_DATA1, 0x6a0, 0x23c, 3, 0x0, 0), /* MX35_PAD_SD1_DATA1__IPU_DISPB_PAR_RS */
- [589] = IMX_PIN_REG(MX35_PAD_SD1_DATA1, 0x6a0, 0x23c, 4, 0x9a4, 0), /* MX35_PAD_SD1_DATA1__USB_TOP_USBOTG_DATA_0 */
- [590] = IMX_PIN_REG(MX35_PAD_SD1_DATA1, 0x6a0, 0x23c, 5, 0x864, 1), /* MX35_PAD_SD1_DATA1__GPIO1_9 */
- [591] = IMX_PIN_REG(MX35_PAD_SD1_DATA1, 0x6a0, 0x23c, 7, 0x0, 0), /* MX35_PAD_SD1_DATA1__ARM11P_TOP_TRACE_24 */
- [592] = IMX_PIN_REG(MX35_PAD_SD1_DATA2, 0x6a4, 0x240, 0, 0x0, 0), /* MX35_PAD_SD1_DATA2__ESDHC1_DAT2 */
- [593] = IMX_PIN_REG(MX35_PAD_SD1_DATA2, 0x6a4, 0x240, 1, 0x0, 0), /* MX35_PAD_SD1_DATA2__MSHC_DATA_2 */
- [594] = IMX_PIN_REG(MX35_PAD_SD1_DATA2, 0x6a4, 0x240, 3, 0x0, 0), /* MX35_PAD_SD1_DATA2__IPU_DISPB_WR */
- [595] = IMX_PIN_REG(MX35_PAD_SD1_DATA2, 0x6a4, 0x240, 4, 0x9a8, 0), /* MX35_PAD_SD1_DATA2__USB_TOP_USBOTG_DATA_1 */
- [596] = IMX_PIN_REG(MX35_PAD_SD1_DATA2, 0x6a4, 0x240, 5, 0x830, 1), /* MX35_PAD_SD1_DATA2__GPIO1_10 */
- [597] = IMX_PIN_REG(MX35_PAD_SD1_DATA2, 0x6a4, 0x240, 7, 0x0, 0), /* MX35_PAD_SD1_DATA2__ARM11P_TOP_TRACE_25 */
- [598] = IMX_PIN_REG(MX35_PAD_SD1_DATA3, 0x6a8, 0x244, 0, 0x0, 0), /* MX35_PAD_SD1_DATA3__ESDHC1_DAT3 */
- [599] = IMX_PIN_REG(MX35_PAD_SD1_DATA3, 0x6a8, 0x244, 1, 0x0, 0), /* MX35_PAD_SD1_DATA3__MSHC_DATA_3 */
- [600] = IMX_PIN_REG(MX35_PAD_SD1_DATA3, 0x6a8, 0x244, 3, 0x0, 0), /* MX35_PAD_SD1_DATA3__IPU_DISPB_RD */
- [601] = IMX_PIN_REG(MX35_PAD_SD1_DATA3, 0x6a8, 0x244, 4, 0x9ac, 0), /* MX35_PAD_SD1_DATA3__USB_TOP_USBOTG_DATA_2 */
- [602] = IMX_PIN_REG(MX35_PAD_SD1_DATA3, 0x6a8, 0x244, 5, 0x834, 1), /* MX35_PAD_SD1_DATA3__GPIO1_11 */
- [603] = IMX_PIN_REG(MX35_PAD_SD1_DATA3, 0x6a8, 0x244, 7, 0x0, 0), /* MX35_PAD_SD1_DATA3__ARM11P_TOP_TRACE_26 */
- [604] = IMX_PIN_REG(MX35_PAD_SD2_CMD, 0x6ac, 0x248, 0, 0x0, 0), /* MX35_PAD_SD2_CMD__ESDHC2_CMD */
- [605] = IMX_PIN_REG(MX35_PAD_SD2_CMD, 0x6ac, 0x248, 1, 0x91c, 2), /* MX35_PAD_SD2_CMD__I2C3_SCL */
- [606] = IMX_PIN_REG(MX35_PAD_SD2_CMD, 0x6ac, 0x248, 2, 0x804, 0), /* MX35_PAD_SD2_CMD__ESDHC1_DAT4 */
- [607] = IMX_PIN_REG(MX35_PAD_SD2_CMD, 0x6ac, 0x248, 3, 0x938, 2), /* MX35_PAD_SD2_CMD__IPU_CSI_D_2 */
- [608] = IMX_PIN_REG(MX35_PAD_SD2_CMD, 0x6ac, 0x248, 4, 0x9dc, 0), /* MX35_PAD_SD2_CMD__USB_TOP_USBH2_DATA_4 */
- [609] = IMX_PIN_REG(MX35_PAD_SD2_CMD, 0x6ac, 0x248, 5, 0x868, 2), /* MX35_PAD_SD2_CMD__GPIO2_0 */
- [610] = IMX_PIN_REG(MX35_PAD_SD2_CMD, 0x6ac, 0x248, 6, 0x0, 0), /* MX35_PAD_SD2_CMD__SPDIF_SPDIF_OUT1 */
- [611] = IMX_PIN_REG(MX35_PAD_SD2_CMD, 0x6ac, 0x248, 7, 0x928, 3), /* MX35_PAD_SD2_CMD__IPU_DISPB_D12_VSYNC */
- [612] = IMX_PIN_REG(MX35_PAD_SD2_CLK, 0x6b0, 0x24c, 0, 0x0, 0), /* MX35_PAD_SD2_CLK__ESDHC2_CLK */
- [613] = IMX_PIN_REG(MX35_PAD_SD2_CLK, 0x6b0, 0x24c, 1, 0x920, 2), /* MX35_PAD_SD2_CLK__I2C3_SDA */
- [614] = IMX_PIN_REG(MX35_PAD_SD2_CLK, 0x6b0, 0x24c, 2, 0x808, 0), /* MX35_PAD_SD2_CLK__ESDHC1_DAT5 */
- [615] = IMX_PIN_REG(MX35_PAD_SD2_CLK, 0x6b0, 0x24c, 3, 0x93c, 2), /* MX35_PAD_SD2_CLK__IPU_CSI_D_3 */
- [616] = IMX_PIN_REG(MX35_PAD_SD2_CLK, 0x6b0, 0x24c, 4, 0x9e0, 0), /* MX35_PAD_SD2_CLK__USB_TOP_USBH2_DATA_5 */
- [617] = IMX_PIN_REG(MX35_PAD_SD2_CLK, 0x6b0, 0x24c, 5, 0x894, 1), /* MX35_PAD_SD2_CLK__GPIO2_1 */
- [618] = IMX_PIN_REG(MX35_PAD_SD2_CLK, 0x6b0, 0x24c, 6, 0x998, 2), /* MX35_PAD_SD2_CLK__SPDIF_SPDIF_IN1 */
- [619] = IMX_PIN_REG(MX35_PAD_SD2_CLK, 0x6b0, 0x24c, 7, 0x0, 0), /* MX35_PAD_SD2_CLK__IPU_DISPB_CS2 */
- [620] = IMX_PIN_REG(MX35_PAD_SD2_DATA0, 0x6b4, 0x250, 0, 0x0, 0), /* MX35_PAD_SD2_DATA0__ESDHC2_DAT0 */
- [621] = IMX_PIN_REG(MX35_PAD_SD2_DATA0, 0x6b4, 0x250, 1, 0x9a0, 1), /* MX35_PAD_SD2_DATA0__UART3_RXD_MUX */
- [622] = IMX_PIN_REG(MX35_PAD_SD2_DATA0, 0x6b4, 0x250, 2, 0x80c, 0), /* MX35_PAD_SD2_DATA0__ESDHC1_DAT6 */
- [623] = IMX_PIN_REG(MX35_PAD_SD2_DATA0, 0x6b4, 0x250, 3, 0x940, 1), /* MX35_PAD_SD2_DATA0__IPU_CSI_D_4 */
- [624] = IMX_PIN_REG(MX35_PAD_SD2_DATA0, 0x6b4, 0x250, 4, 0x9e4, 0), /* MX35_PAD_SD2_DATA0__USB_TOP_USBH2_DATA_6 */
- [625] = IMX_PIN_REG(MX35_PAD_SD2_DATA0, 0x6b4, 0x250, 5, 0x8c0, 1), /* MX35_PAD_SD2_DATA0__GPIO2_2 */
- [626] = IMX_PIN_REG(MX35_PAD_SD2_DATA0, 0x6b4, 0x250, 6, 0x994, 3), /* MX35_PAD_SD2_DATA0__SPDIF_SPDIF_EXTCLK */
- [627] = IMX_PIN_REG(MX35_PAD_SD2_DATA1, 0x6b8, 0x254, 0, 0x0, 0), /* MX35_PAD_SD2_DATA1__ESDHC2_DAT1 */
- [628] = IMX_PIN_REG(MX35_PAD_SD2_DATA1, 0x6b8, 0x254, 1, 0x0, 0), /* MX35_PAD_SD2_DATA1__UART3_TXD_MUX */
- [629] = IMX_PIN_REG(MX35_PAD_SD2_DATA1, 0x6b8, 0x254, 2, 0x810, 0), /* MX35_PAD_SD2_DATA1__ESDHC1_DAT7 */
- [630] = IMX_PIN_REG(MX35_PAD_SD2_DATA1, 0x6b8, 0x254, 3, 0x944, 1), /* MX35_PAD_SD2_DATA1__IPU_CSI_D_5 */
- [631] = IMX_PIN_REG(MX35_PAD_SD2_DATA1, 0x6b8, 0x254, 4, 0x9cc, 0), /* MX35_PAD_SD2_DATA1__USB_TOP_USBH2_DATA_0 */
- [632] = IMX_PIN_REG(MX35_PAD_SD2_DATA1, 0x6b8, 0x254, 5, 0x8cc, 1), /* MX35_PAD_SD2_DATA1__GPIO2_3 */
- [633] = IMX_PIN_REG(MX35_PAD_SD2_DATA2, 0x6bc, 0x258, 0, 0x0, 0), /* MX35_PAD_SD2_DATA2__ESDHC2_DAT2 */
- [634] = IMX_PIN_REG(MX35_PAD_SD2_DATA2, 0x6bc, 0x258, 1, 0x99c, 0), /* MX35_PAD_SD2_DATA2__UART3_RTS */
- [635] = IMX_PIN_REG(MX35_PAD_SD2_DATA2, 0x6bc, 0x258, 2, 0x7c8, 1), /* MX35_PAD_SD2_DATA2__CAN1_RXCAN */
- [636] = IMX_PIN_REG(MX35_PAD_SD2_DATA2, 0x6bc, 0x258, 3, 0x948, 1), /* MX35_PAD_SD2_DATA2__IPU_CSI_D_6 */
- [637] = IMX_PIN_REG(MX35_PAD_SD2_DATA2, 0x6bc, 0x258, 4, 0x9d0, 0), /* MX35_PAD_SD2_DATA2__USB_TOP_USBH2_DATA_1 */
- [638] = IMX_PIN_REG(MX35_PAD_SD2_DATA2, 0x6bc, 0x258, 5, 0x8d0, 1), /* MX35_PAD_SD2_DATA2__GPIO2_4 */
- [639] = IMX_PIN_REG(MX35_PAD_SD2_DATA3, 0x6c0, 0x25c, 0, 0x0, 0), /* MX35_PAD_SD2_DATA3__ESDHC2_DAT3 */
- [640] = IMX_PIN_REG(MX35_PAD_SD2_DATA3, 0x6c0, 0x25c, 1, 0x0, 0), /* MX35_PAD_SD2_DATA3__UART3_CTS */
- [641] = IMX_PIN_REG(MX35_PAD_SD2_DATA3, 0x6c0, 0x25c, 2, 0x0, 0), /* MX35_PAD_SD2_DATA3__CAN1_TXCAN */
- [642] = IMX_PIN_REG(MX35_PAD_SD2_DATA3, 0x6c0, 0x25c, 3, 0x94c, 1), /* MX35_PAD_SD2_DATA3__IPU_CSI_D_7 */
- [643] = IMX_PIN_REG(MX35_PAD_SD2_DATA3, 0x6c0, 0x25c, 4, 0x9d4, 0), /* MX35_PAD_SD2_DATA3__USB_TOP_USBH2_DATA_2 */
- [644] = IMX_PIN_REG(MX35_PAD_SD2_DATA3, 0x6c0, 0x25c, 5, 0x8d4, 1), /* MX35_PAD_SD2_DATA3__GPIO2_5 */
- [645] = IMX_PIN_REG(MX35_PAD_ATA_CS0, 0x6c4, 0x260, 0, 0x0, 0), /* MX35_PAD_ATA_CS0__ATA_CS0 */
- [646] = IMX_PIN_REG(MX35_PAD_ATA_CS0, 0x6c4, 0x260, 1, 0x7dc, 1), /* MX35_PAD_ATA_CS0__CSPI1_SS3 */
- [647] = IMX_PIN_REG(MX35_PAD_ATA_CS0, 0x6c4, 0x260, 3, 0x0, 0), /* MX35_PAD_ATA_CS0__IPU_DISPB_CS1 */
- [648] = IMX_PIN_REG(MX35_PAD_ATA_CS0, 0x6c4, 0x260, 5, 0x8d8, 1), /* MX35_PAD_ATA_CS0__GPIO2_6 */
- [649] = IMX_PIN_REG(MX35_PAD_ATA_CS0, 0x6c4, 0x260, 6, 0x0, 0), /* MX35_PAD_ATA_CS0__IPU_DIAGB_0 */
- [650] = IMX_PIN_REG(MX35_PAD_ATA_CS0, 0x6c4, 0x260, 7, 0x0, 0), /* MX35_PAD_ATA_CS0__ARM11P_TOP_MAX1_HMASTER_0 */
- [651] = IMX_PIN_REG(MX35_PAD_ATA_CS1, 0x6c8, 0x264, 0, 0x0, 0), /* MX35_PAD_ATA_CS1__ATA_CS1 */
- [652] = IMX_PIN_REG(MX35_PAD_ATA_CS1, 0x6c8, 0x264, 3, 0x0, 0), /* MX35_PAD_ATA_CS1__IPU_DISPB_CS2 */
- [653] = IMX_PIN_REG(MX35_PAD_ATA_CS1, 0x6c8, 0x264, 4, 0x7f0, 1), /* MX35_PAD_ATA_CS1__CSPI2_SS0 */
- [654] = IMX_PIN_REG(MX35_PAD_ATA_CS1, 0x6c8, 0x264, 5, 0x8dc, 1), /* MX35_PAD_ATA_CS1__GPIO2_7 */
- [655] = IMX_PIN_REG(MX35_PAD_ATA_CS1, 0x6c8, 0x264, 6, 0x0, 0), /* MX35_PAD_ATA_CS1__IPU_DIAGB_1 */
- [656] = IMX_PIN_REG(MX35_PAD_ATA_CS1, 0x6c8, 0x264, 7, 0x0, 0), /* MX35_PAD_ATA_CS1__ARM11P_TOP_MAX1_HMASTER_1 */
- [657] = IMX_PIN_REG(MX35_PAD_ATA_DIOR, 0x6cc, 0x268, 0, 0x0, 0), /* MX35_PAD_ATA_DIOR__ATA_DIOR */
- [658] = IMX_PIN_REG(MX35_PAD_ATA_DIOR, 0x6cc, 0x268, 1, 0x81c, 1), /* MX35_PAD_ATA_DIOR__ESDHC3_DAT0 */
- [659] = IMX_PIN_REG(MX35_PAD_ATA_DIOR, 0x6cc, 0x268, 2, 0x9c4, 1), /* MX35_PAD_ATA_DIOR__USB_TOP_USBOTG_DIR */
- [660] = IMX_PIN_REG(MX35_PAD_ATA_DIOR, 0x6cc, 0x268, 3, 0x0, 0), /* MX35_PAD_ATA_DIOR__IPU_DISPB_BE0 */
- [661] = IMX_PIN_REG(MX35_PAD_ATA_DIOR, 0x6cc, 0x268, 4, 0x7f4, 1), /* MX35_PAD_ATA_DIOR__CSPI2_SS1 */
- [662] = IMX_PIN_REG(MX35_PAD_ATA_DIOR, 0x6cc, 0x268, 5, 0x8e0, 1), /* MX35_PAD_ATA_DIOR__GPIO2_8 */
- [663] = IMX_PIN_REG(MX35_PAD_ATA_DIOR, 0x6cc, 0x268, 6, 0x0, 0), /* MX35_PAD_ATA_DIOR__IPU_DIAGB_2 */
- [664] = IMX_PIN_REG(MX35_PAD_ATA_DIOR, 0x6cc, 0x268, 7, 0x0, 0), /* MX35_PAD_ATA_DIOR__ARM11P_TOP_MAX1_HMASTER_2 */
- [665] = IMX_PIN_REG(MX35_PAD_ATA_DIOW, 0x6d0, 0x26c, 0, 0x0, 0), /* MX35_PAD_ATA_DIOW__ATA_DIOW */
- [666] = IMX_PIN_REG(MX35_PAD_ATA_DIOW, 0x6d0, 0x26c, 1, 0x820, 1), /* MX35_PAD_ATA_DIOW__ESDHC3_DAT1 */
- [667] = IMX_PIN_REG(MX35_PAD_ATA_DIOW, 0x6d0, 0x26c, 2, 0x0, 0), /* MX35_PAD_ATA_DIOW__USB_TOP_USBOTG_STP */
- [668] = IMX_PIN_REG(MX35_PAD_ATA_DIOW, 0x6d0, 0x26c, 3, 0x0, 0), /* MX35_PAD_ATA_DIOW__IPU_DISPB_BE1 */
- [669] = IMX_PIN_REG(MX35_PAD_ATA_DIOW, 0x6d0, 0x26c, 4, 0x7ec, 2), /* MX35_PAD_ATA_DIOW__CSPI2_MOSI */
- [670] = IMX_PIN_REG(MX35_PAD_ATA_DIOW, 0x6d0, 0x26c, 5, 0x8e4, 1), /* MX35_PAD_ATA_DIOW__GPIO2_9 */
- [671] = IMX_PIN_REG(MX35_PAD_ATA_DIOW, 0x6d0, 0x26c, 6, 0x0, 0), /* MX35_PAD_ATA_DIOW__IPU_DIAGB_3 */
- [672] = IMX_PIN_REG(MX35_PAD_ATA_DIOW, 0x6d0, 0x26c, 7, 0x0, 0), /* MX35_PAD_ATA_DIOW__ARM11P_TOP_MAX1_HMASTER_3 */
- [673] = IMX_PIN_REG(MX35_PAD_ATA_DMACK, 0x6d4, 0x270, 0, 0x0, 0), /* MX35_PAD_ATA_DMACK__ATA_DMACK */
- [674] = IMX_PIN_REG(MX35_PAD_ATA_DMACK, 0x6d4, 0x270, 1, 0x824, 1), /* MX35_PAD_ATA_DMACK__ESDHC3_DAT2 */
- [675] = IMX_PIN_REG(MX35_PAD_ATA_DMACK, 0x6d4, 0x270, 2, 0x9c8, 1), /* MX35_PAD_ATA_DMACK__USB_TOP_USBOTG_NXT */
- [676] = IMX_PIN_REG(MX35_PAD_ATA_DMACK, 0x6d4, 0x270, 4, 0x7e8, 2), /* MX35_PAD_ATA_DMACK__CSPI2_MISO */
- [677] = IMX_PIN_REG(MX35_PAD_ATA_DMACK, 0x6d4, 0x270, 5, 0x86c, 1), /* MX35_PAD_ATA_DMACK__GPIO2_10 */
- [678] = IMX_PIN_REG(MX35_PAD_ATA_DMACK, 0x6d4, 0x270, 6, 0x0, 0), /* MX35_PAD_ATA_DMACK__IPU_DIAGB_4 */
- [679] = IMX_PIN_REG(MX35_PAD_ATA_DMACK, 0x6d4, 0x270, 7, 0x0, 0), /* MX35_PAD_ATA_DMACK__ARM11P_TOP_MAX0_HMASTER_0 */
- [680] = IMX_PIN_REG(MX35_PAD_ATA_RESET_B, 0x6d8, 0x274, 0, 0x0, 0), /* MX35_PAD_ATA_RESET_B__ATA_RESET_B */
- [681] = IMX_PIN_REG(MX35_PAD_ATA_RESET_B, 0x6d8, 0x274, 1, 0x828, 1), /* MX35_PAD_ATA_RESET_B__ESDHC3_DAT3 */
- [682] = IMX_PIN_REG(MX35_PAD_ATA_RESET_B, 0x6d8, 0x274, 2, 0x9a4, 1), /* MX35_PAD_ATA_RESET_B__USB_TOP_USBOTG_DATA_0 */
- [683] = IMX_PIN_REG(MX35_PAD_ATA_RESET_B, 0x6d8, 0x274, 3, 0x0, 0), /* MX35_PAD_ATA_RESET_B__IPU_DISPB_SD_D_O */
- [684] = IMX_PIN_REG(MX35_PAD_ATA_RESET_B, 0x6d8, 0x274, 4, 0x7e4, 2), /* MX35_PAD_ATA_RESET_B__CSPI2_RDY */
- [685] = IMX_PIN_REG(MX35_PAD_ATA_RESET_B, 0x6d8, 0x274, 5, 0x870, 1), /* MX35_PAD_ATA_RESET_B__GPIO2_11 */
- [686] = IMX_PIN_REG(MX35_PAD_ATA_RESET_B, 0x6d8, 0x274, 6, 0x0, 0), /* MX35_PAD_ATA_RESET_B__IPU_DIAGB_5 */
- [687] = IMX_PIN_REG(MX35_PAD_ATA_RESET_B, 0x6d8, 0x274, 7, 0x0, 0), /* MX35_PAD_ATA_RESET_B__ARM11P_TOP_MAX0_HMASTER_1 */
- [688] = IMX_PIN_REG(MX35_PAD_ATA_IORDY, 0x6dc, 0x278, 0, 0x0, 0), /* MX35_PAD_ATA_IORDY__ATA_IORDY */
- [689] = IMX_PIN_REG(MX35_PAD_ATA_IORDY, 0x6dc, 0x278, 1, 0x0, 0), /* MX35_PAD_ATA_IORDY__ESDHC3_DAT4 */
- [690] = IMX_PIN_REG(MX35_PAD_ATA_IORDY, 0x6dc, 0x278, 2, 0x9a8, 1), /* MX35_PAD_ATA_IORDY__USB_TOP_USBOTG_DATA_1 */
- [691] = IMX_PIN_REG(MX35_PAD_ATA_IORDY, 0x6dc, 0x278, 3, 0x92c, 3), /* MX35_PAD_ATA_IORDY__IPU_DISPB_SD_D_IO */
- [692] = IMX_PIN_REG(MX35_PAD_ATA_IORDY, 0x6dc, 0x278, 4, 0x0, 0), /* MX35_PAD_ATA_IORDY__ESDHC2_DAT4 */
- [693] = IMX_PIN_REG(MX35_PAD_ATA_IORDY, 0x6dc, 0x278, 5, 0x874, 1), /* MX35_PAD_ATA_IORDY__GPIO2_12 */
- [694] = IMX_PIN_REG(MX35_PAD_ATA_IORDY, 0x6dc, 0x278, 6, 0x0, 0), /* MX35_PAD_ATA_IORDY__IPU_DIAGB_6 */
- [695] = IMX_PIN_REG(MX35_PAD_ATA_IORDY, 0x6dc, 0x278, 7, 0x0, 0), /* MX35_PAD_ATA_IORDY__ARM11P_TOP_MAX0_HMASTER_2 */
- [696] = IMX_PIN_REG(MX35_PAD_ATA_DATA0, 0x6e0, 0x27c, 0, 0x0, 0), /* MX35_PAD_ATA_DATA0__ATA_DATA_0 */
- [697] = IMX_PIN_REG(MX35_PAD_ATA_DATA0, 0x6e0, 0x27c, 1, 0x0, 0), /* MX35_PAD_ATA_DATA0__ESDHC3_DAT5 */
- [698] = IMX_PIN_REG(MX35_PAD_ATA_DATA0, 0x6e0, 0x27c, 2, 0x9ac, 1), /* MX35_PAD_ATA_DATA0__USB_TOP_USBOTG_DATA_2 */
- [699] = IMX_PIN_REG(MX35_PAD_ATA_DATA0, 0x6e0, 0x27c, 3, 0x928, 4), /* MX35_PAD_ATA_DATA0__IPU_DISPB_D12_VSYNC */
- [700] = IMX_PIN_REG(MX35_PAD_ATA_DATA0, 0x6e0, 0x27c, 4, 0x0, 0), /* MX35_PAD_ATA_DATA0__ESDHC2_DAT5 */
- [701] = IMX_PIN_REG(MX35_PAD_ATA_DATA0, 0x6e0, 0x27c, 5, 0x878, 1), /* MX35_PAD_ATA_DATA0__GPIO2_13 */
- [702] = IMX_PIN_REG(MX35_PAD_ATA_DATA0, 0x6e0, 0x27c, 6, 0x0, 0), /* MX35_PAD_ATA_DATA0__IPU_DIAGB_7 */
- [703] = IMX_PIN_REG(MX35_PAD_ATA_DATA0, 0x6e0, 0x27c, 7, 0x0, 0), /* MX35_PAD_ATA_DATA0__ARM11P_TOP_MAX0_HMASTER_3 */
- [704] = IMX_PIN_REG(MX35_PAD_ATA_DATA1, 0x6e4, 0x280, 0, 0x0, 0), /* MX35_PAD_ATA_DATA1__ATA_DATA_1 */
- [705] = IMX_PIN_REG(MX35_PAD_ATA_DATA1, 0x6e4, 0x280, 1, 0x0, 0), /* MX35_PAD_ATA_DATA1__ESDHC3_DAT6 */
- [706] = IMX_PIN_REG(MX35_PAD_ATA_DATA1, 0x6e4, 0x280, 2, 0x9b0, 1), /* MX35_PAD_ATA_DATA1__USB_TOP_USBOTG_DATA_3 */
- [707] = IMX_PIN_REG(MX35_PAD_ATA_DATA1, 0x6e4, 0x280, 3, 0x0, 0), /* MX35_PAD_ATA_DATA1__IPU_DISPB_SD_CLK */
- [708] = IMX_PIN_REG(MX35_PAD_ATA_DATA1, 0x6e4, 0x280, 4, 0x0, 0), /* MX35_PAD_ATA_DATA1__ESDHC2_DAT6 */
- [709] = IMX_PIN_REG(MX35_PAD_ATA_DATA1, 0x6e4, 0x280, 5, 0x87c, 1), /* MX35_PAD_ATA_DATA1__GPIO2_14 */
- [710] = IMX_PIN_REG(MX35_PAD_ATA_DATA1, 0x6e4, 0x280, 6, 0x0, 0), /* MX35_PAD_ATA_DATA1__IPU_DIAGB_8 */
- [711] = IMX_PIN_REG(MX35_PAD_ATA_DATA1, 0x6e4, 0x280, 7, 0x0, 0), /* MX35_PAD_ATA_DATA1__ARM11P_TOP_TRACE_27 */
- [712] = IMX_PIN_REG(MX35_PAD_ATA_DATA2, 0x6e8, 0x284, 0, 0x0, 0), /* MX35_PAD_ATA_DATA2__ATA_DATA_2 */
- [713] = IMX_PIN_REG(MX35_PAD_ATA_DATA2, 0x6e8, 0x284, 1, 0x0, 0), /* MX35_PAD_ATA_DATA2__ESDHC3_DAT7 */
- [714] = IMX_PIN_REG(MX35_PAD_ATA_DATA2, 0x6e8, 0x284, 2, 0x9b4, 1), /* MX35_PAD_ATA_DATA2__USB_TOP_USBOTG_DATA_4 */
- [715] = IMX_PIN_REG(MX35_PAD_ATA_DATA2, 0x6e8, 0x284, 3, 0x0, 0), /* MX35_PAD_ATA_DATA2__IPU_DISPB_SER_RS */
- [716] = IMX_PIN_REG(MX35_PAD_ATA_DATA2, 0x6e8, 0x284, 4, 0x0, 0), /* MX35_PAD_ATA_DATA2__ESDHC2_DAT7 */
- [717] = IMX_PIN_REG(MX35_PAD_ATA_DATA2, 0x6e8, 0x284, 5, 0x880, 1), /* MX35_PAD_ATA_DATA2__GPIO2_15 */
- [718] = IMX_PIN_REG(MX35_PAD_ATA_DATA2, 0x6e8, 0x284, 6, 0x0, 0), /* MX35_PAD_ATA_DATA2__IPU_DIAGB_9 */
- [719] = IMX_PIN_REG(MX35_PAD_ATA_DATA2, 0x6e8, 0x284, 7, 0x0, 0), /* MX35_PAD_ATA_DATA2__ARM11P_TOP_TRACE_28 */
- [720] = IMX_PIN_REG(MX35_PAD_ATA_DATA3, 0x6ec, 0x288, 0, 0x0, 0), /* MX35_PAD_ATA_DATA3__ATA_DATA_3 */
- [721] = IMX_PIN_REG(MX35_PAD_ATA_DATA3, 0x6ec, 0x288, 1, 0x814, 1), /* MX35_PAD_ATA_DATA3__ESDHC3_CLK */
- [722] = IMX_PIN_REG(MX35_PAD_ATA_DATA3, 0x6ec, 0x288, 2, 0x9b8, 1), /* MX35_PAD_ATA_DATA3__USB_TOP_USBOTG_DATA_5 */
- [723] = IMX_PIN_REG(MX35_PAD_ATA_DATA3, 0x6ec, 0x288, 4, 0x7e0, 2), /* MX35_PAD_ATA_DATA3__CSPI2_SCLK */
- [724] = IMX_PIN_REG(MX35_PAD_ATA_DATA3, 0x6ec, 0x288, 5, 0x884, 1), /* MX35_PAD_ATA_DATA3__GPIO2_16 */
- [725] = IMX_PIN_REG(MX35_PAD_ATA_DATA3, 0x6ec, 0x288, 6, 0x0, 0), /* MX35_PAD_ATA_DATA3__IPU_DIAGB_10 */
- [726] = IMX_PIN_REG(MX35_PAD_ATA_DATA3, 0x6ec, 0x288, 7, 0x0, 0), /* MX35_PAD_ATA_DATA3__ARM11P_TOP_TRACE_29 */
- [727] = IMX_PIN_REG(MX35_PAD_ATA_DATA4, 0x6f0, 0x28c, 0, 0x0, 0), /* MX35_PAD_ATA_DATA4__ATA_DATA_4 */
- [728] = IMX_PIN_REG(MX35_PAD_ATA_DATA4, 0x6f0, 0x28c, 1, 0x818, 1), /* MX35_PAD_ATA_DATA4__ESDHC3_CMD */
- [729] = IMX_PIN_REG(MX35_PAD_ATA_DATA4, 0x6f0, 0x28c, 2, 0x9bc, 1), /* MX35_PAD_ATA_DATA4__USB_TOP_USBOTG_DATA_6 */
- [730] = IMX_PIN_REG(MX35_PAD_ATA_DATA4, 0x6f0, 0x28c, 5, 0x888, 1), /* MX35_PAD_ATA_DATA4__GPIO2_17 */
- [731] = IMX_PIN_REG(MX35_PAD_ATA_DATA4, 0x6f0, 0x28c, 6, 0x0, 0), /* MX35_PAD_ATA_DATA4__IPU_DIAGB_11 */
- [732] = IMX_PIN_REG(MX35_PAD_ATA_DATA4, 0x6f0, 0x28c, 7, 0x0, 0), /* MX35_PAD_ATA_DATA4__ARM11P_TOP_TRACE_30 */
- [733] = IMX_PIN_REG(MX35_PAD_ATA_DATA5, 0x6f4, 0x290, 0, 0x0, 0), /* MX35_PAD_ATA_DATA5__ATA_DATA_5 */
- [734] = IMX_PIN_REG(MX35_PAD_ATA_DATA5, 0x6f4, 0x290, 2, 0x9c0, 1), /* MX35_PAD_ATA_DATA5__USB_TOP_USBOTG_DATA_7 */
- [735] = IMX_PIN_REG(MX35_PAD_ATA_DATA5, 0x6f4, 0x290, 5, 0x88c, 1), /* MX35_PAD_ATA_DATA5__GPIO2_18 */
- [736] = IMX_PIN_REG(MX35_PAD_ATA_DATA5, 0x6f4, 0x290, 6, 0x0, 0), /* MX35_PAD_ATA_DATA5__IPU_DIAGB_12 */
- [737] = IMX_PIN_REG(MX35_PAD_ATA_DATA5, 0x6f4, 0x290, 7, 0x0, 0), /* MX35_PAD_ATA_DATA5__ARM11P_TOP_TRACE_31 */
- [738] = IMX_PIN_REG(MX35_PAD_ATA_DATA6, 0x6f8, 0x294, 0, 0x0, 0), /* MX35_PAD_ATA_DATA6__ATA_DATA_6 */
- [739] = IMX_PIN_REG(MX35_PAD_ATA_DATA6, 0x6f8, 0x294, 1, 0x0, 0), /* MX35_PAD_ATA_DATA6__CAN1_TXCAN */
- [740] = IMX_PIN_REG(MX35_PAD_ATA_DATA6, 0x6f8, 0x294, 2, 0x0, 0), /* MX35_PAD_ATA_DATA6__UART1_DTR */
- [741] = IMX_PIN_REG(MX35_PAD_ATA_DATA6, 0x6f8, 0x294, 3, 0x7b4, 0), /* MX35_PAD_ATA_DATA6__AUDMUX_AUD6_TXD */
- [742] = IMX_PIN_REG(MX35_PAD_ATA_DATA6, 0x6f8, 0x294, 5, 0x890, 1), /* MX35_PAD_ATA_DATA6__GPIO2_19 */
- [743] = IMX_PIN_REG(MX35_PAD_ATA_DATA6, 0x6f8, 0x294, 6, 0x0, 0), /* MX35_PAD_ATA_DATA6__IPU_DIAGB_13 */
- [744] = IMX_PIN_REG(MX35_PAD_ATA_DATA7, 0x6fc, 0x298, 0, 0x0, 0), /* MX35_PAD_ATA_DATA7__ATA_DATA_7 */
- [745] = IMX_PIN_REG(MX35_PAD_ATA_DATA7, 0x6fc, 0x298, 1, 0x7c8, 2), /* MX35_PAD_ATA_DATA7__CAN1_RXCAN */
- [746] = IMX_PIN_REG(MX35_PAD_ATA_DATA7, 0x6fc, 0x298, 2, 0x0, 0), /* MX35_PAD_ATA_DATA7__UART1_DSR */
- [747] = IMX_PIN_REG(MX35_PAD_ATA_DATA7, 0x6fc, 0x298, 3, 0x7b0, 0), /* MX35_PAD_ATA_DATA7__AUDMUX_AUD6_RXD */
- [748] = IMX_PIN_REG(MX35_PAD_ATA_DATA7, 0x6fc, 0x298, 5, 0x898, 1), /* MX35_PAD_ATA_DATA7__GPIO2_20 */
- [749] = IMX_PIN_REG(MX35_PAD_ATA_DATA7, 0x6fc, 0x298, 6, 0x0, 0), /* MX35_PAD_ATA_DATA7__IPU_DIAGB_14 */
- [750] = IMX_PIN_REG(MX35_PAD_ATA_DATA8, 0x700, 0x29c, 0, 0x0, 0), /* MX35_PAD_ATA_DATA8__ATA_DATA_8 */
- [751] = IMX_PIN_REG(MX35_PAD_ATA_DATA8, 0x700, 0x29c, 1, 0x99c, 1), /* MX35_PAD_ATA_DATA8__UART3_RTS */
- [752] = IMX_PIN_REG(MX35_PAD_ATA_DATA8, 0x700, 0x29c, 2, 0x0, 0), /* MX35_PAD_ATA_DATA8__UART1_RI */
- [753] = IMX_PIN_REG(MX35_PAD_ATA_DATA8, 0x700, 0x29c, 3, 0x7c0, 0), /* MX35_PAD_ATA_DATA8__AUDMUX_AUD6_TXC */
- [754] = IMX_PIN_REG(MX35_PAD_ATA_DATA8, 0x700, 0x29c, 5, 0x89c, 1), /* MX35_PAD_ATA_DATA8__GPIO2_21 */
- [755] = IMX_PIN_REG(MX35_PAD_ATA_DATA8, 0x700, 0x29c, 6, 0x0, 0), /* MX35_PAD_ATA_DATA8__IPU_DIAGB_15 */
- [756] = IMX_PIN_REG(MX35_PAD_ATA_DATA9, 0x704, 0x2a0, 0, 0x0, 0), /* MX35_PAD_ATA_DATA9__ATA_DATA_9 */
- [757] = IMX_PIN_REG(MX35_PAD_ATA_DATA9, 0x704, 0x2a0, 1, 0x0, 0), /* MX35_PAD_ATA_DATA9__UART3_CTS */
- [758] = IMX_PIN_REG(MX35_PAD_ATA_DATA9, 0x704, 0x2a0, 2, 0x0, 0), /* MX35_PAD_ATA_DATA9__UART1_DCD */
- [759] = IMX_PIN_REG(MX35_PAD_ATA_DATA9, 0x704, 0x2a0, 3, 0x7c4, 0), /* MX35_PAD_ATA_DATA9__AUDMUX_AUD6_TXFS */
- [760] = IMX_PIN_REG(MX35_PAD_ATA_DATA9, 0x704, 0x2a0, 5, 0x8a0, 1), /* MX35_PAD_ATA_DATA9__GPIO2_22 */
- [761] = IMX_PIN_REG(MX35_PAD_ATA_DATA9, 0x704, 0x2a0, 6, 0x0, 0), /* MX35_PAD_ATA_DATA9__IPU_DIAGB_16 */
- [762] = IMX_PIN_REG(MX35_PAD_ATA_DATA10, 0x708, 0x2a4, 0, 0x0, 0), /* MX35_PAD_ATA_DATA10__ATA_DATA_10 */
- [763] = IMX_PIN_REG(MX35_PAD_ATA_DATA10, 0x708, 0x2a4, 1, 0x9a0, 2), /* MX35_PAD_ATA_DATA10__UART3_RXD_MUX */
- [764] = IMX_PIN_REG(MX35_PAD_ATA_DATA10, 0x708, 0x2a4, 3, 0x7b8, 0), /* MX35_PAD_ATA_DATA10__AUDMUX_AUD6_RXC */
- [765] = IMX_PIN_REG(MX35_PAD_ATA_DATA10, 0x708, 0x2a4, 5, 0x8a4, 1), /* MX35_PAD_ATA_DATA10__GPIO2_23 */
- [766] = IMX_PIN_REG(MX35_PAD_ATA_DATA10, 0x708, 0x2a4, 6, 0x0, 0), /* MX35_PAD_ATA_DATA10__IPU_DIAGB_17 */
- [767] = IMX_PIN_REG(MX35_PAD_ATA_DATA11, 0x70c, 0x2a8, 0, 0x0, 0), /* MX35_PAD_ATA_DATA11__ATA_DATA_11 */
- [768] = IMX_PIN_REG(MX35_PAD_ATA_DATA11, 0x70c, 0x2a8, 1, 0x0, 0), /* MX35_PAD_ATA_DATA11__UART3_TXD_MUX */
- [769] = IMX_PIN_REG(MX35_PAD_ATA_DATA11, 0x70c, 0x2a8, 3, 0x7bc, 0), /* MX35_PAD_ATA_DATA11__AUDMUX_AUD6_RXFS */
- [770] = IMX_PIN_REG(MX35_PAD_ATA_DATA11, 0x70c, 0x2a8, 5, 0x8a8, 1), /* MX35_PAD_ATA_DATA11__GPIO2_24 */
- [771] = IMX_PIN_REG(MX35_PAD_ATA_DATA11, 0x70c, 0x2a8, 6, 0x0, 0), /* MX35_PAD_ATA_DATA11__IPU_DIAGB_18 */
- [772] = IMX_PIN_REG(MX35_PAD_ATA_DATA12, 0x710, 0x2ac, 0, 0x0, 0), /* MX35_PAD_ATA_DATA12__ATA_DATA_12 */
- [773] = IMX_PIN_REG(MX35_PAD_ATA_DATA12, 0x710, 0x2ac, 1, 0x91c, 3), /* MX35_PAD_ATA_DATA12__I2C3_SCL */
- [774] = IMX_PIN_REG(MX35_PAD_ATA_DATA12, 0x710, 0x2ac, 5, 0x8ac, 1), /* MX35_PAD_ATA_DATA12__GPIO2_25 */
- [775] = IMX_PIN_REG(MX35_PAD_ATA_DATA12, 0x710, 0x2ac, 6, 0x0, 0), /* MX35_PAD_ATA_DATA12__IPU_DIAGB_19 */
- [776] = IMX_PIN_REG(MX35_PAD_ATA_DATA13, 0x714, 0x2b0, 0, 0x0, 0), /* MX35_PAD_ATA_DATA13__ATA_DATA_13 */
- [777] = IMX_PIN_REG(MX35_PAD_ATA_DATA13, 0x714, 0x2b0, 1, 0x920, 3), /* MX35_PAD_ATA_DATA13__I2C3_SDA */
- [778] = IMX_PIN_REG(MX35_PAD_ATA_DATA13, 0x714, 0x2b0, 5, 0x8b0, 1), /* MX35_PAD_ATA_DATA13__GPIO2_26 */
- [779] = IMX_PIN_REG(MX35_PAD_ATA_DATA13, 0x714, 0x2b0, 6, 0x0, 0), /* MX35_PAD_ATA_DATA13__IPU_DIAGB_20 */
- [780] = IMX_PIN_REG(MX35_PAD_ATA_DATA14, 0x718, 0x2b4, 0, 0x0, 0), /* MX35_PAD_ATA_DATA14__ATA_DATA_14 */
- [781] = IMX_PIN_REG(MX35_PAD_ATA_DATA14, 0x718, 0x2b4, 1, 0x930, 2), /* MX35_PAD_ATA_DATA14__IPU_CSI_D_0 */
- [782] = IMX_PIN_REG(MX35_PAD_ATA_DATA14, 0x718, 0x2b4, 3, 0x970, 2), /* MX35_PAD_ATA_DATA14__KPP_ROW_0 */
- [783] = IMX_PIN_REG(MX35_PAD_ATA_DATA14, 0x718, 0x2b4, 5, 0x8b4, 1), /* MX35_PAD_ATA_DATA14__GPIO2_27 */
- [784] = IMX_PIN_REG(MX35_PAD_ATA_DATA14, 0x718, 0x2b4, 6, 0x0, 0), /* MX35_PAD_ATA_DATA14__IPU_DIAGB_21 */
- [785] = IMX_PIN_REG(MX35_PAD_ATA_DATA15, 0x71c, 0x2b8, 0, 0x0, 0), /* MX35_PAD_ATA_DATA15__ATA_DATA_15 */
- [786] = IMX_PIN_REG(MX35_PAD_ATA_DATA15, 0x71c, 0x2b8, 1, 0x934, 2), /* MX35_PAD_ATA_DATA15__IPU_CSI_D_1 */
- [787] = IMX_PIN_REG(MX35_PAD_ATA_DATA15, 0x71c, 0x2b8, 3, 0x974, 2), /* MX35_PAD_ATA_DATA15__KPP_ROW_1 */
- [788] = IMX_PIN_REG(MX35_PAD_ATA_DATA15, 0x71c, 0x2b8, 5, 0x8b8, 1), /* MX35_PAD_ATA_DATA15__GPIO2_28 */
- [789] = IMX_PIN_REG(MX35_PAD_ATA_DATA15, 0x71c, 0x2b8, 6, 0x0, 0), /* MX35_PAD_ATA_DATA15__IPU_DIAGB_22 */
- [790] = IMX_PIN_REG(MX35_PAD_ATA_INTRQ, 0x720, 0x2bc, 0, 0x0, 0), /* MX35_PAD_ATA_INTRQ__ATA_INTRQ */
- [791] = IMX_PIN_REG(MX35_PAD_ATA_INTRQ, 0x720, 0x2bc, 1, 0x938, 3), /* MX35_PAD_ATA_INTRQ__IPU_CSI_D_2 */
- [792] = IMX_PIN_REG(MX35_PAD_ATA_INTRQ, 0x720, 0x2bc, 3, 0x978, 2), /* MX35_PAD_ATA_INTRQ__KPP_ROW_2 */
- [793] = IMX_PIN_REG(MX35_PAD_ATA_INTRQ, 0x720, 0x2bc, 5, 0x8bc, 1), /* MX35_PAD_ATA_INTRQ__GPIO2_29 */
- [794] = IMX_PIN_REG(MX35_PAD_ATA_INTRQ, 0x720, 0x2bc, 6, 0x0, 0), /* MX35_PAD_ATA_INTRQ__IPU_DIAGB_23 */
- [795] = IMX_PIN_REG(MX35_PAD_ATA_BUFF_EN, 0x724, 0x2c0, 0, 0x0, 0), /* MX35_PAD_ATA_BUFF_EN__ATA_BUFFER_EN */
- [796] = IMX_PIN_REG(MX35_PAD_ATA_BUFF_EN, 0x724, 0x2c0, 1, 0x93c, 3), /* MX35_PAD_ATA_BUFF_EN__IPU_CSI_D_3 */
- [797] = IMX_PIN_REG(MX35_PAD_ATA_BUFF_EN, 0x724, 0x2c0, 3, 0x97c, 2), /* MX35_PAD_ATA_BUFF_EN__KPP_ROW_3 */
- [798] = IMX_PIN_REG(MX35_PAD_ATA_BUFF_EN, 0x724, 0x2c0, 5, 0x8c4, 1), /* MX35_PAD_ATA_BUFF_EN__GPIO2_30 */
- [799] = IMX_PIN_REG(MX35_PAD_ATA_BUFF_EN, 0x724, 0x2c0, 6, 0x0, 0), /* MX35_PAD_ATA_BUFF_EN__IPU_DIAGB_24 */
- [800] = IMX_PIN_REG(MX35_PAD_ATA_DMARQ, 0x728, 0x2c4, 0, 0x0, 0), /* MX35_PAD_ATA_DMARQ__ATA_DMARQ */
- [801] = IMX_PIN_REG(MX35_PAD_ATA_DMARQ, 0x728, 0x2c4, 1, 0x940, 2), /* MX35_PAD_ATA_DMARQ__IPU_CSI_D_4 */
- [802] = IMX_PIN_REG(MX35_PAD_ATA_DMARQ, 0x728, 0x2c4, 3, 0x950, 2), /* MX35_PAD_ATA_DMARQ__KPP_COL_0 */
- [803] = IMX_PIN_REG(MX35_PAD_ATA_DMARQ, 0x728, 0x2c4, 5, 0x8c8, 1), /* MX35_PAD_ATA_DMARQ__GPIO2_31 */
- [804] = IMX_PIN_REG(MX35_PAD_ATA_DMARQ, 0x728, 0x2c4, 6, 0x0, 0), /* MX35_PAD_ATA_DMARQ__IPU_DIAGB_25 */
- [805] = IMX_PIN_REG(MX35_PAD_ATA_DMARQ, 0x728, 0x2c4, 7, 0x0, 0), /* MX35_PAD_ATA_DMARQ__ECT_CTI_TRIG_IN1_4 */
- [806] = IMX_PIN_REG(MX35_PAD_ATA_DA0, 0x72c, 0x2c8, 0, 0x0, 0), /* MX35_PAD_ATA_DA0__ATA_DA_0 */
- [807] = IMX_PIN_REG(MX35_PAD_ATA_DA0, 0x72c, 0x2c8, 1, 0x944, 2), /* MX35_PAD_ATA_DA0__IPU_CSI_D_5 */
- [808] = IMX_PIN_REG(MX35_PAD_ATA_DA0, 0x72c, 0x2c8, 3, 0x954, 2), /* MX35_PAD_ATA_DA0__KPP_COL_1 */
- [809] = IMX_PIN_REG(MX35_PAD_ATA_DA0, 0x72c, 0x2c8, 5, 0x8e8, 1), /* MX35_PAD_ATA_DA0__GPIO3_0 */
- [810] = IMX_PIN_REG(MX35_PAD_ATA_DA0, 0x72c, 0x2c8, 6, 0x0, 0), /* MX35_PAD_ATA_DA0__IPU_DIAGB_26 */
- [811] = IMX_PIN_REG(MX35_PAD_ATA_DA0, 0x72c, 0x2c8, 7, 0x0, 0), /* MX35_PAD_ATA_DA0__ECT_CTI_TRIG_IN1_5 */
- [812] = IMX_PIN_REG(MX35_PAD_ATA_DA1, 0x730, 0x2cc, 0, 0x0, 0), /* MX35_PAD_ATA_DA1__ATA_DA_1 */
- [813] = IMX_PIN_REG(MX35_PAD_ATA_DA1, 0x730, 0x2cc, 1, 0x948, 2), /* MX35_PAD_ATA_DA1__IPU_CSI_D_6 */
- [814] = IMX_PIN_REG(MX35_PAD_ATA_DA1, 0x730, 0x2cc, 3, 0x958, 2), /* MX35_PAD_ATA_DA1__KPP_COL_2 */
- [815] = IMX_PIN_REG(MX35_PAD_ATA_DA1, 0x730, 0x2cc, 5, 0x0, 0), /* MX35_PAD_ATA_DA1__GPIO3_1 */
- [816] = IMX_PIN_REG(MX35_PAD_ATA_DA1, 0x730, 0x2cc, 6, 0x0, 0), /* MX35_PAD_ATA_DA1__IPU_DIAGB_27 */
- [817] = IMX_PIN_REG(MX35_PAD_ATA_DA1, 0x730, 0x2cc, 7, 0x0, 0), /* MX35_PAD_ATA_DA1__ECT_CTI_TRIG_IN1_6 */
- [818] = IMX_PIN_REG(MX35_PAD_ATA_DA2, 0x734, 0x2d0, 0, 0x0, 0), /* MX35_PAD_ATA_DA2__ATA_DA_2 */
- [819] = IMX_PIN_REG(MX35_PAD_ATA_DA2, 0x734, 0x2d0, 1, 0x94c, 2), /* MX35_PAD_ATA_DA2__IPU_CSI_D_7 */
- [820] = IMX_PIN_REG(MX35_PAD_ATA_DA2, 0x734, 0x2d0, 3, 0x95c, 2), /* MX35_PAD_ATA_DA2__KPP_COL_3 */
- [821] = IMX_PIN_REG(MX35_PAD_ATA_DA2, 0x734, 0x2d0, 5, 0x0, 0), /* MX35_PAD_ATA_DA2__GPIO3_2 */
- [822] = IMX_PIN_REG(MX35_PAD_ATA_DA2, 0x734, 0x2d0, 6, 0x0, 0), /* MX35_PAD_ATA_DA2__IPU_DIAGB_28 */
- [823] = IMX_PIN_REG(MX35_PAD_ATA_DA2, 0x734, 0x2d0, 7, 0x0, 0), /* MX35_PAD_ATA_DA2__ECT_CTI_TRIG_IN1_7 */
- [824] = IMX_PIN_REG(MX35_PAD_MLB_CLK, 0x738, 0x2d4, 0, 0x0, 0), /* MX35_PAD_MLB_CLK__MLB_MLBCLK */
- [825] = IMX_PIN_REG(MX35_PAD_MLB_CLK, 0x738, 0x2d4, 5, 0x0, 0), /* MX35_PAD_MLB_CLK__GPIO3_3 */
- [826] = IMX_PIN_REG(MX35_PAD_MLB_DAT, 0x73c, 0x2d8, 0, 0x0, 0), /* MX35_PAD_MLB_DAT__MLB_MLBDAT */
- [827] = IMX_PIN_REG(MX35_PAD_MLB_DAT, 0x73c, 0x2d8, 5, 0x904, 1), /* MX35_PAD_MLB_DAT__GPIO3_4 */
- [828] = IMX_PIN_REG(MX35_PAD_MLB_SIG, 0x740, 0x2dc, 0, 0x0, 0), /* MX35_PAD_MLB_SIG__MLB_MLBSIG */
- [829] = IMX_PIN_REG(MX35_PAD_MLB_SIG, 0x740, 0x2dc, 5, 0x908, 1), /* MX35_PAD_MLB_SIG__GPIO3_5 */
- [830] = IMX_PIN_REG(MX35_PAD_FEC_TX_CLK, 0x744, 0x2e0, 0, 0x0, 0), /* MX35_PAD_FEC_TX_CLK__FEC_TX_CLK */
- [831] = IMX_PIN_REG(MX35_PAD_FEC_TX_CLK, 0x744, 0x2e0, 1, 0x804, 1), /* MX35_PAD_FEC_TX_CLK__ESDHC1_DAT4 */
- [832] = IMX_PIN_REG(MX35_PAD_FEC_TX_CLK, 0x744, 0x2e0, 2, 0x9a0, 3), /* MX35_PAD_FEC_TX_CLK__UART3_RXD_MUX */
- [833] = IMX_PIN_REG(MX35_PAD_FEC_TX_CLK, 0x744, 0x2e0, 3, 0x9ec, 1), /* MX35_PAD_FEC_TX_CLK__USB_TOP_USBH2_DIR */
- [834] = IMX_PIN_REG(MX35_PAD_FEC_TX_CLK, 0x744, 0x2e0, 4, 0x7ec, 3), /* MX35_PAD_FEC_TX_CLK__CSPI2_MOSI */
- [835] = IMX_PIN_REG(MX35_PAD_FEC_TX_CLK, 0x744, 0x2e0, 5, 0x90c, 1), /* MX35_PAD_FEC_TX_CLK__GPIO3_6 */
- [836] = IMX_PIN_REG(MX35_PAD_FEC_TX_CLK, 0x744, 0x2e0, 6, 0x928, 5), /* MX35_PAD_FEC_TX_CLK__IPU_DISPB_D12_VSYNC */
- [837] = IMX_PIN_REG(MX35_PAD_FEC_TX_CLK, 0x744, 0x2e0, 7, 0x0, 0), /* MX35_PAD_FEC_TX_CLK__ARM11P_TOP_EVNTBUS_0 */
- [838] = IMX_PIN_REG(MX35_PAD_FEC_RX_CLK, 0x748, 0x2e4, 0, 0x0, 0), /* MX35_PAD_FEC_RX_CLK__FEC_RX_CLK */
- [839] = IMX_PIN_REG(MX35_PAD_FEC_RX_CLK, 0x748, 0x2e4, 1, 0x808, 1), /* MX35_PAD_FEC_RX_CLK__ESDHC1_DAT5 */
- [840] = IMX_PIN_REG(MX35_PAD_FEC_RX_CLK, 0x748, 0x2e4, 2, 0x0, 0), /* MX35_PAD_FEC_RX_CLK__UART3_TXD_MUX */
- [841] = IMX_PIN_REG(MX35_PAD_FEC_RX_CLK, 0x748, 0x2e4, 3, 0x0, 0), /* MX35_PAD_FEC_RX_CLK__USB_TOP_USBH2_STP */
- [842] = IMX_PIN_REG(MX35_PAD_FEC_RX_CLK, 0x748, 0x2e4, 4, 0x7e8, 3), /* MX35_PAD_FEC_RX_CLK__CSPI2_MISO */
- [843] = IMX_PIN_REG(MX35_PAD_FEC_RX_CLK, 0x748, 0x2e4, 5, 0x910, 1), /* MX35_PAD_FEC_RX_CLK__GPIO3_7 */
- [844] = IMX_PIN_REG(MX35_PAD_FEC_RX_CLK, 0x748, 0x2e4, 6, 0x92c, 4), /* MX35_PAD_FEC_RX_CLK__IPU_DISPB_SD_D_I */
- [845] = IMX_PIN_REG(MX35_PAD_FEC_RX_CLK, 0x748, 0x2e4, 7, 0x0, 0), /* MX35_PAD_FEC_RX_CLK__ARM11P_TOP_EVNTBUS_1 */
- [846] = IMX_PIN_REG(MX35_PAD_FEC_RX_DV, 0x74c, 0x2e8, 0, 0x0, 0), /* MX35_PAD_FEC_RX_DV__FEC_RX_DV */
- [847] = IMX_PIN_REG(MX35_PAD_FEC_RX_DV, 0x74c, 0x2e8, 1, 0x80c, 1), /* MX35_PAD_FEC_RX_DV__ESDHC1_DAT6 */
- [848] = IMX_PIN_REG(MX35_PAD_FEC_RX_DV, 0x74c, 0x2e8, 2, 0x99c, 2), /* MX35_PAD_FEC_RX_DV__UART3_RTS */
- [849] = IMX_PIN_REG(MX35_PAD_FEC_RX_DV, 0x74c, 0x2e8, 3, 0x9f0, 1), /* MX35_PAD_FEC_RX_DV__USB_TOP_USBH2_NXT */
- [850] = IMX_PIN_REG(MX35_PAD_FEC_RX_DV, 0x74c, 0x2e8, 4, 0x7e0, 3), /* MX35_PAD_FEC_RX_DV__CSPI2_SCLK */
- [851] = IMX_PIN_REG(MX35_PAD_FEC_RX_DV, 0x74c, 0x2e8, 5, 0x914, 1), /* MX35_PAD_FEC_RX_DV__GPIO3_8 */
- [852] = IMX_PIN_REG(MX35_PAD_FEC_RX_DV, 0x74c, 0x2e8, 6, 0x0, 0), /* MX35_PAD_FEC_RX_DV__IPU_DISPB_SD_CLK */
- [853] = IMX_PIN_REG(MX35_PAD_FEC_RX_DV, 0x74c, 0x2e8, 7, 0x0, 0), /* MX35_PAD_FEC_RX_DV__ARM11P_TOP_EVNTBUS_2 */
- [854] = IMX_PIN_REG(MX35_PAD_FEC_COL, 0x750, 0x2ec, 0, 0x0, 0), /* MX35_PAD_FEC_COL__FEC_COL */
- [855] = IMX_PIN_REG(MX35_PAD_FEC_COL, 0x750, 0x2ec, 1, 0x810, 1), /* MX35_PAD_FEC_COL__ESDHC1_DAT7 */
- [856] = IMX_PIN_REG(MX35_PAD_FEC_COL, 0x750, 0x2ec, 2, 0x0, 0), /* MX35_PAD_FEC_COL__UART3_CTS */
- [857] = IMX_PIN_REG(MX35_PAD_FEC_COL, 0x750, 0x2ec, 3, 0x9cc, 1), /* MX35_PAD_FEC_COL__USB_TOP_USBH2_DATA_0 */
- [858] = IMX_PIN_REG(MX35_PAD_FEC_COL, 0x750, 0x2ec, 4, 0x7e4, 3), /* MX35_PAD_FEC_COL__CSPI2_RDY */
- [859] = IMX_PIN_REG(MX35_PAD_FEC_COL, 0x750, 0x2ec, 5, 0x918, 1), /* MX35_PAD_FEC_COL__GPIO3_9 */
- [860] = IMX_PIN_REG(MX35_PAD_FEC_COL, 0x750, 0x2ec, 6, 0x0, 0), /* MX35_PAD_FEC_COL__IPU_DISPB_SER_RS */
- [861] = IMX_PIN_REG(MX35_PAD_FEC_COL, 0x750, 0x2ec, 7, 0x0, 0), /* MX35_PAD_FEC_COL__ARM11P_TOP_EVNTBUS_3 */
- [862] = IMX_PIN_REG(MX35_PAD_FEC_RDATA0, 0x754, 0x2f0, 0, 0x0, 0), /* MX35_PAD_FEC_RDATA0__FEC_RDATA_0 */
- [863] = IMX_PIN_REG(MX35_PAD_FEC_RDATA0, 0x754, 0x2f0, 1, 0x0, 0), /* MX35_PAD_FEC_RDATA0__PWM_PWMO */
- [864] = IMX_PIN_REG(MX35_PAD_FEC_RDATA0, 0x754, 0x2f0, 2, 0x0, 0), /* MX35_PAD_FEC_RDATA0__UART3_DTR */
- [865] = IMX_PIN_REG(MX35_PAD_FEC_RDATA0, 0x754, 0x2f0, 3, 0x9d0, 1), /* MX35_PAD_FEC_RDATA0__USB_TOP_USBH2_DATA_1 */
- [866] = IMX_PIN_REG(MX35_PAD_FEC_RDATA0, 0x754, 0x2f0, 4, 0x7f0, 2), /* MX35_PAD_FEC_RDATA0__CSPI2_SS0 */
- [867] = IMX_PIN_REG(MX35_PAD_FEC_RDATA0, 0x754, 0x2f0, 5, 0x8ec, 1), /* MX35_PAD_FEC_RDATA0__GPIO3_10 */
- [868] = IMX_PIN_REG(MX35_PAD_FEC_RDATA0, 0x754, 0x2f0, 6, 0x0, 0), /* MX35_PAD_FEC_RDATA0__IPU_DISPB_CS1 */
- [869] = IMX_PIN_REG(MX35_PAD_FEC_RDATA0, 0x754, 0x2f0, 7, 0x0, 0), /* MX35_PAD_FEC_RDATA0__ARM11P_TOP_EVNTBUS_4 */
- [870] = IMX_PIN_REG(MX35_PAD_FEC_TDATA0, 0x758, 0x2f4, 0, 0x0, 0), /* MX35_PAD_FEC_TDATA0__FEC_TDATA_0 */
- [871] = IMX_PIN_REG(MX35_PAD_FEC_TDATA0, 0x758, 0x2f4, 1, 0x0, 0), /* MX35_PAD_FEC_TDATA0__SPDIF_SPDIF_OUT1 */
- [872] = IMX_PIN_REG(MX35_PAD_FEC_TDATA0, 0x758, 0x2f4, 2, 0x0, 0), /* MX35_PAD_FEC_TDATA0__UART3_DSR */
- [873] = IMX_PIN_REG(MX35_PAD_FEC_TDATA0, 0x758, 0x2f4, 3, 0x9d4, 1), /* MX35_PAD_FEC_TDATA0__USB_TOP_USBH2_DATA_2 */
- [874] = IMX_PIN_REG(MX35_PAD_FEC_TDATA0, 0x758, 0x2f4, 4, 0x7f4, 2), /* MX35_PAD_FEC_TDATA0__CSPI2_SS1 */
- [875] = IMX_PIN_REG(MX35_PAD_FEC_TDATA0, 0x758, 0x2f4, 5, 0x8f0, 1), /* MX35_PAD_FEC_TDATA0__GPIO3_11 */
- [876] = IMX_PIN_REG(MX35_PAD_FEC_TDATA0, 0x758, 0x2f4, 6, 0x0, 0), /* MX35_PAD_FEC_TDATA0__IPU_DISPB_CS0 */
- [877] = IMX_PIN_REG(MX35_PAD_FEC_TDATA0, 0x758, 0x2f4, 7, 0x0, 0), /* MX35_PAD_FEC_TDATA0__ARM11P_TOP_EVNTBUS_5 */
- [878] = IMX_PIN_REG(MX35_PAD_FEC_TX_EN, 0x75c, 0x2f8, 0, 0x0, 0), /* MX35_PAD_FEC_TX_EN__FEC_TX_EN */
- [879] = IMX_PIN_REG(MX35_PAD_FEC_TX_EN, 0x75c, 0x2f8, 1, 0x998, 3), /* MX35_PAD_FEC_TX_EN__SPDIF_SPDIF_IN1 */
- [880] = IMX_PIN_REG(MX35_PAD_FEC_TX_EN, 0x75c, 0x2f8, 2, 0x0, 0), /* MX35_PAD_FEC_TX_EN__UART3_RI */
- [881] = IMX_PIN_REG(MX35_PAD_FEC_TX_EN, 0x75c, 0x2f8, 3, 0x9d8, 1), /* MX35_PAD_FEC_TX_EN__USB_TOP_USBH2_DATA_3 */
- [882] = IMX_PIN_REG(MX35_PAD_FEC_TX_EN, 0x75c, 0x2f8, 5, 0x8f4, 1), /* MX35_PAD_FEC_TX_EN__GPIO3_12 */
- [883] = IMX_PIN_REG(MX35_PAD_FEC_TX_EN, 0x75c, 0x2f8, 6, 0x0, 0), /* MX35_PAD_FEC_TX_EN__IPU_DISPB_PAR_RS */
- [884] = IMX_PIN_REG(MX35_PAD_FEC_TX_EN, 0x75c, 0x2f8, 7, 0x0, 0), /* MX35_PAD_FEC_TX_EN__ARM11P_TOP_EVNTBUS_6 */
- [885] = IMX_PIN_REG(MX35_PAD_FEC_MDC, 0x760, 0x2fc, 0, 0x0, 0), /* MX35_PAD_FEC_MDC__FEC_MDC */
- [886] = IMX_PIN_REG(MX35_PAD_FEC_MDC, 0x760, 0x2fc, 1, 0x0, 0), /* MX35_PAD_FEC_MDC__CAN2_TXCAN */
- [887] = IMX_PIN_REG(MX35_PAD_FEC_MDC, 0x760, 0x2fc, 2, 0x0, 0), /* MX35_PAD_FEC_MDC__UART3_DCD */
- [888] = IMX_PIN_REG(MX35_PAD_FEC_MDC, 0x760, 0x2fc, 3, 0x9dc, 1), /* MX35_PAD_FEC_MDC__USB_TOP_USBH2_DATA_4 */
- [889] = IMX_PIN_REG(MX35_PAD_FEC_MDC, 0x760, 0x2fc, 5, 0x8f8, 1), /* MX35_PAD_FEC_MDC__GPIO3_13 */
- [890] = IMX_PIN_REG(MX35_PAD_FEC_MDC, 0x760, 0x2fc, 6, 0x0, 0), /* MX35_PAD_FEC_MDC__IPU_DISPB_WR */
- [891] = IMX_PIN_REG(MX35_PAD_FEC_MDC, 0x760, 0x2fc, 7, 0x0, 0), /* MX35_PAD_FEC_MDC__ARM11P_TOP_EVNTBUS_7 */
- [892] = IMX_PIN_REG(MX35_PAD_FEC_MDIO, 0x764, 0x300, 0, 0x0, 0), /* MX35_PAD_FEC_MDIO__FEC_MDIO */
- [893] = IMX_PIN_REG(MX35_PAD_FEC_MDIO, 0x764, 0x300, 1, 0x7cc, 2), /* MX35_PAD_FEC_MDIO__CAN2_RXCAN */
- [894] = IMX_PIN_REG(MX35_PAD_FEC_MDIO, 0x764, 0x300, 3, 0x9e0, 1), /* MX35_PAD_FEC_MDIO__USB_TOP_USBH2_DATA_5 */
- [895] = IMX_PIN_REG(MX35_PAD_FEC_MDIO, 0x764, 0x300, 5, 0x8fc, 1), /* MX35_PAD_FEC_MDIO__GPIO3_14 */
- [896] = IMX_PIN_REG(MX35_PAD_FEC_MDIO, 0x764, 0x300, 6, 0x0, 0), /* MX35_PAD_FEC_MDIO__IPU_DISPB_RD */
- [897] = IMX_PIN_REG(MX35_PAD_FEC_MDIO, 0x764, 0x300, 7, 0x0, 0), /* MX35_PAD_FEC_MDIO__ARM11P_TOP_EVNTBUS_8 */
- [898] = IMX_PIN_REG(MX35_PAD_FEC_TX_ERR, 0x768, 0x304, 0, 0x0, 0), /* MX35_PAD_FEC_TX_ERR__FEC_TX_ERR */
- [899] = IMX_PIN_REG(MX35_PAD_FEC_TX_ERR, 0x768, 0x304, 1, 0x990, 2), /* MX35_PAD_FEC_TX_ERR__OWIRE_LINE */
- [900] = IMX_PIN_REG(MX35_PAD_FEC_TX_ERR, 0x768, 0x304, 2, 0x994, 4), /* MX35_PAD_FEC_TX_ERR__SPDIF_SPDIF_EXTCLK */
- [901] = IMX_PIN_REG(MX35_PAD_FEC_TX_ERR, 0x768, 0x304, 3, 0x9e4, 1), /* MX35_PAD_FEC_TX_ERR__USB_TOP_USBH2_DATA_6 */
- [902] = IMX_PIN_REG(MX35_PAD_FEC_TX_ERR, 0x768, 0x304, 5, 0x900, 1), /* MX35_PAD_FEC_TX_ERR__GPIO3_15 */
- [903] = IMX_PIN_REG(MX35_PAD_FEC_TX_ERR, 0x768, 0x304, 6, 0x924, 3), /* MX35_PAD_FEC_TX_ERR__IPU_DISPB_D0_VSYNC */
- [904] = IMX_PIN_REG(MX35_PAD_FEC_TX_ERR, 0x768, 0x304, 7, 0x0, 0), /* MX35_PAD_FEC_TX_ERR__ARM11P_TOP_EVNTBUS_9 */
- [905] = IMX_PIN_REG(MX35_PAD_FEC_RX_ERR, 0x76c, 0x308, 0, 0x0, 0), /* MX35_PAD_FEC_RX_ERR__FEC_RX_ERR */
- [906] = IMX_PIN_REG(MX35_PAD_FEC_RX_ERR, 0x76c, 0x308, 1, 0x930, 3), /* MX35_PAD_FEC_RX_ERR__IPU_CSI_D_0 */
- [907] = IMX_PIN_REG(MX35_PAD_FEC_RX_ERR, 0x76c, 0x308, 3, 0x9e8, 1), /* MX35_PAD_FEC_RX_ERR__USB_TOP_USBH2_DATA_7 */
- [908] = IMX_PIN_REG(MX35_PAD_FEC_RX_ERR, 0x76c, 0x308, 4, 0x960, 1), /* MX35_PAD_FEC_RX_ERR__KPP_COL_4 */
- [909] = IMX_PIN_REG(MX35_PAD_FEC_RX_ERR, 0x76c, 0x308, 5, 0x0, 0), /* MX35_PAD_FEC_RX_ERR__GPIO3_16 */
- [910] = IMX_PIN_REG(MX35_PAD_FEC_RX_ERR, 0x76c, 0x308, 6, 0x92c, 5), /* MX35_PAD_FEC_RX_ERR__IPU_DISPB_SD_D_IO */
- [911] = IMX_PIN_REG(MX35_PAD_FEC_CRS, 0x770, 0x30c, 0, 0x0, 0), /* MX35_PAD_FEC_CRS__FEC_CRS */
- [912] = IMX_PIN_REG(MX35_PAD_FEC_CRS, 0x770, 0x30c, 1, 0x934, 3), /* MX35_PAD_FEC_CRS__IPU_CSI_D_1 */
- [913] = IMX_PIN_REG(MX35_PAD_FEC_CRS, 0x770, 0x30c, 3, 0x0, 0), /* MX35_PAD_FEC_CRS__USB_TOP_USBH2_PWR */
- [914] = IMX_PIN_REG(MX35_PAD_FEC_CRS, 0x770, 0x30c, 4, 0x964, 1), /* MX35_PAD_FEC_CRS__KPP_COL_5 */
- [915] = IMX_PIN_REG(MX35_PAD_FEC_CRS, 0x770, 0x30c, 5, 0x0, 0), /* MX35_PAD_FEC_CRS__GPIO3_17 */
- [916] = IMX_PIN_REG(MX35_PAD_FEC_CRS, 0x770, 0x30c, 6, 0x0, 0), /* MX35_PAD_FEC_CRS__IPU_FLASH_STROBE */
- [917] = IMX_PIN_REG(MX35_PAD_FEC_RDATA1, 0x774, 0x310, 0, 0x0, 0), /* MX35_PAD_FEC_RDATA1__FEC_RDATA_1 */
- [918] = IMX_PIN_REG(MX35_PAD_FEC_RDATA1, 0x774, 0x310, 1, 0x938, 4), /* MX35_PAD_FEC_RDATA1__IPU_CSI_D_2 */
- [919] = IMX_PIN_REG(MX35_PAD_FEC_RDATA1, 0x774, 0x310, 2, 0x0, 0), /* MX35_PAD_FEC_RDATA1__AUDMUX_AUD6_RXC */
- [920] = IMX_PIN_REG(MX35_PAD_FEC_RDATA1, 0x774, 0x310, 3, 0x9f4, 2), /* MX35_PAD_FEC_RDATA1__USB_TOP_USBH2_OC */
- [921] = IMX_PIN_REG(MX35_PAD_FEC_RDATA1, 0x774, 0x310, 4, 0x968, 1), /* MX35_PAD_FEC_RDATA1__KPP_COL_6 */
- [922] = IMX_PIN_REG(MX35_PAD_FEC_RDATA1, 0x774, 0x310, 5, 0x0, 0), /* MX35_PAD_FEC_RDATA1__GPIO3_18 */
- [923] = IMX_PIN_REG(MX35_PAD_FEC_RDATA1, 0x774, 0x310, 6, 0x0, 0), /* MX35_PAD_FEC_RDATA1__IPU_DISPB_BE0 */
- [924] = IMX_PIN_REG(MX35_PAD_FEC_TDATA1, 0x778, 0x314, 0, 0x0, 0), /* MX35_PAD_FEC_TDATA1__FEC_TDATA_1 */
- [925] = IMX_PIN_REG(MX35_PAD_FEC_TDATA1, 0x778, 0x314, 1, 0x93c, 4), /* MX35_PAD_FEC_TDATA1__IPU_CSI_D_3 */
- [926] = IMX_PIN_REG(MX35_PAD_FEC_TDATA1, 0x778, 0x314, 2, 0x7bc, 1), /* MX35_PAD_FEC_TDATA1__AUDMUX_AUD6_RXFS */
- [927] = IMX_PIN_REG(MX35_PAD_FEC_TDATA1, 0x778, 0x314, 4, 0x96c, 1), /* MX35_PAD_FEC_TDATA1__KPP_COL_7 */
- [928] = IMX_PIN_REG(MX35_PAD_FEC_TDATA1, 0x778, 0x314, 5, 0x0, 0), /* MX35_PAD_FEC_TDATA1__GPIO3_19 */
- [929] = IMX_PIN_REG(MX35_PAD_FEC_TDATA1, 0x778, 0x314, 6, 0x0, 0), /* MX35_PAD_FEC_TDATA1__IPU_DISPB_BE1 */
- [930] = IMX_PIN_REG(MX35_PAD_FEC_RDATA2, 0x77c, 0x318, 0, 0x0, 0), /* MX35_PAD_FEC_RDATA2__FEC_RDATA_2 */
- [931] = IMX_PIN_REG(MX35_PAD_FEC_RDATA2, 0x77c, 0x318, 1, 0x940, 3), /* MX35_PAD_FEC_RDATA2__IPU_CSI_D_4 */
- [932] = IMX_PIN_REG(MX35_PAD_FEC_RDATA2, 0x77c, 0x318, 2, 0x7b4, 1), /* MX35_PAD_FEC_RDATA2__AUDMUX_AUD6_TXD */
- [933] = IMX_PIN_REG(MX35_PAD_FEC_RDATA2, 0x77c, 0x318, 4, 0x980, 1), /* MX35_PAD_FEC_RDATA2__KPP_ROW_4 */
- [934] = IMX_PIN_REG(MX35_PAD_FEC_RDATA2, 0x77c, 0x318, 5, 0x0, 0), /* MX35_PAD_FEC_RDATA2__GPIO3_20 */
- [935] = IMX_PIN_REG(MX35_PAD_FEC_TDATA2, 0x780, 0x31c, 0, 0x0, 0), /* MX35_PAD_FEC_TDATA2__FEC_TDATA_2 */
- [936] = IMX_PIN_REG(MX35_PAD_FEC_TDATA2, 0x780, 0x31c, 1, 0x944, 3), /* MX35_PAD_FEC_TDATA2__IPU_CSI_D_5 */
- [937] = IMX_PIN_REG(MX35_PAD_FEC_TDATA2, 0x780, 0x31c, 2, 0x7b0, 1), /* MX35_PAD_FEC_TDATA2__AUDMUX_AUD6_RXD */
- [938] = IMX_PIN_REG(MX35_PAD_FEC_TDATA2, 0x780, 0x31c, 4, 0x984, 1), /* MX35_PAD_FEC_TDATA2__KPP_ROW_5 */
- [939] = IMX_PIN_REG(MX35_PAD_FEC_TDATA2, 0x780, 0x31c, 5, 0x0, 0), /* MX35_PAD_FEC_TDATA2__GPIO3_21 */
- [940] = IMX_PIN_REG(MX35_PAD_FEC_RDATA3, 0x784, 0x320, 0, 0x0, 0), /* MX35_PAD_FEC_RDATA3__FEC_RDATA_3 */
- [941] = IMX_PIN_REG(MX35_PAD_FEC_RDATA3, 0x784, 0x320, 1, 0x948, 3), /* MX35_PAD_FEC_RDATA3__IPU_CSI_D_6 */
- [942] = IMX_PIN_REG(MX35_PAD_FEC_RDATA3, 0x784, 0x320, 2, 0x7c0, 1), /* MX35_PAD_FEC_RDATA3__AUDMUX_AUD6_TXC */
- [943] = IMX_PIN_REG(MX35_PAD_FEC_RDATA3, 0x784, 0x320, 4, 0x988, 1), /* MX35_PAD_FEC_RDATA3__KPP_ROW_6 */
- [944] = IMX_PIN_REG(MX35_PAD_FEC_RDATA3, 0x784, 0x320, 6, 0x0, 0), /* MX35_PAD_FEC_RDATA3__GPIO3_22 */
- [945] = IMX_PIN_REG(MX35_PAD_FEC_TDATA3, 0x788, 0x324, 0, 0x0, 0), /* MX35_PAD_FEC_TDATA3__FEC_TDATA_3 */
- [946] = IMX_PIN_REG(MX35_PAD_FEC_TDATA3, 0x788, 0x324, 1, 0x94c, 3), /* MX35_PAD_FEC_TDATA3__IPU_CSI_D_7 */
- [947] = IMX_PIN_REG(MX35_PAD_FEC_TDATA3, 0x788, 0x324, 2, 0x7c4, 1), /* MX35_PAD_FEC_TDATA3__AUDMUX_AUD6_TXFS */
- [948] = IMX_PIN_REG(MX35_PAD_FEC_TDATA3, 0x788, 0x324, 4, 0x98c, 1), /* MX35_PAD_FEC_TDATA3__KPP_ROW_7 */
- [949] = IMX_PIN_REG(MX35_PAD_FEC_TDATA3, 0x788, 0x324, 5, 0x0, 0), /* MX35_PAD_FEC_TDATA3__GPIO3_23 */
- [950] = IMX_PIN_REG(MX35_PAD_EXT_ARMCLK, 0x78c, 0x0, 0, 0x0, 0), /* MX35_PAD_EXT_ARMCLK__CCM_EXT_ARMCLK */
- [951] = IMX_PIN_REG(MX35_PAD_TEST_MODE, 0x790, 0x0, 0, 0x0, 0), /* MX35_PAD_TEST_MODE__TCU_TEST_MODE */
+ MX35_PAD_RESERVE0 = 0,
+ MX35_PAD_CAPTURE = 1,
+ MX35_PAD_COMPARE = 2,
+ MX35_PAD_WDOG_RST = 3,
+ MX35_PAD_GPIO1_0 = 4,
+ MX35_PAD_GPIO1_1 = 5,
+ MX35_PAD_GPIO2_0 = 6,
+ MX35_PAD_GPIO3_0 = 7,
+ MX35_PAD_CLKO = 8,
+ MX35_PAD_VSTBY = 9,
+ MX35_PAD_A0 = 10,
+ MX35_PAD_A1 = 11,
+ MX35_PAD_A2 = 12,
+ MX35_PAD_A3 = 13,
+ MX35_PAD_A4 = 14,
+ MX35_PAD_A5 = 15,
+ MX35_PAD_A6 = 16,
+ MX35_PAD_A7 = 17,
+ MX35_PAD_A8 = 18,
+ MX35_PAD_A9 = 19,
+ MX35_PAD_A10 = 20,
+ MX35_PAD_MA10 = 21,
+ MX35_PAD_A11 = 22,
+ MX35_PAD_A12 = 23,
+ MX35_PAD_A13 = 24,
+ MX35_PAD_A14 = 25,
+ MX35_PAD_A15 = 26,
+ MX35_PAD_A16 = 27,
+ MX35_PAD_A17 = 28,
+ MX35_PAD_A18 = 29,
+ MX35_PAD_A19 = 30,
+ MX35_PAD_A20 = 31,
+ MX35_PAD_A21 = 32,
+ MX35_PAD_A22 = 33,
+ MX35_PAD_A23 = 34,
+ MX35_PAD_A24 = 35,
+ MX35_PAD_A25 = 36,
+ MX35_PAD_EB0 = 37,
+ MX35_PAD_EB1 = 38,
+ MX35_PAD_OE = 39,
+ MX35_PAD_CS0 = 40,
+ MX35_PAD_CS1 = 41,
+ MX35_PAD_CS2 = 42,
+ MX35_PAD_CS3 = 43,
+ MX35_PAD_CS4 = 44,
+ MX35_PAD_CS5 = 45,
+ MX35_PAD_NF_CE0 = 46,
+ MX35_PAD_LBA = 47,
+ MX35_PAD_BCLK = 48,
+ MX35_PAD_RW = 49,
+ MX35_PAD_NFWE_B = 50,
+ MX35_PAD_NFRE_B = 51,
+ MX35_PAD_NFALE = 52,
+ MX35_PAD_NFCLE = 53,
+ MX35_PAD_NFWP_B = 54,
+ MX35_PAD_NFRB = 55,
+ MX35_PAD_CSI_D8 = 56,
+ MX35_PAD_CSI_D9 = 57,
+ MX35_PAD_CSI_D10 = 58,
+ MX35_PAD_CSI_D11 = 59,
+ MX35_PAD_CSI_D12 = 60,
+ MX35_PAD_CSI_D13 = 61,
+ MX35_PAD_CSI_D14 = 62,
+ MX35_PAD_CSI_D15 = 63,
+ MX35_PAD_CSI_MCLK = 64,
+ MX35_PAD_CSI_VSYNC = 65,
+ MX35_PAD_CSI_HSYNC = 66,
+ MX35_PAD_CSI_PIXCLK = 67,
+ MX35_PAD_I2C1_CLK = 68,
+ MX35_PAD_I2C1_DAT = 69,
+ MX35_PAD_I2C2_CLK = 70,
+ MX35_PAD_I2C2_DAT = 71,
+ MX35_PAD_STXD4 = 72,
+ MX35_PAD_SRXD4 = 73,
+ MX35_PAD_SCK4 = 74,
+ MX35_PAD_STXFS4 = 75,
+ MX35_PAD_STXD5 = 76,
+ MX35_PAD_SRXD5 = 77,
+ MX35_PAD_SCK5 = 78,
+ MX35_PAD_STXFS5 = 79,
+ MX35_PAD_SCKR = 80,
+ MX35_PAD_FSR = 81,
+ MX35_PAD_HCKR = 82,
+ MX35_PAD_SCKT = 83,
+ MX35_PAD_FST = 84,
+ MX35_PAD_HCKT = 85,
+ MX35_PAD_TX5_RX0 = 86,
+ MX35_PAD_TX4_RX1 = 87,
+ MX35_PAD_TX3_RX2 = 88,
+ MX35_PAD_TX2_RX3 = 89,
+ MX35_PAD_TX1 = 90,
+ MX35_PAD_TX0 = 91,
+ MX35_PAD_CSPI1_MOSI = 92,
+ MX35_PAD_CSPI1_MISO = 93,
+ MX35_PAD_CSPI1_SS0 = 94,
+ MX35_PAD_CSPI1_SS1 = 95,
+ MX35_PAD_CSPI1_SCLK = 96,
+ MX35_PAD_CSPI1_SPI_RDY = 97,
+ MX35_PAD_RXD1 = 98,
+ MX35_PAD_TXD1 = 99,
+ MX35_PAD_RTS1 = 100,
+ MX35_PAD_CTS1 = 101,
+ MX35_PAD_RXD2 = 102,
+ MX35_PAD_TXD2 = 103,
+ MX35_PAD_RTS2 = 104,
+ MX35_PAD_CTS2 = 105,
+ MX35_PAD_USBOTG_PWR = 106,
+ MX35_PAD_USBOTG_OC = 107,
+ MX35_PAD_LD0 = 108,
+ MX35_PAD_LD1 = 109,
+ MX35_PAD_LD2 = 110,
+ MX35_PAD_LD3 = 111,
+ MX35_PAD_LD4 = 112,
+ MX35_PAD_LD5 = 113,
+ MX35_PAD_LD6 = 114,
+ MX35_PAD_LD7 = 115,
+ MX35_PAD_LD8 = 116,
+ MX35_PAD_LD9 = 117,
+ MX35_PAD_LD10 = 118,
+ MX35_PAD_LD11 = 119,
+ MX35_PAD_LD12 = 120,
+ MX35_PAD_LD13 = 121,
+ MX35_PAD_LD14 = 122,
+ MX35_PAD_LD15 = 123,
+ MX35_PAD_LD16 = 124,
+ MX35_PAD_LD17 = 125,
+ MX35_PAD_LD18 = 126,
+ MX35_PAD_LD19 = 127,
+ MX35_PAD_LD20 = 128,
+ MX35_PAD_LD21 = 129,
+ MX35_PAD_LD22 = 130,
+ MX35_PAD_LD23 = 131,
+ MX35_PAD_D3_HSYNC = 132,
+ MX35_PAD_D3_FPSHIFT = 133,
+ MX35_PAD_D3_DRDY = 134,
+ MX35_PAD_CONTRAST = 135,
+ MX35_PAD_D3_VSYNC = 136,
+ MX35_PAD_D3_REV = 137,
+ MX35_PAD_D3_CLS = 138,
+ MX35_PAD_D3_SPL = 139,
+ MX35_PAD_SD1_CMD = 140,
+ MX35_PAD_SD1_CLK = 141,
+ MX35_PAD_SD1_DATA0 = 142,
+ MX35_PAD_SD1_DATA1 = 143,
+ MX35_PAD_SD1_DATA2 = 144,
+ MX35_PAD_SD1_DATA3 = 145,
+ MX35_PAD_SD2_CMD = 146,
+ MX35_PAD_SD2_CLK = 147,
+ MX35_PAD_SD2_DATA0 = 148,
+ MX35_PAD_SD2_DATA1 = 149,
+ MX35_PAD_SD2_DATA2 = 150,
+ MX35_PAD_SD2_DATA3 = 151,
+ MX35_PAD_ATA_CS0 = 152,
+ MX35_PAD_ATA_CS1 = 153,
+ MX35_PAD_ATA_DIOR = 154,
+ MX35_PAD_ATA_DIOW = 155,
+ MX35_PAD_ATA_DMACK = 156,
+ MX35_PAD_ATA_RESET_B = 157,
+ MX35_PAD_ATA_IORDY = 158,
+ MX35_PAD_ATA_DATA0 = 159,
+ MX35_PAD_ATA_DATA1 = 160,
+ MX35_PAD_ATA_DATA2 = 161,
+ MX35_PAD_ATA_DATA3 = 162,
+ MX35_PAD_ATA_DATA4 = 163,
+ MX35_PAD_ATA_DATA5 = 164,
+ MX35_PAD_ATA_DATA6 = 165,
+ MX35_PAD_ATA_DATA7 = 166,
+ MX35_PAD_ATA_DATA8 = 167,
+ MX35_PAD_ATA_DATA9 = 168,
+ MX35_PAD_ATA_DATA10 = 169,
+ MX35_PAD_ATA_DATA11 = 170,
+ MX35_PAD_ATA_DATA12 = 171,
+ MX35_PAD_ATA_DATA13 = 172,
+ MX35_PAD_ATA_DATA14 = 173,
+ MX35_PAD_ATA_DATA15 = 174,
+ MX35_PAD_ATA_INTRQ = 175,
+ MX35_PAD_ATA_BUFF_EN = 176,
+ MX35_PAD_ATA_DMARQ = 177,
+ MX35_PAD_ATA_DA0 = 178,
+ MX35_PAD_ATA_DA1 = 179,
+ MX35_PAD_ATA_DA2 = 180,
+ MX35_PAD_MLB_CLK = 181,
+ MX35_PAD_MLB_DAT = 182,
+ MX35_PAD_MLB_SIG = 183,
+ MX35_PAD_FEC_TX_CLK = 184,
+ MX35_PAD_FEC_RX_CLK = 185,
+ MX35_PAD_FEC_RX_DV = 186,
+ MX35_PAD_FEC_COL = 187,
+ MX35_PAD_FEC_RDATA0 = 188,
+ MX35_PAD_FEC_TDATA0 = 189,
+ MX35_PAD_FEC_TX_EN = 190,
+ MX35_PAD_FEC_MDC = 191,
+ MX35_PAD_FEC_MDIO = 192,
+ MX35_PAD_FEC_TX_ERR = 193,
+ MX35_PAD_FEC_RX_ERR = 194,
+ MX35_PAD_FEC_CRS = 195,
+ MX35_PAD_FEC_RDATA1 = 196,
+ MX35_PAD_FEC_TDATA1 = 197,
+ MX35_PAD_FEC_RDATA2 = 198,
+ MX35_PAD_FEC_TDATA2 = 199,
+ MX35_PAD_FEC_RDATA3 = 200,
+ MX35_PAD_FEC_TDATA3 = 201,
+ MX35_PAD_RESERVE1 = 202,
+ MX35_PAD_RESERVE2 = 203,
+ MX35_PAD_RESERVE3 = 204,
+ MX35_PAD_RESERVE4 = 205,
+ MX35_PAD_RESERVE5 = 206,
+ MX35_PAD_RESERVE6 = 207,
+ MX35_PAD_RESERVE7 = 208,
+ MX35_PAD_RESET_IN_B = 209,
+ MX35_PAD_POR_B = 210,
+ MX35_PAD_RESERVE8 = 211,
+ MX35_PAD_BOOT_MODE0 = 212,
+ MX35_PAD_BOOT_MODE1 = 213,
+ MX35_PAD_CLK_MODE0 = 214,
+ MX35_PAD_CLK_MODE1 = 215,
+ MX35_PAD_POWER_FAIL = 216,
+ MX35_PAD_RESERVE9 = 217,
+ MX35_PAD_RESERVE10 = 218,
+ MX35_PAD_RESERVE11 = 219,
+ MX35_PAD_RESERVE12 = 220,
+ MX35_PAD_RESERVE13 = 221,
+ MX35_PAD_RESERVE14 = 222,
+ MX35_PAD_RESERVE15 = 223,
+ MX35_PAD_RESERVE16 = 224,
+ MX35_PAD_RESERVE17 = 225,
+ MX35_PAD_RESERVE18 = 226,
+ MX35_PAD_RESERVE19 = 227,
+ MX35_PAD_RESERVE20 = 228,
+ MX35_PAD_RESERVE21 = 229,
+ MX35_PAD_RESERVE22 = 230,
+ MX35_PAD_RESERVE23 = 231,
+ MX35_PAD_RESERVE24 = 232,
+ MX35_PAD_RESERVE25 = 233,
+ MX35_PAD_RESERVE26 = 234,
+ MX35_PAD_RESERVE27 = 235,
+ MX35_PAD_RESERVE28 = 236,
+ MX35_PAD_RESERVE29 = 237,
+ MX35_PAD_RESERVE30 = 238,
+ MX35_PAD_RESERVE31 = 239,
+ MX35_PAD_RESERVE32 = 240,
+ MX35_PAD_RESERVE33 = 241,
+ MX35_PAD_RESERVE34 = 242,
+ MX35_PAD_RESERVE35 = 243,
+ MX35_PAD_RESERVE36 = 244,
+ MX35_PAD_SDBA1 = 245,
+ MX35_PAD_SDBA0 = 246,
+ MX35_PAD_SD0 = 247,
+ MX35_PAD_SD1 = 248,
+ MX35_PAD_SD2 = 249,
+ MX35_PAD_SD3 = 250,
+ MX35_PAD_SD4 = 251,
+ MX35_PAD_SD5 = 252,
+ MX35_PAD_SD6 = 253,
+ MX35_PAD_SD7 = 254,
+ MX35_PAD_SD8 = 255,
+ MX35_PAD_SD9 = 256,
+ MX35_PAD_SD10 = 257,
+ MX35_PAD_SD11 = 258,
+ MX35_PAD_SD12 = 259,
+ MX35_PAD_SD13 = 260,
+ MX35_PAD_SD14 = 261,
+ MX35_PAD_SD15 = 262,
+ MX35_PAD_SD16 = 263,
+ MX35_PAD_SD17 = 264,
+ MX35_PAD_SD18 = 265,
+ MX35_PAD_SD19 = 266,
+ MX35_PAD_SD20 = 267,
+ MX35_PAD_SD21 = 268,
+ MX35_PAD_SD22 = 269,
+ MX35_PAD_SD23 = 270,
+ MX35_PAD_SD24 = 271,
+ MX35_PAD_SD25 = 272,
+ MX35_PAD_SD26 = 273,
+ MX35_PAD_SD27 = 274,
+ MX35_PAD_SD28 = 275,
+ MX35_PAD_SD29 = 276,
+ MX35_PAD_SD30 = 277,
+ MX35_PAD_SD31 = 278,
+ MX35_PAD_DQM0 = 279,
+ MX35_PAD_DQM1 = 280,
+ MX35_PAD_DQM2 = 281,
+ MX35_PAD_DQM3 = 282,
+ MX35_PAD_RESERVE37 = 283,
+ MX35_PAD_RESERVE38 = 284,
+ MX35_PAD_RESERVE39 = 285,
+ MX35_PAD_RESERVE40 = 286,
+ MX35_PAD_RESERVE41 = 287,
+ MX35_PAD_RESERVE42 = 288,
+ MX35_PAD_RESERVE43 = 289,
+ MX35_PAD_RESERVE44 = 290,
+ MX35_PAD_RESERVE45 = 291,
+ MX35_PAD_RESERVE46 = 292,
+ MX35_PAD_ECB = 293,
+ MX35_PAD_RESERVE47 = 294,
+ MX35_PAD_RESERVE48 = 295,
+ MX35_PAD_RESERVE49 = 296,
+ MX35_PAD_RAS = 297,
+ MX35_PAD_CAS = 298,
+ MX35_PAD_SDWE = 299,
+ MX35_PAD_SDCKE0 = 300,
+ MX35_PAD_SDCKE1 = 301,
+ MX35_PAD_SDCLK = 302,
+ MX35_PAD_SDQS0 = 303,
+ MX35_PAD_SDQS1 = 304,
+ MX35_PAD_SDQS2 = 305,
+ MX35_PAD_SDQS3 = 306,
+ MX35_PAD_RESERVE50 = 307,
+ MX35_PAD_RESERVE51 = 308,
+ MX35_PAD_RESERVE52 = 309,
+ MX35_PAD_RESERVE53 = 310,
+ MX35_PAD_RESERVE54 = 311,
+ MX35_PAD_RESERVE55 = 312,
+ MX35_PAD_D15 = 313,
+ MX35_PAD_D14 = 314,
+ MX35_PAD_D13 = 315,
+ MX35_PAD_D12 = 316,
+ MX35_PAD_D11 = 317,
+ MX35_PAD_D10 = 318,
+ MX35_PAD_D9 = 319,
+ MX35_PAD_D8 = 320,
+ MX35_PAD_D7 = 321,
+ MX35_PAD_D6 = 322,
+ MX35_PAD_D5 = 323,
+ MX35_PAD_D4 = 324,
+ MX35_PAD_D3 = 325,
+ MX35_PAD_D2 = 326,
+ MX35_PAD_D1 = 327,
+ MX35_PAD_D0 = 328,
+ MX35_PAD_RESERVE56 = 329,
+ MX35_PAD_RESERVE57 = 330,
+ MX35_PAD_RESERVE58 = 331,
+ MX35_PAD_RESERVE59 = 332,
+ MX35_PAD_RESERVE60 = 333,
+ MX35_PAD_RESERVE61 = 334,
+ MX35_PAD_RESERVE62 = 335,
+ MX35_PAD_RESERVE63 = 336,
+ MX35_PAD_RESERVE64 = 337,
+ MX35_PAD_RESERVE65 = 338,
+ MX35_PAD_RESERVE66 = 339,
+ MX35_PAD_RESERVE67 = 340,
+ MX35_PAD_RESERVE68 = 341,
+ MX35_PAD_RESERVE69 = 342,
+ MX35_PAD_RESERVE70 = 343,
+ MX35_PAD_RESERVE71 = 344,
+ MX35_PAD_RESERVE72 = 345,
+ MX35_PAD_RESERVE73 = 346,
+ MX35_PAD_RESERVE74 = 347,
+ MX35_PAD_RESERVE75 = 348,
+ MX35_PAD_RESERVE76 = 349,
+ MX35_PAD_RESERVE77 = 350,
+ MX35_PAD_RESERVE78 = 351,
+ MX35_PAD_RESERVE79 = 352,
+ MX35_PAD_RESERVE80 = 353,
+ MX35_PAD_RESERVE81 = 354,
+ MX35_PAD_RESERVE82 = 355,
+ MX35_PAD_RESERVE83 = 356,
+ MX35_PAD_RESERVE84 = 357,
+ MX35_PAD_RESERVE85 = 358,
+ MX35_PAD_RESERVE86 = 359,
+ MX35_PAD_RESERVE87 = 360,
+ MX35_PAD_RESERVE88 = 361,
+ MX35_PAD_RESERVE89 = 362,
+ MX35_PAD_RESERVE90 = 363,
+ MX35_PAD_RESERVE91 = 364,
+ MX35_PAD_RESERVE92 = 365,
+ MX35_PAD_RESERVE93 = 366,
+ MX35_PAD_RESERVE94 = 367,
+ MX35_PAD_RESERVE95 = 368,
+ MX35_PAD_RESERVE96 = 369,
+ MX35_PAD_RESERVE97 = 370,
+ MX35_PAD_RESERVE98 = 371,
+ MX35_PAD_RESERVE99 = 372,
+ MX35_PAD_RESERVE100 = 373,
+ MX35_PAD_RESERVE101 = 374,
+ MX35_PAD_RESERVE102 = 375,
+ MX35_PAD_RESERVE103 = 376,
+ MX35_PAD_RESERVE104 = 377,
+ MX35_PAD_RESERVE105 = 378,
+ MX35_PAD_RTCK = 379,
+ MX35_PAD_TCK = 380,
+ MX35_PAD_TMS = 381,
+ MX35_PAD_TDI = 382,
+ MX35_PAD_TDO = 383,
+ MX35_PAD_TRSTB = 384,
+ MX35_PAD_DE_B = 385,
+ MX35_PAD_SJC_MOD = 386,
+ MX35_PAD_RESERVE106 = 387,
+ MX35_PAD_RESERVE107 = 388,
+ MX35_PAD_RESERVE108 = 389,
+ MX35_PAD_RESERVE109 = 390,
+ MX35_PAD_RESERVE110 = 391,
+ MX35_PAD_RESERVE111 = 392,
+ MX35_PAD_RESERVE112 = 393,
+ MX35_PAD_RESERVE113 = 394,
+ MX35_PAD_RESERVE114 = 395,
+ MX35_PAD_RESERVE115 = 396,
+ MX35_PAD_RESERVE116 = 397,
+ MX35_PAD_RESERVE117 = 398,
+ MX35_PAD_RESERVE118 = 399,
+ MX35_PAD_RESERVE119 = 400,
+ MX35_PAD_RESERVE120 = 401,
+ MX35_PAD_RESERVE121 = 402,
+ MX35_PAD_RESERVE122 = 403,
+ MX35_PAD_RESERVE123 = 404,
+ MX35_PAD_RESERVE124 = 405,
+ MX35_PAD_RESERVE125 = 406,
+ MX35_PAD_RESERVE126 = 407,
+ MX35_PAD_RESERVE127 = 408,
+ MX35_PAD_RESERVE128 = 409,
+ MX35_PAD_RESERVE129 = 410,
+ MX35_PAD_RESERVE130 = 411,
+ MX35_PAD_RESERVE131 = 412,
+ MX35_PAD_RESERVE132 = 413,
+ MX35_PAD_RESERVE133 = 414,
+ MX35_PAD_RESERVE134 = 415,
+ MX35_PAD_RESERVE135 = 416,
+ MX35_PAD_RESERVE136 = 417,
+ MX35_PAD_RESERVE137 = 418,
+ MX35_PAD_RESERVE138 = 419,
+ MX35_PAD_RESERVE139 = 420,
+ MX35_PAD_RESERVE140 = 421,
+ MX35_PAD_RESERVE141 = 422,
+ MX35_PAD_RESERVE142 = 423,
+ MX35_PAD_RESERVE143 = 424,
+ MX35_PAD_RESERVE144 = 425,
+ MX35_PAD_RESERVE145 = 426,
+ MX35_PAD_RESERVE146 = 427,
+ MX35_PAD_RESERVE147 = 428,
+ MX35_PAD_RESERVE148 = 429,
+ MX35_PAD_RESERVE149 = 430,
+ MX35_PAD_RESERVE150 = 431,
+ MX35_PAD_RESERVE151 = 432,
+ MX35_PAD_RESERVE152 = 433,
+ MX35_PAD_RESERVE153 = 434,
+ MX35_PAD_RESERVE154 = 435,
+ MX35_PAD_RESERVE155 = 436,
+ MX35_PAD_RESERVE156 = 437,
+ MX35_PAD_RESERVE157 = 438,
+ MX35_PAD_RESERVE158 = 439,
+ MX35_PAD_RESERVE159 = 440,
+ MX35_PAD_RESERVE160 = 441,
+ MX35_PAD_RESERVE161 = 442,
+ MX35_PAD_RESERVE162 = 443,
+ MX35_PAD_RESERVE163 = 444,
+ MX35_PAD_RESERVE164 = 445,
+ MX35_PAD_RESERVE165 = 446,
+ MX35_PAD_RESERVE166 = 447,
+ MX35_PAD_RESERVE167 = 448,
+ MX35_PAD_RESERVE168 = 449,
+ MX35_PAD_RESERVE169 = 450,
+ MX35_PAD_RESERVE170 = 451,
+ MX35_PAD_RESERVE171 = 452,
+ MX35_PAD_RESERVE172 = 453,
+ MX35_PAD_RESERVE173 = 454,
+ MX35_PAD_RESERVE174 = 455,
+ MX35_PAD_RESERVE175 = 456,
+ MX35_PAD_RESERVE176 = 457,
+ MX35_PAD_RESERVE177 = 458,
+ MX35_PAD_RESERVE178 = 459,
+ MX35_PAD_RESERVE179 = 460,
+ MX35_PAD_RESERVE180 = 461,
+ MX35_PAD_RESERVE181 = 462,
+ MX35_PAD_RESERVE182 = 463,
+ MX35_PAD_RESERVE183 = 464,
+ MX35_PAD_RESERVE184 = 465,
+ MX35_PAD_RESERVE185 = 466,
+ MX35_PAD_RESERVE186 = 467,
+ MX35_PAD_RESERVE187 = 468,
+ MX35_PAD_RESERVE188 = 469,
+ MX35_PAD_RESERVE189 = 470,
+ MX35_PAD_RESERVE190 = 471,
+ MX35_PAD_RESERVE191 = 472,
+ MX35_PAD_RESERVE192 = 473,
+ MX35_PAD_RESERVE193 = 474,
+ MX35_PAD_RESERVE194 = 475,
+ MX35_PAD_RESERVE195 = 476,
+ MX35_PAD_RESERVE196 = 477,
+ MX35_PAD_RESERVE197 = 478,
+ MX35_PAD_RESERVE198 = 479,
+ MX35_PAD_RESERVE199 = 480,
+ MX35_PAD_RESERVE200 = 481,
+ MX35_PAD_RESERVE201 = 482,
+ MX35_PAD_EXT_ARMCLK = 483,
+ MX35_PAD_TEST_MODE = 484,
};

/* Pad names for the pinmux subsystem */
static const struct pinctrl_pin_desc imx35_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE0),
IMX_PINCTRL_PIN(MX35_PAD_CAPTURE),
IMX_PINCTRL_PIN(MX35_PAD_COMPARE),
IMX_PINCTRL_PIN(MX35_PAD_WDOG_RST),
@@ -1274,14 +521,7 @@ static const struct pinctrl_pin_desc imx35_pinctrl_pads[] = {
IMX_PINCTRL_PIN(MX35_PAD_GPIO1_1),
IMX_PINCTRL_PIN(MX35_PAD_GPIO2_0),
IMX_PINCTRL_PIN(MX35_PAD_GPIO3_0),
- IMX_PINCTRL_PIN(MX35_PAD_RESET_IN_B),
- IMX_PINCTRL_PIN(MX35_PAD_POR_B),
IMX_PINCTRL_PIN(MX35_PAD_CLKO),
- IMX_PINCTRL_PIN(MX35_PAD_BOOT_MODE0),
- IMX_PINCTRL_PIN(MX35_PAD_BOOT_MODE1),
- IMX_PINCTRL_PIN(MX35_PAD_CLK_MODE0),
- IMX_PINCTRL_PIN(MX35_PAD_CLK_MODE1),
- IMX_PINCTRL_PIN(MX35_PAD_POWER_FAIL),
IMX_PINCTRL_PIN(MX35_PAD_VSTBY),
IMX_PINCTRL_PIN(MX35_PAD_A0),
IMX_PINCTRL_PIN(MX35_PAD_A1),
@@ -1310,44 +550,6 @@ static const struct pinctrl_pin_desc imx35_pinctrl_pads[] = {
IMX_PINCTRL_PIN(MX35_PAD_A23),
IMX_PINCTRL_PIN(MX35_PAD_A24),
IMX_PINCTRL_PIN(MX35_PAD_A25),
- IMX_PINCTRL_PIN(MX35_PAD_SDBA1),
- IMX_PINCTRL_PIN(MX35_PAD_SDBA0),
- IMX_PINCTRL_PIN(MX35_PAD_SD0),
- IMX_PINCTRL_PIN(MX35_PAD_SD1),
- IMX_PINCTRL_PIN(MX35_PAD_SD2),
- IMX_PINCTRL_PIN(MX35_PAD_SD3),
- IMX_PINCTRL_PIN(MX35_PAD_SD4),
- IMX_PINCTRL_PIN(MX35_PAD_SD5),
- IMX_PINCTRL_PIN(MX35_PAD_SD6),
- IMX_PINCTRL_PIN(MX35_PAD_SD7),
- IMX_PINCTRL_PIN(MX35_PAD_SD8),
- IMX_PINCTRL_PIN(MX35_PAD_SD9),
- IMX_PINCTRL_PIN(MX35_PAD_SD10),
- IMX_PINCTRL_PIN(MX35_PAD_SD11),
- IMX_PINCTRL_PIN(MX35_PAD_SD12),
- IMX_PINCTRL_PIN(MX35_PAD_SD13),
- IMX_PINCTRL_PIN(MX35_PAD_SD14),
- IMX_PINCTRL_PIN(MX35_PAD_SD15),
- IMX_PINCTRL_PIN(MX35_PAD_SD16),
- IMX_PINCTRL_PIN(MX35_PAD_SD17),
- IMX_PINCTRL_PIN(MX35_PAD_SD18),
- IMX_PINCTRL_PIN(MX35_PAD_SD19),
- IMX_PINCTRL_PIN(MX35_PAD_SD20),
- IMX_PINCTRL_PIN(MX35_PAD_SD21),
- IMX_PINCTRL_PIN(MX35_PAD_SD22),
- IMX_PINCTRL_PIN(MX35_PAD_SD23),
- IMX_PINCTRL_PIN(MX35_PAD_SD24),
- IMX_PINCTRL_PIN(MX35_PAD_SD25),
- IMX_PINCTRL_PIN(MX35_PAD_SD26),
- IMX_PINCTRL_PIN(MX35_PAD_SD27),
- IMX_PINCTRL_PIN(MX35_PAD_SD28),
- IMX_PINCTRL_PIN(MX35_PAD_SD29),
- IMX_PINCTRL_PIN(MX35_PAD_SD30),
- IMX_PINCTRL_PIN(MX35_PAD_SD31),
- IMX_PINCTRL_PIN(MX35_PAD_DQM0),
- IMX_PINCTRL_PIN(MX35_PAD_DQM1),
- IMX_PINCTRL_PIN(MX35_PAD_DQM2),
- IMX_PINCTRL_PIN(MX35_PAD_DQM3),
IMX_PINCTRL_PIN(MX35_PAD_EB0),
IMX_PINCTRL_PIN(MX35_PAD_EB1),
IMX_PINCTRL_PIN(MX35_PAD_OE),
@@ -1358,42 +560,15 @@ static const struct pinctrl_pin_desc imx35_pinctrl_pads[] = {
IMX_PINCTRL_PIN(MX35_PAD_CS4),
IMX_PINCTRL_PIN(MX35_PAD_CS5),
IMX_PINCTRL_PIN(MX35_PAD_NF_CE0),
- IMX_PINCTRL_PIN(MX35_PAD_ECB),
IMX_PINCTRL_PIN(MX35_PAD_LBA),
IMX_PINCTRL_PIN(MX35_PAD_BCLK),
IMX_PINCTRL_PIN(MX35_PAD_RW),
- IMX_PINCTRL_PIN(MX35_PAD_RAS),
- IMX_PINCTRL_PIN(MX35_PAD_CAS),
- IMX_PINCTRL_PIN(MX35_PAD_SDWE),
- IMX_PINCTRL_PIN(MX35_PAD_SDCKE0),
- IMX_PINCTRL_PIN(MX35_PAD_SDCKE1),
- IMX_PINCTRL_PIN(MX35_PAD_SDCLK),
- IMX_PINCTRL_PIN(MX35_PAD_SDQS0),
- IMX_PINCTRL_PIN(MX35_PAD_SDQS1),
- IMX_PINCTRL_PIN(MX35_PAD_SDQS2),
- IMX_PINCTRL_PIN(MX35_PAD_SDQS3),
IMX_PINCTRL_PIN(MX35_PAD_NFWE_B),
IMX_PINCTRL_PIN(MX35_PAD_NFRE_B),
IMX_PINCTRL_PIN(MX35_PAD_NFALE),
IMX_PINCTRL_PIN(MX35_PAD_NFCLE),
IMX_PINCTRL_PIN(MX35_PAD_NFWP_B),
IMX_PINCTRL_PIN(MX35_PAD_NFRB),
- IMX_PINCTRL_PIN(MX35_PAD_D15),
- IMX_PINCTRL_PIN(MX35_PAD_D14),
- IMX_PINCTRL_PIN(MX35_PAD_D13),
- IMX_PINCTRL_PIN(MX35_PAD_D12),
- IMX_PINCTRL_PIN(MX35_PAD_D11),
- IMX_PINCTRL_PIN(MX35_PAD_D10),
- IMX_PINCTRL_PIN(MX35_PAD_D9),
- IMX_PINCTRL_PIN(MX35_PAD_D8),
- IMX_PINCTRL_PIN(MX35_PAD_D7),
- IMX_PINCTRL_PIN(MX35_PAD_D6),
- IMX_PINCTRL_PIN(MX35_PAD_D5),
- IMX_PINCTRL_PIN(MX35_PAD_D4),
- IMX_PINCTRL_PIN(MX35_PAD_D3),
- IMX_PINCTRL_PIN(MX35_PAD_D2),
- IMX_PINCTRL_PIN(MX35_PAD_D1),
- IMX_PINCTRL_PIN(MX35_PAD_D0),
IMX_PINCTRL_PIN(MX35_PAD_CSI_D8),
IMX_PINCTRL_PIN(MX35_PAD_CSI_D9),
IMX_PINCTRL_PIN(MX35_PAD_CSI_D10),
@@ -1444,14 +619,6 @@ static const struct pinctrl_pin_desc imx35_pinctrl_pads[] = {
IMX_PINCTRL_PIN(MX35_PAD_TXD2),
IMX_PINCTRL_PIN(MX35_PAD_RTS2),
IMX_PINCTRL_PIN(MX35_PAD_CTS2),
- IMX_PINCTRL_PIN(MX35_PAD_RTCK),
- IMX_PINCTRL_PIN(MX35_PAD_TCK),
- IMX_PINCTRL_PIN(MX35_PAD_TMS),
- IMX_PINCTRL_PIN(MX35_PAD_TDI),
- IMX_PINCTRL_PIN(MX35_PAD_TDO),
- IMX_PINCTRL_PIN(MX35_PAD_TRSTB),
- IMX_PINCTRL_PIN(MX35_PAD_DE_B),
- IMX_PINCTRL_PIN(MX35_PAD_SJC_MOD),
IMX_PINCTRL_PIN(MX35_PAD_USBOTG_PWR),
IMX_PINCTRL_PIN(MX35_PAD_USBOTG_OC),
IMX_PINCTRL_PIN(MX35_PAD_LD0),
@@ -1548,6 +715,287 @@ static const struct pinctrl_pin_desc imx35_pinctrl_pads[] = {
IMX_PINCTRL_PIN(MX35_PAD_FEC_TDATA2),
IMX_PINCTRL_PIN(MX35_PAD_FEC_RDATA3),
IMX_PINCTRL_PIN(MX35_PAD_FEC_TDATA3),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE1),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE2),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE3),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE4),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE5),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE6),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE7),
+ IMX_PINCTRL_PIN(MX35_PAD_RESET_IN_B),
+ IMX_PINCTRL_PIN(MX35_PAD_POR_B),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE8),
+ IMX_PINCTRL_PIN(MX35_PAD_BOOT_MODE0),
+ IMX_PINCTRL_PIN(MX35_PAD_BOOT_MODE1),
+ IMX_PINCTRL_PIN(MX35_PAD_CLK_MODE0),
+ IMX_PINCTRL_PIN(MX35_PAD_CLK_MODE1),
+ IMX_PINCTRL_PIN(MX35_PAD_POWER_FAIL),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE9),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE10),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE11),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE12),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE13),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE14),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE15),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE16),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE17),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE18),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE19),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE20),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE21),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE22),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE23),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE24),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE25),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE26),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE27),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE28),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE29),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE30),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE31),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE32),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE33),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE34),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE35),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE36),
+ IMX_PINCTRL_PIN(MX35_PAD_SDBA1),
+ IMX_PINCTRL_PIN(MX35_PAD_SDBA0),
+ IMX_PINCTRL_PIN(MX35_PAD_SD0),
+ IMX_PINCTRL_PIN(MX35_PAD_SD1),
+ IMX_PINCTRL_PIN(MX35_PAD_SD2),
+ IMX_PINCTRL_PIN(MX35_PAD_SD3),
+ IMX_PINCTRL_PIN(MX35_PAD_SD4),
+ IMX_PINCTRL_PIN(MX35_PAD_SD5),
+ IMX_PINCTRL_PIN(MX35_PAD_SD6),
+ IMX_PINCTRL_PIN(MX35_PAD_SD7),
+ IMX_PINCTRL_PIN(MX35_PAD_SD8),
+ IMX_PINCTRL_PIN(MX35_PAD_SD9),
+ IMX_PINCTRL_PIN(MX35_PAD_SD10),
+ IMX_PINCTRL_PIN(MX35_PAD_SD11),
+ IMX_PINCTRL_PIN(MX35_PAD_SD12),
+ IMX_PINCTRL_PIN(MX35_PAD_SD13),
+ IMX_PINCTRL_PIN(MX35_PAD_SD14),
+ IMX_PINCTRL_PIN(MX35_PAD_SD15),
+ IMX_PINCTRL_PIN(MX35_PAD_SD16),
+ IMX_PINCTRL_PIN(MX35_PAD_SD17),
+ IMX_PINCTRL_PIN(MX35_PAD_SD18),
+ IMX_PINCTRL_PIN(MX35_PAD_SD19),
+ IMX_PINCTRL_PIN(MX35_PAD_SD20),
+ IMX_PINCTRL_PIN(MX35_PAD_SD21),
+ IMX_PINCTRL_PIN(MX35_PAD_SD22),
+ IMX_PINCTRL_PIN(MX35_PAD_SD23),
+ IMX_PINCTRL_PIN(MX35_PAD_SD24),
+ IMX_PINCTRL_PIN(MX35_PAD_SD25),
+ IMX_PINCTRL_PIN(MX35_PAD_SD26),
+ IMX_PINCTRL_PIN(MX35_PAD_SD27),
+ IMX_PINCTRL_PIN(MX35_PAD_SD28),
+ IMX_PINCTRL_PIN(MX35_PAD_SD29),
+ IMX_PINCTRL_PIN(MX35_PAD_SD30),
+ IMX_PINCTRL_PIN(MX35_PAD_SD31),
+ IMX_PINCTRL_PIN(MX35_PAD_DQM0),
+ IMX_PINCTRL_PIN(MX35_PAD_DQM1),
+ IMX_PINCTRL_PIN(MX35_PAD_DQM2),
+ IMX_PINCTRL_PIN(MX35_PAD_DQM3),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE37),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE38),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE39),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE40),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE41),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE42),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE43),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE44),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE45),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE46),
+ IMX_PINCTRL_PIN(MX35_PAD_ECB),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE47),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE48),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE49),
+ IMX_PINCTRL_PIN(MX35_PAD_RAS),
+ IMX_PINCTRL_PIN(MX35_PAD_CAS),
+ IMX_PINCTRL_PIN(MX35_PAD_SDWE),
+ IMX_PINCTRL_PIN(MX35_PAD_SDCKE0),
+ IMX_PINCTRL_PIN(MX35_PAD_SDCKE1),
+ IMX_PINCTRL_PIN(MX35_PAD_SDCLK),
+ IMX_PINCTRL_PIN(MX35_PAD_SDQS0),
+ IMX_PINCTRL_PIN(MX35_PAD_SDQS1),
+ IMX_PINCTRL_PIN(MX35_PAD_SDQS2),
+ IMX_PINCTRL_PIN(MX35_PAD_SDQS3),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE50),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE51),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE52),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE53),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE54),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE55),
+ IMX_PINCTRL_PIN(MX35_PAD_D15),
+ IMX_PINCTRL_PIN(MX35_PAD_D14),
+ IMX_PINCTRL_PIN(MX35_PAD_D13),
+ IMX_PINCTRL_PIN(MX35_PAD_D12),
+ IMX_PINCTRL_PIN(MX35_PAD_D11),
+ IMX_PINCTRL_PIN(MX35_PAD_D10),
+ IMX_PINCTRL_PIN(MX35_PAD_D9),
+ IMX_PINCTRL_PIN(MX35_PAD_D8),
+ IMX_PINCTRL_PIN(MX35_PAD_D7),
+ IMX_PINCTRL_PIN(MX35_PAD_D6),
+ IMX_PINCTRL_PIN(MX35_PAD_D5),
+ IMX_PINCTRL_PIN(MX35_PAD_D4),
+ IMX_PINCTRL_PIN(MX35_PAD_D3),
+ IMX_PINCTRL_PIN(MX35_PAD_D2),
+ IMX_PINCTRL_PIN(MX35_PAD_D1),
+ IMX_PINCTRL_PIN(MX35_PAD_D0),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE56),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE57),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE58),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE59),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE60),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE61),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE62),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE63),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE64),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE65),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE66),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE67),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE68),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE69),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE70),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE71),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE72),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE73),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE74),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE75),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE76),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE77),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE78),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE79),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE80),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE81),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE82),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE83),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE84),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE85),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE86),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE87),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE88),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE89),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE90),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE91),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE92),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE93),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE94),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE95),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE96),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE97),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE98),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE99),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE100),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE101),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE102),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE103),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE104),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE105),
+ IMX_PINCTRL_PIN(MX35_PAD_RTCK),
+ IMX_PINCTRL_PIN(MX35_PAD_TCK),
+ IMX_PINCTRL_PIN(MX35_PAD_TMS),
+ IMX_PINCTRL_PIN(MX35_PAD_TDI),
+ IMX_PINCTRL_PIN(MX35_PAD_TDO),
+ IMX_PINCTRL_PIN(MX35_PAD_TRSTB),
+ IMX_PINCTRL_PIN(MX35_PAD_DE_B),
+ IMX_PINCTRL_PIN(MX35_PAD_SJC_MOD),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE106),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE107),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE108),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE109),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE110),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE111),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE112),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE113),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE114),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE115),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE116),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE117),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE118),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE119),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE120),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE121),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE122),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE123),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE124),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE125),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE126),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE127),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE128),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE129),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE130),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE131),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE132),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE133),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE134),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE135),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE136),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE137),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE138),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE139),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE140),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE141),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE142),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE143),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE144),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE145),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE146),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE147),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE148),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE149),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE150),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE151),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE152),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE153),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE154),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE155),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE156),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE157),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE158),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE159),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE160),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE161),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE162),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE163),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE164),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE165),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE166),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE167),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE168),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE169),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE170),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE171),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE172),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE173),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE174),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE175),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE176),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE177),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE178),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE179),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE180),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE181),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE182),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE183),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE184),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE185),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE186),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE187),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE188),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE189),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE190),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE191),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE192),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE193),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE194),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE195),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE196),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE197),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE198),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE199),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE200),
+ IMX_PINCTRL_PIN(MX35_PAD_RESERVE201),
IMX_PINCTRL_PIN(MX35_PAD_EXT_ARMCLK),
IMX_PINCTRL_PIN(MX35_PAD_TEST_MODE),
};
@@ -1555,8 +1003,6 @@ static const struct pinctrl_pin_desc imx35_pinctrl_pads[] = {
static struct imx_pinctrl_soc_info imx35_pinctrl_info = {
.pins = imx35_pinctrl_pads,
.npins = ARRAY_SIZE(imx35_pinctrl_pads),
- .pin_regs = imx35_pin_regs,
- .npin_regs = ARRAY_SIZE(imx35_pin_regs),
};

static struct of_device_id imx35_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/pinctrl-imx51.c b/drivers/pinctrl/pinctrl-imx51.c
index 9a92aaa..db268b9 100644
--- a/drivers/pinctrl/pinctrl-imx51.c
+++ b/drivers/pinctrl/pinctrl-imx51.c
@@ -23,1015 +23,400 @@
#include "pinctrl-imx.h"
enum imx51_pads {
- MX51_PAD_EIM_D16 = 0,
- MX51_PAD_EIM_D17 = 1,
- MX51_PAD_EIM_D18 = 2,
- MX51_PAD_EIM_D19 = 3,
- MX51_PAD_EIM_D20 = 4,
- MX51_PAD_EIM_D21 = 5,
- MX51_PAD_EIM_D22 = 6,
- MX51_PAD_EIM_D23 = 7,
- MX51_PAD_EIM_D24 = 8,
- MX51_PAD_EIM_D25 = 9,
- MX51_PAD_EIM_D26 = 10,
- MX51_PAD_EIM_D27 = 11,
- MX51_PAD_EIM_D28 = 12,
- MX51_PAD_EIM_D29 = 13,
- MX51_PAD_EIM_D30 = 14,
- MX51_PAD_EIM_D31 = 15,
- MX51_PAD_EIM_A16 = 16,
- MX51_PAD_EIM_A17 = 17,
- MX51_PAD_EIM_A18 = 18,
- MX51_PAD_EIM_A19 = 19,
- MX51_PAD_EIM_A20 = 20,
- MX51_PAD_EIM_A21 = 21,
- MX51_PAD_EIM_A22 = 22,
- MX51_PAD_EIM_A23 = 23,
- MX51_PAD_EIM_A24 = 24,
- MX51_PAD_EIM_A25 = 25,
- MX51_PAD_EIM_A26 = 26,
- MX51_PAD_EIM_A27 = 27,
- MX51_PAD_EIM_EB0 = 28,
- MX51_PAD_EIM_EB1 = 29,
- MX51_PAD_EIM_EB2 = 30,
- MX51_PAD_EIM_EB3 = 31,
- MX51_PAD_EIM_OE = 32,
- MX51_PAD_EIM_CS0 = 33,
- MX51_PAD_EIM_CS1 = 34,
- MX51_PAD_EIM_CS2 = 35,
- MX51_PAD_EIM_CS3 = 36,
- MX51_PAD_EIM_CS4 = 37,
- MX51_PAD_EIM_CS5 = 38,
- MX51_PAD_EIM_DTACK = 39,
- MX51_PAD_EIM_LBA = 40,
- MX51_PAD_EIM_CRE = 41,
- MX51_PAD_DRAM_CS1 = 42,
- MX51_PAD_NANDF_WE_B = 43,
- MX51_PAD_NANDF_RE_B = 44,
- MX51_PAD_NANDF_ALE = 45,
- MX51_PAD_NANDF_CLE = 46,
- MX51_PAD_NANDF_WP_B = 47,
- MX51_PAD_NANDF_RB0 = 48,
- MX51_PAD_NANDF_RB1 = 49,
- MX51_PAD_NANDF_RB2 = 50,
- MX51_PAD_NANDF_RB3 = 51,
- MX51_PAD_GPIO_NAND = 52,
- MX51_PAD_NANDF_CS0 = 53,
- MX51_PAD_NANDF_CS1 = 54,
- MX51_PAD_NANDF_CS2 = 55,
- MX51_PAD_NANDF_CS3 = 56,
- MX51_PAD_NANDF_CS4 = 57,
- MX51_PAD_NANDF_CS5 = 58,
- MX51_PAD_NANDF_CS6 = 59,
- MX51_PAD_NANDF_CS7 = 60,
- MX51_PAD_NANDF_RDY_INT = 61,
- MX51_PAD_NANDF_D15 = 62,
- MX51_PAD_NANDF_D14 = 63,
- MX51_PAD_NANDF_D13 = 64,
- MX51_PAD_NANDF_D12 = 65,
- MX51_PAD_NANDF_D11 = 66,
- MX51_PAD_NANDF_D10 = 67,
- MX51_PAD_NANDF_D9 = 68,
- MX51_PAD_NANDF_D8 = 69,
- MX51_PAD_NANDF_D7 = 70,
- MX51_PAD_NANDF_D6 = 71,
- MX51_PAD_NANDF_D5 = 72,
- MX51_PAD_NANDF_D4 = 73,
- MX51_PAD_NANDF_D3 = 74,
- MX51_PAD_NANDF_D2 = 75,
- MX51_PAD_NANDF_D1 = 76,
- MX51_PAD_NANDF_D0 = 77,
- MX51_PAD_CSI1_D8 = 78,
- MX51_PAD_CSI1_D9 = 79,
- MX51_PAD_CSI1_D10 = 80,
- MX51_PAD_CSI1_D11 = 81,
- MX51_PAD_CSI1_D12 = 82,
- MX51_PAD_CSI1_D13 = 83,
- MX51_PAD_CSI1_D14 = 84,
- MX51_PAD_CSI1_D15 = 85,
- MX51_PAD_CSI1_D16 = 86,
- MX51_PAD_CSI1_D17 = 87,
- MX51_PAD_CSI1_D18 = 88,
- MX51_PAD_CSI1_D19 = 89,
- MX51_PAD_CSI1_VSYNC = 90,
- MX51_PAD_CSI1_HSYNC = 91,
- MX51_PAD_CSI1_PIXCLK = 92,
- MX51_PAD_CSI1_MCLK = 93,
- MX51_PAD_CSI2_D12 = 94,
- MX51_PAD_CSI2_D13 = 95,
- MX51_PAD_CSI2_D14 = 96,
- MX51_PAD_CSI2_D15 = 97,
- MX51_PAD_CSI2_D16 = 98,
- MX51_PAD_CSI2_D17 = 99,
- MX51_PAD_CSI2_D18 = 100,
- MX51_PAD_CSI2_D19 = 101,
- MX51_PAD_CSI2_VSYNC = 102,
- MX51_PAD_CSI2_HSYNC = 103,
- MX51_PAD_CSI2_PIXCLK = 104,
- MX51_PAD_I2C1_CLK = 105,
- MX51_PAD_I2C1_DAT = 106,
- MX51_PAD_AUD3_BB_TXD = 107,
- MX51_PAD_AUD3_BB_RXD = 108,
- MX51_PAD_AUD3_BB_CK = 109,
- MX51_PAD_AUD3_BB_FS = 110,
- MX51_PAD_CSPI1_MOSI = 111,
- MX51_PAD_CSPI1_MISO = 112,
- MX51_PAD_CSPI1_SS0 = 113,
- MX51_PAD_CSPI1_SS1 = 114,
- MX51_PAD_CSPI1_RDY = 115,
- MX51_PAD_CSPI1_SCLK = 116,
- MX51_PAD_UART1_RXD = 117,
- MX51_PAD_UART1_TXD = 118,
- MX51_PAD_UART1_RTS = 119,
- MX51_PAD_UART1_CTS = 120,
- MX51_PAD_UART2_RXD = 121,
- MX51_PAD_UART2_TXD = 122,
- MX51_PAD_UART3_RXD = 123,
- MX51_PAD_UART3_TXD = 124,
- MX51_PAD_OWIRE_LINE = 125,
- MX51_PAD_KEY_ROW0 = 126,
- MX51_PAD_KEY_ROW1 = 127,
- MX51_PAD_KEY_ROW2 = 128,
- MX51_PAD_KEY_ROW3 = 129,
- MX51_PAD_KEY_COL0 = 130,
- MX51_PAD_KEY_COL1 = 131,
- MX51_PAD_KEY_COL2 = 132,
- MX51_PAD_KEY_COL3 = 133,
- MX51_PAD_KEY_COL4 = 134,
- MX51_PAD_KEY_COL5 = 135,
- MX51_PAD_USBH1_CLK = 136,
- MX51_PAD_USBH1_DIR = 137,
- MX51_PAD_USBH1_STP = 138,
- MX51_PAD_USBH1_NXT = 139,
- MX51_PAD_USBH1_DATA0 = 140,
- MX51_PAD_USBH1_DATA1 = 141,
- MX51_PAD_USBH1_DATA2 = 142,
- MX51_PAD_USBH1_DATA3 = 143,
- MX51_PAD_USBH1_DATA4 = 144,
- MX51_PAD_USBH1_DATA5 = 145,
- MX51_PAD_USBH1_DATA6 = 146,
- MX51_PAD_USBH1_DATA7 = 147,
- MX51_PAD_DI1_PIN11 = 148,
- MX51_PAD_DI1_PIN12 = 149,
- MX51_PAD_DI1_PIN13 = 150,
- MX51_PAD_DI1_D0_CS = 151,
- MX51_PAD_DI1_D1_CS = 152,
- MX51_PAD_DISPB2_SER_DIN = 153,
- MX51_PAD_DISPB2_SER_DIO = 154,
- MX51_PAD_DISPB2_SER_CLK = 155,
- MX51_PAD_DISPB2_SER_RS = 156,
- MX51_PAD_DISP1_DAT0 = 157,
- MX51_PAD_DISP1_DAT1 = 158,
- MX51_PAD_DISP1_DAT2 = 159,
- MX51_PAD_DISP1_DAT3 = 160,
- MX51_PAD_DISP1_DAT4 = 161,
- MX51_PAD_DISP1_DAT5 = 162,
- MX51_PAD_DISP1_DAT6 = 163,
- MX51_PAD_DISP1_DAT7 = 164,
- MX51_PAD_DISP1_DAT8 = 165,
- MX51_PAD_DISP1_DAT9 = 166,
- MX51_PAD_DISP1_DAT10 = 167,
- MX51_PAD_DISP1_DAT11 = 168,
- MX51_PAD_DISP1_DAT12 = 169,
- MX51_PAD_DISP1_DAT13 = 170,
- MX51_PAD_DISP1_DAT14 = 171,
- MX51_PAD_DISP1_DAT15 = 172,
- MX51_PAD_DISP1_DAT16 = 173,
- MX51_PAD_DISP1_DAT17 = 174,
- MX51_PAD_DISP1_DAT18 = 175,
- MX51_PAD_DISP1_DAT19 = 176,
- MX51_PAD_DISP1_DAT20 = 177,
- MX51_PAD_DISP1_DAT21 = 178,
- MX51_PAD_DISP1_DAT22 = 179,
- MX51_PAD_DISP1_DAT23 = 180,
- MX51_PAD_DI1_PIN3 = 181,
- MX51_PAD_DI1_PIN2 = 182,
- MX51_PAD_DI_GP2 = 183,
- MX51_PAD_DI_GP3 = 184,
- MX51_PAD_DI2_PIN4 = 185,
- MX51_PAD_DI2_PIN2 = 186,
- MX51_PAD_DI2_PIN3 = 187,
- MX51_PAD_DI2_DISP_CLK = 188,
- MX51_PAD_DI_GP4 = 189,
- MX51_PAD_DISP2_DAT0 = 190,
- MX51_PAD_DISP2_DAT1 = 191,
- MX51_PAD_DISP2_DAT2 = 192,
- MX51_PAD_DISP2_DAT3 = 193,
- MX51_PAD_DISP2_DAT4 = 194,
- MX51_PAD_DISP2_DAT5 = 195,
- MX51_PAD_DISP2_DAT6 = 196,
- MX51_PAD_DISP2_DAT7 = 197,
- MX51_PAD_DISP2_DAT8 = 198,
- MX51_PAD_DISP2_DAT9 = 199,
- MX51_PAD_DISP2_DAT10 = 200,
- MX51_PAD_DISP2_DAT11 = 201,
- MX51_PAD_DISP2_DAT12 = 202,
- MX51_PAD_DISP2_DAT13 = 203,
- MX51_PAD_DISP2_DAT14 = 204,
- MX51_PAD_DISP2_DAT15 = 205,
- MX51_PAD_SD1_CMD = 206,
- MX51_PAD_SD1_CLK = 207,
- MX51_PAD_SD1_DATA0 = 208,
- MX51_PAD_EIM_DA0 = 209,
- MX51_PAD_EIM_DA1 = 210,
- MX51_PAD_EIM_DA2 = 211,
- MX51_PAD_EIM_DA3 = 212,
- MX51_PAD_SD1_DATA1 = 213,
- MX51_PAD_EIM_DA4 = 214,
- MX51_PAD_EIM_DA5 = 215,
- MX51_PAD_EIM_DA6 = 216,
- MX51_PAD_EIM_DA7 = 217,
- MX51_PAD_SD1_DATA2 = 218,
- MX51_PAD_EIM_DA10 = 219,
- MX51_PAD_EIM_DA11 = 220,
- MX51_PAD_EIM_DA8 = 221,
- MX51_PAD_EIM_DA9 = 222,
- MX51_PAD_SD1_DATA3 = 223,
- MX51_PAD_GPIO1_0 = 224,
- MX51_PAD_GPIO1_1 = 225,
- MX51_PAD_EIM_DA12 = 226,
- MX51_PAD_EIM_DA13 = 227,
- MX51_PAD_EIM_DA14 = 228,
- MX51_PAD_EIM_DA15 = 229,
- MX51_PAD_SD2_CMD = 230,
- MX51_PAD_SD2_CLK = 231,
- MX51_PAD_SD2_DATA0 = 232,
- MX51_PAD_SD2_DATA1 = 233,
- MX51_PAD_SD2_DATA2 = 234,
- MX51_PAD_SD2_DATA3 = 235,
- MX51_PAD_GPIO1_2 = 236,
- MX51_PAD_GPIO1_3 = 237,
- MX51_PAD_PMIC_INT_REQ = 238,
- MX51_PAD_GPIO1_4 = 239,
- MX51_PAD_GPIO1_5 = 240,
- MX51_PAD_GPIO1_6 = 241,
- MX51_PAD_GPIO1_7 = 242,
- MX51_PAD_GPIO1_8 = 243,
- MX51_PAD_GPIO1_9 = 244,
-};
-
-/* imx51 register maps */
-static struct imx_pin_reg imx51_pin_regs[] = {
- IMX_PIN_REG(MX51_PAD_EIM_D16, 0x3f0, 0x05c, 5, 0x000, 0), /* MX51_PAD_EIM_D16__AUD4_RXFS */
- IMX_PIN_REG(MX51_PAD_EIM_D16, 0x3f0, 0x05c, 7, 0x8d8, 0), /* MX51_PAD_EIM_D16__AUD5_TXD */
- IMX_PIN_REG(MX51_PAD_EIM_D16, 0x3f0, 0x05c, 0, 0x000, 0), /* MX51_PAD_EIM_D16__EIM_D16 */
- IMX_PIN_REG(MX51_PAD_EIM_D16, 0x3f0, 0x05c, 1, 0x000, 0), /* MX51_PAD_EIM_D16__GPIO2_0 */
- IMX_PIN_REG(MX51_PAD_EIM_D16, 0x3f0, 0x05c, 4, 0x9b4, 0), /* MX51_PAD_EIM_D16__I2C1_SDA */
- IMX_PIN_REG(MX51_PAD_EIM_D16, 0x3f0, 0x05c, 3, 0x000, 0), /* MX51_PAD_EIM_D16__UART2_CTS */
- IMX_PIN_REG(MX51_PAD_EIM_D16, 0x3f0, 0x05c, 2, 0x000, 0), /* MX51_PAD_EIM_D16__USBH2_DATA0 */
- IMX_PIN_REG(MX51_PAD_EIM_D17, 0x3f4, 0x060, 7, 0x8d4, 0), /* MX51_PAD_EIM_D17__AUD5_RXD */
- IMX_PIN_REG(MX51_PAD_EIM_D17, 0x3f4, 0x060, 0, 0x000, 0), /* MX51_PAD_EIM_D17__EIM_D17 */
- IMX_PIN_REG(MX51_PAD_EIM_D17, 0x3f4, 0x060, 1, 0x000, 0), /* MX51_PAD_EIM_D17__GPIO2_1 */
- IMX_PIN_REG(MX51_PAD_EIM_D17, 0x3f4, 0x060, 3, 0x9ec, 0), /* MX51_PAD_EIM_D17__UART2_RXD */
- IMX_PIN_REG(MX51_PAD_EIM_D17, 0x3f4, 0x060, 4, 0x000, 0), /* MX51_PAD_EIM_D17__UART3_CTS */
- IMX_PIN_REG(MX51_PAD_EIM_D17, 0x3f4, 0x060, 2, 0x000, 0), /* MX51_PAD_EIM_D17__USBH2_DATA1 */
- IMX_PIN_REG(MX51_PAD_EIM_D18, 0x3f8, 0x064, 7, 0x8e4, 0), /* MX51_PAD_EIM_D18__AUD5_TXC */
- IMX_PIN_REG(MX51_PAD_EIM_D18, 0x3f8, 0x064, 0, 0x000, 0), /* MX51_PAD_EIM_D18__EIM_D18 */
- IMX_PIN_REG(MX51_PAD_EIM_D18, 0x3f8, 0x064, 1, 0x000, 0), /* MX51_PAD_EIM_D18__GPIO2_2 */
- IMX_PIN_REG(MX51_PAD_EIM_D18, 0x3f8, 0x064, 3, 0x000, 0), /* MX51_PAD_EIM_D18__UART2_TXD */
- IMX_PIN_REG(MX51_PAD_EIM_D18, 0x3f8, 0x064, 4, 0x9f0, 1), /* MX51_PAD_EIM_D18__UART3_RTS */
- IMX_PIN_REG(MX51_PAD_EIM_D18, 0x3f8, 0x064, 2, 0x000, 0), /* MX51_PAD_EIM_D18__USBH2_DATA2 */
- IMX_PIN_REG(MX51_PAD_EIM_D19, 0x3fc, 0x068, 5, 0x000, 0), /* MX51_PAD_EIM_D19__AUD4_RXC */
- IMX_PIN_REG(MX51_PAD_EIM_D19, 0x3fc, 0x068, 7, 0x8e8, 0), /* MX51_PAD_EIM_D19__AUD5_TXFS */
- IMX_PIN_REG(MX51_PAD_EIM_D19, 0x3fc, 0x068, 0, 0x000, 0), /* MX51_PAD_EIM_D19__EIM_D19 */
- IMX_PIN_REG(MX51_PAD_EIM_D19, 0x3fc, 0x068, 1, 0x000, 0), /* MX51_PAD_EIM_D19__GPIO2_3 */
- IMX_PIN_REG(MX51_PAD_EIM_D19, 0x3fc, 0x068, 4, 0x9b0, 0), /* MX51_PAD_EIM_D19__I2C1_SCL */
- IMX_PIN_REG(MX51_PAD_EIM_D19, 0x3fc, 0x068, 3, 0x9e8, 1), /* MX51_PAD_EIM_D19__UART2_RTS */
- IMX_PIN_REG(MX51_PAD_EIM_D19, 0x3fc, 0x068, 2, 0x000, 0), /* MX51_PAD_EIM_D19__USBH2_DATA3 */
- IMX_PIN_REG(MX51_PAD_EIM_D20, 0x400, 0x06c, 5, 0x8c8, 0), /* MX51_PAD_EIM_D20__AUD4_TXD */
- IMX_PIN_REG(MX51_PAD_EIM_D20, 0x400, 0x06c, 0, 0x000, 0), /* MX51_PAD_EIM_D20__EIM_D20 */
- IMX_PIN_REG(MX51_PAD_EIM_D20, 0x400, 0x06c, 1, 0x000, 0), /* MX51_PAD_EIM_D20__GPIO2_4 */
- IMX_PIN_REG(MX51_PAD_EIM_D20, 0x400, 0x06c, 4, 0x000, 0), /* MX51_PAD_EIM_D20__SRTC_ALARM_DEB */
- IMX_PIN_REG(MX51_PAD_EIM_D20, 0x400, 0x06c, 2, 0x000, 0), /* MX51_PAD_EIM_D20__USBH2_DATA4 */
- IMX_PIN_REG(MX51_PAD_EIM_D21, 0x404, 0x070, 5, 0x8c4, 0), /* MX51_PAD_EIM_D21__AUD4_RXD */
- IMX_PIN_REG(MX51_PAD_EIM_D21, 0x404, 0x070, 0, 0x000, 0), /* MX51_PAD_EIM_D21__EIM_D21 */
- IMX_PIN_REG(MX51_PAD_EIM_D21, 0x404, 0x070, 1, 0x000, 0), /* MX51_PAD_EIM_D21__GPIO2_5 */
- IMX_PIN_REG(MX51_PAD_EIM_D21, 0x404, 0x070, 3, 0x000, 0), /* MX51_PAD_EIM_D21__SRTC_ALARM_DEB */
- IMX_PIN_REG(MX51_PAD_EIM_D21, 0x404, 0x070, 2, 0x000, 0), /* MX51_PAD_EIM_D21__USBH2_DATA5 */
- IMX_PIN_REG(MX51_PAD_EIM_D22, 0x408, 0x074, 5, 0x8cc, 0), /* MX51_PAD_EIM_D22__AUD4_TXC */
- IMX_PIN_REG(MX51_PAD_EIM_D22, 0x408, 0x074, 0, 0x000, 0), /* MX51_PAD_EIM_D22__EIM_D22 */
- IMX_PIN_REG(MX51_PAD_EIM_D22, 0x408, 0x074, 1, 0x000, 0), /* MX51_PAD_EIM_D22__GPIO2_6 */
- IMX_PIN_REG(MX51_PAD_EIM_D22, 0x408, 0x074, 2, 0x000, 0), /* MX51_PAD_EIM_D22__USBH2_DATA6 */
- IMX_PIN_REG(MX51_PAD_EIM_D23, 0x40c, 0x078, 5, 0x8d0, 0), /* MX51_PAD_EIM_D23__AUD4_TXFS */
- IMX_PIN_REG(MX51_PAD_EIM_D23, 0x40c, 0x078, 0, 0x000, 0), /* MX51_PAD_EIM_D23__EIM_D23 */
- IMX_PIN_REG(MX51_PAD_EIM_D23, 0x40c, 0x078, 1, 0x000, 0), /* MX51_PAD_EIM_D23__GPIO2_7 */
- IMX_PIN_REG(MX51_PAD_EIM_D23, 0x40c, 0x078, 4, 0x000, 0), /* MX51_PAD_EIM_D23__SPDIF_OUT1 */
- IMX_PIN_REG(MX51_PAD_EIM_D23, 0x40c, 0x078, 2, 0x000, 0), /* MX51_PAD_EIM_D23__USBH2_DATA7 */
- IMX_PIN_REG(MX51_PAD_EIM_D24, 0x410, 0x07c, 5, 0x8f8, 0), /* MX51_PAD_EIM_D24__AUD6_RXFS */
- IMX_PIN_REG(MX51_PAD_EIM_D24, 0x410, 0x07c, 0, 0x000, 0), /* MX51_PAD_EIM_D24__EIM_D24 */
- IMX_PIN_REG(MX51_PAD_EIM_D24, 0x410, 0x07c, 1, 0x000, 0), /* MX51_PAD_EIM_D24__GPIO2_8 */
- IMX_PIN_REG(MX51_PAD_EIM_D24, 0x410, 0x07c, 4, 0x9bc, 0), /* MX51_PAD_EIM_D24__I2C2_SDA */
- IMX_PIN_REG(MX51_PAD_EIM_D24, 0x410, 0x07c, 3, 0x000, 0), /* MX51_PAD_EIM_D24__UART3_CTS */
- IMX_PIN_REG(MX51_PAD_EIM_D24, 0x410, 0x07c, 2, 0x000, 0), /* MX51_PAD_EIM_D24__USBOTG_DATA0 */
- IMX_PIN_REG(MX51_PAD_EIM_D25, 0x414, 0x080, 0, 0x000, 0), /* MX51_PAD_EIM_D25__EIM_D25 */
- IMX_PIN_REG(MX51_PAD_EIM_D25, 0x414, 0x080, 1, 0x9c8, 0), /* MX51_PAD_EIM_D25__KEY_COL6 */
- IMX_PIN_REG(MX51_PAD_EIM_D25, 0x414, 0x080, 4, 0x000, 0), /* MX51_PAD_EIM_D25__UART2_CTS */
- IMX_PIN_REG(MX51_PAD_EIM_D25, 0x414, 0x080, 3, 0x9f4, 0), /* MX51_PAD_EIM_D25__UART3_RXD */
- IMX_PIN_REG(MX51_PAD_EIM_D25, 0x414, 0x080, 2, 0x000, 0), /* MX51_PAD_EIM_D25__USBOTG_DATA1 */
- IMX_PIN_REG(MX51_PAD_EIM_D26, 0x418, 0x084, 0, 0x000, 0), /* MX51_PAD_EIM_D26__EIM_D26 */
- IMX_PIN_REG(MX51_PAD_EIM_D26, 0x418, 0x084, 1, 0x9cc, 0), /* MX51_PAD_EIM_D26__KEY_COL7 */
- IMX_PIN_REG(MX51_PAD_EIM_D26, 0x418, 0x084, 4, 0x9e8, 3), /* MX51_PAD_EIM_D26__UART2_RTS */
- IMX_PIN_REG(MX51_PAD_EIM_D26, 0x418, 0x084, 3, 0x000, 0), /* MX51_PAD_EIM_D26__UART3_TXD */
- IMX_PIN_REG(MX51_PAD_EIM_D26, 0x418, 0x084, 2, 0x000, 0), /* MX51_PAD_EIM_D26__USBOTG_DATA2 */
- IMX_PIN_REG(MX51_PAD_EIM_D27, 0x41c, 0x088, 5, 0x8f4, 0), /* MX51_PAD_EIM_D27__AUD6_RXC */
- IMX_PIN_REG(MX51_PAD_EIM_D27, 0x41c, 0x088, 0, 0x000, 0), /* MX51_PAD_EIM_D27__EIM_D27 */
- IMX_PIN_REG(MX51_PAD_EIM_D27, 0x41c, 0x088, 1, 0x000, 0), /* MX51_PAD_EIM_D27__GPIO2_9 */
- IMX_PIN_REG(MX51_PAD_EIM_D27, 0x41c, 0x088, 4, 0x9b8, 0), /* MX51_PAD_EIM_D27__I2C2_SCL */
- IMX_PIN_REG(MX51_PAD_EIM_D27, 0x41c, 0x088, 3, 0x9f0, 3), /* MX51_PAD_EIM_D27__UART3_RTS */
- IMX_PIN_REG(MX51_PAD_EIM_D27, 0x41c, 0x088, 2, 0x000, 0), /* MX51_PAD_EIM_D27__USBOTG_DATA3 */
- IMX_PIN_REG(MX51_PAD_EIM_D28, 0x420, 0x08c, 5, 0x8f0, 0), /* MX51_PAD_EIM_D28__AUD6_TXD */
- IMX_PIN_REG(MX51_PAD_EIM_D28, 0x420, 0x08c, 0, 0x000, 0), /* MX51_PAD_EIM_D28__EIM_D28 */
- IMX_PIN_REG(MX51_PAD_EIM_D28, 0x420, 0x08c, 1, 0x9d0, 0), /* MX51_PAD_EIM_D28__KEY_ROW4 */
- IMX_PIN_REG(MX51_PAD_EIM_D28, 0x420, 0x08c, 2, 0x000, 0), /* MX51_PAD_EIM_D28__USBOTG_DATA4 */
- IMX_PIN_REG(MX51_PAD_EIM_D29, 0x424, 0x090, 5, 0x8ec, 0), /* MX51_PAD_EIM_D29__AUD6_RXD */
- IMX_PIN_REG(MX51_PAD_EIM_D29, 0x424, 0x090, 0, 0x000, 0), /* MX51_PAD_EIM_D29__EIM_D29 */
- IMX_PIN_REG(MX51_PAD_EIM_D29, 0x424, 0x090, 1, 0x9d4, 0), /* MX51_PAD_EIM_D29__KEY_ROW5 */
- IMX_PIN_REG(MX51_PAD_EIM_D29, 0x424, 0x090, 2, 0x000, 0), /* MX51_PAD_EIM_D29__USBOTG_DATA5 */
- IMX_PIN_REG(MX51_PAD_EIM_D30, 0x428, 0x094, 5, 0x8fc, 0), /* MX51_PAD_EIM_D30__AUD6_TXC */
- IMX_PIN_REG(MX51_PAD_EIM_D30, 0x428, 0x094, 0, 0x000, 0), /* MX51_PAD_EIM_D30__EIM_D30 */
- IMX_PIN_REG(MX51_PAD_EIM_D30, 0x428, 0x094, 1, 0x9d8, 0), /* MX51_PAD_EIM_D30__KEY_ROW6 */
- IMX_PIN_REG(MX51_PAD_EIM_D30, 0x428, 0x094, 2, 0x000, 0), /* MX51_PAD_EIM_D30__USBOTG_DATA6 */
- IMX_PIN_REG(MX51_PAD_EIM_D31, 0x42c, 0x098, 5, 0x900, 0), /* MX51_PAD_EIM_D31__AUD6_TXFS */
- IMX_PIN_REG(MX51_PAD_EIM_D31, 0x42c, 0x098, 0, 0x000, 0), /* MX51_PAD_EIM_D31__EIM_D31 */
- IMX_PIN_REG(MX51_PAD_EIM_D31, 0x42c, 0x098, 1, 0x9dc, 0), /* MX51_PAD_EIM_D31__KEY_ROW7 */
- IMX_PIN_REG(MX51_PAD_EIM_D31, 0x42c, 0x098, 2, 0x000, 0), /* MX51_PAD_EIM_D31__USBOTG_DATA7 */
- IMX_PIN_REG(MX51_PAD_EIM_A16, 0x430, 0x09c, 0, 0x000, 0), /* MX51_PAD_EIM_A16__EIM_A16 */
- IMX_PIN_REG(MX51_PAD_EIM_A16, 0x430, 0x09c, 1, 0x000, 0), /* MX51_PAD_EIM_A16__GPIO2_10 */
- IMX_PIN_REG(MX51_PAD_EIM_A16, 0x430, 0x09c, 7, 0x000, 0), /* MX51_PAD_EIM_A16__OSC_FREQ_SEL0 */
- IMX_PIN_REG(MX51_PAD_EIM_A17, 0x434, 0x0a0, 0, 0x000, 0), /* MX51_PAD_EIM_A17__EIM_A17 */
- IMX_PIN_REG(MX51_PAD_EIM_A17, 0x434, 0x0a0, 1, 0x000, 0), /* MX51_PAD_EIM_A17__GPIO2_11 */
- IMX_PIN_REG(MX51_PAD_EIM_A17, 0x434, 0x0a0, 7, 0x000, 0), /* MX51_PAD_EIM_A17__OSC_FREQ_SEL1 */
- IMX_PIN_REG(MX51_PAD_EIM_A18, 0x438, 0x0a4, 7, 0x000, 0), /* MX51_PAD_EIM_A18__BOOT_LPB0 */
- IMX_PIN_REG(MX51_PAD_EIM_A18, 0x438, 0x0a4, 0, 0x000, 0), /* MX51_PAD_EIM_A18__EIM_A18 */
- IMX_PIN_REG(MX51_PAD_EIM_A18, 0x438, 0x0a4, 1, 0x000, 0), /* MX51_PAD_EIM_A18__GPIO2_12 */
- IMX_PIN_REG(MX51_PAD_EIM_A19, 0x43c, 0x0a8, 7, 0x000, 0), /* MX51_PAD_EIM_A19__BOOT_LPB1 */
- IMX_PIN_REG(MX51_PAD_EIM_A19, 0x43c, 0x0a8, 0, 0x000, 0), /* MX51_PAD_EIM_A19__EIM_A19 */
- IMX_PIN_REG(MX51_PAD_EIM_A19, 0x43c, 0x0a8, 1, 0x000, 0), /* MX51_PAD_EIM_A19__GPIO2_13 */
- IMX_PIN_REG(MX51_PAD_EIM_A20, 0x440, 0x0ac, 7, 0x000, 0), /* MX51_PAD_EIM_A20__BOOT_UART_SRC0 */
- IMX_PIN_REG(MX51_PAD_EIM_A20, 0x440, 0x0ac, 0, 0x000, 0), /* MX51_PAD_EIM_A20__EIM_A20 */
- IMX_PIN_REG(MX51_PAD_EIM_A20, 0x440, 0x0ac, 1, 0x000, 0), /* MX51_PAD_EIM_A20__GPIO2_14 */
- IMX_PIN_REG(MX51_PAD_EIM_A21, 0x444, 0x0b0, 7, 0x000, 0), /* MX51_PAD_EIM_A21__BOOT_UART_SRC1 */
- IMX_PIN_REG(MX51_PAD_EIM_A21, 0x444, 0x0b0, 0, 0x000, 0), /* MX51_PAD_EIM_A21__EIM_A21 */
- IMX_PIN_REG(MX51_PAD_EIM_A21, 0x444, 0x0b0, 1, 0x000, 0), /* MX51_PAD_EIM_A21__GPIO2_15 */
- IMX_PIN_REG(MX51_PAD_EIM_A22, 0x448, 0x0b4, 0, 0x000, 0), /* MX51_PAD_EIM_A22__EIM_A22 */
- IMX_PIN_REG(MX51_PAD_EIM_A22, 0x448, 0x0b4, 1, 0x000, 0), /* MX51_PAD_EIM_A22__GPIO2_16 */
- IMX_PIN_REG(MX51_PAD_EIM_A23, 0x44c, 0x0b8, 7, 0x000, 0), /* MX51_PAD_EIM_A23__BOOT_HPN_EN */
- IMX_PIN_REG(MX51_PAD_EIM_A23, 0x44c, 0x0b8, 0, 0x000, 0), /* MX51_PAD_EIM_A23__EIM_A23 */
- IMX_PIN_REG(MX51_PAD_EIM_A23, 0x44c, 0x0b8, 1, 0x000, 0), /* MX51_PAD_EIM_A23__GPIO2_17 */
- IMX_PIN_REG(MX51_PAD_EIM_A24, 0x450, 0x0bc, 0, 0x000, 0), /* MX51_PAD_EIM_A24__EIM_A24 */
- IMX_PIN_REG(MX51_PAD_EIM_A24, 0x450, 0x0bc, 1, 0x000, 0), /* MX51_PAD_EIM_A24__GPIO2_18 */
- IMX_PIN_REG(MX51_PAD_EIM_A24, 0x450, 0x0bc, 2, 0x000, 0), /* MX51_PAD_EIM_A24__USBH2_CLK */
- IMX_PIN_REG(MX51_PAD_EIM_A25, 0x454, 0x0c0, 6, 0x000, 0), /* MX51_PAD_EIM_A25__DISP1_PIN4 */
- IMX_PIN_REG(MX51_PAD_EIM_A25, 0x454, 0x0c0, 0, 0x000, 0), /* MX51_PAD_EIM_A25__EIM_A25 */
- IMX_PIN_REG(MX51_PAD_EIM_A25, 0x454, 0x0c0, 1, 0x000, 0), /* MX51_PAD_EIM_A25__GPIO2_19 */
- IMX_PIN_REG(MX51_PAD_EIM_A25, 0x454, 0x0c0, 2, 0x000, 0), /* MX51_PAD_EIM_A25__USBH2_DIR */
- IMX_PIN_REG(MX51_PAD_EIM_A26, 0x458, 0x0c4, 5, 0x9a0, 0), /* MX51_PAD_EIM_A26__CSI1_DATA_EN */
- IMX_PIN_REG(MX51_PAD_EIM_A26, 0x458, 0x0c4, 6, 0x908, 0), /* MX51_PAD_EIM_A26__DISP2_EXT_CLK */
- IMX_PIN_REG(MX51_PAD_EIM_A26, 0x458, 0x0c4, 0, 0x000, 0), /* MX51_PAD_EIM_A26__EIM_A26 */
- IMX_PIN_REG(MX51_PAD_EIM_A26, 0x458, 0x0c4, 1, 0x000, 0), /* MX51_PAD_EIM_A26__GPIO2_20 */
- IMX_PIN_REG(MX51_PAD_EIM_A26, 0x458, 0x0c4, 2, 0x000, 0), /* MX51_PAD_EIM_A26__USBH2_STP */
- IMX_PIN_REG(MX51_PAD_EIM_A27, 0x45c, 0x0c8, 5, 0x99c, 0), /* MX51_PAD_EIM_A27__CSI2_DATA_EN */
- IMX_PIN_REG(MX51_PAD_EIM_A27, 0x45c, 0x0c8, 6, 0x9a4, 0), /* MX51_PAD_EIM_A27__DISP1_PIN1 */
- IMX_PIN_REG(MX51_PAD_EIM_A27, 0x45c, 0x0c8, 0, 0x000, 0), /* MX51_PAD_EIM_A27__EIM_A27 */
- IMX_PIN_REG(MX51_PAD_EIM_A27, 0x45c, 0x0c8, 1, 0x000, 0), /* MX51_PAD_EIM_A27__GPIO2_21 */
- IMX_PIN_REG(MX51_PAD_EIM_A27, 0x45c, 0x0c8, 2, 0x000, 0), /* MX51_PAD_EIM_A27__USBH2_NXT */
- IMX_PIN_REG(MX51_PAD_EIM_EB0, 0x460, 0x0cc, 0, 0x000, 0), /* MX51_PAD_EIM_EB0__EIM_EB0 */
- IMX_PIN_REG(MX51_PAD_EIM_EB1, 0x464, 0x0d0, 0, 0x000, 0), /* MX51_PAD_EIM_EB1__EIM_EB1 */
- IMX_PIN_REG(MX51_PAD_EIM_EB2, 0x468, 0x0d4, 6, 0x8e0, 0), /* MX51_PAD_EIM_EB2__AUD5_RXFS */
- IMX_PIN_REG(MX51_PAD_EIM_EB2, 0x468, 0x0d4, 5, 0x000, 0), /* MX51_PAD_EIM_EB2__CSI1_D2 */
- IMX_PIN_REG(MX51_PAD_EIM_EB2, 0x468, 0x0d4, 0, 0x000, 0), /* MX51_PAD_EIM_EB2__EIM_EB2 */
- IMX_PIN_REG(MX51_PAD_EIM_EB2, 0x468, 0x0d4, 3, 0x954, 0), /* MX51_PAD_EIM_EB2__FEC_MDIO */
- IMX_PIN_REG(MX51_PAD_EIM_EB2, 0x468, 0x0d4, 1, 0x000, 0), /* MX51_PAD_EIM_EB2__GPIO2_22 */
- IMX_PIN_REG(MX51_PAD_EIM_EB2, 0x468, 0x0d4, 7, 0x000, 0), /* MX51_PAD_EIM_EB2__GPT_CMPOUT1 */
- IMX_PIN_REG(MX51_PAD_EIM_EB3, 0x46c, 0x0d8, 6, 0x8dc, 0), /* MX51_PAD_EIM_EB3__AUD5_RXC */
- IMX_PIN_REG(MX51_PAD_EIM_EB3, 0x46c, 0x0d8, 5, 0x000, 0), /* MX51_PAD_EIM_EB3__CSI1_D3 */
- IMX_PIN_REG(MX51_PAD_EIM_EB3, 0x46c, 0x0d8, 0, 0x000, 0), /* MX51_PAD_EIM_EB3__EIM_EB3 */
- IMX_PIN_REG(MX51_PAD_EIM_EB3, 0x46c, 0x0d8, 3, 0x95c, 0), /* MX51_PAD_EIM_EB3__FEC_RDATA1 */
- IMX_PIN_REG(MX51_PAD_EIM_EB3, 0x46c, 0x0d8, 1, 0x000, 0), /* MX51_PAD_EIM_EB3__GPIO2_23 */
- IMX_PIN_REG(MX51_PAD_EIM_EB3, 0x46c, 0x0d8, 7, 0x000, 0), /* MX51_PAD_EIM_EB3__GPT_CMPOUT2 */
- IMX_PIN_REG(MX51_PAD_EIM_OE, 0x470, 0x0dc, 0, 0x000, 0), /* MX51_PAD_EIM_OE__EIM_OE */
- IMX_PIN_REG(MX51_PAD_EIM_OE, 0x470, 0x0dc, 1, 0x000, 0), /* MX51_PAD_EIM_OE__GPIO2_24 */
- IMX_PIN_REG(MX51_PAD_EIM_CS0, 0x474, 0x0e0, 0, 0x000, 0), /* MX51_PAD_EIM_CS0__EIM_CS0 */
- IMX_PIN_REG(MX51_PAD_EIM_CS0, 0x474, 0x0e0, 1, 0x000, 0), /* MX51_PAD_EIM_CS0__GPIO2_25 */
- IMX_PIN_REG(MX51_PAD_EIM_CS1, 0x478, 0x0e4, 0, 0x000, 0), /* MX51_PAD_EIM_CS1__EIM_CS1 */
- IMX_PIN_REG(MX51_PAD_EIM_CS1, 0x478, 0x0e4, 1, 0x000, 0), /* MX51_PAD_EIM_CS1__GPIO2_26 */
- IMX_PIN_REG(MX51_PAD_EIM_CS2, 0x47c, 0x0e8, 6, 0x8d8, 1), /* MX51_PAD_EIM_CS2__AUD5_TXD */
- IMX_PIN_REG(MX51_PAD_EIM_CS2, 0x47c, 0x0e8, 5, 0x000, 0), /* MX51_PAD_EIM_CS2__CSI1_D4 */
- IMX_PIN_REG(MX51_PAD_EIM_CS2, 0x47c, 0x0e8, 0, 0x000, 0), /* MX51_PAD_EIM_CS2__EIM_CS2 */
- IMX_PIN_REG(MX51_PAD_EIM_CS2, 0x47c, 0x0e8, 3, 0x960, 0), /* MX51_PAD_EIM_CS2__FEC_RDATA2 */
- IMX_PIN_REG(MX51_PAD_EIM_CS2, 0x47c, 0x0e8, 1, 0x000, 0), /* MX51_PAD_EIM_CS2__GPIO2_27 */
- IMX_PIN_REG(MX51_PAD_EIM_CS2, 0x47c, 0x0e8, 2, 0x000, 0), /* MX51_PAD_EIM_CS2__USBOTG_STP */
- IMX_PIN_REG(MX51_PAD_EIM_CS3, 0x480, 0x0ec, 6, 0x8d4, 1), /* MX51_PAD_EIM_CS3__AUD5_RXD */
- IMX_PIN_REG(MX51_PAD_EIM_CS3, 0x480, 0x0ec, 5, 0x000, 0), /* MX51_PAD_EIM_CS3__CSI1_D5 */
- IMX_PIN_REG(MX51_PAD_EIM_CS3, 0x480, 0x0ec, 0, 0x000, 0), /* MX51_PAD_EIM_CS3__EIM_CS3 */
- IMX_PIN_REG(MX51_PAD_EIM_CS3, 0x480, 0x0ec, 3, 0x964, 0), /* MX51_PAD_EIM_CS3__FEC_RDATA3 */
- IMX_PIN_REG(MX51_PAD_EIM_CS3, 0x480, 0x0ec, 1, 0x000, 0), /* MX51_PAD_EIM_CS3__GPIO2_28 */
- IMX_PIN_REG(MX51_PAD_EIM_CS3, 0x480, 0x0ec, 2, 0x000, 0), /* MX51_PAD_EIM_CS3__USBOTG_NXT */
- IMX_PIN_REG(MX51_PAD_EIM_CS4, 0x484, 0x0f0, 6, 0x8e4, 1), /* MX51_PAD_EIM_CS4__AUD5_TXC */
- IMX_PIN_REG(MX51_PAD_EIM_CS4, 0x484, 0x0f0, 5, 0x000, 0), /* MX51_PAD_EIM_CS4__CSI1_D6 */
- IMX_PIN_REG(MX51_PAD_EIM_CS4, 0x484, 0x0f0, 0, 0x000, 0), /* MX51_PAD_EIM_CS4__EIM_CS4 */
- IMX_PIN_REG(MX51_PAD_EIM_CS4, 0x484, 0x0f0, 3, 0x970, 0), /* MX51_PAD_EIM_CS4__FEC_RX_ER */
- IMX_PIN_REG(MX51_PAD_EIM_CS4, 0x484, 0x0f0, 1, 0x000, 0), /* MX51_PAD_EIM_CS4__GPIO2_29 */
- IMX_PIN_REG(MX51_PAD_EIM_CS4, 0x484, 0x0f0, 2, 0x000, 0), /* MX51_PAD_EIM_CS4__USBOTG_CLK */
- IMX_PIN_REG(MX51_PAD_EIM_CS5, 0x488, 0x0f4, 6, 0x8e8, 1), /* MX51_PAD_EIM_CS5__AUD5_TXFS */
- IMX_PIN_REG(MX51_PAD_EIM_CS5, 0x488, 0x0f4, 5, 0x000, 0), /* MX51_PAD_EIM_CS5__CSI1_D7 */
- IMX_PIN_REG(MX51_PAD_EIM_CS5, 0x488, 0x0f4, 4, 0x904, 0), /* MX51_PAD_EIM_CS5__DISP1_EXT_CLK */
- IMX_PIN_REG(MX51_PAD_EIM_CS5, 0x488, 0x0f4, 0, 0x000, 0), /* MX51_PAD_EIM_CS5__EIM_CS5 */
- IMX_PIN_REG(MX51_PAD_EIM_CS5, 0x488, 0x0f4, 3, 0x950, 0), /* MX51_PAD_EIM_CS5__FEC_CRS */
- IMX_PIN_REG(MX51_PAD_EIM_CS5, 0x488, 0x0f4, 1, 0x000, 0), /* MX51_PAD_EIM_CS5__GPIO2_30 */
- IMX_PIN_REG(MX51_PAD_EIM_CS5, 0x488, 0x0f4, 2, 0x000, 0), /* MX51_PAD_EIM_CS5__USBOTG_DIR */
- IMX_PIN_REG(MX51_PAD_EIM_DTACK, 0x48c, 0x0f8, 0, 0x000, 0), /* MX51_PAD_EIM_DTACK__EIM_DTACK */
- IMX_PIN_REG(MX51_PAD_EIM_DTACK, 0x48c, 0x0f8, 1, 0x000, 0), /* MX51_PAD_EIM_DTACK__GPIO2_31 */
- IMX_PIN_REG(MX51_PAD_EIM_LBA, 0x494, 0x0fc, 0, 0x000, 0), /* MX51_PAD_EIM_LBA__EIM_LBA */
- IMX_PIN_REG(MX51_PAD_EIM_LBA, 0x494, 0x0fc, 1, 0x978, 0), /* MX51_PAD_EIM_LBA__GPIO3_1 */
- IMX_PIN_REG(MX51_PAD_EIM_CRE, 0x4a0, 0x100, 0, 0x000, 0), /* MX51_PAD_EIM_CRE__EIM_CRE */
- IMX_PIN_REG(MX51_PAD_EIM_CRE, 0x4a0, 0x100, 1, 0x97c, 0), /* MX51_PAD_EIM_CRE__GPIO3_2 */
- IMX_PIN_REG(MX51_PAD_DRAM_CS1, 0x4d0, 0x104, 0, 0x000, 0), /* MX51_PAD_DRAM_CS1__DRAM_CS1 */
- IMX_PIN_REG(MX51_PAD_NANDF_WE_B, 0x4e4, 0x108, 3, 0x980, 0), /* MX51_PAD_NANDF_WE_B__GPIO3_3 */
- IMX_PIN_REG(MX51_PAD_NANDF_WE_B, 0x4e4, 0x108, 0, 0x000, 0), /* MX51_PAD_NANDF_WE_B__NANDF_WE_B */
- IMX_PIN_REG(MX51_PAD_NANDF_WE_B, 0x4e4, 0x108, 1, 0x000, 0), /* MX51_PAD_NANDF_WE_B__PATA_DIOW */
- IMX_PIN_REG(MX51_PAD_NANDF_WE_B, 0x4e4, 0x108, 2, 0x93c, 0), /* MX51_PAD_NANDF_WE_B__SD3_DATA0 */
- IMX_PIN_REG(MX51_PAD_NANDF_RE_B, 0x4e8, 0x10c, 3, 0x984, 0), /* MX51_PAD_NANDF_RE_B__GPIO3_4 */
- IMX_PIN_REG(MX51_PAD_NANDF_RE_B, 0x4e8, 0x10c, 0, 0x000, 0), /* MX51_PAD_NANDF_RE_B__NANDF_RE_B */
- IMX_PIN_REG(MX51_PAD_NANDF_RE_B, 0x4e8, 0x10c, 1, 0x000, 0), /* MX51_PAD_NANDF_RE_B__PATA_DIOR */
- IMX_PIN_REG(MX51_PAD_NANDF_RE_B, 0x4e8, 0x10c, 2, 0x940, 0), /* MX51_PAD_NANDF_RE_B__SD3_DATA1 */
- IMX_PIN_REG(MX51_PAD_NANDF_ALE, 0x4ec, 0x110, 3, 0x988, 0), /* MX51_PAD_NANDF_ALE__GPIO3_5 */
- IMX_PIN_REG(MX51_PAD_NANDF_ALE, 0x4ec, 0x110, 0, 0x000, 0), /* MX51_PAD_NANDF_ALE__NANDF_ALE */
- IMX_PIN_REG(MX51_PAD_NANDF_ALE, 0x4ec, 0x110, 1, 0x000, 0), /* MX51_PAD_NANDF_ALE__PATA_BUFFER_EN */
- IMX_PIN_REG(MX51_PAD_NANDF_CLE, 0x4f0, 0x114, 3, 0x98c, 0), /* MX51_PAD_NANDF_CLE__GPIO3_6 */
- IMX_PIN_REG(MX51_PAD_NANDF_CLE, 0x4f0, 0x114, 0, 0x000, 0), /* MX51_PAD_NANDF_CLE__NANDF_CLE */
- IMX_PIN_REG(MX51_PAD_NANDF_CLE, 0x4f0, 0x114, 1, 0x000, 0), /* MX51_PAD_NANDF_CLE__PATA_RESET_B */
- IMX_PIN_REG(MX51_PAD_NANDF_WP_B, 0x4f4, 0x118, 3, 0x990, 0), /* MX51_PAD_NANDF_WP_B__GPIO3_7 */
- IMX_PIN_REG(MX51_PAD_NANDF_WP_B, 0x4f4, 0x118, 0, 0x000, 0), /* MX51_PAD_NANDF_WP_B__NANDF_WP_B */
- IMX_PIN_REG(MX51_PAD_NANDF_WP_B, 0x4f4, 0x118, 1, 0x000, 0), /* MX51_PAD_NANDF_WP_B__PATA_DMACK */
- IMX_PIN_REG(MX51_PAD_NANDF_WP_B, 0x4f4, 0x118, 2, 0x944, 0), /* MX51_PAD_NANDF_WP_B__SD3_DATA2 */
- IMX_PIN_REG(MX51_PAD_NANDF_RB0, 0x4f8, 0x11c, 5, 0x930, 0), /* MX51_PAD_NANDF_RB0__ECSPI2_SS1 */
- IMX_PIN_REG(MX51_PAD_NANDF_RB0, 0x4f8, 0x11c, 3, 0x994, 0), /* MX51_PAD_NANDF_RB0__GPIO3_8 */
- IMX_PIN_REG(MX51_PAD_NANDF_RB0, 0x4f8, 0x11c, 0, 0x000, 0), /* MX51_PAD_NANDF_RB0__NANDF_RB0 */
- IMX_PIN_REG(MX51_PAD_NANDF_RB0, 0x4f8, 0x11c, 1, 0x000, 0), /* MX51_PAD_NANDF_RB0__PATA_DMARQ */
- IMX_PIN_REG(MX51_PAD_NANDF_RB0, 0x4f8, 0x11c, 2, 0x948, 0), /* MX51_PAD_NANDF_RB0__SD3_DATA3 */
- IMX_PIN_REG(MX51_PAD_NANDF_RB1, 0x4fc, 0x120, 6, 0x91c, 0), /* MX51_PAD_NANDF_RB1__CSPI_MOSI */
- IMX_PIN_REG(MX51_PAD_NANDF_RB1, 0x4fc, 0x120, 2, 0x000, 0), /* MX51_PAD_NANDF_RB1__ECSPI2_RDY */
- IMX_PIN_REG(MX51_PAD_NANDF_RB1, 0x4fc, 0x120, 3, 0x000, 0), /* MX51_PAD_NANDF_RB1__GPIO3_9 */
- IMX_PIN_REG(MX51_PAD_NANDF_RB1, 0x4fc, 0x120, 0, 0x000, 0), /* MX51_PAD_NANDF_RB1__NANDF_RB1 */
- IMX_PIN_REG(MX51_PAD_NANDF_RB1, 0x4fc, 0x120, 1, 0x000, 0), /* MX51_PAD_NANDF_RB1__PATA_IORDY */
- IMX_PIN_REG(MX51_PAD_NANDF_RB1, 0x4fc, 0x120, 5, 0x000, 0), /* MX51_PAD_NANDF_RB1__SD4_CMD */
- IMX_PIN_REG(MX51_PAD_NANDF_RB2, 0x500, 0x124, 5, 0x9a8, 0), /* MX51_PAD_NANDF_RB2__DISP2_WAIT */
- IMX_PIN_REG(MX51_PAD_NANDF_RB2, 0x500, 0x124, 2, 0x000, 0), /* MX51_PAD_NANDF_RB2__ECSPI2_SCLK */
- IMX_PIN_REG(MX51_PAD_NANDF_RB2, 0x500, 0x124, 1, 0x94c, 0), /* MX51_PAD_NANDF_RB2__FEC_COL */
- IMX_PIN_REG(MX51_PAD_NANDF_RB2, 0x500, 0x124, 3, 0x000, 0), /* MX51_PAD_NANDF_RB2__GPIO3_10 */
- IMX_PIN_REG(MX51_PAD_NANDF_RB2, 0x500, 0x124, 0, 0x000, 0), /* MX51_PAD_NANDF_RB2__NANDF_RB2 */
- IMX_PIN_REG(MX51_PAD_NANDF_RB2, 0x500, 0x124, 7, 0x000, 0), /* MX51_PAD_NANDF_RB2__USBH3_H3_DP */
- IMX_PIN_REG(MX51_PAD_NANDF_RB2, 0x500, 0x124, 6, 0xa20, 0), /* MX51_PAD_NANDF_RB2__USBH3_NXT */
- IMX_PIN_REG(MX51_PAD_NANDF_RB3, 0x504, 0x128, 5, 0x000, 0), /* MX51_PAD_NANDF_RB3__DISP1_WAIT */
- IMX_PIN_REG(MX51_PAD_NANDF_RB3, 0x504, 0x128, 2, 0x000, 0), /* MX51_PAD_NANDF_RB3__ECSPI2_MISO */
- IMX_PIN_REG(MX51_PAD_NANDF_RB3, 0x504, 0x128, 1, 0x968, 0), /* MX51_PAD_NANDF_RB3__FEC_RX_CLK */
- IMX_PIN_REG(MX51_PAD_NANDF_RB3, 0x504, 0x128, 3, 0x000, 0), /* MX51_PAD_NANDF_RB3__GPIO3_11 */
- IMX_PIN_REG(MX51_PAD_NANDF_RB3, 0x504, 0x128, 0, 0x000, 0), /* MX51_PAD_NANDF_RB3__NANDF_RB3 */
- IMX_PIN_REG(MX51_PAD_NANDF_RB3, 0x504, 0x128, 6, 0x9f8, 0), /* MX51_PAD_NANDF_RB3__USBH3_CLK */
- IMX_PIN_REG(MX51_PAD_NANDF_RB3, 0x504, 0x128, 7, 0x000, 0), /* MX51_PAD_NANDF_RB3__USBH3_H3_DM */
- IMX_PIN_REG(MX51_PAD_GPIO_NAND, 0x514, 0x12c, 0, 0x998, 0), /* MX51_PAD_GPIO_NAND__GPIO_NAND */
- IMX_PIN_REG(MX51_PAD_GPIO_NAND, 0x514, 0x12c, 1, 0x000, 0), /* MX51_PAD_GPIO_NAND__PATA_INTRQ */
- IMX_PIN_REG(MX51_PAD_NANDF_CS0, 0x518, 0x130, 3, 0x000, 0), /* MX51_PAD_NANDF_CS0__GPIO3_16 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS0, 0x518, 0x130, 0, 0x000, 0), /* MX51_PAD_NANDF_CS0__NANDF_CS0 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS1, 0x51c, 0x134, 3, 0x000, 0), /* MX51_PAD_NANDF_CS1__GPIO3_17 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS1, 0x51c, 0x134, 0, 0x000, 0), /* MX51_PAD_NANDF_CS1__NANDF_CS1 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS2, 0x520, 0x138, 6, 0x914, 0), /* MX51_PAD_NANDF_CS2__CSPI_SCLK */
- IMX_PIN_REG(MX51_PAD_NANDF_CS2, 0x520, 0x138, 2, 0x000, 0), /* MX51_PAD_NANDF_CS2__FEC_TX_ER */
- IMX_PIN_REG(MX51_PAD_NANDF_CS2, 0x520, 0x138, 3, 0x000, 0), /* MX51_PAD_NANDF_CS2__GPIO3_18 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS2, 0x520, 0x138, 0, 0x000, 0), /* MX51_PAD_NANDF_CS2__NANDF_CS2 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS2, 0x520, 0x138, 1, 0x000, 0), /* MX51_PAD_NANDF_CS2__PATA_CS_0 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS2, 0x520, 0x138, 5, 0x000, 0), /* MX51_PAD_NANDF_CS2__SD4_CLK */
- IMX_PIN_REG(MX51_PAD_NANDF_CS2, 0x520, 0x138, 7, 0x000, 0), /* MX51_PAD_NANDF_CS2__USBH3_H1_DP */
- IMX_PIN_REG(MX51_PAD_NANDF_CS3, 0x524, 0x13c, 2, 0x000, 0), /* MX51_PAD_NANDF_CS3__FEC_MDC */
- IMX_PIN_REG(MX51_PAD_NANDF_CS3, 0x524, 0x13c, 3, 0x000, 0), /* MX51_PAD_NANDF_CS3__GPIO3_19 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS3, 0x524, 0x13c, 0, 0x000, 0), /* MX51_PAD_NANDF_CS3__NANDF_CS3 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS3, 0x524, 0x13c, 1, 0x000, 0), /* MX51_PAD_NANDF_CS3__PATA_CS_1 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS3, 0x524, 0x13c, 5, 0x000, 0), /* MX51_PAD_NANDF_CS3__SD4_DAT0 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS3, 0x524, 0x13c, 7, 0x000, 0), /* MX51_PAD_NANDF_CS3__USBH3_H1_DM */
- IMX_PIN_REG(MX51_PAD_NANDF_CS4, 0x528, 0x140, 2, 0x000, 0), /* MX51_PAD_NANDF_CS4__FEC_TDATA1 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS4, 0x528, 0x140, 3, 0x000, 0), /* MX51_PAD_NANDF_CS4__GPIO3_20 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS4, 0x528, 0x140, 0, 0x000, 0), /* MX51_PAD_NANDF_CS4__NANDF_CS4 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS4, 0x528, 0x140, 1, 0x000, 0), /* MX51_PAD_NANDF_CS4__PATA_DA_0 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS4, 0x528, 0x140, 5, 0x000, 0), /* MX51_PAD_NANDF_CS4__SD4_DAT1 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS4, 0x528, 0x140, 7, 0xa24, 0), /* MX51_PAD_NANDF_CS4__USBH3_STP */
- IMX_PIN_REG(MX51_PAD_NANDF_CS5, 0x52c, 0x144, 2, 0x000, 0), /* MX51_PAD_NANDF_CS5__FEC_TDATA2 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS5, 0x52c, 0x144, 3, 0x000, 0), /* MX51_PAD_NANDF_CS5__GPIO3_21 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS5, 0x52c, 0x144, 0, 0x000, 0), /* MX51_PAD_NANDF_CS5__NANDF_CS5 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS5, 0x52c, 0x144, 1, 0x000, 0), /* MX51_PAD_NANDF_CS5__PATA_DA_1 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS5, 0x52c, 0x144, 5, 0x000, 0), /* MX51_PAD_NANDF_CS5__SD4_DAT2 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS5, 0x52c, 0x144, 7, 0xa1c, 0), /* MX51_PAD_NANDF_CS5__USBH3_DIR */
- IMX_PIN_REG(MX51_PAD_NANDF_CS6, 0x530, 0x148, 7, 0x928, 0), /* MX51_PAD_NANDF_CS6__CSPI_SS3 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS6, 0x530, 0x148, 2, 0x000, 0), /* MX51_PAD_NANDF_CS6__FEC_TDATA3 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS6, 0x530, 0x148, 3, 0x000, 0), /* MX51_PAD_NANDF_CS6__GPIO3_22 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS6, 0x530, 0x148, 0, 0x000, 0), /* MX51_PAD_NANDF_CS6__NANDF_CS6 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS6, 0x530, 0x148, 1, 0x000, 0), /* MX51_PAD_NANDF_CS6__PATA_DA_2 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS6, 0x530, 0x148, 5, 0x000, 0), /* MX51_PAD_NANDF_CS6__SD4_DAT3 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS7, 0x534, 0x14c, 1, 0x000, 0), /* MX51_PAD_NANDF_CS7__FEC_TX_EN */
- IMX_PIN_REG(MX51_PAD_NANDF_CS7, 0x534, 0x14c, 3, 0x000, 0), /* MX51_PAD_NANDF_CS7__GPIO3_23 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS7, 0x534, 0x14c, 0, 0x000, 0), /* MX51_PAD_NANDF_CS7__NANDF_CS7 */
- IMX_PIN_REG(MX51_PAD_NANDF_CS7, 0x534, 0x14c, 5, 0x000, 0), /* MX51_PAD_NANDF_CS7__SD3_CLK */
- IMX_PIN_REG(MX51_PAD_NANDF_RDY_INT, 0x538, 0x150, 2, 0x000, 0), /* MX51_PAD_NANDF_RDY_INT__ECSPI2_SS0 */
- IMX_PIN_REG(MX51_PAD_NANDF_RDY_INT, 0x538, 0x150, 1, 0x974, 0), /* MX51_PAD_NANDF_RDY_INT__FEC_TX_CLK */
- IMX_PIN_REG(MX51_PAD_NANDF_RDY_INT, 0x538, 0x150, 3, 0x000, 0), /* MX51_PAD_NANDF_RDY_INT__GPIO3_24 */
- IMX_PIN_REG(MX51_PAD_NANDF_RDY_INT, 0x538, 0x150, 0, 0x938, 0), /* MX51_PAD_NANDF_RDY_INT__NANDF_RDY_INT */
- IMX_PIN_REG(MX51_PAD_NANDF_RDY_INT, 0x538, 0x150, 5, 0x000, 0), /* MX51_PAD_NANDF_RDY_INT__SD3_CMD */
- IMX_PIN_REG(MX51_PAD_NANDF_D15, 0x53c, 0x154, 2, 0x000, 0), /* MX51_PAD_NANDF_D15__ECSPI2_MOSI */
- IMX_PIN_REG(MX51_PAD_NANDF_D15, 0x53c, 0x154, 3, 0x000, 0), /* MX51_PAD_NANDF_D15__GPIO3_25 */
- IMX_PIN_REG(MX51_PAD_NANDF_D15, 0x53c, 0x154, 0, 0x000, 0), /* MX51_PAD_NANDF_D15__NANDF_D15 */
- IMX_PIN_REG(MX51_PAD_NANDF_D15, 0x53c, 0x154, 1, 0x000, 0), /* MX51_PAD_NANDF_D15__PATA_DATA15 */
- IMX_PIN_REG(MX51_PAD_NANDF_D15, 0x53c, 0x154, 5, 0x000, 0), /* MX51_PAD_NANDF_D15__SD3_DAT7 */
- IMX_PIN_REG(MX51_PAD_NANDF_D14, 0x540, 0x158, 2, 0x934, 0), /* MX51_PAD_NANDF_D14__ECSPI2_SS3 */
- IMX_PIN_REG(MX51_PAD_NANDF_D14, 0x540, 0x158, 3, 0x000, 0), /* MX51_PAD_NANDF_D14__GPIO3_26 */
- IMX_PIN_REG(MX51_PAD_NANDF_D14, 0x540, 0x158, 0, 0x000, 0), /* MX51_PAD_NANDF_D14__NANDF_D14 */
- IMX_PIN_REG(MX51_PAD_NANDF_D14, 0x540, 0x158, 1, 0x000, 0), /* MX51_PAD_NANDF_D14__PATA_DATA14 */
- IMX_PIN_REG(MX51_PAD_NANDF_D14, 0x540, 0x158, 5, 0x000, 0), /* MX51_PAD_NANDF_D14__SD3_DAT6 */
- IMX_PIN_REG(MX51_PAD_NANDF_D13, 0x544, 0x15c, 2, 0x000, 0), /* MX51_PAD_NANDF_D13__ECSPI2_SS2 */
- IMX_PIN_REG(MX51_PAD_NANDF_D13, 0x544, 0x15c, 3, 0x000, 0), /* MX51_PAD_NANDF_D13__GPIO3_27 */
- IMX_PIN_REG(MX51_PAD_NANDF_D13, 0x544, 0x15c, 0, 0x000, 0), /* MX51_PAD_NANDF_D13__NANDF_D13 */
- IMX_PIN_REG(MX51_PAD_NANDF_D13, 0x544, 0x15c, 1, 0x000, 0), /* MX51_PAD_NANDF_D13__PATA_DATA13 */
- IMX_PIN_REG(MX51_PAD_NANDF_D13, 0x544, 0x15c, 5, 0x000, 0), /* MX51_PAD_NANDF_D13__SD3_DAT5 */
- IMX_PIN_REG(MX51_PAD_NANDF_D12, 0x548, 0x160, 2, 0x930, 1), /* MX51_PAD_NANDF_D12__ECSPI2_SS1 */
- IMX_PIN_REG(MX51_PAD_NANDF_D12, 0x548, 0x160, 3, 0x000, 0), /* MX51_PAD_NANDF_D12__GPIO3_28 */
- IMX_PIN_REG(MX51_PAD_NANDF_D12, 0x548, 0x160, 0, 0x000, 0), /* MX51_PAD_NANDF_D12__NANDF_D12 */
- IMX_PIN_REG(MX51_PAD_NANDF_D12, 0x548, 0x160, 1, 0x000, 0), /* MX51_PAD_NANDF_D12__PATA_DATA12 */
- IMX_PIN_REG(MX51_PAD_NANDF_D12, 0x548, 0x160, 5, 0x000, 0), /* MX51_PAD_NANDF_D12__SD3_DAT4 */
- IMX_PIN_REG(MX51_PAD_NANDF_D11, 0x54c, 0x164, 2, 0x96c, 0), /* MX51_PAD_NANDF_D11__FEC_RX_DV */
- IMX_PIN_REG(MX51_PAD_NANDF_D11, 0x54c, 0x164, 3, 0x000, 0), /* MX51_PAD_NANDF_D11__GPIO3_29 */
- IMX_PIN_REG(MX51_PAD_NANDF_D11, 0x54c, 0x164, 0, 0x000, 0), /* MX51_PAD_NANDF_D11__NANDF_D11 */
- IMX_PIN_REG(MX51_PAD_NANDF_D11, 0x54c, 0x164, 1, 0x000, 0), /* MX51_PAD_NANDF_D11__PATA_DATA11 */
- IMX_PIN_REG(MX51_PAD_NANDF_D11, 0x54c, 0x164, 5, 0x948, 1), /* MX51_PAD_NANDF_D11__SD3_DATA3 */
- IMX_PIN_REG(MX51_PAD_NANDF_D10, 0x550, 0x168, 3, 0x000, 0), /* MX51_PAD_NANDF_D10__GPIO3_30 */
- IMX_PIN_REG(MX51_PAD_NANDF_D10, 0x550, 0x168, 0, 0x000, 0), /* MX51_PAD_NANDF_D10__NANDF_D10 */
- IMX_PIN_REG(MX51_PAD_NANDF_D10, 0x550, 0x168, 1, 0x000, 0), /* MX51_PAD_NANDF_D10__PATA_DATA10 */
- IMX_PIN_REG(MX51_PAD_NANDF_D10, 0x550, 0x168, 5, 0x944, 1), /* MX51_PAD_NANDF_D10__SD3_DATA2 */
- IMX_PIN_REG(MX51_PAD_NANDF_D9, 0x554, 0x16c, 2, 0x958, 0), /* MX51_PAD_NANDF_D9__FEC_RDATA0 */
- IMX_PIN_REG(MX51_PAD_NANDF_D9, 0x554, 0x16c, 3, 0x000, 0), /* MX51_PAD_NANDF_D9__GPIO3_31 */
- IMX_PIN_REG(MX51_PAD_NANDF_D9, 0x554, 0x16c, 0, 0x000, 0), /* MX51_PAD_NANDF_D9__NANDF_D9 */
- IMX_PIN_REG(MX51_PAD_NANDF_D9, 0x554, 0x16c, 1, 0x000, 0), /* MX51_PAD_NANDF_D9__PATA_DATA9 */
- IMX_PIN_REG(MX51_PAD_NANDF_D9, 0x554, 0x16c, 5, 0x940, 1), /* MX51_PAD_NANDF_D9__SD3_DATA1 */
- IMX_PIN_REG(MX51_PAD_NANDF_D8, 0x558, 0x170, 2, 0x000, 0), /* MX51_PAD_NANDF_D8__FEC_TDATA0 */
- IMX_PIN_REG(MX51_PAD_NANDF_D8, 0x558, 0x170, 3, 0x000, 0), /* MX51_PAD_NANDF_D8__GPIO4_0 */
- IMX_PIN_REG(MX51_PAD_NANDF_D8, 0x558, 0x170, 0, 0x000, 0), /* MX51_PAD_NANDF_D8__NANDF_D8 */
- IMX_PIN_REG(MX51_PAD_NANDF_D8, 0x558, 0x170, 1, 0x000, 0), /* MX51_PAD_NANDF_D8__PATA_DATA8 */
- IMX_PIN_REG(MX51_PAD_NANDF_D8, 0x558, 0x170, 5, 0x93c, 1), /* MX51_PAD_NANDF_D8__SD3_DATA0 */
- IMX_PIN_REG(MX51_PAD_NANDF_D7, 0x55c, 0x174, 3, 0x000, 0), /* MX51_PAD_NANDF_D7__GPIO4_1 */
- IMX_PIN_REG(MX51_PAD_NANDF_D7, 0x55c, 0x174, 0, 0x000, 0), /* MX51_PAD_NANDF_D7__NANDF_D7 */
- IMX_PIN_REG(MX51_PAD_NANDF_D7, 0x55c, 0x174, 1, 0x000, 0), /* MX51_PAD_NANDF_D7__PATA_DATA7 */
- IMX_PIN_REG(MX51_PAD_NANDF_D7, 0x55c, 0x174, 5, 0x9fc, 0), /* MX51_PAD_NANDF_D7__USBH3_DATA0 */
- IMX_PIN_REG(MX51_PAD_NANDF_D6, 0x560, 0x178, 3, 0x000, 0), /* MX51_PAD_NANDF_D6__GPIO4_2 */
- IMX_PIN_REG(MX51_PAD_NANDF_D6, 0x560, 0x178, 0, 0x000, 0), /* MX51_PAD_NANDF_D6__NANDF_D6 */
- IMX_PIN_REG(MX51_PAD_NANDF_D6, 0x560, 0x178, 1, 0x000, 0), /* MX51_PAD_NANDF_D6__PATA_DATA6 */
- IMX_PIN_REG(MX51_PAD_NANDF_D6, 0x560, 0x178, 2, 0x000, 0), /* MX51_PAD_NANDF_D6__SD4_LCTL */
- IMX_PIN_REG(MX51_PAD_NANDF_D6, 0x560, 0x178, 5, 0xa00, 0), /* MX51_PAD_NANDF_D6__USBH3_DATA1 */
- IMX_PIN_REG(MX51_PAD_NANDF_D5, 0x564, 0x17c, 3, 0x000, 0), /* MX51_PAD_NANDF_D5__GPIO4_3 */
- IMX_PIN_REG(MX51_PAD_NANDF_D5, 0x564, 0x17c, 0, 0x000, 0), /* MX51_PAD_NANDF_D5__NANDF_D5 */
- IMX_PIN_REG(MX51_PAD_NANDF_D5, 0x564, 0x17c, 1, 0x000, 0), /* MX51_PAD_NANDF_D5__PATA_DATA5 */
- IMX_PIN_REG(MX51_PAD_NANDF_D5, 0x564, 0x17c, 2, 0x000, 0), /* MX51_PAD_NANDF_D5__SD4_WP */
- IMX_PIN_REG(MX51_PAD_NANDF_D5, 0x564, 0x17c, 5, 0xa04, 0), /* MX51_PAD_NANDF_D5__USBH3_DATA2 */
- IMX_PIN_REG(MX51_PAD_NANDF_D4, 0x568, 0x180, 3, 0x000, 0), /* MX51_PAD_NANDF_D4__GPIO4_4 */
- IMX_PIN_REG(MX51_PAD_NANDF_D4, 0x568, 0x180, 0, 0x000, 0), /* MX51_PAD_NANDF_D4__NANDF_D4 */
- IMX_PIN_REG(MX51_PAD_NANDF_D4, 0x568, 0x180, 1, 0x000, 0), /* MX51_PAD_NANDF_D4__PATA_DATA4 */
- IMX_PIN_REG(MX51_PAD_NANDF_D4, 0x568, 0x180, 2, 0x000, 0), /* MX51_PAD_NANDF_D4__SD4_CD */
- IMX_PIN_REG(MX51_PAD_NANDF_D4, 0x568, 0x180, 5, 0xa08, 0), /* MX51_PAD_NANDF_D4__USBH3_DATA3 */
- IMX_PIN_REG(MX51_PAD_NANDF_D3, 0x56c, 0x184, 3, 0x000, 0), /* MX51_PAD_NANDF_D3__GPIO4_5 */
- IMX_PIN_REG(MX51_PAD_NANDF_D3, 0x56c, 0x184, 0, 0x000, 0), /* MX51_PAD_NANDF_D3__NANDF_D3 */
- IMX_PIN_REG(MX51_PAD_NANDF_D3, 0x56c, 0x184, 1, 0x000, 0), /* MX51_PAD_NANDF_D3__PATA_DATA3 */
- IMX_PIN_REG(MX51_PAD_NANDF_D3, 0x56c, 0x184, 2, 0x000, 0), /* MX51_PAD_NANDF_D3__SD4_DAT4 */
- IMX_PIN_REG(MX51_PAD_NANDF_D3, 0x56c, 0x184, 5, 0xa0c, 0), /* MX51_PAD_NANDF_D3__USBH3_DATA4 */
- IMX_PIN_REG(MX51_PAD_NANDF_D2, 0x570, 0x188, 3, 0x000, 0), /* MX51_PAD_NANDF_D2__GPIO4_6 */
- IMX_PIN_REG(MX51_PAD_NANDF_D2, 0x570, 0x188, 0, 0x000, 0), /* MX51_PAD_NANDF_D2__NANDF_D2 */
- IMX_PIN_REG(MX51_PAD_NANDF_D2, 0x570, 0x188, 1, 0x000, 0), /* MX51_PAD_NANDF_D2__PATA_DATA2 */
- IMX_PIN_REG(MX51_PAD_NANDF_D2, 0x570, 0x188, 2, 0x000, 0), /* MX51_PAD_NANDF_D2__SD4_DAT5 */
- IMX_PIN_REG(MX51_PAD_NANDF_D2, 0x570, 0x188, 5, 0xa10, 0), /* MX51_PAD_NANDF_D2__USBH3_DATA5 */
- IMX_PIN_REG(MX51_PAD_NANDF_D1, 0x574, 0x18c, 3, 0x000, 0), /* MX51_PAD_NANDF_D1__GPIO4_7 */
- IMX_PIN_REG(MX51_PAD_NANDF_D1, 0x574, 0x18c, 0, 0x000, 0), /* MX51_PAD_NANDF_D1__NANDF_D1 */
- IMX_PIN_REG(MX51_PAD_NANDF_D1, 0x574, 0x18c, 1, 0x000, 0), /* MX51_PAD_NANDF_D1__PATA_DATA1 */
- IMX_PIN_REG(MX51_PAD_NANDF_D1, 0x574, 0x18c, 2, 0x000, 0), /* MX51_PAD_NANDF_D1__SD4_DAT6 */
- IMX_PIN_REG(MX51_PAD_NANDF_D1, 0x574, 0x18c, 5, 0xa14, 0), /* MX51_PAD_NANDF_D1__USBH3_DATA6 */
- IMX_PIN_REG(MX51_PAD_NANDF_D0, 0x578, 0x190, 3, 0x000, 0), /* MX51_PAD_NANDF_D0__GPIO4_8 */
- IMX_PIN_REG(MX51_PAD_NANDF_D0, 0x578, 0x190, 0, 0x000, 0), /* MX51_PAD_NANDF_D0__NANDF_D0 */
- IMX_PIN_REG(MX51_PAD_NANDF_D0, 0x578, 0x190, 1, 0x000, 0), /* MX51_PAD_NANDF_D0__PATA_DATA0 */
- IMX_PIN_REG(MX51_PAD_NANDF_D0, 0x578, 0x190, 2, 0x000, 0), /* MX51_PAD_NANDF_D0__SD4_DAT7 */
- IMX_PIN_REG(MX51_PAD_NANDF_D0, 0x578, 0x190, 5, 0xa18, 0), /* MX51_PAD_NANDF_D0__USBH3_DATA7 */
- IMX_PIN_REG(MX51_PAD_CSI1_D8, 0x57c, 0x194, 0, 0x000, 0), /* MX51_PAD_CSI1_D8__CSI1_D8 */
- IMX_PIN_REG(MX51_PAD_CSI1_D8, 0x57c, 0x194, 3, 0x998, 1), /* MX51_PAD_CSI1_D8__GPIO3_12 */
- IMX_PIN_REG(MX51_PAD_CSI1_D9, 0x580, 0x198, 0, 0x000, 0), /* MX51_PAD_CSI1_D9__CSI1_D9 */
- IMX_PIN_REG(MX51_PAD_CSI1_D9, 0x580, 0x198, 3, 0x000, 0), /* MX51_PAD_CSI1_D9__GPIO3_13 */
- IMX_PIN_REG(MX51_PAD_CSI1_D10, 0x584, 0x19c, 0, 0x000, 0), /* MX51_PAD_CSI1_D10__CSI1_D10 */
- IMX_PIN_REG(MX51_PAD_CSI1_D11, 0x588, 0x1a0, 0, 0x000, 0), /* MX51_PAD_CSI1_D11__CSI1_D11 */
- IMX_PIN_REG(MX51_PAD_CSI1_D12, 0x58c, 0x1a4, 0, 0x000, 0), /* MX51_PAD_CSI1_D12__CSI1_D12 */
- IMX_PIN_REG(MX51_PAD_CSI1_D13, 0x590, 0x1a8, 0, 0x000, 0), /* MX51_PAD_CSI1_D13__CSI1_D13 */
- IMX_PIN_REG(MX51_PAD_CSI1_D14, 0x594, 0x1ac, 0, 0x000, 0), /* MX51_PAD_CSI1_D14__CSI1_D14 */
- IMX_PIN_REG(MX51_PAD_CSI1_D15, 0x598, 0x1b0, 0, 0x000, 0), /* MX51_PAD_CSI1_D15__CSI1_D15 */
- IMX_PIN_REG(MX51_PAD_CSI1_D16, 0x59c, 0x1b4, 0, 0x000, 0), /* MX51_PAD_CSI1_D16__CSI1_D16 */
- IMX_PIN_REG(MX51_PAD_CSI1_D17, 0x5a0, 0x1b8, 0, 0x000, 0), /* MX51_PAD_CSI1_D17__CSI1_D17 */
- IMX_PIN_REG(MX51_PAD_CSI1_D18, 0x5a4, 0x1bc, 0, 0x000, 0), /* MX51_PAD_CSI1_D18__CSI1_D18 */
- IMX_PIN_REG(MX51_PAD_CSI1_D19, 0x5a8, 0x1c0, 0, 0x000, 0), /* MX51_PAD_CSI1_D19__CSI1_D19 */
- IMX_PIN_REG(MX51_PAD_CSI1_VSYNC, 0x5ac, 0x1c4, 0, 0x000, 0), /* MX51_PAD_CSI1_VSYNC__CSI1_VSYNC */
- IMX_PIN_REG(MX51_PAD_CSI1_VSYNC, 0x5ac, 0x1c4, 3, 0x000, 0), /* MX51_PAD_CSI1_VSYNC__GPIO3_14 */
- IMX_PIN_REG(MX51_PAD_CSI1_HSYNC, 0x5b0, 0x1c8, 0, 0x000, 0), /* MX51_PAD_CSI1_HSYNC__CSI1_HSYNC */
- IMX_PIN_REG(MX51_PAD_CSI1_HSYNC, 0x5b0, 0x1c8, 3, 0x000, 0), /* MX51_PAD_CSI1_HSYNC__GPIO3_15 */
- IMX_PIN_REG(MX51_PAD_CSI1_PIXCLK, 0x5b4, NO_MUX, 0, 0x000, 0), /* MX51_PAD_CSI1_PIXCLK__CSI1_PIXCLK */
- IMX_PIN_REG(MX51_PAD_CSI1_MCLK, 0x5b8, NO_MUX, 0, 0x000, 0), /* MX51_PAD_CSI1_MCLK__CSI1_MCLK */
- IMX_PIN_REG(MX51_PAD_CSI2_D12, 0x5bc, 0x1cc, 0, 0x000, 0), /* MX51_PAD_CSI2_D12__CSI2_D12 */
- IMX_PIN_REG(MX51_PAD_CSI2_D12, 0x5bc, 0x1cc, 3, 0x000, 0), /* MX51_PAD_CSI2_D12__GPIO4_9 */
- IMX_PIN_REG(MX51_PAD_CSI2_D13, 0x5c0, 0x1d0, 0, 0x000, 0), /* MX51_PAD_CSI2_D13__CSI2_D13 */
- IMX_PIN_REG(MX51_PAD_CSI2_D13, 0x5c0, 0x1d0, 3, 0x000, 0), /* MX51_PAD_CSI2_D13__GPIO4_10 */
- IMX_PIN_REG(MX51_PAD_CSI2_D14, 0x5c4, 0x1d4, 0, 0x000, 0), /* MX51_PAD_CSI2_D14__CSI2_D14 */
- IMX_PIN_REG(MX51_PAD_CSI2_D15, 0x5c8, 0x1d8, 0, 0x000, 0), /* MX51_PAD_CSI2_D15__CSI2_D15 */
- IMX_PIN_REG(MX51_PAD_CSI2_D16, 0x5cc, 0x1dc, 0, 0x000, 0), /* MX51_PAD_CSI2_D16__CSI2_D16 */
- IMX_PIN_REG(MX51_PAD_CSI2_D17, 0x5d0, 0x1e0, 0, 0x000, 0), /* MX51_PAD_CSI2_D17__CSI2_D17 */
- IMX_PIN_REG(MX51_PAD_CSI2_D18, 0x5d4, 0x1e4, 0, 0x000, 0), /* MX51_PAD_CSI2_D18__CSI2_D18 */
- IMX_PIN_REG(MX51_PAD_CSI2_D18, 0x5d4, 0x1e4, 3, 0x000, 0), /* MX51_PAD_CSI2_D18__GPIO4_11 */
- IMX_PIN_REG(MX51_PAD_CSI2_D19, 0x5d8, 0x1e8, 0, 0x000, 0), /* MX51_PAD_CSI2_D19__CSI2_D19 */
- IMX_PIN_REG(MX51_PAD_CSI2_D19, 0x5d8, 0x1e8, 3, 0x000, 0), /* MX51_PAD_CSI2_D19__GPIO4_12 */
- IMX_PIN_REG(MX51_PAD_CSI2_VSYNC, 0x5dc, 0x1ec, 0, 0x000, 0), /* MX51_PAD_CSI2_VSYNC__CSI2_VSYNC */
- IMX_PIN_REG(MX51_PAD_CSI2_VSYNC, 0x5dc, 0x1ec, 3, 0x000, 0), /* MX51_PAD_CSI2_VSYNC__GPIO4_13 */
- IMX_PIN_REG(MX51_PAD_CSI2_HSYNC, 0x5e0, 0x1f0, 0, 0x000, 0), /* MX51_PAD_CSI2_HSYNC__CSI2_HSYNC */
- IMX_PIN_REG(MX51_PAD_CSI2_HSYNC, 0x5e0, 0x1f0, 3, 0x000, 0), /* MX51_PAD_CSI2_HSYNC__GPIO4_14 */
- IMX_PIN_REG(MX51_PAD_CSI2_PIXCLK, 0x5e4, 0x1f4, 0, 0x000, 0), /* MX51_PAD_CSI2_PIXCLK__CSI2_PIXCLK */
- IMX_PIN_REG(MX51_PAD_CSI2_PIXCLK, 0x5e4, 0x1f4, 3, 0x000, 0), /* MX51_PAD_CSI2_PIXCLK__GPIO4_15 */
- IMX_PIN_REG(MX51_PAD_I2C1_CLK, 0x5e8, 0x1f8, 3, 0x000, 0), /* MX51_PAD_I2C1_CLK__GPIO4_16 */
- IMX_PIN_REG(MX51_PAD_I2C1_CLK, 0x5e8, 0x1f8, 0, 0x000, 0), /* MX51_PAD_I2C1_CLK__I2C1_CLK */
- IMX_PIN_REG(MX51_PAD_I2C1_DAT, 0x5ec, 0x1fc, 3, 0x000, 0), /* MX51_PAD_I2C1_DAT__GPIO4_17 */
- IMX_PIN_REG(MX51_PAD_I2C1_DAT, 0x5ec, 0x1fc, 0, 0x000, 0), /* MX51_PAD_I2C1_DAT__I2C1_DAT */
- IMX_PIN_REG(MX51_PAD_AUD3_BB_TXD, 0x5f0, 0x200, 0, 0x000, 0), /* MX51_PAD_AUD3_BB_TXD__AUD3_TXD */
- IMX_PIN_REG(MX51_PAD_AUD3_BB_TXD, 0x5f0, 0x200, 3, 0x000, 0), /* MX51_PAD_AUD3_BB_TXD__GPIO4_18 */
- IMX_PIN_REG(MX51_PAD_AUD3_BB_RXD, 0x5f4, 0x204, 0, 0x000, 0), /* MX51_PAD_AUD3_BB_RXD__AUD3_RXD */
- IMX_PIN_REG(MX51_PAD_AUD3_BB_RXD, 0x5f4, 0x204, 3, 0x000, 0), /* MX51_PAD_AUD3_BB_RXD__GPIO4_19 */
- IMX_PIN_REG(MX51_PAD_AUD3_BB_RXD, 0x5f4, 0x204, 1, 0x9f4, 2), /* MX51_PAD_AUD3_BB_RXD__UART3_RXD */
- IMX_PIN_REG(MX51_PAD_AUD3_BB_CK, 0x5f8, 0x208, 0, 0x000, 0), /* MX51_PAD_AUD3_BB_CK__AUD3_TXC */
- IMX_PIN_REG(MX51_PAD_AUD3_BB_CK, 0x5f8, 0x208, 3, 0x000, 0), /* MX51_PAD_AUD3_BB_CK__GPIO4_20 */
- IMX_PIN_REG(MX51_PAD_AUD3_BB_FS, 0x5fc, 0x20c, 0, 0x000, 0), /* MX51_PAD_AUD3_BB_FS__AUD3_TXFS */
- IMX_PIN_REG(MX51_PAD_AUD3_BB_FS, 0x5fc, 0x20c, 3, 0x000, 0), /* MX51_PAD_AUD3_BB_FS__GPIO4_21 */
- IMX_PIN_REG(MX51_PAD_AUD3_BB_FS, 0x5fc, 0x20c, 1, 0x000, 0), /* MX51_PAD_AUD3_BB_FS__UART3_TXD */
- IMX_PIN_REG(MX51_PAD_CSPI1_MOSI, 0x600, 0x210, 0, 0x000, 0), /* MX51_PAD_CSPI1_MOSI__ECSPI1_MOSI */
- IMX_PIN_REG(MX51_PAD_CSPI1_MOSI, 0x600, 0x210, 3, 0x000, 0), /* MX51_PAD_CSPI1_MOSI__GPIO4_22 */
- IMX_PIN_REG(MX51_PAD_CSPI1_MOSI, 0x600, 0x210, 1, 0x9b4, 1), /* MX51_PAD_CSPI1_MOSI__I2C1_SDA */
- IMX_PIN_REG(MX51_PAD_CSPI1_MISO, 0x604, 0x214, 1, 0x8c4, 1), /* MX51_PAD_CSPI1_MISO__AUD4_RXD */
- IMX_PIN_REG(MX51_PAD_CSPI1_MISO, 0x604, 0x214, 0, 0x000, 0), /* MX51_PAD_CSPI1_MISO__ECSPI1_MISO */
- IMX_PIN_REG(MX51_PAD_CSPI1_MISO, 0x604, 0x214, 3, 0x000, 0), /* MX51_PAD_CSPI1_MISO__GPIO4_23 */
- IMX_PIN_REG(MX51_PAD_CSPI1_SS0, 0x608, 0x218, 1, 0x8cc, 1), /* MX51_PAD_CSPI1_SS0__AUD4_TXC */
- IMX_PIN_REG(MX51_PAD_CSPI1_SS0, 0x608, 0x218, 0, 0x000, 0), /* MX51_PAD_CSPI1_SS0__ECSPI1_SS0 */
- IMX_PIN_REG(MX51_PAD_CSPI1_SS0, 0x608, 0x218, 3, 0x000, 0), /* MX51_PAD_CSPI1_SS0__GPIO4_24 */
- IMX_PIN_REG(MX51_PAD_CSPI1_SS1, 0x60c, 0x21c, 1, 0x8c8, 1), /* MX51_PAD_CSPI1_SS1__AUD4_TXD */
- IMX_PIN_REG(MX51_PAD_CSPI1_SS1, 0x60c, 0x21c, 0, 0x000, 0), /* MX51_PAD_CSPI1_SS1__ECSPI1_SS1 */
- IMX_PIN_REG(MX51_PAD_CSPI1_SS1, 0x60c, 0x21c, 3, 0x000, 0), /* MX51_PAD_CSPI1_SS1__GPIO4_25 */
- IMX_PIN_REG(MX51_PAD_CSPI1_RDY, 0x610, 0x220, 1, 0x8d0, 1), /* MX51_PAD_CSPI1_RDY__AUD4_TXFS */
- IMX_PIN_REG(MX51_PAD_CSPI1_RDY, 0x610, 0x220, 0, 0x000, 0), /* MX51_PAD_CSPI1_RDY__ECSPI1_RDY */
- IMX_PIN_REG(MX51_PAD_CSPI1_RDY, 0x610, 0x220, 3, 0x000, 0), /* MX51_PAD_CSPI1_RDY__GPIO4_26 */
- IMX_PIN_REG(MX51_PAD_CSPI1_SCLK, 0x614, 0x224, 0, 0x000, 0), /* MX51_PAD_CSPI1_SCLK__ECSPI1_SCLK */
- IMX_PIN_REG(MX51_PAD_CSPI1_SCLK, 0x614, 0x224, 3, 0x000, 0), /* MX51_PAD_CSPI1_SCLK__GPIO4_27 */
- IMX_PIN_REG(MX51_PAD_CSPI1_SCLK, 0x614, 0x224, 1, 0x9b0, 1), /* MX51_PAD_CSPI1_SCLK__I2C1_SCL */
- IMX_PIN_REG(MX51_PAD_UART1_RXD, 0x618, 0x228, 3, 0x000, 0), /* MX51_PAD_UART1_RXD__GPIO4_28 */
- IMX_PIN_REG(MX51_PAD_UART1_RXD, 0x618, 0x228, 0, 0x9e4, 0), /* MX51_PAD_UART1_RXD__UART1_RXD */
- IMX_PIN_REG(MX51_PAD_UART1_TXD, 0x61c, 0x22c, 3, 0x000, 0), /* MX51_PAD_UART1_TXD__GPIO4_29 */
- IMX_PIN_REG(MX51_PAD_UART1_TXD, 0x61c, 0x22c, 1, 0x000, 0), /* MX51_PAD_UART1_TXD__PWM2_PWMO */
- IMX_PIN_REG(MX51_PAD_UART1_TXD, 0x61c, 0x22c, 0, 0x000, 0), /* MX51_PAD_UART1_TXD__UART1_TXD */
- IMX_PIN_REG(MX51_PAD_UART1_RTS, 0x620, 0x230, 3, 0x000, 0), /* MX51_PAD_UART1_RTS__GPIO4_30 */
- IMX_PIN_REG(MX51_PAD_UART1_RTS, 0x620, 0x230, 0, 0x9e0, 0), /* MX51_PAD_UART1_RTS__UART1_RTS */
- IMX_PIN_REG(MX51_PAD_UART1_CTS, 0x624, 0x234, 3, 0x000, 0), /* MX51_PAD_UART1_CTS__GPIO4_31 */
- IMX_PIN_REG(MX51_PAD_UART1_CTS, 0x624, 0x234, 0, 0x000, 0), /* MX51_PAD_UART1_CTS__UART1_CTS */
- IMX_PIN_REG(MX51_PAD_UART2_RXD, 0x628, 0x238, 1, 0x000, 0), /* MX51_PAD_UART2_RXD__FIRI_TXD */
- IMX_PIN_REG(MX51_PAD_UART2_RXD, 0x628, 0x238, 3, 0x000, 0), /* MX51_PAD_UART2_RXD__GPIO1_20 */
- IMX_PIN_REG(MX51_PAD_UART2_RXD, 0x628, 0x238, 0, 0x9ec, 2), /* MX51_PAD_UART2_RXD__UART2_RXD */
- IMX_PIN_REG(MX51_PAD_UART2_TXD, 0x62c, 0x23c, 1, 0x000, 0), /* MX51_PAD_UART2_TXD__FIRI_RXD */
- IMX_PIN_REG(MX51_PAD_UART2_TXD, 0x62c, 0x23c, 3, 0x000, 0), /* MX51_PAD_UART2_TXD__GPIO1_21 */
- IMX_PIN_REG(MX51_PAD_UART2_TXD, 0x62c, 0x23c, 0, 0x000, 0), /* MX51_PAD_UART2_TXD__UART2_TXD */
- IMX_PIN_REG(MX51_PAD_UART3_RXD, 0x630, 0x240, 2, 0x000, 0), /* MX51_PAD_UART3_RXD__CSI1_D0 */
- IMX_PIN_REG(MX51_PAD_UART3_RXD, 0x630, 0x240, 3, 0x000, 0), /* MX51_PAD_UART3_RXD__GPIO1_22 */
- IMX_PIN_REG(MX51_PAD_UART3_RXD, 0x630, 0x240, 0, 0x000, 0), /* MX51_PAD_UART3_RXD__UART1_DTR */
- IMX_PIN_REG(MX51_PAD_UART3_RXD, 0x630, 0x240, 1, 0x9f4, 4), /* MX51_PAD_UART3_RXD__UART3_RXD */
- IMX_PIN_REG(MX51_PAD_UART3_TXD, 0x634, 0x244, 2, 0x000, 0), /* MX51_PAD_UART3_TXD__CSI1_D1 */
- IMX_PIN_REG(MX51_PAD_UART3_TXD, 0x634, 0x244, 3, 0x000, 0), /* MX51_PAD_UART3_TXD__GPIO1_23 */
- IMX_PIN_REG(MX51_PAD_UART3_TXD, 0x634, 0x244, 0, 0x000, 0), /* MX51_PAD_UART3_TXD__UART1_DSR */
- IMX_PIN_REG(MX51_PAD_UART3_TXD, 0x634, 0x244, 1, 0x000, 0), /* MX51_PAD_UART3_TXD__UART3_TXD */
- IMX_PIN_REG(MX51_PAD_OWIRE_LINE, 0x638, 0x248, 3, 0x000, 0), /* MX51_PAD_OWIRE_LINE__GPIO1_24 */
- IMX_PIN_REG(MX51_PAD_OWIRE_LINE, 0x638, 0x248, 0, 0x000, 0), /* MX51_PAD_OWIRE_LINE__OWIRE_LINE */
- IMX_PIN_REG(MX51_PAD_OWIRE_LINE, 0x638, 0x248, 6, 0x000, 0), /* MX51_PAD_OWIRE_LINE__SPDIF_OUT */
- IMX_PIN_REG(MX51_PAD_KEY_ROW0, 0x63c, 0x24c, 0, 0x000, 0), /* MX51_PAD_KEY_ROW0__KEY_ROW0 */
- IMX_PIN_REG(MX51_PAD_KEY_ROW1, 0x640, 0x250, 0, 0x000, 0), /* MX51_PAD_KEY_ROW1__KEY_ROW1 */
- IMX_PIN_REG(MX51_PAD_KEY_ROW2, 0x644, 0x254, 0, 0x000, 0), /* MX51_PAD_KEY_ROW2__KEY_ROW2 */
- IMX_PIN_REG(MX51_PAD_KEY_ROW3, 0x648, 0x258, 0, 0x000, 0), /* MX51_PAD_KEY_ROW3__KEY_ROW3 */
- IMX_PIN_REG(MX51_PAD_KEY_COL0, 0x64c, 0x25c, 0, 0x000, 0), /* MX51_PAD_KEY_COL0__KEY_COL0 */
- IMX_PIN_REG(MX51_PAD_KEY_COL0, 0x64c, 0x25c, 7, 0x90c, 0), /* MX51_PAD_KEY_COL0__PLL1_BYP */
- IMX_PIN_REG(MX51_PAD_KEY_COL1, 0x650, 0x260, 0, 0x000, 0), /* MX51_PAD_KEY_COL1__KEY_COL1 */
- IMX_PIN_REG(MX51_PAD_KEY_COL1, 0x650, 0x260, 7, 0x910, 0), /* MX51_PAD_KEY_COL1__PLL2_BYP */
- IMX_PIN_REG(MX51_PAD_KEY_COL2, 0x654, 0x264, 0, 0x000, 0), /* MX51_PAD_KEY_COL2__KEY_COL2 */
- IMX_PIN_REG(MX51_PAD_KEY_COL2, 0x654, 0x264, 7, 0x000, 0), /* MX51_PAD_KEY_COL2__PLL3_BYP */
- IMX_PIN_REG(MX51_PAD_KEY_COL3, 0x658, 0x268, 0, 0x000, 0), /* MX51_PAD_KEY_COL3__KEY_COL3 */
- IMX_PIN_REG(MX51_PAD_KEY_COL4, 0x65c, 0x26c, 3, 0x9b8, 1), /* MX51_PAD_KEY_COL4__I2C2_SCL */
- IMX_PIN_REG(MX51_PAD_KEY_COL4, 0x65c, 0x26c, 0, 0x000, 0), /* MX51_PAD_KEY_COL4__KEY_COL4 */
- IMX_PIN_REG(MX51_PAD_KEY_COL4, 0x65c, 0x26c, 6, 0x000, 0), /* MX51_PAD_KEY_COL4__SPDIF_OUT1 */
- IMX_PIN_REG(MX51_PAD_KEY_COL4, 0x65c, 0x26c, 1, 0x000, 0), /* MX51_PAD_KEY_COL4__UART1_RI */
- IMX_PIN_REG(MX51_PAD_KEY_COL4, 0x65c, 0x26c, 2, 0x9f0, 4), /* MX51_PAD_KEY_COL4__UART3_RTS */
- IMX_PIN_REG(MX51_PAD_KEY_COL5, 0x660, 0x270, 3, 0x9bc, 1), /* MX51_PAD_KEY_COL5__I2C2_SDA */
- IMX_PIN_REG(MX51_PAD_KEY_COL5, 0x660, 0x270, 0, 0x000, 0), /* MX51_PAD_KEY_COL5__KEY_COL5 */
- IMX_PIN_REG(MX51_PAD_KEY_COL5, 0x660, 0x270, 1, 0x000, 0), /* MX51_PAD_KEY_COL5__UART1_DCD */
- IMX_PIN_REG(MX51_PAD_KEY_COL5, 0x660, 0x270, 2, 0x000, 0), /* MX51_PAD_KEY_COL5__UART3_CTS */
- IMX_PIN_REG(MX51_PAD_USBH1_CLK, 0x678, 0x278, 1, 0x914, 1), /* MX51_PAD_USBH1_CLK__CSPI_SCLK */
- IMX_PIN_REG(MX51_PAD_USBH1_CLK, 0x678, 0x278, 2, 0x000, 0), /* MX51_PAD_USBH1_CLK__GPIO1_25 */
- IMX_PIN_REG(MX51_PAD_USBH1_CLK, 0x678, 0x278, 5, 0x9b8, 2), /* MX51_PAD_USBH1_CLK__I2C2_SCL */
- IMX_PIN_REG(MX51_PAD_USBH1_CLK, 0x678, 0x278, 0, 0x000, 0), /* MX51_PAD_USBH1_CLK__USBH1_CLK */
- IMX_PIN_REG(MX51_PAD_USBH1_DIR, 0x67c, 0x27c, 1, 0x91c, 1), /* MX51_PAD_USBH1_DIR__CSPI_MOSI */
- IMX_PIN_REG(MX51_PAD_USBH1_DIR, 0x67c, 0x27c, 2, 0x000, 0), /* MX51_PAD_USBH1_DIR__GPIO1_26 */
- IMX_PIN_REG(MX51_PAD_USBH1_DIR, 0x67c, 0x27c, 5, 0x9bc, 2), /* MX51_PAD_USBH1_DIR__I2C2_SDA */
- IMX_PIN_REG(MX51_PAD_USBH1_DIR, 0x67c, 0x27c, 0, 0x000, 0), /* MX51_PAD_USBH1_DIR__USBH1_DIR */
- IMX_PIN_REG(MX51_PAD_USBH1_STP, 0x680, 0x280, 1, 0x000, 0), /* MX51_PAD_USBH1_STP__CSPI_RDY */
- IMX_PIN_REG(MX51_PAD_USBH1_STP, 0x680, 0x280, 2, 0x000, 0), /* MX51_PAD_USBH1_STP__GPIO1_27 */
- IMX_PIN_REG(MX51_PAD_USBH1_STP, 0x680, 0x280, 5, 0x9f4, 6), /* MX51_PAD_USBH1_STP__UART3_RXD */
- IMX_PIN_REG(MX51_PAD_USBH1_STP, 0x680, 0x280, 0, 0x000, 0), /* MX51_PAD_USBH1_STP__USBH1_STP */
- IMX_PIN_REG(MX51_PAD_USBH1_NXT, 0x684, 0x284, 1, 0x918, 0), /* MX51_PAD_USBH1_NXT__CSPI_MISO */
- IMX_PIN_REG(MX51_PAD_USBH1_NXT, 0x684, 0x284, 2, 0x000, 0), /* MX51_PAD_USBH1_NXT__GPIO1_28 */
- IMX_PIN_REG(MX51_PAD_USBH1_NXT, 0x684, 0x284, 5, 0x000, 0), /* MX51_PAD_USBH1_NXT__UART3_TXD */
- IMX_PIN_REG(MX51_PAD_USBH1_NXT, 0x684, 0x284, 0, 0x000, 0), /* MX51_PAD_USBH1_NXT__USBH1_NXT */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA0, 0x688, 0x288, 2, 0x000, 0), /* MX51_PAD_USBH1_DATA0__GPIO1_11 */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA0, 0x688, 0x288, 1, 0x000, 0), /* MX51_PAD_USBH1_DATA0__UART2_CTS */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA0, 0x688, 0x288, 0, 0x000, 0), /* MX51_PAD_USBH1_DATA0__USBH1_DATA0 */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA1, 0x68c, 0x28c, 2, 0x000, 0), /* MX51_PAD_USBH1_DATA1__GPIO1_12 */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA1, 0x68c, 0x28c, 1, 0x9ec, 4), /* MX51_PAD_USBH1_DATA1__UART2_RXD */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA1, 0x68c, 0x28c, 0, 0x000, 0), /* MX51_PAD_USBH1_DATA1__USBH1_DATA1 */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA2, 0x690, 0x290, 2, 0x000, 0), /* MX51_PAD_USBH1_DATA2__GPIO1_13 */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA2, 0x690, 0x290, 1, 0x000, 0), /* MX51_PAD_USBH1_DATA2__UART2_TXD */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA2, 0x690, 0x290, 0, 0x000, 0), /* MX51_PAD_USBH1_DATA2__USBH1_DATA2 */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA3, 0x694, 0x294, 2, 0x000, 0), /* MX51_PAD_USBH1_DATA3__GPIO1_14 */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA3, 0x694, 0x294, 1, 0x9e8, 5), /* MX51_PAD_USBH1_DATA3__UART2_RTS */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA3, 0x694, 0x294, 0, 0x000, 0), /* MX51_PAD_USBH1_DATA3__USBH1_DATA3 */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA4, 0x698, 0x298, 1, 0x000, 0), /* MX51_PAD_USBH1_DATA4__CSPI_SS0 */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA4, 0x698, 0x298, 2, 0x000, 0), /* MX51_PAD_USBH1_DATA4__GPIO1_15 */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA4, 0x698, 0x298, 0, 0x000, 0), /* MX51_PAD_USBH1_DATA4__USBH1_DATA4 */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA5, 0x69c, 0x29c, 1, 0x920, 0), /* MX51_PAD_USBH1_DATA5__CSPI_SS1 */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA5, 0x69c, 0x29c, 2, 0x000, 0), /* MX51_PAD_USBH1_DATA5__GPIO1_16 */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA5, 0x69c, 0x29c, 0, 0x000, 0), /* MX51_PAD_USBH1_DATA5__USBH1_DATA5 */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA6, 0x6a0, 0x2a0, 1, 0x928, 1), /* MX51_PAD_USBH1_DATA6__CSPI_SS3 */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA6, 0x6a0, 0x2a0, 2, 0x000, 0), /* MX51_PAD_USBH1_DATA6__GPIO1_17 */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA6, 0x6a0, 0x2a0, 0, 0x000, 0), /* MX51_PAD_USBH1_DATA6__USBH1_DATA6 */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA7, 0x6a4, 0x2a4, 1, 0x000, 0), /* MX51_PAD_USBH1_DATA7__ECSPI1_SS3 */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA7, 0x6a4, 0x2a4, 5, 0x934, 1), /* MX51_PAD_USBH1_DATA7__ECSPI2_SS3 */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA7, 0x6a4, 0x2a4, 2, 0x000, 0), /* MX51_PAD_USBH1_DATA7__GPIO1_18 */
- IMX_PIN_REG(MX51_PAD_USBH1_DATA7, 0x6a4, 0x2a4, 0, 0x000, 0), /* MX51_PAD_USBH1_DATA7__USBH1_DATA7 */
- IMX_PIN_REG(MX51_PAD_DI1_PIN11, 0x6a8, 0x2a8, 0, 0x000, 0), /* MX51_PAD_DI1_PIN11__DI1_PIN11 */
- IMX_PIN_REG(MX51_PAD_DI1_PIN11, 0x6a8, 0x2a8, 7, 0x000, 0), /* MX51_PAD_DI1_PIN11__ECSPI1_SS2 */
- IMX_PIN_REG(MX51_PAD_DI1_PIN11, 0x6a8, 0x2a8, 4, 0x000, 0), /* MX51_PAD_DI1_PIN11__GPIO3_0 */
- IMX_PIN_REG(MX51_PAD_DI1_PIN12, 0x6ac, 0x2ac, 0, 0x000, 0), /* MX51_PAD_DI1_PIN12__DI1_PIN12 */
- IMX_PIN_REG(MX51_PAD_DI1_PIN12, 0x6ac, 0x2ac, 4, 0x978, 1), /* MX51_PAD_DI1_PIN12__GPIO3_1 */
- IMX_PIN_REG(MX51_PAD_DI1_PIN13, 0x6b0, 0x2b0, 0, 0x000, 0), /* MX51_PAD_DI1_PIN13__DI1_PIN13 */
- IMX_PIN_REG(MX51_PAD_DI1_PIN13, 0x6b0, 0x2b0, 4, 0x97c, 1), /* MX51_PAD_DI1_PIN13__GPIO3_2 */
- IMX_PIN_REG(MX51_PAD_DI1_D0_CS, 0x6b4, 0x2b4, 0, 0x000, 0), /* MX51_PAD_DI1_D0_CS__DI1_D0_CS */
- IMX_PIN_REG(MX51_PAD_DI1_D0_CS, 0x6b4, 0x2b4, 4, 0x980, 1), /* MX51_PAD_DI1_D0_CS__GPIO3_3 */
- IMX_PIN_REG(MX51_PAD_DI1_D1_CS, 0x6b8, 0x2b8, 0, 0x000, 0), /* MX51_PAD_DI1_D1_CS__DI1_D1_CS */
- IMX_PIN_REG(MX51_PAD_DI1_D1_CS, 0x6b8, 0x2b8, 2, 0x000, 0), /* MX51_PAD_DI1_D1_CS__DISP1_PIN14 */
- IMX_PIN_REG(MX51_PAD_DI1_D1_CS, 0x6b8, 0x2b8, 3, 0x000, 0), /* MX51_PAD_DI1_D1_CS__DISP1_PIN5 */
- IMX_PIN_REG(MX51_PAD_DI1_D1_CS, 0x6b8, 0x2b8, 4, 0x984, 1), /* MX51_PAD_DI1_D1_CS__GPIO3_4 */
- IMX_PIN_REG(MX51_PAD_DISPB2_SER_DIN, 0x6bc, 0x2bc, 2, 0x9a4, 1), /* MX51_PAD_DISPB2_SER_DIN__DISP1_PIN1 */
- IMX_PIN_REG(MX51_PAD_DISPB2_SER_DIN, 0x6bc, 0x2bc, 0, 0x9c4, 0), /* MX51_PAD_DISPB2_SER_DIN__DISPB2_SER_DIN */
- IMX_PIN_REG(MX51_PAD_DISPB2_SER_DIN, 0x6bc, 0x2bc, 4, 0x988, 1), /* MX51_PAD_DISPB2_SER_DIN__GPIO3_5 */
- IMX_PIN_REG(MX51_PAD_DISPB2_SER_DIO, 0x6c0, 0x2c0, 3, 0x000, 0), /* MX51_PAD_DISPB2_SER_DIO__DISP1_PIN6 */
- IMX_PIN_REG(MX51_PAD_DISPB2_SER_DIO, 0x6c0, 0x2c0, 0, 0x9c4, 1), /* MX51_PAD_DISPB2_SER_DIO__DISPB2_SER_DIO */
- IMX_PIN_REG(MX51_PAD_DISPB2_SER_DIO, 0x6c0, 0x2c0, 4, 0x98c, 1), /* MX51_PAD_DISPB2_SER_DIO__GPIO3_6 */
- IMX_PIN_REG(MX51_PAD_DISPB2_SER_CLK, 0x6c4, 0x2c4, 2, 0x000, 0), /* MX51_PAD_DISPB2_SER_CLK__DISP1_PIN17 */
- IMX_PIN_REG(MX51_PAD_DISPB2_SER_CLK, 0x6c4, 0x2c4, 3, 0x000, 0), /* MX51_PAD_DISPB2_SER_CLK__DISP1_PIN7 */
- IMX_PIN_REG(MX51_PAD_DISPB2_SER_CLK, 0x6c4, 0x2c4, 0, 0x000, 0), /* MX51_PAD_DISPB2_SER_CLK__DISPB2_SER_CLK */
- IMX_PIN_REG(MX51_PAD_DISPB2_SER_CLK, 0x6c4, 0x2c4, 4, 0x990, 1), /* MX51_PAD_DISPB2_SER_CLK__GPIO3_7 */
- IMX_PIN_REG(MX51_PAD_DISPB2_SER_RS, 0x6c8, 0x2c8, 2, 0x000, 0), /* MX51_PAD_DISPB2_SER_RS__DISP1_EXT_CLK */
- IMX_PIN_REG(MX51_PAD_DISPB2_SER_RS, 0x6c8, 0x2c8, 2, 0x000, 0), /* MX51_PAD_DISPB2_SER_RS__DISP1_PIN16 */
- IMX_PIN_REG(MX51_PAD_DISPB2_SER_RS, 0x6c8, 0x2c8, 3, 0x000, 0), /* MX51_PAD_DISPB2_SER_RS__DISP1_PIN8 */
- IMX_PIN_REG(MX51_PAD_DISPB2_SER_RS, 0x6c8, 0x2c8, 0, 0x000, 0), /* MX51_PAD_DISPB2_SER_RS__DISPB2_SER_RS */
- IMX_PIN_REG(MX51_PAD_DISPB2_SER_RS, 0x6c8, 0x2c8, 0, 0x000, 0), /* MX51_PAD_DISPB2_SER_RS__DISPB2_SER_RS */
- IMX_PIN_REG(MX51_PAD_DISPB2_SER_RS, 0x6c8, 0x2c8, 4, 0x994, 1), /* MX51_PAD_DISPB2_SER_RS__GPIO3_8 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT0, 0x6cc, 0x2cc, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT0__DISP1_DAT0 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT1, 0x6d0, 0x2d0, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT1__DISP1_DAT1 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT2, 0x6d4, 0x2d4, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT2__DISP1_DAT2 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT3, 0x6d8, 0x2d8, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT3__DISP1_DAT3 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT4, 0x6dc, 0x2dc, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT4__DISP1_DAT4 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT5, 0x6e0, 0x2e0, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT5__DISP1_DAT5 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT6, 0x6e4, 0x2e4, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT6__BOOT_USB_SRC */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT6, 0x6e4, 0x2e4, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT6__DISP1_DAT6 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT7, 0x6e8, 0x2e8, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT7__BOOT_EEPROM_CFG */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT7, 0x6e8, 0x2e8, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT7__DISP1_DAT7 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT8, 0x6ec, 0x2ec, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT8__BOOT_SRC0 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT8, 0x6ec, 0x2ec, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT8__DISP1_DAT8 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT9, 0x6f0, 0x2f0, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT9__BOOT_SRC1 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT9, 0x6f0, 0x2f0, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT9__DISP1_DAT9 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT10, 0x6f4, 0x2f4, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT10__BOOT_SPARE_SIZE */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT10, 0x6f4, 0x2f4, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT10__DISP1_DAT10 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT11, 0x6f8, 0x2f8, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT11__BOOT_LPB_FREQ2 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT11, 0x6f8, 0x2f8, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT11__DISP1_DAT11 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT12, 0x6fc, 0x2fc, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT12__BOOT_MLC_SEL */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT12, 0x6fc, 0x2fc, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT12__DISP1_DAT12 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT13, 0x700, 0x300, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT13__BOOT_MEM_CTL0 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT13, 0x700, 0x300, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT13__DISP1_DAT13 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT14, 0x704, 0x304, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT14__BOOT_MEM_CTL1 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT14, 0x704, 0x304, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT14__DISP1_DAT14 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT15, 0x708, 0x308, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT15__BOOT_BUS_WIDTH */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT15, 0x708, 0x308, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT15__DISP1_DAT15 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT16, 0x70c, 0x30c, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT16__BOOT_PAGE_SIZE0 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT16, 0x70c, 0x30c, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT16__DISP1_DAT16 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT17, 0x710, 0x310, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT17__BOOT_PAGE_SIZE1 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT17, 0x710, 0x310, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT17__DISP1_DAT17 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT18, 0x714, 0x314, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT18__BOOT_WEIM_MUXED0 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT18, 0x714, 0x314, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT18__DISP1_DAT18 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT18, 0x714, 0x314, 5, 0x000, 0), /* MX51_PAD_DISP1_DAT18__DISP2_PIN11 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT18, 0x714, 0x314, 4, 0x000, 0), /* MX51_PAD_DISP1_DAT18__DISP2_PIN5 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT19, 0x718, 0x318, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT19__BOOT_WEIM_MUXED1 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT19, 0x718, 0x318, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT19__DISP1_DAT19 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT19, 0x718, 0x318, 5, 0x000, 0), /* MX51_PAD_DISP1_DAT19__DISP2_PIN12 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT19, 0x718, 0x318, 4, 0x000, 0), /* MX51_PAD_DISP1_DAT19__DISP2_PIN6 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT20, 0x71c, 0x31c, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT20__BOOT_MEM_TYPE0 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT20, 0x71c, 0x31c, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT20__DISP1_DAT20 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT20, 0x71c, 0x31c, 5, 0x000, 0), /* MX51_PAD_DISP1_DAT20__DISP2_PIN13 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT20, 0x71c, 0x31c, 4, 0x000, 0), /* MX51_PAD_DISP1_DAT20__DISP2_PIN7 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT21, 0x720, 0x320, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT21__BOOT_MEM_TYPE1 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT21, 0x720, 0x320, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT21__DISP1_DAT21 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT21, 0x720, 0x320, 5, 0x000, 0), /* MX51_PAD_DISP1_DAT21__DISP2_PIN14 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT21, 0x720, 0x320, 4, 0x000, 0), /* MX51_PAD_DISP1_DAT21__DISP2_PIN8 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT22, 0x724, 0x324, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT22__BOOT_LPB_FREQ0 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT22, 0x724, 0x324, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT22__DISP1_DAT22 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT22, 0x724, 0x324, 6, 0x000, 0), /* MX51_PAD_DISP1_DAT22__DISP2_D0_CS */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT22, 0x724, 0x324, 5, 0x000, 0), /* MX51_PAD_DISP1_DAT22__DISP2_DAT16 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT23, 0x728, 0x328, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT23__BOOT_LPB_FREQ1 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT23, 0x728, 0x328, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT23__DISP1_DAT23 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT23, 0x728, 0x328, 6, 0x000, 0), /* MX51_PAD_DISP1_DAT23__DISP2_D1_CS */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT23, 0x728, 0x328, 5, 0x000, 0), /* MX51_PAD_DISP1_DAT23__DISP2_DAT17 */
- IMX_PIN_REG(MX51_PAD_DISP1_DAT23, 0x728, 0x328, 4, 0x000, 0), /* MX51_PAD_DISP1_DAT23__DISP2_SER_CS */
- IMX_PIN_REG(MX51_PAD_DI1_PIN3, 0x72c, 0x32c, 0, 0x000, 0), /* MX51_PAD_DI1_PIN3__DI1_PIN3 */
- IMX_PIN_REG(MX51_PAD_DI1_PIN2, 0x734, 0x330, 0, 0x000, 0), /* MX51_PAD_DI1_PIN2__DI1_PIN2 */
- IMX_PIN_REG(MX51_PAD_DI_GP2, 0x740, 0x338, 0, 0x000, 0), /* MX51_PAD_DI_GP2__DISP1_SER_CLK */
- IMX_PIN_REG(MX51_PAD_DI_GP2, 0x740, 0x338, 2, 0x9a8, 1), /* MX51_PAD_DI_GP2__DISP2_WAIT */
- IMX_PIN_REG(MX51_PAD_DI_GP3, 0x744, 0x33c, 3, 0x9a0, 1), /* MX51_PAD_DI_GP3__CSI1_DATA_EN */
- IMX_PIN_REG(MX51_PAD_DI_GP3, 0x744, 0x33c, 0, 0x9c0, 0), /* MX51_PAD_DI_GP3__DISP1_SER_DIO */
- IMX_PIN_REG(MX51_PAD_DI_GP3, 0x744, 0x33c, 2, 0x000, 0), /* MX51_PAD_DI_GP3__FEC_TX_ER */
- IMX_PIN_REG(MX51_PAD_DI2_PIN4, 0x748, 0x340, 3, 0x99c, 1), /* MX51_PAD_DI2_PIN4__CSI2_DATA_EN */
- IMX_PIN_REG(MX51_PAD_DI2_PIN4, 0x748, 0x340, 0, 0x000, 0), /* MX51_PAD_DI2_PIN4__DI2_PIN4 */
- IMX_PIN_REG(MX51_PAD_DI2_PIN4, 0x748, 0x340, 2, 0x950, 1), /* MX51_PAD_DI2_PIN4__FEC_CRS */
- IMX_PIN_REG(MX51_PAD_DI2_PIN2, 0x74c, 0x344, 0, 0x000, 0), /* MX51_PAD_DI2_PIN2__DI2_PIN2 */
- IMX_PIN_REG(MX51_PAD_DI2_PIN2, 0x74c, 0x344, 2, 0x000, 0), /* MX51_PAD_DI2_PIN2__FEC_MDC */
- IMX_PIN_REG(MX51_PAD_DI2_PIN3, 0x750, 0x348, 0, 0x000, 0), /* MX51_PAD_DI2_PIN3__DI2_PIN3 */
- IMX_PIN_REG(MX51_PAD_DI2_PIN3, 0x750, 0x348, 2, 0x954, 1), /* MX51_PAD_DI2_PIN3__FEC_MDIO */
- IMX_PIN_REG(MX51_PAD_DI2_DISP_CLK, 0x754, 0x34c, 0, 0x000, 0), /* MX51_PAD_DI2_DISP_CLK__DI2_DISP_CLK */
- IMX_PIN_REG(MX51_PAD_DI2_DISP_CLK, 0x754, 0x34c, 2, 0x95c, 1), /* MX51_PAD_DI2_DISP_CLK__FEC_RDATA1 */
- IMX_PIN_REG(MX51_PAD_DI_GP4, 0x758, 0x350, 4, 0x000, 0), /* MX51_PAD_DI_GP4__DI2_PIN15 */
- IMX_PIN_REG(MX51_PAD_DI_GP4, 0x758, 0x350, 0, 0x9c0, 1), /* MX51_PAD_DI_GP4__DISP1_SER_DIN */
- IMX_PIN_REG(MX51_PAD_DI_GP4, 0x758, 0x350, 3, 0x000, 0), /* MX51_PAD_DI_GP4__DISP2_PIN1 */
- IMX_PIN_REG(MX51_PAD_DI_GP4, 0x758, 0x350, 2, 0x960, 1), /* MX51_PAD_DI_GP4__FEC_RDATA2 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT0, 0x75c, 0x354, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT0__DISP2_DAT0 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT0, 0x75c, 0x354, 2, 0x964, 1), /* MX51_PAD_DISP2_DAT0__FEC_RDATA3 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT0, 0x75c, 0x354, 4, 0x9c8, 1), /* MX51_PAD_DISP2_DAT0__KEY_COL6 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT0, 0x75c, 0x354, 5, 0x9f4, 8), /* MX51_PAD_DISP2_DAT0__UART3_RXD */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT0, 0x75c, 0x354, 3, 0x9f8, 1), /* MX51_PAD_DISP2_DAT0__USBH3_CLK */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT1, 0x760, 0x358, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT1__DISP2_DAT1 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT1, 0x760, 0x358, 2, 0x970, 1), /* MX51_PAD_DISP2_DAT1__FEC_RX_ER */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT1, 0x760, 0x358, 4, 0x9cc, 1), /* MX51_PAD_DISP2_DAT1__KEY_COL7 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT1, 0x760, 0x358, 5, 0x000, 0), /* MX51_PAD_DISP2_DAT1__UART3_TXD */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT1, 0x760, 0x358, 3, 0xa1c, 1), /* MX51_PAD_DISP2_DAT1__USBH3_DIR */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT2, 0x764, 0x35c, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT2__DISP2_DAT2 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT3, 0x768, 0x360, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT3__DISP2_DAT3 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT4, 0x76c, 0x364, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT4__DISP2_DAT4 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT5, 0x770, 0x368, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT5__DISP2_DAT5 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT6, 0x774, 0x36c, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT6__DISP2_DAT6 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT6, 0x774, 0x36c, 2, 0x000, 0), /* MX51_PAD_DISP2_DAT6__FEC_TDATA1 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT6, 0x774, 0x36c, 5, 0x000, 0), /* MX51_PAD_DISP2_DAT6__GPIO1_19 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT6, 0x774, 0x36c, 4, 0x9d0, 1), /* MX51_PAD_DISP2_DAT6__KEY_ROW4 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT6, 0x774, 0x36c, 3, 0xa24, 1), /* MX51_PAD_DISP2_DAT6__USBH3_STP */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT7, 0x778, 0x370, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT7__DISP2_DAT7 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT7, 0x778, 0x370, 2, 0x000, 0), /* MX51_PAD_DISP2_DAT7__FEC_TDATA2 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT7, 0x778, 0x370, 5, 0x000, 0), /* MX51_PAD_DISP2_DAT7__GPIO1_29 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT7, 0x778, 0x370, 4, 0x9d4, 1), /* MX51_PAD_DISP2_DAT7__KEY_ROW5 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT7, 0x778, 0x370, 3, 0xa20, 1), /* MX51_PAD_DISP2_DAT7__USBH3_NXT */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT8, 0x77c, 0x374, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT8__DISP2_DAT8 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT8, 0x77c, 0x374, 2, 0x000, 0), /* MX51_PAD_DISP2_DAT8__FEC_TDATA3 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT8, 0x77c, 0x374, 5, 0x000, 0), /* MX51_PAD_DISP2_DAT8__GPIO1_30 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT8, 0x77c, 0x374, 4, 0x9d8, 1), /* MX51_PAD_DISP2_DAT8__KEY_ROW6 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT8, 0x77c, 0x374, 3, 0x9fc, 1), /* MX51_PAD_DISP2_DAT8__USBH3_DATA0 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT9, 0x780, 0x378, 4, 0x8f4, 1), /* MX51_PAD_DISP2_DAT9__AUD6_RXC */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT9, 0x780, 0x378, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT9__DISP2_DAT9 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT9, 0x780, 0x378, 2, 0x000, 0), /* MX51_PAD_DISP2_DAT9__FEC_TX_EN */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT9, 0x780, 0x378, 5, 0x000, 0), /* MX51_PAD_DISP2_DAT9__GPIO1_31 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT9, 0x780, 0x378, 3, 0xa00, 1), /* MX51_PAD_DISP2_DAT9__USBH3_DATA1 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT10, 0x784, 0x37c, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT10__DISP2_DAT10 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT10, 0x784, 0x37c, 5, 0x000, 0), /* MX51_PAD_DISP2_DAT10__DISP2_SER_CS */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT10, 0x784, 0x37c, 2, 0x94c, 1), /* MX51_PAD_DISP2_DAT10__FEC_COL */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT10, 0x784, 0x37c, 4, 0x9dc, 1), /* MX51_PAD_DISP2_DAT10__KEY_ROW7 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT10, 0x784, 0x37c, 3, 0xa04, 1), /* MX51_PAD_DISP2_DAT10__USBH3_DATA2 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT11, 0x788, 0x380, 4, 0x8f0, 1), /* MX51_PAD_DISP2_DAT11__AUD6_TXD */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT11, 0x788, 0x380, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT11__DISP2_DAT11 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT11, 0x788, 0x380, 2, 0x968, 1), /* MX51_PAD_DISP2_DAT11__FEC_RX_CLK */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT11, 0x788, 0x380, 7, 0x000, 0), /* MX51_PAD_DISP2_DAT11__GPIO1_10 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT11, 0x788, 0x380, 3, 0xa08, 1), /* MX51_PAD_DISP2_DAT11__USBH3_DATA3 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT12, 0x78c, 0x384, 4, 0x8ec, 1), /* MX51_PAD_DISP2_DAT12__AUD6_RXD */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT12, 0x78c, 0x384, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT12__DISP2_DAT12 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT12, 0x78c, 0x384, 2, 0x96c, 1), /* MX51_PAD_DISP2_DAT12__FEC_RX_DV */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT12, 0x78c, 0x384, 3, 0xa0c, 1), /* MX51_PAD_DISP2_DAT12__USBH3_DATA4 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT13, 0x790, 0x388, 4, 0x8fc, 1), /* MX51_PAD_DISP2_DAT13__AUD6_TXC */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT13, 0x790, 0x388, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT13__DISP2_DAT13 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT13, 0x790, 0x388, 2, 0x974, 1), /* MX51_PAD_DISP2_DAT13__FEC_TX_CLK */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT13, 0x790, 0x388, 3, 0xa10, 1), /* MX51_PAD_DISP2_DAT13__USBH3_DATA5 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT14, 0x794, 0x38c, 4, 0x900, 1), /* MX51_PAD_DISP2_DAT14__AUD6_TXFS */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT14, 0x794, 0x38c, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT14__DISP2_DAT14 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT14, 0x794, 0x38c, 2, 0x958, 1), /* MX51_PAD_DISP2_DAT14__FEC_RDATA0 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT14, 0x794, 0x38c, 3, 0xa14, 1), /* MX51_PAD_DISP2_DAT14__USBH3_DATA6 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT15, 0x798, 0x390, 4, 0x8f8, 1), /* MX51_PAD_DISP2_DAT15__AUD6_RXFS */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT15, 0x798, 0x390, 5, 0x000, 0), /* MX51_PAD_DISP2_DAT15__DISP1_SER_CS */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT15, 0x798, 0x390, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT15__DISP2_DAT15 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT15, 0x798, 0x390, 2, 0x000, 0), /* MX51_PAD_DISP2_DAT15__FEC_TDATA0 */
- IMX_PIN_REG(MX51_PAD_DISP2_DAT15, 0x798, 0x390, 3, 0xa18, 1), /* MX51_PAD_DISP2_DAT15__USBH3_DATA7 */
- IMX_PIN_REG(MX51_PAD_SD1_CMD, 0x79c, 0x394, 1, 0x8e0, 1), /* MX51_PAD_SD1_CMD__AUD5_RXFS */
- IMX_PIN_REG(MX51_PAD_SD1_CMD, 0x79c, 0x394, 2, 0x91c, 2), /* MX51_PAD_SD1_CMD__CSPI_MOSI */
- IMX_PIN_REG(MX51_PAD_SD1_CMD, 0x79c, 0x394, 0, 0x000, 0), /* MX51_PAD_SD1_CMD__SD1_CMD */
- IMX_PIN_REG(MX51_PAD_SD1_CLK, 0x7a0, 0x398, 1, 0x8dc, 1), /* MX51_PAD_SD1_CLK__AUD5_RXC */
- IMX_PIN_REG(MX51_PAD_SD1_CLK, 0x7a0, 0x398, 2, 0x914, 2), /* MX51_PAD_SD1_CLK__CSPI_SCLK */
- IMX_PIN_REG(MX51_PAD_SD1_CLK, 0x7a0, 0x398, 0, 0x000, 0), /* MX51_PAD_SD1_CLK__SD1_CLK */
- IMX_PIN_REG(MX51_PAD_SD1_DATA0, 0x7a4, 0x39c, 1, 0x8d8, 2), /* MX51_PAD_SD1_DATA0__AUD5_TXD */
- IMX_PIN_REG(MX51_PAD_SD1_DATA0, 0x7a4, 0x39c, 2, 0x918, 1), /* MX51_PAD_SD1_DATA0__CSPI_MISO */
- IMX_PIN_REG(MX51_PAD_SD1_DATA0, 0x7a4, 0x39c, 0, 0x000, 0), /* MX51_PAD_SD1_DATA0__SD1_DATA0 */
- IMX_PIN_REG(MX51_PAD_EIM_DA0, NO_PAD, 0x01c, 0, 0x000, 0), /* MX51_PAD_EIM_DA0__EIM_DA0 */
- IMX_PIN_REG(MX51_PAD_EIM_DA1, NO_PAD, 0x020, 0, 0x000, 0), /* MX51_PAD_EIM_DA1__EIM_DA1 */
- IMX_PIN_REG(MX51_PAD_EIM_DA2, NO_PAD, 0x024, 0, 0x000, 0), /* MX51_PAD_EIM_DA2__EIM_DA2 */
- IMX_PIN_REG(MX51_PAD_EIM_DA3, NO_PAD, 0x028, 0, 0x000, 0), /* MX51_PAD_EIM_DA3__EIM_DA3 */
- IMX_PIN_REG(MX51_PAD_SD1_DATA1, 0x7a8, 0x3a0, 1, 0x8d4, 2), /* MX51_PAD_SD1_DATA1__AUD5_RXD */
- IMX_PIN_REG(MX51_PAD_SD1_DATA1, 0x7a8, 0x3a0, 0, 0x000, 0), /* MX51_PAD_SD1_DATA1__SD1_DATA1 */
- IMX_PIN_REG(MX51_PAD_EIM_DA4, NO_PAD, 0x02c, 0, 0x000, 0), /* MX51_PAD_EIM_DA4__EIM_DA4 */
- IMX_PIN_REG(MX51_PAD_EIM_DA5, NO_PAD, 0x030, 0, 0x000, 0), /* MX51_PAD_EIM_DA5__EIM_DA5 */
- IMX_PIN_REG(MX51_PAD_EIM_DA6, NO_PAD, 0x034, 0, 0x000, 0), /* MX51_PAD_EIM_DA6__EIM_DA6 */
- IMX_PIN_REG(MX51_PAD_EIM_DA7, NO_PAD, 0x038, 0, 0x000, 0), /* MX51_PAD_EIM_DA7__EIM_DA7 */
- IMX_PIN_REG(MX51_PAD_SD1_DATA2, 0x7ac, 0x3a4, 1, 0x8e4, 2), /* MX51_PAD_SD1_DATA2__AUD5_TXC */
- IMX_PIN_REG(MX51_PAD_SD1_DATA2, 0x7ac, 0x3a4, 0, 0x000, 0), /* MX51_PAD_SD1_DATA2__SD1_DATA2 */
- IMX_PIN_REG(MX51_PAD_EIM_DA10, NO_PAD, 0x044, 0, 0x000, 0), /* MX51_PAD_EIM_DA10__EIM_DA10 */
- IMX_PIN_REG(MX51_PAD_EIM_DA11, NO_PAD, 0x048, 0, 0x000, 0), /* MX51_PAD_EIM_DA11__EIM_DA11 */
- IMX_PIN_REG(MX51_PAD_EIM_DA8, NO_PAD, 0x03c, 0, 0x000, 0), /* MX51_PAD_EIM_DA8__EIM_DA8 */
- IMX_PIN_REG(MX51_PAD_EIM_DA9, NO_PAD, 0x040, 0, 0x000, 0), /* MX51_PAD_EIM_DA9__EIM_DA9 */
- IMX_PIN_REG(MX51_PAD_SD1_DATA3, 0x7b0, 0x3a8, 1, 0x8e8, 2), /* MX51_PAD_SD1_DATA3__AUD5_TXFS */
- IMX_PIN_REG(MX51_PAD_SD1_DATA3, 0x7b0, 0x3a8, 2, 0x920, 1), /* MX51_PAD_SD1_DATA3__CSPI_SS1 */
- IMX_PIN_REG(MX51_PAD_SD1_DATA3, 0x7b0, 0x3a8, 0, 0x000, 0), /* MX51_PAD_SD1_DATA3__SD1_DATA3 */
- IMX_PIN_REG(MX51_PAD_GPIO1_0, 0x7b4, 0x3ac, 2, 0x924, 0), /* MX51_PAD_GPIO1_0__CSPI_SS2 */
- IMX_PIN_REG(MX51_PAD_GPIO1_0, 0x7b4, 0x3ac, 1, 0x000, 0), /* MX51_PAD_GPIO1_0__GPIO1_0 */
- IMX_PIN_REG(MX51_PAD_GPIO1_0, 0x7b4, 0x3ac, 0, 0x000, 0), /* MX51_PAD_GPIO1_0__SD1_CD */
- IMX_PIN_REG(MX51_PAD_GPIO1_1, 0x7b8, 0x3b0, 2, 0x918, 2), /* MX51_PAD_GPIO1_1__CSPI_MISO */
- IMX_PIN_REG(MX51_PAD_GPIO1_1, 0x7b8, 0x3b0, 1, 0x000, 0), /* MX51_PAD_GPIO1_1__GPIO1_1 */
- IMX_PIN_REG(MX51_PAD_GPIO1_1, 0x7b8, 0x3b0, 0, 0x000, 0), /* MX51_PAD_GPIO1_1__SD1_WP */
- IMX_PIN_REG(MX51_PAD_EIM_DA12, NO_PAD, 0x04c, 0, 0x000, 0), /* MX51_PAD_EIM_DA12__EIM_DA12 */
- IMX_PIN_REG(MX51_PAD_EIM_DA13, NO_PAD, 0x050, 0, 0x000, 0), /* MX51_PAD_EIM_DA13__EIM_DA13 */
- IMX_PIN_REG(MX51_PAD_EIM_DA14, NO_PAD, 0x054, 0, 0x000, 0), /* MX51_PAD_EIM_DA14__EIM_DA14 */
- IMX_PIN_REG(MX51_PAD_EIM_DA15, NO_PAD, 0x058, 0, 0x000, 0), /* MX51_PAD_EIM_DA15__EIM_DA15 */
- IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 2, 0x91c, 3), /* MX51_PAD_SD2_CMD__CSPI_MOSI */
- IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 1, 0x9b0, 2), /* MX51_PAD_SD2_CMD__I2C1_SCL */
- IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 0, 0x000, 0), /* MX51_PAD_SD2_CMD__SD2_CMD */
- IMX_PIN_REG(MX51_PAD_SD2_CLK, 0x7c0, 0x3b8, 2, 0x914, 3), /* MX51_PAD_SD2_CLK__CSPI_SCLK */
- IMX_PIN_REG(MX51_PAD_SD2_CLK, 0x7c0, 0x3b8, 1, 0x9b4, 2), /* MX51_PAD_SD2_CLK__I2C1_SDA */
- IMX_PIN_REG(MX51_PAD_SD2_CLK, 0x7c0, 0x3b8, 0, 0x000, 0), /* MX51_PAD_SD2_CLK__SD2_CLK */
- IMX_PIN_REG(MX51_PAD_SD2_DATA0, 0x7c4, 0x3bc, 2, 0x918, 3), /* MX51_PAD_SD2_DATA0__CSPI_MISO */
- IMX_PIN_REG(MX51_PAD_SD2_DATA0, 0x7c4, 0x3bc, 1, 0x000, 0), /* MX51_PAD_SD2_DATA0__SD1_DAT4 */
- IMX_PIN_REG(MX51_PAD_SD2_DATA0, 0x7c4, 0x3bc, 0, 0x000, 0), /* MX51_PAD_SD2_DATA0__SD2_DATA0 */
- IMX_PIN_REG(MX51_PAD_SD2_DATA1, 0x7c8, 0x3c0, 1, 0x000, 0), /* MX51_PAD_SD2_DATA1__SD1_DAT5 */
- IMX_PIN_REG(MX51_PAD_SD2_DATA1, 0x7c8, 0x3c0, 0, 0x000, 0), /* MX51_PAD_SD2_DATA1__SD2_DATA1 */
- IMX_PIN_REG(MX51_PAD_SD2_DATA1, 0x7c8, 0x3c0, 2, 0x000, 0), /* MX51_PAD_SD2_DATA1__USBH3_H2_DP */
- IMX_PIN_REG(MX51_PAD_SD2_DATA2, 0x7cc, 0x3c4, 1, 0x000, 0), /* MX51_PAD_SD2_DATA2__SD1_DAT6 */
- IMX_PIN_REG(MX51_PAD_SD2_DATA2, 0x7cc, 0x3c4, 0, 0x000, 0), /* MX51_PAD_SD2_DATA2__SD2_DATA2 */
- IMX_PIN_REG(MX51_PAD_SD2_DATA2, 0x7cc, 0x3c4, 2, 0x000, 0), /* MX51_PAD_SD2_DATA2__USBH3_H2_DM */
- IMX_PIN_REG(MX51_PAD_SD2_DATA3, 0x7d0, 0x3c8, 2, 0x924, 1), /* MX51_PAD_SD2_DATA3__CSPI_SS2 */
- IMX_PIN_REG(MX51_PAD_SD2_DATA3, 0x7d0, 0x3c8, 1, 0x000, 0), /* MX51_PAD_SD2_DATA3__SD1_DAT7 */
- IMX_PIN_REG(MX51_PAD_SD2_DATA3, 0x7d0, 0x3c8, 0, 0x000, 0), /* MX51_PAD_SD2_DATA3__SD2_DATA3 */
- IMX_PIN_REG(MX51_PAD_GPIO1_2, 0x7d4, 0x3cc, 5, 0x000, 0), /* MX51_PAD_GPIO1_2__CCM_OUT_2 */
- IMX_PIN_REG(MX51_PAD_GPIO1_2, 0x7d4, 0x3cc, 0, 0x000, 0), /* MX51_PAD_GPIO1_2__GPIO1_2 */
- IMX_PIN_REG(MX51_PAD_GPIO1_2, 0x7d4, 0x3cc, 2, 0x9b8, 3), /* MX51_PAD_GPIO1_2__I2C2_SCL */
- IMX_PIN_REG(MX51_PAD_GPIO1_2, 0x7d4, 0x3cc, 7, 0x90c, 1), /* MX51_PAD_GPIO1_2__PLL1_BYP */
- IMX_PIN_REG(MX51_PAD_GPIO1_2, 0x7d4, 0x3cc, 1, 0x000, 0), /* MX51_PAD_GPIO1_2__PWM1_PWMO */
- IMX_PIN_REG(MX51_PAD_GPIO1_3, 0x7d8, 0x3d0, 0, 0x000, 0), /* MX51_PAD_GPIO1_3__GPIO1_3 */
- IMX_PIN_REG(MX51_PAD_GPIO1_3, 0x7d8, 0x3d0, 2, 0x9bc, 3), /* MX51_PAD_GPIO1_3__I2C2_SDA */
- IMX_PIN_REG(MX51_PAD_GPIO1_3, 0x7d8, 0x3d0, 7, 0x910, 1), /* MX51_PAD_GPIO1_3__PLL2_BYP */
- IMX_PIN_REG(MX51_PAD_GPIO1_3, 0x7d8, 0x3d0, 1, 0x000, 0), /* MX51_PAD_GPIO1_3__PWM2_PWMO */
- IMX_PIN_REG(MX51_PAD_PMIC_INT_REQ, 0x7fc, 0x3d4, 0, 0x000, 0), /* MX51_PAD_PMIC_INT_REQ__PMIC_INT_REQ */
- IMX_PIN_REG(MX51_PAD_PMIC_INT_REQ, 0x7fc, 0x3d4, 1, 0x000, 0), /* MX51_PAD_PMIC_INT_REQ__PMIC_PMU_IRQ_B */
- IMX_PIN_REG(MX51_PAD_GPIO1_4, 0x804, 0x3d8, 4, 0x908, 1), /* MX51_PAD_GPIO1_4__DISP2_EXT_CLK */
- IMX_PIN_REG(MX51_PAD_GPIO1_4, 0x804, 0x3d8, 3, 0x938, 1), /* MX51_PAD_GPIO1_4__EIM_RDY */
- IMX_PIN_REG(MX51_PAD_GPIO1_4, 0x804, 0x3d8, 0, 0x000, 0), /* MX51_PAD_GPIO1_4__GPIO1_4 */
- IMX_PIN_REG(MX51_PAD_GPIO1_4, 0x804, 0x3d8, 2, 0x000, 0), /* MX51_PAD_GPIO1_4__WDOG1_WDOG_B */
- IMX_PIN_REG(MX51_PAD_GPIO1_5, 0x808, 0x3dc, 6, 0x000, 0), /* MX51_PAD_GPIO1_5__CSI2_MCLK */
- IMX_PIN_REG(MX51_PAD_GPIO1_5, 0x808, 0x3dc, 3, 0x000, 0), /* MX51_PAD_GPIO1_5__DISP2_PIN16 */
- IMX_PIN_REG(MX51_PAD_GPIO1_5, 0x808, 0x3dc, 0, 0x000, 0), /* MX51_PAD_GPIO1_5__GPIO1_5 */
- IMX_PIN_REG(MX51_PAD_GPIO1_5, 0x808, 0x3dc, 2, 0x000, 0), /* MX51_PAD_GPIO1_5__WDOG2_WDOG_B */
- IMX_PIN_REG(MX51_PAD_GPIO1_6, 0x80c, 0x3e0, 4, 0x000, 0), /* MX51_PAD_GPIO1_6__DISP2_PIN17 */
- IMX_PIN_REG(MX51_PAD_GPIO1_6, 0x80c, 0x3e0, 0, 0x000, 0), /* MX51_PAD_GPIO1_6__GPIO1_6 */
- IMX_PIN_REG(MX51_PAD_GPIO1_6, 0x80c, 0x3e0, 3, 0x000, 0), /* MX51_PAD_GPIO1_6__REF_EN_B */
- IMX_PIN_REG(MX51_PAD_GPIO1_7, 0x810, 0x3e4, 3, 0x000, 0), /* MX51_PAD_GPIO1_7__CCM_OUT_0 */
- IMX_PIN_REG(MX51_PAD_GPIO1_7, 0x810, 0x3e4, 0, 0x000, 0), /* MX51_PAD_GPIO1_7__GPIO1_7 */
- IMX_PIN_REG(MX51_PAD_GPIO1_7, 0x810, 0x3e4, 6, 0x000, 0), /* MX51_PAD_GPIO1_7__SD2_WP */
- IMX_PIN_REG(MX51_PAD_GPIO1_7, 0x810, 0x3e4, 2, 0x000, 0), /* MX51_PAD_GPIO1_7__SPDIF_OUT1 */
- IMX_PIN_REG(MX51_PAD_GPIO1_8, 0x814, 0x3e8, 2, 0x99c, 2), /* MX51_PAD_GPIO1_8__CSI2_DATA_EN */
- IMX_PIN_REG(MX51_PAD_GPIO1_8, 0x814, 0x3e8, 0, 0x000, 0), /* MX51_PAD_GPIO1_8__GPIO1_8 */
- IMX_PIN_REG(MX51_PAD_GPIO1_8, 0x814, 0x3e8, 6, 0x000, 0), /* MX51_PAD_GPIO1_8__SD2_CD */
- IMX_PIN_REG(MX51_PAD_GPIO1_8, 0x814, 0x3e8, 1, 0x000, 0), /* MX51_PAD_GPIO1_8__USBH3_PWR */
- IMX_PIN_REG(MX51_PAD_GPIO1_9, 0x818, 0x3ec, 3, 0x000, 0), /* MX51_PAD_GPIO1_9__CCM_OUT_1 */
- IMX_PIN_REG(MX51_PAD_GPIO1_9, 0x818, 0x3ec, 2, 0x000, 0), /* MX51_PAD_GPIO1_9__DISP2_D1_CS */
- IMX_PIN_REG(MX51_PAD_GPIO1_9, 0x818, 0x3ec, 7, 0x000, 0), /* MX51_PAD_GPIO1_9__DISP2_SER_CS */
- IMX_PIN_REG(MX51_PAD_GPIO1_9, 0x818, 0x3ec, 0, 0x000, 0), /* MX51_PAD_GPIO1_9__GPIO1_9 */
- IMX_PIN_REG(MX51_PAD_GPIO1_9, 0x818, 0x3ec, 6, 0x000, 0), /* MX51_PAD_GPIO1_9__SD2_LCTL */
- IMX_PIN_REG(MX51_PAD_GPIO1_9, 0x818, 0x3ec, 1, 0x000, 0), /* MX51_PAD_GPIO1_9__USBH3_OC */
+ MX51_PAD_RESERVE0 = 0,
+ MX51_PAD_RESERVE1 = 1,
+ MX51_PAD_RESERVE2 = 2,
+ MX51_PAD_RESERVE3 = 3,
+ MX51_PAD_RESERVE4 = 4,
+ MX51_PAD_RESERVE5 = 5,
+ MX51_PAD_RESERVE6 = 6,
+ MX51_PAD_EIM_DA0 = 7,
+ MX51_PAD_EIM_DA1 = 8,
+ MX51_PAD_EIM_DA2 = 9,
+ MX51_PAD_EIM_DA3 = 10,
+ MX51_PAD_EIM_DA4 = 11,
+ MX51_PAD_EIM_DA5 = 12,
+ MX51_PAD_EIM_DA6 = 13,
+ MX51_PAD_EIM_DA7 = 14,
+ MX51_PAD_EIM_DA8 = 15,
+ MX51_PAD_EIM_DA9 = 16,
+ MX51_PAD_EIM_DA10 = 17,
+ MX51_PAD_EIM_DA11 = 18,
+ MX51_PAD_EIM_DA12 = 19,
+ MX51_PAD_EIM_DA13 = 20,
+ MX51_PAD_EIM_DA14 = 21,
+ MX51_PAD_EIM_DA15 = 22,
+ MX51_PAD_EIM_D16 = 23,
+ MX51_PAD_EIM_D17 = 24,
+ MX51_PAD_EIM_D18 = 25,
+ MX51_PAD_EIM_D19 = 26,
+ MX51_PAD_EIM_D20 = 27,
+ MX51_PAD_EIM_D21 = 28,
+ MX51_PAD_EIM_D22 = 29,
+ MX51_PAD_EIM_D23 = 30,
+ MX51_PAD_EIM_D24 = 31,
+ MX51_PAD_EIM_D25 = 32,
+ MX51_PAD_EIM_D26 = 33,
+ MX51_PAD_EIM_D27 = 34,
+ MX51_PAD_EIM_D28 = 35,
+ MX51_PAD_EIM_D29 = 36,
+ MX51_PAD_EIM_D30 = 37,
+ MX51_PAD_EIM_D31 = 38,
+ MX51_PAD_EIM_A16 = 39,
+ MX51_PAD_EIM_A17 = 40,
+ MX51_PAD_EIM_A18 = 41,
+ MX51_PAD_EIM_A19 = 42,
+ MX51_PAD_EIM_A20 = 43,
+ MX51_PAD_EIM_A21 = 44,
+ MX51_PAD_EIM_A22 = 45,
+ MX51_PAD_EIM_A23 = 46,
+ MX51_PAD_EIM_A24 = 47,
+ MX51_PAD_EIM_A25 = 48,
+ MX51_PAD_EIM_A26 = 49,
+ MX51_PAD_EIM_A27 = 50,
+ MX51_PAD_EIM_EB0 = 51,
+ MX51_PAD_EIM_EB1 = 52,
+ MX51_PAD_EIM_EB2 = 53,
+ MX51_PAD_EIM_EB3 = 54,
+ MX51_PAD_EIM_OE = 55,
+ MX51_PAD_EIM_CS0 = 56,
+ MX51_PAD_EIM_CS1 = 57,
+ MX51_PAD_EIM_CS2 = 58,
+ MX51_PAD_EIM_CS3 = 59,
+ MX51_PAD_EIM_CS4 = 60,
+ MX51_PAD_EIM_CS5 = 61,
+ MX51_PAD_EIM_DTACK = 62,
+ MX51_PAD_EIM_LBA = 63,
+ MX51_PAD_EIM_CRE = 64,
+ MX51_PAD_DRAM_CS1 = 65,
+ MX51_PAD_NANDF_WE_B = 66,
+ MX51_PAD_NANDF_RE_B = 67,
+ MX51_PAD_NANDF_ALE = 68,
+ MX51_PAD_NANDF_CLE = 69,
+ MX51_PAD_NANDF_WP_B = 70,
+ MX51_PAD_NANDF_RB0 = 71,
+ MX51_PAD_NANDF_RB1 = 72,
+ MX51_PAD_NANDF_RB2 = 73,
+ MX51_PAD_NANDF_RB3 = 74,
+ MX51_PAD_GPIO_NAND = 75,
+ MX51_PAD_NANDF_CS0 = 76,
+ MX51_PAD_NANDF_CS1 = 77,
+ MX51_PAD_NANDF_CS2 = 78,
+ MX51_PAD_NANDF_CS3 = 79,
+ MX51_PAD_NANDF_CS4 = 80,
+ MX51_PAD_NANDF_CS5 = 81,
+ MX51_PAD_NANDF_CS6 = 82,
+ MX51_PAD_NANDF_CS7 = 83,
+ MX51_PAD_NANDF_RDY_INT = 84,
+ MX51_PAD_NANDF_D15 = 85,
+ MX51_PAD_NANDF_D14 = 86,
+ MX51_PAD_NANDF_D13 = 87,
+ MX51_PAD_NANDF_D12 = 88,
+ MX51_PAD_NANDF_D11 = 89,
+ MX51_PAD_NANDF_D10 = 90,
+ MX51_PAD_NANDF_D9 = 91,
+ MX51_PAD_NANDF_D8 = 92,
+ MX51_PAD_NANDF_D7 = 93,
+ MX51_PAD_NANDF_D6 = 94,
+ MX51_PAD_NANDF_D5 = 95,
+ MX51_PAD_NANDF_D4 = 96,
+ MX51_PAD_NANDF_D3 = 97,
+ MX51_PAD_NANDF_D2 = 98,
+ MX51_PAD_NANDF_D1 = 99,
+ MX51_PAD_NANDF_D0 = 100,
+ MX51_PAD_CSI1_D8 = 101,
+ MX51_PAD_CSI1_D9 = 102,
+ MX51_PAD_CSI1_D10 = 103,
+ MX51_PAD_CSI1_D11 = 104,
+ MX51_PAD_CSI1_D12 = 105,
+ MX51_PAD_CSI1_D13 = 106,
+ MX51_PAD_CSI1_D14 = 107,
+ MX51_PAD_CSI1_D15 = 108,
+ MX51_PAD_CSI1_D16 = 109,
+ MX51_PAD_CSI1_D17 = 110,
+ MX51_PAD_CSI1_D18 = 111,
+ MX51_PAD_CSI1_D19 = 112,
+ MX51_PAD_CSI1_VSYNC = 113,
+ MX51_PAD_CSI1_HSYNC = 114,
+ MX51_PAD_CSI2_D12 = 115,
+ MX51_PAD_CSI2_D13 = 116,
+ MX51_PAD_CSI2_D14 = 117,
+ MX51_PAD_CSI2_D15 = 118,
+ MX51_PAD_CSI2_D16 = 119,
+ MX51_PAD_CSI2_D17 = 120,
+ MX51_PAD_CSI2_D18 = 121,
+ MX51_PAD_CSI2_D19 = 122,
+ MX51_PAD_CSI2_VSYNC = 123,
+ MX51_PAD_CSI2_HSYNC = 124,
+ MX51_PAD_CSI2_PIXCLK = 125,
+ MX51_PAD_I2C1_CLK = 126,
+ MX51_PAD_I2C1_DAT = 127,
+ MX51_PAD_AUD3_BB_TXD = 128,
+ MX51_PAD_AUD3_BB_RXD = 129,
+ MX51_PAD_AUD3_BB_CK = 130,
+ MX51_PAD_AUD3_BB_FS = 131,
+ MX51_PAD_CSPI1_MOSI = 132,
+ MX51_PAD_CSPI1_MISO = 133,
+ MX51_PAD_CSPI1_SS0 = 134,
+ MX51_PAD_CSPI1_SS1 = 135,
+ MX51_PAD_CSPI1_RDY = 136,
+ MX51_PAD_CSPI1_SCLK = 137,
+ MX51_PAD_UART1_RXD = 138,
+ MX51_PAD_UART1_TXD = 139,
+ MX51_PAD_UART1_RTS = 140,
+ MX51_PAD_UART1_CTS = 141,
+ MX51_PAD_UART2_RXD = 142,
+ MX51_PAD_UART2_TXD = 143,
+ MX51_PAD_UART3_RXD = 144,
+ MX51_PAD_UART3_TXD = 145,
+ MX51_PAD_OWIRE_LINE = 146,
+ MX51_PAD_KEY_ROW0 = 147,
+ MX51_PAD_KEY_ROW1 = 148,
+ MX51_PAD_KEY_ROW2 = 149,
+ MX51_PAD_KEY_ROW3 = 150,
+ MX51_PAD_KEY_COL0 = 151,
+ MX51_PAD_KEY_COL1 = 152,
+ MX51_PAD_KEY_COL2 = 153,
+ MX51_PAD_KEY_COL3 = 154,
+ MX51_PAD_KEY_COL4 = 155,
+ MX51_PAD_KEY_COL5 = 156,
+ MX51_PAD_RESERVE7 = 157,
+ MX51_PAD_USBH1_CLK = 158,
+ MX51_PAD_USBH1_DIR = 159,
+ MX51_PAD_USBH1_STP = 160,
+ MX51_PAD_USBH1_NXT = 161,
+ MX51_PAD_USBH1_DATA0 = 162,
+ MX51_PAD_USBH1_DATA1 = 163,
+ MX51_PAD_USBH1_DATA2 = 164,
+ MX51_PAD_USBH1_DATA3 = 165,
+ MX51_PAD_USBH1_DATA4 = 166,
+ MX51_PAD_USBH1_DATA5 = 167,
+ MX51_PAD_USBH1_DATA6 = 168,
+ MX51_PAD_USBH1_DATA7 = 169,
+ MX51_PAD_DI1_PIN11 = 170,
+ MX51_PAD_DI1_PIN12 = 171,
+ MX51_PAD_DI1_PIN13 = 172,
+ MX51_PAD_DI1_D0_CS = 173,
+ MX51_PAD_DI1_D1_CS = 174,
+ MX51_PAD_DISPB2_SER_DIN = 175,
+ MX51_PAD_DISPB2_SER_DIO = 176,
+ MX51_PAD_DISPB2_SER_CLK = 177,
+ MX51_PAD_DISPB2_SER_RS = 178,
+ MX51_PAD_DISP1_DAT0 = 179,
+ MX51_PAD_DISP1_DAT1 = 180,
+ MX51_PAD_DISP1_DAT2 = 181,
+ MX51_PAD_DISP1_DAT3 = 182,
+ MX51_PAD_DISP1_DAT4 = 183,
+ MX51_PAD_DISP1_DAT5 = 184,
+ MX51_PAD_DISP1_DAT6 = 185,
+ MX51_PAD_DISP1_DAT7 = 186,
+ MX51_PAD_DISP1_DAT8 = 187,
+ MX51_PAD_DISP1_DAT9 = 188,
+ MX51_PAD_DISP1_DAT10 = 189,
+ MX51_PAD_DISP1_DAT11 = 190,
+ MX51_PAD_DISP1_DAT12 = 191,
+ MX51_PAD_DISP1_DAT13 = 192,
+ MX51_PAD_DISP1_DAT14 = 193,
+ MX51_PAD_DISP1_DAT15 = 194,
+ MX51_PAD_DISP1_DAT16 = 195,
+ MX51_PAD_DISP1_DAT17 = 196,
+ MX51_PAD_DISP1_DAT18 = 197,
+ MX51_PAD_DISP1_DAT19 = 198,
+ MX51_PAD_DISP1_DAT20 = 199,
+ MX51_PAD_DISP1_DAT21 = 200,
+ MX51_PAD_DISP1_DAT22 = 201,
+ MX51_PAD_DISP1_DAT23 = 202,
+ MX51_PAD_DI1_PIN3 = 203,
+ MX51_PAD_DI1_PIN2 = 204,
+ MX51_PAD_RESERVE8 = 205,
+ MX51_PAD_DI_GP2 = 206,
+ MX51_PAD_DI_GP3 = 207,
+ MX51_PAD_DI2_PIN4 = 208,
+ MX51_PAD_DI2_PIN2 = 209,
+ MX51_PAD_DI2_PIN3 = 210,
+ MX51_PAD_DI2_DISP_CLK = 211,
+ MX51_PAD_DI_GP4 = 212,
+ MX51_PAD_DISP2_DAT0 = 213,
+ MX51_PAD_DISP2_DAT1 = 214,
+ MX51_PAD_DISP2_DAT2 = 215,
+ MX51_PAD_DISP2_DAT3 = 216,
+ MX51_PAD_DISP2_DAT4 = 217,
+ MX51_PAD_DISP2_DAT5 = 218,
+ MX51_PAD_DISP2_DAT6 = 219,
+ MX51_PAD_DISP2_DAT7 = 220,
+ MX51_PAD_DISP2_DAT8 = 221,
+ MX51_PAD_DISP2_DAT9 = 222,
+ MX51_PAD_DISP2_DAT10 = 223,
+ MX51_PAD_DISP2_DAT11 = 224,
+ MX51_PAD_DISP2_DAT12 = 225,
+ MX51_PAD_DISP2_DAT13 = 226,
+ MX51_PAD_DISP2_DAT14 = 227,
+ MX51_PAD_DISP2_DAT15 = 228,
+ MX51_PAD_SD1_CMD = 229,
+ MX51_PAD_SD1_CLK = 230,
+ MX51_PAD_SD1_DATA0 = 231,
+ MX51_PAD_SD1_DATA1 = 232,
+ MX51_PAD_SD1_DATA2 = 233,
+ MX51_PAD_SD1_DATA3 = 234,
+ MX51_PAD_GPIO1_0 = 235,
+ MX51_PAD_GPIO1_1 = 236,
+ MX51_PAD_SD2_CMD = 237,
+ MX51_PAD_SD2_CLK = 238,
+ MX51_PAD_SD2_DATA0 = 239,
+ MX51_PAD_SD2_DATA1 = 240,
+ MX51_PAD_SD2_DATA2 = 241,
+ MX51_PAD_SD2_DATA3 = 242,
+ MX51_PAD_GPIO1_2 = 243,
+ MX51_PAD_GPIO1_3 = 244,
+ MX51_PAD_PMIC_INT_REQ = 245,
+ MX51_PAD_GPIO1_4 = 246,
+ MX51_PAD_GPIO1_5 = 247,
+ MX51_PAD_GPIO1_6 = 248,
+ MX51_PAD_GPIO1_7 = 249,
+ MX51_PAD_GPIO1_8 = 250,
+ MX51_PAD_GPIO1_9 = 251,
+ MX51_PAD_RESERVE9 = 252,
+ MX51_PAD_RESERVE10 = 253,
+ MX51_PAD_RESERVE11 = 254,
+ MX51_PAD_RESERVE12 = 255,
+ MX51_PAD_RESERVE13 = 256,
+ MX51_PAD_RESERVE14 = 257,
+ MX51_PAD_RESERVE15 = 258,
+ MX51_PAD_RESERVE16 = 259,
+ MX51_PAD_RESERVE17 = 260,
+ MX51_PAD_RESERVE18 = 261,
+ MX51_PAD_RESERVE19 = 262,
+ MX51_PAD_RESERVE20 = 263,
+ MX51_PAD_RESERVE21 = 264,
+ MX51_PAD_RESERVE22 = 265,
+ MX51_PAD_RESERVE23 = 266,
+ MX51_PAD_RESERVE24 = 267,
+ MX51_PAD_RESERVE25 = 268,
+ MX51_PAD_RESERVE26 = 269,
+ MX51_PAD_RESERVE27 = 270,
+ MX51_PAD_RESERVE28 = 271,
+ MX51_PAD_RESERVE29 = 272,
+ MX51_PAD_RESERVE30 = 273,
+ MX51_PAD_RESERVE31 = 274,
+ MX51_PAD_RESERVE32 = 275,
+ MX51_PAD_RESERVE33 = 276,
+ MX51_PAD_RESERVE34 = 277,
+ MX51_PAD_RESERVE35 = 278,
+ MX51_PAD_RESERVE36 = 279,
+ MX51_PAD_RESERVE37 = 280,
+ MX51_PAD_RESERVE38 = 281,
+ MX51_PAD_RESERVE39 = 282,
+ MX51_PAD_RESERVE40 = 283,
+ MX51_PAD_RESERVE41 = 284,
+ MX51_PAD_RESERVE42 = 285,
+ MX51_PAD_RESERVE43 = 286,
+ MX51_PAD_RESERVE44 = 287,
+ MX51_PAD_RESERVE45 = 288,
+ MX51_PAD_RESERVE46 = 289,
+ MX51_PAD_RESERVE47 = 290,
+ MX51_PAD_RESERVE48 = 291,
+ MX51_PAD_RESERVE49 = 292,
+ MX51_PAD_RESERVE50 = 293,
+ MX51_PAD_RESERVE51 = 294,
+ MX51_PAD_RESERVE52 = 295,
+ MX51_PAD_RESERVE53 = 296,
+ MX51_PAD_RESERVE54 = 297,
+ MX51_PAD_RESERVE55 = 298,
+ MX51_PAD_RESERVE56 = 299,
+ MX51_PAD_RESERVE57 = 300,
+ MX51_PAD_RESERVE58 = 301,
+ MX51_PAD_RESERVE59 = 302,
+ MX51_PAD_RESERVE60 = 303,
+ MX51_PAD_RESERVE61 = 304,
+ MX51_PAD_RESERVE62 = 305,
+ MX51_PAD_RESERVE63 = 306,
+ MX51_PAD_RESERVE64 = 307,
+ MX51_PAD_RESERVE65 = 308,
+ MX51_PAD_RESERVE66 = 309,
+ MX51_PAD_RESERVE67 = 310,
+ MX51_PAD_RESERVE68 = 311,
+ MX51_PAD_RESERVE69 = 312,
+ MX51_PAD_RESERVE70 = 313,
+ MX51_PAD_RESERVE71 = 314,
+ MX51_PAD_RESERVE72 = 315,
+ MX51_PAD_RESERVE73 = 316,
+ MX51_PAD_RESERVE74 = 317,
+ MX51_PAD_RESERVE75 = 318,
+ MX51_PAD_RESERVE76 = 319,
+ MX51_PAD_RESERVE77 = 320,
+ MX51_PAD_RESERVE78 = 321,
+ MX51_PAD_RESERVE79 = 322,
+ MX51_PAD_RESERVE80 = 323,
+ MX51_PAD_RESERVE81 = 324,
+ MX51_PAD_RESERVE82 = 325,
+ MX51_PAD_RESERVE83 = 326,
+ MX51_PAD_RESERVE84 = 327,
+ MX51_PAD_RESERVE85 = 328,
+ MX51_PAD_RESERVE86 = 329,
+ MX51_PAD_RESERVE87 = 330,
+ MX51_PAD_RESERVE88 = 331,
+ MX51_PAD_RESERVE89 = 332,
+ MX51_PAD_RESERVE90 = 333,
+ MX51_PAD_RESERVE91 = 334,
+ MX51_PAD_RESERVE92 = 335,
+ MX51_PAD_RESERVE93 = 336,
+ MX51_PAD_RESERVE94 = 337,
+ MX51_PAD_RESERVE95 = 338,
+ MX51_PAD_RESERVE96 = 339,
+ MX51_PAD_RESERVE97 = 340,
+ MX51_PAD_RESERVE98 = 341,
+ MX51_PAD_RESERVE99 = 342,
+ MX51_PAD_RESERVE100 = 343,
+ MX51_PAD_RESERVE101 = 344,
+ MX51_PAD_RESERVE102 = 345,
+ MX51_PAD_RESERVE103 = 346,
+ MX51_PAD_RESERVE104 = 347,
+ MX51_PAD_RESERVE105 = 348,
+ MX51_PAD_RESERVE106 = 349,
+ MX51_PAD_RESERVE107 = 350,
+ MX51_PAD_RESERVE108 = 351,
+ MX51_PAD_RESERVE109 = 352,
+ MX51_PAD_RESERVE110 = 353,
+ MX51_PAD_RESERVE111 = 354,
+ MX51_PAD_RESERVE112 = 355,
+ MX51_PAD_RESERVE113 = 356,
+ MX51_PAD_RESERVE114 = 357,
+ MX51_PAD_RESERVE115 = 358,
+ MX51_PAD_RESERVE116 = 359,
+ MX51_PAD_RESERVE117 = 360,
+ MX51_PAD_RESERVE118 = 361,
+ MX51_PAD_RESERVE119 = 362,
+ MX51_PAD_RESERVE120 = 363,
+ MX51_PAD_RESERVE121 = 364,
+ MX51_PAD_CSI1_PIXCLK = 365,
+ MX51_PAD_CSI1_MCLK = 366,
};
/* Pad names for the pinmux subsystem */
static const struct pinctrl_pin_desc imx51_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE0),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE1),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE2),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE3),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE4),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE5),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE6),
+ IMX_PINCTRL_PIN(MX51_PAD_EIM_DA0),
+ IMX_PINCTRL_PIN(MX51_PAD_EIM_DA1),
+ IMX_PINCTRL_PIN(MX51_PAD_EIM_DA2),
+ IMX_PINCTRL_PIN(MX51_PAD_EIM_DA3),
+ IMX_PINCTRL_PIN(MX51_PAD_EIM_DA4),
+ IMX_PINCTRL_PIN(MX51_PAD_EIM_DA5),
+ IMX_PINCTRL_PIN(MX51_PAD_EIM_DA6),
+ IMX_PINCTRL_PIN(MX51_PAD_EIM_DA7),
+ IMX_PINCTRL_PIN(MX51_PAD_EIM_DA8),
+ IMX_PINCTRL_PIN(MX51_PAD_EIM_DA9),
+ IMX_PINCTRL_PIN(MX51_PAD_EIM_DA10),
+ IMX_PINCTRL_PIN(MX51_PAD_EIM_DA11),
+ IMX_PINCTRL_PIN(MX51_PAD_EIM_DA12),
+ IMX_PINCTRL_PIN(MX51_PAD_EIM_DA13),
+ IMX_PINCTRL_PIN(MX51_PAD_EIM_DA14),
+ IMX_PINCTRL_PIN(MX51_PAD_EIM_DA15),
IMX_PINCTRL_PIN(MX51_PAD_EIM_D16),
IMX_PINCTRL_PIN(MX51_PAD_EIM_D17),
IMX_PINCTRL_PIN(MX51_PAD_EIM_D18),
@@ -1124,8 +509,6 @@ static const struct pinctrl_pin_desc imx51_pinctrl_pads[] = {
IMX_PINCTRL_PIN(MX51_PAD_CSI1_D19),
IMX_PINCTRL_PIN(MX51_PAD_CSI1_VSYNC),
IMX_PINCTRL_PIN(MX51_PAD_CSI1_HSYNC),
- IMX_PINCTRL_PIN(MX51_PAD_CSI1_PIXCLK),
- IMX_PINCTRL_PIN(MX51_PAD_CSI1_MCLK),
IMX_PINCTRL_PIN(MX51_PAD_CSI2_D12),
IMX_PINCTRL_PIN(MX51_PAD_CSI2_D13),
IMX_PINCTRL_PIN(MX51_PAD_CSI2_D14),
@@ -1168,6 +551,7 @@ static const struct pinctrl_pin_desc imx51_pinctrl_pads[] = {
IMX_PINCTRL_PIN(MX51_PAD_KEY_COL3),
IMX_PINCTRL_PIN(MX51_PAD_KEY_COL4),
IMX_PINCTRL_PIN(MX51_PAD_KEY_COL5),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE7),
IMX_PINCTRL_PIN(MX51_PAD_USBH1_CLK),
IMX_PINCTRL_PIN(MX51_PAD_USBH1_DIR),
IMX_PINCTRL_PIN(MX51_PAD_USBH1_STP),
@@ -1215,6 +599,7 @@ static const struct pinctrl_pin_desc imx51_pinctrl_pads[] = {
IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT23),
IMX_PINCTRL_PIN(MX51_PAD_DI1_PIN3),
IMX_PINCTRL_PIN(MX51_PAD_DI1_PIN2),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE8),
IMX_PINCTRL_PIN(MX51_PAD_DI_GP2),
IMX_PINCTRL_PIN(MX51_PAD_DI_GP3),
IMX_PINCTRL_PIN(MX51_PAD_DI2_PIN4),
@@ -1241,27 +626,11 @@ static const struct pinctrl_pin_desc imx51_pinctrl_pads[] = {
IMX_PINCTRL_PIN(MX51_PAD_SD1_CMD),
IMX_PINCTRL_PIN(MX51_PAD_SD1_CLK),
IMX_PINCTRL_PIN(MX51_PAD_SD1_DATA0),
- IMX_PINCTRL_PIN(MX51_PAD_EIM_DA0),
- IMX_PINCTRL_PIN(MX51_PAD_EIM_DA1),
- IMX_PINCTRL_PIN(MX51_PAD_EIM_DA2),
- IMX_PINCTRL_PIN(MX51_PAD_EIM_DA3),
IMX_PINCTRL_PIN(MX51_PAD_SD1_DATA1),
- IMX_PINCTRL_PIN(MX51_PAD_EIM_DA4),
- IMX_PINCTRL_PIN(MX51_PAD_EIM_DA5),
- IMX_PINCTRL_PIN(MX51_PAD_EIM_DA6),
- IMX_PINCTRL_PIN(MX51_PAD_EIM_DA7),
IMX_PINCTRL_PIN(MX51_PAD_SD1_DATA2),
- IMX_PINCTRL_PIN(MX51_PAD_EIM_DA10),
- IMX_PINCTRL_PIN(MX51_PAD_EIM_DA11),
- IMX_PINCTRL_PIN(MX51_PAD_EIM_DA8),
- IMX_PINCTRL_PIN(MX51_PAD_EIM_DA9),
IMX_PINCTRL_PIN(MX51_PAD_SD1_DATA3),
IMX_PINCTRL_PIN(MX51_PAD_GPIO1_0),
IMX_PINCTRL_PIN(MX51_PAD_GPIO1_1),
- IMX_PINCTRL_PIN(MX51_PAD_EIM_DA12),
- IMX_PINCTRL_PIN(MX51_PAD_EIM_DA13),
- IMX_PINCTRL_PIN(MX51_PAD_EIM_DA14),
- IMX_PINCTRL_PIN(MX51_PAD_EIM_DA15),
IMX_PINCTRL_PIN(MX51_PAD_SD2_CMD),
IMX_PINCTRL_PIN(MX51_PAD_SD2_CLK),
IMX_PINCTRL_PIN(MX51_PAD_SD2_DATA0),
@@ -1277,13 +646,126 @@ static const struct pinctrl_pin_desc imx51_pinctrl_pads[] = {
IMX_PINCTRL_PIN(MX51_PAD_GPIO1_7),
IMX_PINCTRL_PIN(MX51_PAD_GPIO1_8),
IMX_PINCTRL_PIN(MX51_PAD_GPIO1_9),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE9),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE10),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE11),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE12),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE13),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE14),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE15),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE16),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE17),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE18),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE19),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE20),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE21),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE22),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE23),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE24),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE25),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE26),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE27),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE28),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE29),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE30),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE31),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE32),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE33),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE34),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE35),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE36),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE37),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE38),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE39),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE40),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE41),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE42),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE43),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE44),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE45),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE46),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE47),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE48),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE49),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE50),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE51),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE52),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE53),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE54),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE55),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE56),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE57),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE58),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE59),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE60),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE61),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE62),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE63),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE64),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE65),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE66),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE67),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE68),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE69),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE70),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE71),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE72),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE73),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE74),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE75),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE76),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE77),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE78),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE79),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE80),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE81),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE82),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE83),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE84),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE85),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE86),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE87),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE88),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE89),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE90),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE91),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE92),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE93),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE94),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE95),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE96),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE97),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE98),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE99),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE100),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE101),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE102),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE103),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE104),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE105),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE106),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE107),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE108),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE109),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE110),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE111),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE112),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE113),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE114),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE115),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE116),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE117),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE118),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE119),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE120),
+ IMX_PINCTRL_PIN(MX51_PAD_RESERVE121),
+ IMX_PINCTRL_PIN(MX51_PAD_CSI1_PIXCLK),
+ IMX_PINCTRL_PIN(MX51_PAD_CSI1_MCLK),
};

static struct imx_pinctrl_soc_info imx51_pinctrl_info = {
.pins = imx51_pinctrl_pads,
.npins = ARRAY_SIZE(imx51_pinctrl_pads),
- .pin_regs = imx51_pin_regs,
- .npin_regs = ARRAY_SIZE(imx51_pin_regs),
};

static struct of_device_id imx51_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/pinctrl-imx53.c b/drivers/pinctrl/pinctrl-imx53.c
index 2c9c8e2..17562ae 100644
--- a/drivers/pinctrl/pinctrl-imx53.c
+++ b/drivers/pinctrl/pinctrl-imx53.c
@@ -23,1386 +23,228 @@
#include "pinctrl-imx.h"

enum imx53_pads {
- MX53_PAD_GPIO_19 = 0,
- MX53_PAD_KEY_COL0 = 1,
- MX53_PAD_KEY_ROW0 = 2,
- MX53_PAD_KEY_COL1 = 3,
- MX53_PAD_KEY_ROW1 = 4,
- MX53_PAD_KEY_COL2 = 5,
- MX53_PAD_KEY_ROW2 = 6,
- MX53_PAD_KEY_COL3 = 7,
- MX53_PAD_KEY_ROW3 = 8,
- MX53_PAD_KEY_COL4 = 9,
- MX53_PAD_KEY_ROW4 = 10,
- MX53_PAD_DI0_DISP_CLK = 11,
- MX53_PAD_DI0_PIN15 = 12,
- MX53_PAD_DI0_PIN2 = 13,
- MX53_PAD_DI0_PIN3 = 14,
- MX53_PAD_DI0_PIN4 = 15,
- MX53_PAD_DISP0_DAT0 = 16,
- MX53_PAD_DISP0_DAT1 = 17,
- MX53_PAD_DISP0_DAT2 = 18,
- MX53_PAD_DISP0_DAT3 = 19,
- MX53_PAD_DISP0_DAT4 = 20,
- MX53_PAD_DISP0_DAT5 = 21,
- MX53_PAD_DISP0_DAT6 = 22,
- MX53_PAD_DISP0_DAT7 = 23,
- MX53_PAD_DISP0_DAT8 = 24,
- MX53_PAD_DISP0_DAT9 = 25,
- MX53_PAD_DISP0_DAT10 = 26,
- MX53_PAD_DISP0_DAT11 = 27,
- MX53_PAD_DISP0_DAT12 = 28,
- MX53_PAD_DISP0_DAT13 = 29,
- MX53_PAD_DISP0_DAT14 = 30,
- MX53_PAD_DISP0_DAT15 = 31,
- MX53_PAD_DISP0_DAT16 = 32,
- MX53_PAD_DISP0_DAT17 = 33,
- MX53_PAD_DISP0_DAT18 = 34,
- MX53_PAD_DISP0_DAT19 = 35,
- MX53_PAD_DISP0_DAT20 = 36,
- MX53_PAD_DISP0_DAT21 = 37,
- MX53_PAD_DISP0_DAT22 = 38,
- MX53_PAD_DISP0_DAT23 = 39,
- MX53_PAD_CSI0_PIXCLK = 40,
- MX53_PAD_CSI0_MCLK = 41,
- MX53_PAD_CSI0_DATA_EN = 42,
- MX53_PAD_CSI0_VSYNC = 43,
- MX53_PAD_CSI0_DAT4 = 44,
- MX53_PAD_CSI0_DAT5 = 45,
- MX53_PAD_CSI0_DAT6 = 46,
- MX53_PAD_CSI0_DAT7 = 47,
- MX53_PAD_CSI0_DAT8 = 48,
- MX53_PAD_CSI0_DAT9 = 49,
- MX53_PAD_CSI0_DAT10 = 50,
- MX53_PAD_CSI0_DAT11 = 51,
- MX53_PAD_CSI0_DAT12 = 52,
- MX53_PAD_CSI0_DAT13 = 53,
- MX53_PAD_CSI0_DAT14 = 54,
- MX53_PAD_CSI0_DAT15 = 55,
- MX53_PAD_CSI0_DAT16 = 56,
- MX53_PAD_CSI0_DAT17 = 57,
- MX53_PAD_CSI0_DAT18 = 58,
- MX53_PAD_CSI0_DAT19 = 59,
- MX53_PAD_EIM_A25 = 60,
- MX53_PAD_EIM_EB2 = 61,
- MX53_PAD_EIM_D16 = 62,
- MX53_PAD_EIM_D17 = 63,
- MX53_PAD_EIM_D18 = 64,
- MX53_PAD_EIM_D19 = 65,
- MX53_PAD_EIM_D20 = 66,
- MX53_PAD_EIM_D21 = 67,
- MX53_PAD_EIM_D22 = 68,
- MX53_PAD_EIM_D23 = 69,
- MX53_PAD_EIM_EB3 = 70,
- MX53_PAD_EIM_D24 = 71,
- MX53_PAD_EIM_D25 = 72,
- MX53_PAD_EIM_D26 = 73,
- MX53_PAD_EIM_D27 = 74,
- MX53_PAD_EIM_D28 = 75,
- MX53_PAD_EIM_D29 = 76,
- MX53_PAD_EIM_D30 = 77,
- MX53_PAD_EIM_D31 = 78,
- MX53_PAD_EIM_A24 = 79,
- MX53_PAD_EIM_A23 = 80,
- MX53_PAD_EIM_A22 = 81,
- MX53_PAD_EIM_A21 = 82,
- MX53_PAD_EIM_A20 = 83,
- MX53_PAD_EIM_A19 = 84,
- MX53_PAD_EIM_A18 = 85,
- MX53_PAD_EIM_A17 = 86,
- MX53_PAD_EIM_A16 = 87,
- MX53_PAD_EIM_CS0 = 88,
- MX53_PAD_EIM_CS1 = 89,
- MX53_PAD_EIM_OE = 90,
- MX53_PAD_EIM_RW = 91,
- MX53_PAD_EIM_LBA = 92,
- MX53_PAD_EIM_EB0 = 93,
- MX53_PAD_EIM_EB1 = 94,
- MX53_PAD_EIM_DA0 = 95,
- MX53_PAD_EIM_DA1 = 96,
- MX53_PAD_EIM_DA2 = 97,
- MX53_PAD_EIM_DA3 = 98,
- MX53_PAD_EIM_DA4 = 99,
- MX53_PAD_EIM_DA5 = 100,
- MX53_PAD_EIM_DA6 = 101,
- MX53_PAD_EIM_DA7 = 102,
- MX53_PAD_EIM_DA8 = 103,
- MX53_PAD_EIM_DA9 = 104,
- MX53_PAD_EIM_DA10 = 105,
- MX53_PAD_EIM_DA11 = 106,
- MX53_PAD_EIM_DA12 = 107,
- MX53_PAD_EIM_DA13 = 108,
- MX53_PAD_EIM_DA14 = 109,
- MX53_PAD_EIM_DA15 = 110,
- MX53_PAD_NANDF_WE_B = 111,
- MX53_PAD_NANDF_RE_B = 112,
- MX53_PAD_EIM_WAIT = 113,
- MX53_PAD_LVDS1_TX3_P = 114,
- MX53_PAD_LVDS1_TX2_P = 115,
- MX53_PAD_LVDS1_CLK_P = 116,
- MX53_PAD_LVDS1_TX1_P = 117,
- MX53_PAD_LVDS1_TX0_P = 118,
- MX53_PAD_LVDS0_TX3_P = 119,
- MX53_PAD_LVDS0_CLK_P = 120,
- MX53_PAD_LVDS0_TX2_P = 121,
- MX53_PAD_LVDS0_TX1_P = 122,
- MX53_PAD_LVDS0_TX0_P = 123,
- MX53_PAD_GPIO_10 = 124,
- MX53_PAD_GPIO_11 = 125,
- MX53_PAD_GPIO_12 = 126,
- MX53_PAD_GPIO_13 = 127,
- MX53_PAD_GPIO_14 = 128,
- MX53_PAD_NANDF_CLE = 129,
- MX53_PAD_NANDF_ALE = 130,
- MX53_PAD_NANDF_WP_B = 131,
- MX53_PAD_NANDF_RB0 = 132,
- MX53_PAD_NANDF_CS0 = 133,
- MX53_PAD_NANDF_CS1 = 134,
- MX53_PAD_NANDF_CS2 = 135,
- MX53_PAD_NANDF_CS3 = 136,
- MX53_PAD_FEC_MDIO = 137,
- MX53_PAD_FEC_REF_CLK = 138,
- MX53_PAD_FEC_RX_ER = 139,
- MX53_PAD_FEC_CRS_DV = 140,
- MX53_PAD_FEC_RXD1 = 141,
- MX53_PAD_FEC_RXD0 = 142,
- MX53_PAD_FEC_TX_EN = 143,
- MX53_PAD_FEC_TXD1 = 144,
- MX53_PAD_FEC_TXD0 = 145,
- MX53_PAD_FEC_MDC = 146,
- MX53_PAD_PATA_DIOW = 147,
- MX53_PAD_PATA_DMACK = 148,
- MX53_PAD_PATA_DMARQ = 149,
- MX53_PAD_PATA_BUFFER_EN = 150,
- MX53_PAD_PATA_INTRQ = 151,
- MX53_PAD_PATA_DIOR = 152,
- MX53_PAD_PATA_RESET_B = 153,
- MX53_PAD_PATA_IORDY = 154,
- MX53_PAD_PATA_DA_0 = 155,
- MX53_PAD_PATA_DA_1 = 156,
- MX53_PAD_PATA_DA_2 = 157,
- MX53_PAD_PATA_CS_0 = 158,
- MX53_PAD_PATA_CS_1 = 159,
- MX53_PAD_PATA_DATA0 = 160,
- MX53_PAD_PATA_DATA1 = 161,
- MX53_PAD_PATA_DATA2 = 162,
- MX53_PAD_PATA_DATA3 = 163,
- MX53_PAD_PATA_DATA4 = 164,
- MX53_PAD_PATA_DATA5 = 165,
- MX53_PAD_PATA_DATA6 = 166,
- MX53_PAD_PATA_DATA7 = 167,
- MX53_PAD_PATA_DATA8 = 168,
- MX53_PAD_PATA_DATA9 = 169,
- MX53_PAD_PATA_DATA10 = 170,
- MX53_PAD_PATA_DATA11 = 171,
- MX53_PAD_PATA_DATA12 = 172,
- MX53_PAD_PATA_DATA13 = 173,
- MX53_PAD_PATA_DATA14 = 174,
- MX53_PAD_PATA_DATA15 = 175,
- MX53_PAD_SD1_DATA0 = 176,
- MX53_PAD_SD1_DATA1 = 177,
- MX53_PAD_SD1_CMD = 178,
- MX53_PAD_SD1_DATA2 = 179,
- MX53_PAD_SD1_CLK = 180,
- MX53_PAD_SD1_DATA3 = 181,
- MX53_PAD_SD2_CLK = 182,
- MX53_PAD_SD2_CMD = 183,
- MX53_PAD_SD2_DATA3 = 184,
- MX53_PAD_SD2_DATA2 = 185,
- MX53_PAD_SD2_DATA1 = 186,
- MX53_PAD_SD2_DATA0 = 187,
- MX53_PAD_GPIO_0 = 188,
- MX53_PAD_GPIO_1 = 189,
- MX53_PAD_GPIO_9 = 190,
- MX53_PAD_GPIO_3 = 191,
- MX53_PAD_GPIO_6 = 192,
- MX53_PAD_GPIO_2 = 193,
- MX53_PAD_GPIO_4 = 194,
- MX53_PAD_GPIO_5 = 195,
- MX53_PAD_GPIO_7 = 196,
- MX53_PAD_GPIO_8 = 197,
- MX53_PAD_GPIO_16 = 198,
- MX53_PAD_GPIO_17 = 199,
- MX53_PAD_GPIO_18 = 200,
-};
-
-/* imx53 register maps */
-static struct imx_pin_reg imx53_pin_regs[] = {
- IMX_PIN_REG(MX53_PAD_GPIO_19, 0x348, 0x020, 0, 0x840, 0), /* MX53_PAD_GPIO_19__KPP_COL_5 */
- IMX_PIN_REG(MX53_PAD_GPIO_19, 0x348, 0x020, 1, 0x000, 0), /* MX53_PAD_GPIO_19__GPIO4_5 */
- IMX_PIN_REG(MX53_PAD_GPIO_19, 0x348, 0x020, 2, 0x000, 0), /* MX53_PAD_GPIO_19__CCM_CLKO */
- IMX_PIN_REG(MX53_PAD_GPIO_19, 0x348, 0x020, 3, 0x000, 0), /* MX53_PAD_GPIO_19__SPDIF_OUT1 */
- IMX_PIN_REG(MX53_PAD_GPIO_19, 0x348, 0x020, 4, 0x000, 0), /* MX53_PAD_GPIO_19__RTC_CE_RTC_EXT_TRIG2 */
- IMX_PIN_REG(MX53_PAD_GPIO_19, 0x348, 0x020, 5, 0x000, 0), /* MX53_PAD_GPIO_19__ECSPI1_RDY */
- IMX_PIN_REG(MX53_PAD_GPIO_19, 0x348, 0x020, 6, 0x000, 0), /* MX53_PAD_GPIO_19__FEC_TDATA_3 */
- IMX_PIN_REG(MX53_PAD_GPIO_19, 0x348, 0x020, 7, 0x000, 0), /* MX53_PAD_GPIO_19__SRC_INT_BOOT */
- IMX_PIN_REG(MX53_PAD_KEY_COL0, 0x34C, 0x024, 0, 0x000, 0), /* MX53_PAD_KEY_COL0__KPP_COL_0 */
- IMX_PIN_REG(MX53_PAD_KEY_COL0, 0x34C, 0x024, 1, 0x000, 0), /* MX53_PAD_KEY_COL0__GPIO4_6 */
- IMX_PIN_REG(MX53_PAD_KEY_COL0, 0x34C, 0x024, 2, 0x758, 0), /* MX53_PAD_KEY_COL0__AUDMUX_AUD5_TXC */
- IMX_PIN_REG(MX53_PAD_KEY_COL0, 0x34C, 0x024, 4, 0x000, 0), /* MX53_PAD_KEY_COL0__UART4_TXD_MUX */
- IMX_PIN_REG(MX53_PAD_KEY_COL0, 0x34C, 0x024, 5, 0x79C, 0), /* MX53_PAD_KEY_COL0__ECSPI1_SCLK */
- IMX_PIN_REG(MX53_PAD_KEY_COL0, 0x34C, 0x024, 6, 0x000, 0), /* MX53_PAD_KEY_COL0__FEC_RDATA_3 */
- IMX_PIN_REG(MX53_PAD_KEY_COL0, 0x34C, 0x024, 7, 0x000, 0), /* MX53_PAD_KEY_COL0__SRC_ANY_PU_RST */
- IMX_PIN_REG(MX53_PAD_KEY_ROW0, 0x350, 0x028, 0, 0x000, 0), /* MX53_PAD_KEY_ROW0__KPP_ROW_0 */
- IMX_PIN_REG(MX53_PAD_KEY_ROW0, 0x350, 0x028, 1, 0x000, 0), /* MX53_PAD_KEY_ROW0__GPIO4_7 */
- IMX_PIN_REG(MX53_PAD_KEY_ROW0, 0x350, 0x028, 2, 0x74C, 0), /* MX53_PAD_KEY_ROW0__AUDMUX_AUD5_TXD */
- IMX_PIN_REG(MX53_PAD_KEY_ROW0, 0x350, 0x028, 4, 0x890, 1), /* MX53_PAD_KEY_ROW0__UART4_RXD_MUX */
- IMX_PIN_REG(MX53_PAD_KEY_ROW0, 0x350, 0x028, 5, 0x7A4, 0), /* MX53_PAD_KEY_ROW0__ECSPI1_MOSI */
- IMX_PIN_REG(MX53_PAD_KEY_ROW0, 0x350, 0x028, 6, 0x000, 0), /* MX53_PAD_KEY_ROW0__FEC_TX_ER */
- IMX_PIN_REG(MX53_PAD_KEY_COL1, 0x354, 0x02C, 0, 0x000, 0), /* MX53_PAD_KEY_COL1__KPP_COL_1 */
- IMX_PIN_REG(MX53_PAD_KEY_COL1, 0x354, 0x02C, 1, 0x000, 0), /* MX53_PAD_KEY_COL1__GPIO4_8 */
- IMX_PIN_REG(MX53_PAD_KEY_COL1, 0x354, 0x02C, 2, 0x75C, 0), /* MX53_PAD_KEY_COL1__AUDMUX_AUD5_TXFS */
- IMX_PIN_REG(MX53_PAD_KEY_COL1, 0x354, 0x02C, 4, 0x000, 0), /* MX53_PAD_KEY_COL1__UART5_TXD_MUX */
- IMX_PIN_REG(MX53_PAD_KEY_COL1, 0x354, 0x02C, 5, 0x7A0, 0), /* MX53_PAD_KEY_COL1__ECSPI1_MISO */
- IMX_PIN_REG(MX53_PAD_KEY_COL1, 0x354, 0x02C, 6, 0x808, 0), /* MX53_PAD_KEY_COL1__FEC_RX_CLK */
- IMX_PIN_REG(MX53_PAD_KEY_COL1, 0x354, 0x02C, 7, 0x000, 0), /* MX53_PAD_KEY_COL1__USBPHY1_TXREADY */
- IMX_PIN_REG(MX53_PAD_KEY_ROW1, 0x358, 0x030, 0, 0x000, 0), /* MX53_PAD_KEY_ROW1__KPP_ROW_1 */
- IMX_PIN_REG(MX53_PAD_KEY_ROW1, 0x358, 0x030, 1, 0x000, 0), /* MX53_PAD_KEY_ROW1__GPIO4_9 */
- IMX_PIN_REG(MX53_PAD_KEY_ROW1, 0x358, 0x030, 2, 0x748, 0), /* MX53_PAD_KEY_ROW1__AUDMUX_AUD5_RXD */
- IMX_PIN_REG(MX53_PAD_KEY_ROW1, 0x358, 0x030, 4, 0x898, 1), /* MX53_PAD_KEY_ROW1__UART5_RXD_MUX */
- IMX_PIN_REG(MX53_PAD_KEY_ROW1, 0x358, 0x030, 5, 0x7A8, 0), /* MX53_PAD_KEY_ROW1__ECSPI1_SS0 */
- IMX_PIN_REG(MX53_PAD_KEY_ROW1, 0x358, 0x030, 6, 0x800, 0), /* MX53_PAD_KEY_ROW1__FEC_COL */
- IMX_PIN_REG(MX53_PAD_KEY_ROW1, 0x358, 0x030, 7, 0x000, 0), /* MX53_PAD_KEY_ROW1__USBPHY1_RXVALID */
- IMX_PIN_REG(MX53_PAD_KEY_COL2, 0x35C, 0x034, 0, 0x000, 0), /* MX53_PAD_KEY_COL2__KPP_COL_2 */
- IMX_PIN_REG(MX53_PAD_KEY_COL2, 0x35C, 0x034, 1, 0x000, 0), /* MX53_PAD_KEY_COL2__GPIO4_10 */
- IMX_PIN_REG(MX53_PAD_KEY_COL2, 0x35C, 0x034, 2, 0x000, 0), /* MX53_PAD_KEY_COL2__CAN1_TXCAN */
- IMX_PIN_REG(MX53_PAD_KEY_COL2, 0x35C, 0x034, 4, 0x804, 0), /* MX53_PAD_KEY_COL2__FEC_MDIO */
- IMX_PIN_REG(MX53_PAD_KEY_COL2, 0x35C, 0x034, 5, 0x7AC, 0), /* MX53_PAD_KEY_COL2__ECSPI1_SS1 */
- IMX_PIN_REG(MX53_PAD_KEY_COL2, 0x35C, 0x034, 6, 0x000, 0), /* MX53_PAD_KEY_COL2__FEC_RDATA_2 */
- IMX_PIN_REG(MX53_PAD_KEY_COL2, 0x35C, 0x034, 7, 0x000, 0), /* MX53_PAD_KEY_COL2__USBPHY1_RXACTIVE */
- IMX_PIN_REG(MX53_PAD_KEY_ROW2, 0x360, 0x038, 0, 0x000, 0), /* MX53_PAD_KEY_ROW2__KPP_ROW_2 */
- IMX_PIN_REG(MX53_PAD_KEY_ROW2, 0x360, 0x038, 1, 0x000, 0), /* MX53_PAD_KEY_ROW2__GPIO4_11 */
- IMX_PIN_REG(MX53_PAD_KEY_ROW2, 0x360, 0x038, 2, 0x760, 0), /* MX53_PAD_KEY_ROW2__CAN1_RXCAN */
- IMX_PIN_REG(MX53_PAD_KEY_ROW2, 0x360, 0x038, 4, 0x000, 0), /* MX53_PAD_KEY_ROW2__FEC_MDC */
- IMX_PIN_REG(MX53_PAD_KEY_ROW2, 0x360, 0x038, 5, 0x7B0, 0), /* MX53_PAD_KEY_ROW2__ECSPI1_SS2 */
- IMX_PIN_REG(MX53_PAD_KEY_ROW2, 0x360, 0x038, 6, 0x000, 0), /* MX53_PAD_KEY_ROW2__FEC_TDATA_2 */
- IMX_PIN_REG(MX53_PAD_KEY_ROW2, 0x360, 0x038, 7, 0x000, 0), /* MX53_PAD_KEY_ROW2__USBPHY1_RXERROR */
- IMX_PIN_REG(MX53_PAD_KEY_COL3, 0x364, 0x03C, 0, 0x000, 0), /* MX53_PAD_KEY_COL3__KPP_COL_3 */
- IMX_PIN_REG(MX53_PAD_KEY_COL3, 0x364, 0x03C, 1, 0x000, 0), /* MX53_PAD_KEY_COL3__GPIO4_12 */
- IMX_PIN_REG(MX53_PAD_KEY_COL3, 0x364, 0x03C, 2, 0x000, 0), /* MX53_PAD_KEY_COL3__USBOH3_H2_DP */
- IMX_PIN_REG(MX53_PAD_KEY_COL3, 0x364, 0x03C, 3, 0x870, 0), /* MX53_PAD_KEY_COL3__SPDIF_IN1 */
- IMX_PIN_REG(MX53_PAD_KEY_COL3, 0x364, 0x03C, 4, 0x81C, 0), /* MX53_PAD_KEY_COL3__I2C2_SCL */
- IMX_PIN_REG(MX53_PAD_KEY_COL3, 0x364, 0x03C, 5, 0x7B4, 0), /* MX53_PAD_KEY_COL3__ECSPI1_SS3 */
- IMX_PIN_REG(MX53_PAD_KEY_COL3, 0x364, 0x03C, 6, 0x000, 0), /* MX53_PAD_KEY_COL3__FEC_CRS */
- IMX_PIN_REG(MX53_PAD_KEY_COL3, 0x364, 0x03C, 7, 0x000, 0), /* MX53_PAD_KEY_COL3__USBPHY1_SIECLOCK */
- IMX_PIN_REG(MX53_PAD_KEY_ROW3, 0x368, 0x040, 0, 0x000, 0), /* MX53_PAD_KEY_ROW3__KPP_ROW_3 */
- IMX_PIN_REG(MX53_PAD_KEY_ROW3, 0x368, 0x040, 1, 0x000, 0), /* MX53_PAD_KEY_ROW3__GPIO4_13 */
- IMX_PIN_REG(MX53_PAD_KEY_ROW3, 0x368, 0x040, 2, 0x000, 0), /* MX53_PAD_KEY_ROW3__USBOH3_H2_DM */
- IMX_PIN_REG(MX53_PAD_KEY_ROW3, 0x368, 0x040, 3, 0x768, 0), /* MX53_PAD_KEY_ROW3__CCM_ASRC_EXT_CLK */
- IMX_PIN_REG(MX53_PAD_KEY_ROW3, 0x368, 0x040, 4, 0x820, 0), /* MX53_PAD_KEY_ROW3__I2C2_SDA */
- IMX_PIN_REG(MX53_PAD_KEY_ROW3, 0x368, 0x040, 5, 0x000, 0), /* MX53_PAD_KEY_ROW3__OSC32K_32K_OUT */
- IMX_PIN_REG(MX53_PAD_KEY_ROW3, 0x368, 0x040, 6, 0x77C, 0), /* MX53_PAD_KEY_ROW3__CCM_PLL4_BYP */
- IMX_PIN_REG(MX53_PAD_KEY_ROW3, 0x368, 0x040, 7, 0x000, 0), /* MX53_PAD_KEY_ROW3__USBPHY1_LINESTATE_0 */
- IMX_PIN_REG(MX53_PAD_KEY_COL4, 0x36C, 0x044, 0, 0x000, 0), /* MX53_PAD_KEY_COL4__KPP_COL_4 */
- IMX_PIN_REG(MX53_PAD_KEY_COL4, 0x36C, 0x044, 1, 0x000, 0), /* MX53_PAD_KEY_COL4__GPIO4_14 */
- IMX_PIN_REG(MX53_PAD_KEY_COL4, 0x36C, 0x044, 2, 0x000, 0), /* MX53_PAD_KEY_COL4__CAN2_TXCAN */
- IMX_PIN_REG(MX53_PAD_KEY_COL4, 0x36C, 0x044, 3, 0x000, 0), /* MX53_PAD_KEY_COL4__IPU_SISG_4 */
- IMX_PIN_REG(MX53_PAD_KEY_COL4, 0x36C, 0x044, 4, 0x894, 0), /* MX53_PAD_KEY_COL4__UART5_RTS */
- IMX_PIN_REG(MX53_PAD_KEY_COL4, 0x36C, 0x044, 5, 0x89C, 0), /* MX53_PAD_KEY_COL4__USBOH3_USBOTG_OC */
- IMX_PIN_REG(MX53_PAD_KEY_COL4, 0x36C, 0x044, 7, 0x000, 0), /* MX53_PAD_KEY_COL4__USBPHY1_LINESTATE_1 */
- IMX_PIN_REG(MX53_PAD_KEY_ROW4, 0x370, 0x048, 0, 0x000, 0), /* MX53_PAD_KEY_ROW4__KPP_ROW_4 */
- IMX_PIN_REG(MX53_PAD_KEY_ROW4, 0x370, 0x048, 1, 0x000, 0), /* MX53_PAD_KEY_ROW4__GPIO4_15 */
- IMX_PIN_REG(MX53_PAD_KEY_ROW4, 0x370, 0x048, 2, 0x764, 0), /* MX53_PAD_KEY_ROW4__CAN2_RXCAN */
- IMX_PIN_REG(MX53_PAD_KEY_ROW4, 0x370, 0x048, 3, 0x000, 0), /* MX53_PAD_KEY_ROW4__IPU_SISG_5 */
- IMX_PIN_REG(MX53_PAD_KEY_ROW4, 0x370, 0x048, 4, 0x000, 0), /* MX53_PAD_KEY_ROW4__UART5_CTS */
- IMX_PIN_REG(MX53_PAD_KEY_ROW4, 0x370, 0x048, 5, 0x000, 0), /* MX53_PAD_KEY_ROW4__USBOH3_USBOTG_PWR */
- IMX_PIN_REG(MX53_PAD_KEY_ROW4, 0x370, 0x048, 7, 0x000, 0), /* MX53_PAD_KEY_ROW4__USBPHY1_VBUSVALID */
- IMX_PIN_REG(MX53_PAD_DI0_DISP_CLK, 0x378, 0x04C, 0, 0x000, 0), /* MX53_PAD_DI0_DISP_CLK__IPU_DI0_DISP_CLK */
- IMX_PIN_REG(MX53_PAD_DI0_DISP_CLK, 0x378, 0x04C, 1, 0x000, 0), /* MX53_PAD_DI0_DISP_CLK__GPIO4_16 */
- IMX_PIN_REG(MX53_PAD_DI0_DISP_CLK, 0x378, 0x04C, 2, 0x000, 0), /* MX53_PAD_DI0_DISP_CLK__USBOH3_USBH2_DIR */
- IMX_PIN_REG(MX53_PAD_DI0_DISP_CLK, 0x378, 0x04C, 5, 0x000, 0), /* MX53_PAD_DI0_DISP_CLK__SDMA_DEBUG_CORE_STATE_0 */
- IMX_PIN_REG(MX53_PAD_DI0_DISP_CLK, 0x378, 0x04C, 6, 0x000, 0), /* MX53_PAD_DI0_DISP_CLK__EMI_EMI_DEBUG_0 */
- IMX_PIN_REG(MX53_PAD_DI0_DISP_CLK, 0x378, 0x04C, 7, 0x000, 0), /* MX53_PAD_DI0_DISP_CLK__USBPHY1_AVALID */
- IMX_PIN_REG(MX53_PAD_DI0_PIN15, 0x37C, 0x050, 0, 0x000, 0), /* MX53_PAD_DI0_PIN15__IPU_DI0_PIN15 */
- IMX_PIN_REG(MX53_PAD_DI0_PIN15, 0x37C, 0x050, 1, 0x000, 0), /* MX53_PAD_DI0_PIN15__GPIO4_17 */
- IMX_PIN_REG(MX53_PAD_DI0_PIN15, 0x37C, 0x050, 2, 0x000, 0), /* MX53_PAD_DI0_PIN15__AUDMUX_AUD6_TXC */
- IMX_PIN_REG(MX53_PAD_DI0_PIN15, 0x37C, 0x050, 5, 0x000, 0), /* MX53_PAD_DI0_PIN15__SDMA_DEBUG_CORE_STATE_1 */
- IMX_PIN_REG(MX53_PAD_DI0_PIN15, 0x37C, 0x050, 6, 0x000, 0), /* MX53_PAD_DI0_PIN15__EMI_EMI_DEBUG_1 */
- IMX_PIN_REG(MX53_PAD_DI0_PIN15, 0x37C, 0x050, 7, 0x000, 0), /* MX53_PAD_DI0_PIN15__USBPHY1_BVALID */
- IMX_PIN_REG(MX53_PAD_DI0_PIN2, 0x380, 0x054, 0, 0x000, 0), /* MX53_PAD_DI0_PIN2__IPU_DI0_PIN2 */
- IMX_PIN_REG(MX53_PAD_DI0_PIN2, 0x380, 0x054, 1, 0x000, 0), /* MX53_PAD_DI0_PIN2__GPIO4_18 */
- IMX_PIN_REG(MX53_PAD_DI0_PIN2, 0x380, 0x054, 2, 0x000, 0), /* MX53_PAD_DI0_PIN2__AUDMUX_AUD6_TXD */
- IMX_PIN_REG(MX53_PAD_DI0_PIN2, 0x380, 0x054, 5, 0x000, 0), /* MX53_PAD_DI0_PIN2__SDMA_DEBUG_CORE_STATE_2 */
- IMX_PIN_REG(MX53_PAD_DI0_PIN2, 0x380, 0x054, 6, 0x000, 0), /* MX53_PAD_DI0_PIN2__EMI_EMI_DEBUG_2 */
- IMX_PIN_REG(MX53_PAD_DI0_PIN2, 0x380, 0x054, 7, 0x000, 0), /* MX53_PAD_DI0_PIN2__USBPHY1_ENDSESSION */
- IMX_PIN_REG(MX53_PAD_DI0_PIN3, 0x384, 0x058, 0, 0x000, 0), /* MX53_PAD_DI0_PIN3__IPU_DI0_PIN3 */
- IMX_PIN_REG(MX53_PAD_DI0_PIN3, 0x384, 0x058, 1, 0x000, 0), /* MX53_PAD_DI0_PIN3__GPIO4_19 */
- IMX_PIN_REG(MX53_PAD_DI0_PIN3, 0x384, 0x058, 2, 0x000, 0), /* MX53_PAD_DI0_PIN3__AUDMUX_AUD6_TXFS */
- IMX_PIN_REG(MX53_PAD_DI0_PIN3, 0x384, 0x058, 5, 0x000, 0), /* MX53_PAD_DI0_PIN3__SDMA_DEBUG_CORE_STATE_3 */
- IMX_PIN_REG(MX53_PAD_DI0_PIN3, 0x384, 0x058, 6, 0x000, 0), /* MX53_PAD_DI0_PIN3__EMI_EMI_DEBUG_3 */
- IMX_PIN_REG(MX53_PAD_DI0_PIN3, 0x384, 0x058, 7, 0x000, 0), /* MX53_PAD_DI0_PIN3__USBPHY1_IDDIG */
- IMX_PIN_REG(MX53_PAD_DI0_PIN4, 0x388, 0x05C, 0, 0x000, 0), /* MX53_PAD_DI0_PIN4__IPU_DI0_PIN4 */
- IMX_PIN_REG(MX53_PAD_DI0_PIN4, 0x388, 0x05C, 1, 0x000, 0), /* MX53_PAD_DI0_PIN4__GPIO4_20 */
- IMX_PIN_REG(MX53_PAD_DI0_PIN4, 0x388, 0x05C, 2, 0x000, 0), /* MX53_PAD_DI0_PIN4__AUDMUX_AUD6_RXD */
- IMX_PIN_REG(MX53_PAD_DI0_PIN4, 0x388, 0x05C, 3, 0x7FC, 0), /* MX53_PAD_DI0_PIN4__ESDHC1_WP */
- IMX_PIN_REG(MX53_PAD_DI0_PIN4, 0x388, 0x05C, 5, 0x000, 0), /* MX53_PAD_DI0_PIN4__SDMA_DEBUG_YIELD */
- IMX_PIN_REG(MX53_PAD_DI0_PIN4, 0x388, 0x05C, 6, 0x000, 0), /* MX53_PAD_DI0_PIN4__EMI_EMI_DEBUG_4 */
- IMX_PIN_REG(MX53_PAD_DI0_PIN4, 0x388, 0x05C, 7, 0x000, 0), /* MX53_PAD_DI0_PIN4__USBPHY1_HOSTDISCONNECT */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT0, 0x38C, 0x060, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT0__IPU_DISP0_DAT_0 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT0, 0x38C, 0x060, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT0__GPIO4_21 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT0, 0x38C, 0x060, 2, 0x780, 0), /* MX53_PAD_DISP0_DAT0__CSPI_SCLK */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT0, 0x38C, 0x060, 3, 0x000, 0), /* MX53_PAD_DISP0_DAT0__USBOH3_USBH2_DATA_0 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT0, 0x38C, 0x060, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT0__SDMA_DEBUG_CORE_RUN */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT0, 0x38C, 0x060, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT0__EMI_EMI_DEBUG_5 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT0, 0x38C, 0x060, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT0__USBPHY2_TXREADY */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT1, 0x390, 0x064, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT1__IPU_DISP0_DAT_1 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT1, 0x390, 0x064, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT1__GPIO4_22 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT1, 0x390, 0x064, 2, 0x788, 0), /* MX53_PAD_DISP0_DAT1__CSPI_MOSI */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT1, 0x390, 0x064, 3, 0x000, 0), /* MX53_PAD_DISP0_DAT1__USBOH3_USBH2_DATA_1 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT1, 0x390, 0x064, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT1__SDMA_DEBUG_EVENT_CHANNEL_SEL */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT1, 0x390, 0x064, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT1__EMI_EMI_DEBUG_6 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT1, 0x390, 0x064, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT1__USBPHY2_RXVALID */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT2, 0x394, 0x068, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT2__IPU_DISP0_DAT_2 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT2, 0x394, 0x068, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT2__GPIO4_23 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT2, 0x394, 0x068, 2, 0x784, 0), /* MX53_PAD_DISP0_DAT2__CSPI_MISO */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT2, 0x394, 0x068, 3, 0x000, 0), /* MX53_PAD_DISP0_DAT2__USBOH3_USBH2_DATA_2 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT2, 0x394, 0x068, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT2__SDMA_DEBUG_MODE */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT2, 0x394, 0x068, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT2__EMI_EMI_DEBUG_7 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT2, 0x394, 0x068, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT2__USBPHY2_RXACTIVE */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT3, 0x398, 0x06C, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT3__IPU_DISP0_DAT_3 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT3, 0x398, 0x06C, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT3__GPIO4_24 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT3, 0x398, 0x06C, 2, 0x78C, 0), /* MX53_PAD_DISP0_DAT3__CSPI_SS0 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT3, 0x398, 0x06C, 3, 0x000, 0), /* MX53_PAD_DISP0_DAT3__USBOH3_USBH2_DATA_3 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT3, 0x398, 0x06C, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT3__SDMA_DEBUG_BUS_ERROR */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT3, 0x398, 0x06C, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT3__EMI_EMI_DEBUG_8 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT3, 0x398, 0x06C, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT3__USBPHY2_RXERROR */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT4, 0x39C, 0x070, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT4__IPU_DISP0_DAT_4 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT4, 0x39C, 0x070, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT4__GPIO4_25 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT4, 0x39C, 0x070, 2, 0x790, 0), /* MX53_PAD_DISP0_DAT4__CSPI_SS1 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT4, 0x39C, 0x070, 3, 0x000, 0), /* MX53_PAD_DISP0_DAT4__USBOH3_USBH2_DATA_4 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT4, 0x39C, 0x070, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT4__SDMA_DEBUG_BUS_RWB */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT4, 0x39C, 0x070, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT4__EMI_EMI_DEBUG_9 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT4, 0x39C, 0x070, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT4__USBPHY2_SIECLOCK */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT5, 0x3A0, 0x074, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT5__IPU_DISP0_DAT_5 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT5, 0x3A0, 0x074, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT5__GPIO4_26 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT5, 0x3A0, 0x074, 2, 0x794, 0), /* MX53_PAD_DISP0_DAT5__CSPI_SS2 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT5, 0x3A0, 0x074, 3, 0x000, 0), /* MX53_PAD_DISP0_DAT5__USBOH3_USBH2_DATA_5 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT5, 0x3A0, 0x074, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT5__SDMA_DEBUG_MATCHED_DMBUS */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT5, 0x3A0, 0x074, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT5__EMI_EMI_DEBUG_10 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT5, 0x3A0, 0x074, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT5__USBPHY2_LINESTATE_0 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT6, 0x3A4, 0x078, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT6__IPU_DISP0_DAT_6 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT6, 0x3A4, 0x078, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT6__GPIO4_27 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT6, 0x3A4, 0x078, 2, 0x798, 0), /* MX53_PAD_DISP0_DAT6__CSPI_SS3 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT6, 0x3A4, 0x078, 3, 0x000, 0), /* MX53_PAD_DISP0_DAT6__USBOH3_USBH2_DATA_6 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT6, 0x3A4, 0x078, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT6__SDMA_DEBUG_RTBUFFER_WRITE */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT6, 0x3A4, 0x078, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT6__EMI_EMI_DEBUG_11 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT6, 0x3A4, 0x078, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT6__USBPHY2_LINESTATE_1 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT7, 0x3A8, 0x07C, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT7__IPU_DISP0_DAT_7 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT7, 0x3A8, 0x07C, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT7__GPIO4_28 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT7, 0x3A8, 0x07C, 2, 0x000, 0), /* MX53_PAD_DISP0_DAT7__CSPI_RDY */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT7, 0x3A8, 0x07C, 3, 0x000, 0), /* MX53_PAD_DISP0_DAT7__USBOH3_USBH2_DATA_7 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT7, 0x3A8, 0x07C, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT7__SDMA_DEBUG_EVENT_CHANNEL_0 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT7, 0x3A8, 0x07C, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT7__EMI_EMI_DEBUG_12 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT7, 0x3A8, 0x07C, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT7__USBPHY2_VBUSVALID */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT8, 0x3AC, 0x080, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT8__IPU_DISP0_DAT_8 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT8, 0x3AC, 0x080, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT8__GPIO4_29 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT8, 0x3AC, 0x080, 2, 0x000, 0), /* MX53_PAD_DISP0_DAT8__PWM1_PWMO */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT8, 0x3AC, 0x080, 3, 0x000, 0), /* MX53_PAD_DISP0_DAT8__WDOG1_WDOG_B */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT8, 0x3AC, 0x080, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT8__SDMA_DEBUG_EVENT_CHANNEL_1 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT8, 0x3AC, 0x080, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT8__EMI_EMI_DEBUG_13 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT8, 0x3AC, 0x080, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT8__USBPHY2_AVALID */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT9, 0x3B0, 0x084, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT9__IPU_DISP0_DAT_9 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT9, 0x3B0, 0x084, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT9__GPIO4_30 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT9, 0x3B0, 0x084, 2, 0x000, 0), /* MX53_PAD_DISP0_DAT9__PWM2_PWMO */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT9, 0x3B0, 0x084, 3, 0x000, 0), /* MX53_PAD_DISP0_DAT9__WDOG2_WDOG_B */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT9, 0x3B0, 0x084, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT9__SDMA_DEBUG_EVENT_CHANNEL_2 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT9, 0x3B0, 0x084, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT9__EMI_EMI_DEBUG_14 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT9, 0x3B0, 0x084, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT9__USBPHY2_VSTATUS_0 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT10, 0x3B4, 0x088, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT10__IPU_DISP0_DAT_10 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT10, 0x3B4, 0x088, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT10__GPIO4_31 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT10, 0x3B4, 0x088, 2, 0x000, 0), /* MX53_PAD_DISP0_DAT10__USBOH3_USBH2_STP */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT10, 0x3B4, 0x088, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT10__SDMA_DEBUG_EVENT_CHANNEL_3 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT10, 0x3B4, 0x088, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT10__EMI_EMI_DEBUG_15 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT10, 0x3B4, 0x088, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT10__USBPHY2_VSTATUS_1 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT11, 0x3B8, 0x08C, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT11__IPU_DISP0_DAT_11 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT11, 0x3B8, 0x08C, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT11__GPIO5_5 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT11, 0x3B8, 0x08C, 2, 0x000, 0), /* MX53_PAD_DISP0_DAT11__USBOH3_USBH2_NXT */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT11, 0x3B8, 0x08C, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT11__SDMA_DEBUG_EVENT_CHANNEL_4 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT11, 0x3B8, 0x08C, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT11__EMI_EMI_DEBUG_16 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT11, 0x3B8, 0x08C, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT11__USBPHY2_VSTATUS_2 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT12, 0x3BC, 0x090, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT12__IPU_DISP0_DAT_12 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT12, 0x3BC, 0x090, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT12__GPIO5_6 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT12, 0x3BC, 0x090, 2, 0x000, 0), /* MX53_PAD_DISP0_DAT12__USBOH3_USBH2_CLK */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT12, 0x3BC, 0x090, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT12__SDMA_DEBUG_EVENT_CHANNEL_5 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT12, 0x3BC, 0x090, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT12__EMI_EMI_DEBUG_17 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT12, 0x3BC, 0x090, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT12__USBPHY2_VSTATUS_3 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT13, 0x3C0, 0x094, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT13__IPU_DISP0_DAT_13 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT13, 0x3C0, 0x094, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT13__GPIO5_7 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT13, 0x3C0, 0x094, 3, 0x754, 0), /* MX53_PAD_DISP0_DAT13__AUDMUX_AUD5_RXFS */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT13, 0x3C0, 0x094, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT13__SDMA_DEBUG_EVT_CHN_LINES_0 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT13, 0x3C0, 0x094, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT13__EMI_EMI_DEBUG_18 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT13, 0x3C0, 0x094, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT13__USBPHY2_VSTATUS_4 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT14, 0x3C4, 0x098, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT14__IPU_DISP0_DAT_14 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT14, 0x3C4, 0x098, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT14__GPIO5_8 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT14, 0x3C4, 0x098, 3, 0x750, 0), /* MX53_PAD_DISP0_DAT14__AUDMUX_AUD5_RXC */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT14, 0x3C4, 0x098, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT14__SDMA_DEBUG_EVT_CHN_LINES_1 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT14, 0x3C4, 0x098, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT14__EMI_EMI_DEBUG_19 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT14, 0x3C4, 0x098, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT14__USBPHY2_VSTATUS_5 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT15, 0x3C8, 0x09C, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT15__IPU_DISP0_DAT_15 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT15, 0x3C8, 0x09C, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT15__GPIO5_9 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT15, 0x3C8, 0x09C, 2, 0x7AC, 1), /* MX53_PAD_DISP0_DAT15__ECSPI1_SS1 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT15, 0x3C8, 0x09C, 3, 0x7C8, 0), /* MX53_PAD_DISP0_DAT15__ECSPI2_SS1 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT15, 0x3C8, 0x09C, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT15__SDMA_DEBUG_EVT_CHN_LINES_2 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT15, 0x3C8, 0x09C, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT15__EMI_EMI_DEBUG_20 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT15, 0x3C8, 0x09C, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT15__USBPHY2_VSTATUS_6 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT16, 0x3CC, 0x0A0, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT16__IPU_DISP0_DAT_16 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT16, 0x3CC, 0x0A0, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT16__GPIO5_10 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT16, 0x3CC, 0x0A0, 2, 0x7C0, 0), /* MX53_PAD_DISP0_DAT16__ECSPI2_MOSI */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT16, 0x3CC, 0x0A0, 3, 0x758, 1), /* MX53_PAD_DISP0_DAT16__AUDMUX_AUD5_TXC */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT16, 0x3CC, 0x0A0, 4, 0x868, 0), /* MX53_PAD_DISP0_DAT16__SDMA_EXT_EVENT_0 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT16, 0x3CC, 0x0A0, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT16__SDMA_DEBUG_EVT_CHN_LINES_3 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT16, 0x3CC, 0x0A0, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT16__EMI_EMI_DEBUG_21 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT16, 0x3CC, 0x0A0, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT16__USBPHY2_VSTATUS_7 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT17, 0x3D0, 0x0A4, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT17__IPU_DISP0_DAT_17 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT17, 0x3D0, 0x0A4, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT17__GPIO5_11 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT17, 0x3D0, 0x0A4, 2, 0x7BC, 0), /* MX53_PAD_DISP0_DAT17__ECSPI2_MISO */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT17, 0x3D0, 0x0A4, 3, 0x74C, 1), /* MX53_PAD_DISP0_DAT17__AUDMUX_AUD5_TXD */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT17, 0x3D0, 0x0A4, 4, 0x86C, 0), /* MX53_PAD_DISP0_DAT17__SDMA_EXT_EVENT_1 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT17, 0x3D0, 0x0A4, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT17__SDMA_DEBUG_EVT_CHN_LINES_4 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT17, 0x3D0, 0x0A4, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT17__EMI_EMI_DEBUG_22 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT18, 0x3D4, 0x0A8, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT18__IPU_DISP0_DAT_18 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT18, 0x3D4, 0x0A8, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT18__GPIO5_12 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT18, 0x3D4, 0x0A8, 2, 0x7C4, 0), /* MX53_PAD_DISP0_DAT18__ECSPI2_SS0 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT18, 0x3D4, 0x0A8, 3, 0x75C, 1), /* MX53_PAD_DISP0_DAT18__AUDMUX_AUD5_TXFS */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT18, 0x3D4, 0x0A8, 4, 0x73C, 0), /* MX53_PAD_DISP0_DAT18__AUDMUX_AUD4_RXFS */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT18, 0x3D4, 0x0A8, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT18__SDMA_DEBUG_EVT_CHN_LINES_5 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT18, 0x3D4, 0x0A8, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT18__EMI_EMI_DEBUG_23 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT18, 0x3D4, 0x0A8, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT18__EMI_WEIM_CS_2 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT19, 0x3D8, 0x0AC, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT19__IPU_DISP0_DAT_19 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT19, 0x3D8, 0x0AC, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT19__GPIO5_13 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT19, 0x3D8, 0x0AC, 2, 0x7B8, 0), /* MX53_PAD_DISP0_DAT19__ECSPI2_SCLK */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT19, 0x3D8, 0x0AC, 3, 0x748, 1), /* MX53_PAD_DISP0_DAT19__AUDMUX_AUD5_RXD */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT19, 0x3D8, 0x0AC, 4, 0x738, 0), /* MX53_PAD_DISP0_DAT19__AUDMUX_AUD4_RXC */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT19, 0x3D8, 0x0AC, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT19__SDMA_DEBUG_EVT_CHN_LINES_6 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT19, 0x3D8, 0x0AC, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT19__EMI_EMI_DEBUG_24 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT19, 0x3D8, 0x0AC, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT19__EMI_WEIM_CS_3 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT20, 0x3DC, 0x0B0, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT20__IPU_DISP0_DAT_20 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT20, 0x3DC, 0x0B0, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT20__GPIO5_14 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT20, 0x3DC, 0x0B0, 2, 0x79C, 1), /* MX53_PAD_DISP0_DAT20__ECSPI1_SCLK */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT20, 0x3DC, 0x0B0, 3, 0x740, 0), /* MX53_PAD_DISP0_DAT20__AUDMUX_AUD4_TXC */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT20, 0x3DC, 0x0B0, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT20__SDMA_DEBUG_EVT_CHN_LINES_7 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT20, 0x3DC, 0x0B0, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT20__EMI_EMI_DEBUG_25 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT20, 0x3DC, 0x0B0, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT20__SATA_PHY_TDI */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT21, 0x3E0, 0x0B4, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT21__IPU_DISP0_DAT_21 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT21, 0x3E0, 0x0B4, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT21__GPIO5_15 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT21, 0x3E0, 0x0B4, 2, 0x7A4, 1), /* MX53_PAD_DISP0_DAT21__ECSPI1_MOSI */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT21, 0x3E0, 0x0B4, 3, 0x734, 0), /* MX53_PAD_DISP0_DAT21__AUDMUX_AUD4_TXD */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT21, 0x3E0, 0x0B4, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT21__SDMA_DEBUG_BUS_DEVICE_0 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT21, 0x3E0, 0x0B4, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT21__EMI_EMI_DEBUG_26 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT21, 0x3E0, 0x0B4, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT21__SATA_PHY_TDO */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT22, 0x3E4, 0x0B8, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT22__IPU_DISP0_DAT_22 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT22, 0x3E4, 0x0B8, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT22__GPIO5_16 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT22, 0x3E4, 0x0B8, 2, 0x7A0, 1), /* MX53_PAD_DISP0_DAT22__ECSPI1_MISO */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT22, 0x3E4, 0x0B8, 3, 0x744, 0), /* MX53_PAD_DISP0_DAT22__AUDMUX_AUD4_TXFS */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT22, 0x3E4, 0x0B8, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT22__SDMA_DEBUG_BUS_DEVICE_1 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT22, 0x3E4, 0x0B8, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT22__EMI_EMI_DEBUG_27 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT22, 0x3E4, 0x0B8, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT22__SATA_PHY_TCK */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT23, 0x3E8, 0x0BC, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT23__IPU_DISP0_DAT_23 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT23, 0x3E8, 0x0BC, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT23__GPIO5_17 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT23, 0x3E8, 0x0BC, 2, 0x7A8, 1), /* MX53_PAD_DISP0_DAT23__ECSPI1_SS0 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT23, 0x3E8, 0x0BC, 3, 0x730, 0), /* MX53_PAD_DISP0_DAT23__AUDMUX_AUD4_RXD */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT23, 0x3E8, 0x0BC, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT23__SDMA_DEBUG_BUS_DEVICE_2 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT23, 0x3E8, 0x0BC, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT23__EMI_EMI_DEBUG_28 */
- IMX_PIN_REG(MX53_PAD_DISP0_DAT23, 0x3E8, 0x0BC, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT23__SATA_PHY_TMS */
- IMX_PIN_REG(MX53_PAD_CSI0_PIXCLK, 0x3EC, 0x0C0, 0, 0x000, 0), /* MX53_PAD_CSI0_PIXCLK__IPU_CSI0_PIXCLK */
- IMX_PIN_REG(MX53_PAD_CSI0_PIXCLK, 0x3EC, 0x0C0, 1, 0x000, 0), /* MX53_PAD_CSI0_PIXCLK__GPIO5_18 */
- IMX_PIN_REG(MX53_PAD_CSI0_PIXCLK, 0x3EC, 0x0C0, 5, 0x000, 0), /* MX53_PAD_CSI0_PIXCLK__SDMA_DEBUG_PC_0 */
- IMX_PIN_REG(MX53_PAD_CSI0_PIXCLK, 0x3EC, 0x0C0, 6, 0x000, 0), /* MX53_PAD_CSI0_PIXCLK__EMI_EMI_DEBUG_29 */
- IMX_PIN_REG(MX53_PAD_CSI0_MCLK, 0x3F0, 0x0C4, 0, 0x000, 0), /* MX53_PAD_CSI0_MCLK__IPU_CSI0_HSYNC */
- IMX_PIN_REG(MX53_PAD_CSI0_MCLK, 0x3F0, 0x0C4, 1, 0x000, 0), /* MX53_PAD_CSI0_MCLK__GPIO5_19 */
- IMX_PIN_REG(MX53_PAD_CSI0_MCLK, 0x3F0, 0x0C4, 2, 0x000, 0), /* MX53_PAD_CSI0_MCLK__CCM_CSI0_MCLK */
- IMX_PIN_REG(MX53_PAD_CSI0_MCLK, 0x3F0, 0x0C4, 5, 0x000, 0), /* MX53_PAD_CSI0_MCLK__SDMA_DEBUG_PC_1 */
- IMX_PIN_REG(MX53_PAD_CSI0_MCLK, 0x3F0, 0x0C4, 6, 0x000, 0), /* MX53_PAD_CSI0_MCLK__EMI_EMI_DEBUG_30 */
- IMX_PIN_REG(MX53_PAD_CSI0_MCLK, 0x3F0, 0x0C4, 7, 0x000, 0), /* MX53_PAD_CSI0_MCLK__TPIU_TRCTL */
- IMX_PIN_REG(MX53_PAD_CSI0_DATA_EN, 0x3F4, 0x0C8, 0, 0x000, 0), /* MX53_PAD_CSI0_DATA_EN__IPU_CSI0_DATA_EN */
- IMX_PIN_REG(MX53_PAD_CSI0_DATA_EN, 0x3F4, 0x0C8, 1, 0x000, 0), /* MX53_PAD_CSI0_DATA_EN__GPIO5_20 */
- IMX_PIN_REG(MX53_PAD_CSI0_DATA_EN, 0x3F4, 0x0C8, 5, 0x000, 0), /* MX53_PAD_CSI0_DATA_EN__SDMA_DEBUG_PC_2 */
- IMX_PIN_REG(MX53_PAD_CSI0_DATA_EN, 0x3F4, 0x0C8, 6, 0x000, 0), /* MX53_PAD_CSI0_DATA_EN__EMI_EMI_DEBUG_31 */
- IMX_PIN_REG(MX53_PAD_CSI0_DATA_EN, 0x3F4, 0x0C8, 7, 0x000, 0), /* MX53_PAD_CSI0_DATA_EN__TPIU_TRCLK */
- IMX_PIN_REG(MX53_PAD_CSI0_VSYNC, 0x3F8, 0x0CC, 0, 0x000, 0), /* MX53_PAD_CSI0_VSYNC__IPU_CSI0_VSYNC */
- IMX_PIN_REG(MX53_PAD_CSI0_VSYNC, 0x3F8, 0x0CC, 1, 0x000, 0), /* MX53_PAD_CSI0_VSYNC__GPIO5_21 */
- IMX_PIN_REG(MX53_PAD_CSI0_VSYNC, 0x3F8, 0x0CC, 5, 0x000, 0), /* MX53_PAD_CSI0_VSYNC__SDMA_DEBUG_PC_3 */
- IMX_PIN_REG(MX53_PAD_CSI0_VSYNC, 0x3F8, 0x0CC, 6, 0x000, 0), /* MX53_PAD_CSI0_VSYNC__EMI_EMI_DEBUG_32 */
- IMX_PIN_REG(MX53_PAD_CSI0_VSYNC, 0x3F8, 0x0CC, 7, 0x000, 0), /* MX53_PAD_CSI0_VSYNC__TPIU_TRACE_0 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT4, 0x3FC, 0x0D0, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT4__IPU_CSI0_D_4 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT4, 0x3FC, 0x0D0, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT4__GPIO5_22 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT4, 0x3FC, 0x0D0, 2, 0x840, 1), /* MX53_PAD_CSI0_DAT4__KPP_COL_5 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT4, 0x3FC, 0x0D0, 3, 0x79C, 2), /* MX53_PAD_CSI0_DAT4__ECSPI1_SCLK */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT4, 0x3FC, 0x0D0, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT4__USBOH3_USBH3_STP */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT4, 0x3FC, 0x0D0, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT4__AUDMUX_AUD3_TXC */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT4, 0x3FC, 0x0D0, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT4__EMI_EMI_DEBUG_33 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT4, 0x3FC, 0x0D0, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT4__TPIU_TRACE_1 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT5, 0x400, 0x0D4, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT5__IPU_CSI0_D_5 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT5, 0x400, 0x0D4, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT5__GPIO5_23 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT5, 0x400, 0x0D4, 2, 0x84C, 0), /* MX53_PAD_CSI0_DAT5__KPP_ROW_5 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT5, 0x400, 0x0D4, 3, 0x7A4, 2), /* MX53_PAD_CSI0_DAT5__ECSPI1_MOSI */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT5, 0x400, 0x0D4, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT5__USBOH3_USBH3_NXT */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT5, 0x400, 0x0D4, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT5__AUDMUX_AUD3_TXD */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT5, 0x400, 0x0D4, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT5__EMI_EMI_DEBUG_34 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT5, 0x400, 0x0D4, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT5__TPIU_TRACE_2 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT6, 0x404, 0x0D8, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT6__IPU_CSI0_D_6 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT6, 0x404, 0x0D8, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT6__GPIO5_24 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT6, 0x404, 0x0D8, 2, 0x844, 0), /* MX53_PAD_CSI0_DAT6__KPP_COL_6 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT6, 0x404, 0x0D8, 3, 0x7A0, 2), /* MX53_PAD_CSI0_DAT6__ECSPI1_MISO */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT6, 0x404, 0x0D8, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT6__USBOH3_USBH3_CLK */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT6, 0x404, 0x0D8, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT6__AUDMUX_AUD3_TXFS */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT6, 0x404, 0x0D8, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT6__EMI_EMI_DEBUG_35 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT6, 0x404, 0x0D8, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT6__TPIU_TRACE_3 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT7, 0x408, 0x0DC, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT7__IPU_CSI0_D_7 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT7, 0x408, 0x0DC, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT7__GPIO5_25 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT7, 0x408, 0x0DC, 2, 0x850, 0), /* MX53_PAD_CSI0_DAT7__KPP_ROW_6 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT7, 0x408, 0x0DC, 3, 0x7A8, 2), /* MX53_PAD_CSI0_DAT7__ECSPI1_SS0 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT7, 0x408, 0x0DC, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT7__USBOH3_USBH3_DIR */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT7, 0x408, 0x0DC, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT7__AUDMUX_AUD3_RXD */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT7, 0x408, 0x0DC, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT7__EMI_EMI_DEBUG_36 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT7, 0x408, 0x0DC, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT7__TPIU_TRACE_4 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT8, 0x40C, 0x0E0, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT8__IPU_CSI0_D_8 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT8, 0x40C, 0x0E0, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT8__GPIO5_26 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT8, 0x40C, 0x0E0, 2, 0x848, 0), /* MX53_PAD_CSI0_DAT8__KPP_COL_7 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT8, 0x40C, 0x0E0, 3, 0x7B8, 1), /* MX53_PAD_CSI0_DAT8__ECSPI2_SCLK */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT8, 0x40C, 0x0E0, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT8__USBOH3_USBH3_OC */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT8, 0x40C, 0x0E0, 5, 0x818, 0), /* MX53_PAD_CSI0_DAT8__I2C1_SDA */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT8, 0x40C, 0x0E0, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT8__EMI_EMI_DEBUG_37 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT8, 0x40C, 0x0E0, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT8__TPIU_TRACE_5 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT9, 0x410, 0x0E4, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT9__IPU_CSI0_D_9 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT9, 0x410, 0x0E4, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT9__GPIO5_27 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT9, 0x410, 0x0E4, 2, 0x854, 0), /* MX53_PAD_CSI0_DAT9__KPP_ROW_7 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT9, 0x410, 0x0E4, 3, 0x7C0, 1), /* MX53_PAD_CSI0_DAT9__ECSPI2_MOSI */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT9, 0x410, 0x0E4, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT9__USBOH3_USBH3_PWR */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT9, 0x410, 0x0E4, 5, 0x814, 0), /* MX53_PAD_CSI0_DAT9__I2C1_SCL */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT9, 0x410, 0x0E4, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT9__EMI_EMI_DEBUG_38 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT9, 0x410, 0x0E4, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT9__TPIU_TRACE_6 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT10, 0x414, 0x0E8, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT10__IPU_CSI0_D_10 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT10, 0x414, 0x0E8, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT10__GPIO5_28 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT10, 0x414, 0x0E8, 2, 0x000, 0), /* MX53_PAD_CSI0_DAT10__UART1_TXD_MUX */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT10, 0x414, 0x0E8, 3, 0x7BC, 1), /* MX53_PAD_CSI0_DAT10__ECSPI2_MISO */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT10, 0x414, 0x0E8, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT10__AUDMUX_AUD3_RXC */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT10, 0x414, 0x0E8, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT10__SDMA_DEBUG_PC_4 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT10, 0x414, 0x0E8, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT10__EMI_EMI_DEBUG_39 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT10, 0x414, 0x0E8, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT10__TPIU_TRACE_7 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT11, 0x418, 0x0EC, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT11__IPU_CSI0_D_11 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT11, 0x418, 0x0EC, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT11__GPIO5_29 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT11, 0x418, 0x0EC, 2, 0x878, 1), /* MX53_PAD_CSI0_DAT11__UART1_RXD_MUX */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT11, 0x418, 0x0EC, 3, 0x7C4, 1), /* MX53_PAD_CSI0_DAT11__ECSPI2_SS0 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT11, 0x418, 0x0EC, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT11__AUDMUX_AUD3_RXFS */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT11, 0x418, 0x0EC, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT11__SDMA_DEBUG_PC_5 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT11, 0x418, 0x0EC, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT11__EMI_EMI_DEBUG_40 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT11, 0x418, 0x0EC, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT11__TPIU_TRACE_8 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT12, 0x41C, 0x0F0, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT12__IPU_CSI0_D_12 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT12, 0x41C, 0x0F0, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT12__GPIO5_30 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT12, 0x41C, 0x0F0, 2, 0x000, 0), /* MX53_PAD_CSI0_DAT12__UART4_TXD_MUX */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT12, 0x41C, 0x0F0, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT12__USBOH3_USBH3_DATA_0 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT12, 0x41C, 0x0F0, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT12__SDMA_DEBUG_PC_6 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT12, 0x41C, 0x0F0, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT12__EMI_EMI_DEBUG_41 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT12, 0x41C, 0x0F0, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT12__TPIU_TRACE_9 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT13, 0x420, 0x0F4, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT13__IPU_CSI0_D_13 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT13, 0x420, 0x0F4, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT13__GPIO5_31 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT13, 0x420, 0x0F4, 2, 0x890, 3), /* MX53_PAD_CSI0_DAT13__UART4_RXD_MUX */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT13, 0x420, 0x0F4, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT13__USBOH3_USBH3_DATA_1 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT13, 0x420, 0x0F4, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT13__SDMA_DEBUG_PC_7 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT13, 0x420, 0x0F4, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT13__EMI_EMI_DEBUG_42 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT13, 0x420, 0x0F4, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT13__TPIU_TRACE_10 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT14, 0x424, 0x0F8, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT14__IPU_CSI0_D_14 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT14, 0x424, 0x0F8, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT14__GPIO6_0 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT14, 0x424, 0x0F8, 2, 0x000, 0), /* MX53_PAD_CSI0_DAT14__UART5_TXD_MUX */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT14, 0x424, 0x0F8, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT14__USBOH3_USBH3_DATA_2 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT14, 0x424, 0x0F8, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT14__SDMA_DEBUG_PC_8 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT14, 0x424, 0x0F8, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT14__EMI_EMI_DEBUG_43 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT14, 0x424, 0x0F8, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT14__TPIU_TRACE_11 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT15, 0x428, 0x0FC, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT15__IPU_CSI0_D_15 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT15, 0x428, 0x0FC, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT15__GPIO6_1 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT15, 0x428, 0x0FC, 2, 0x898, 3), /* MX53_PAD_CSI0_DAT15__UART5_RXD_MUX */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT15, 0x428, 0x0FC, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT15__USBOH3_USBH3_DATA_3 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT15, 0x428, 0x0FC, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT15__SDMA_DEBUG_PC_9 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT15, 0x428, 0x0FC, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT15__EMI_EMI_DEBUG_44 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT15, 0x428, 0x0FC, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT15__TPIU_TRACE_12 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT16, 0x42C, 0x100, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT16__IPU_CSI0_D_16 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT16, 0x42C, 0x100, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT16__GPIO6_2 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT16, 0x42C, 0x100, 2, 0x88C, 0), /* MX53_PAD_CSI0_DAT16__UART4_RTS */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT16, 0x42C, 0x100, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT16__USBOH3_USBH3_DATA_4 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT16, 0x42C, 0x100, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT16__SDMA_DEBUG_PC_10 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT16, 0x42C, 0x100, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT16__EMI_EMI_DEBUG_45 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT16, 0x42C, 0x100, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT16__TPIU_TRACE_13 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT17, 0x430, 0x104, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT17__IPU_CSI0_D_17 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT17, 0x430, 0x104, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT17__GPIO6_3 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT17, 0x430, 0x104, 2, 0x000, 0), /* MX53_PAD_CSI0_DAT17__UART4_CTS */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT17, 0x430, 0x104, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT17__USBOH3_USBH3_DATA_5 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT17, 0x430, 0x104, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT17__SDMA_DEBUG_PC_11 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT17, 0x430, 0x104, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT17__EMI_EMI_DEBUG_46 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT17, 0x430, 0x104, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT17__TPIU_TRACE_14 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT18, 0x434, 0x108, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT18__IPU_CSI0_D_18 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT18, 0x434, 0x108, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT18__GPIO6_4 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT18, 0x434, 0x108, 2, 0x894, 2), /* MX53_PAD_CSI0_DAT18__UART5_RTS */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT18, 0x434, 0x108, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT18__USBOH3_USBH3_DATA_6 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT18, 0x434, 0x108, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT18__SDMA_DEBUG_PC_12 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT18, 0x434, 0x108, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT18__EMI_EMI_DEBUG_47 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT18, 0x434, 0x108, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT18__TPIU_TRACE_15 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT19, 0x438, 0x10C, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT19__IPU_CSI0_D_19 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT19, 0x438, 0x10C, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT19__GPIO6_5 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT19, 0x438, 0x10C, 2, 0x000, 0), /* MX53_PAD_CSI0_DAT19__UART5_CTS */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT19, 0x438, 0x10C, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT19__USBOH3_USBH3_DATA_7 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT19, 0x438, 0x10C, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT19__SDMA_DEBUG_PC_13 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT19, 0x438, 0x10C, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT19__EMI_EMI_DEBUG_48 */
- IMX_PIN_REG(MX53_PAD_CSI0_DAT19, 0x438, 0x10C, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT19__USBPHY2_BISTOK */
- IMX_PIN_REG(MX53_PAD_EIM_A25, 0x458, 0x110, 0, 0x000, 0), /* MX53_PAD_EIM_A25__EMI_WEIM_A_25 */
- IMX_PIN_REG(MX53_PAD_EIM_A25, 0x458, 0x110, 1, 0x000, 0), /* MX53_PAD_EIM_A25__GPIO5_2 */
- IMX_PIN_REG(MX53_PAD_EIM_A25, 0x458, 0x110, 2, 0x000, 0), /* MX53_PAD_EIM_A25__ECSPI2_RDY */
- IMX_PIN_REG(MX53_PAD_EIM_A25, 0x458, 0x110, 3, 0x000, 0), /* MX53_PAD_EIM_A25__IPU_DI1_PIN12 */
- IMX_PIN_REG(MX53_PAD_EIM_A25, 0x458, 0x110, 4, 0x790, 1), /* MX53_PAD_EIM_A25__CSPI_SS1 */
- IMX_PIN_REG(MX53_PAD_EIM_A25, 0x458, 0x110, 6, 0x000, 0), /* MX53_PAD_EIM_A25__IPU_DI0_D1_CS */
- IMX_PIN_REG(MX53_PAD_EIM_A25, 0x458, 0x110, 7, 0x000, 0), /* MX53_PAD_EIM_A25__USBPHY1_BISTOK */
- IMX_PIN_REG(MX53_PAD_EIM_EB2, 0x45C, 0x114, 0, 0x000, 0), /* MX53_PAD_EIM_EB2__EMI_WEIM_EB_2 */
- IMX_PIN_REG(MX53_PAD_EIM_EB2, 0x45C, 0x114, 1, 0x000, 0), /* MX53_PAD_EIM_EB2__GPIO2_30 */
- IMX_PIN_REG(MX53_PAD_EIM_EB2, 0x45C, 0x114, 2, 0x76C, 0), /* MX53_PAD_EIM_EB2__CCM_DI1_EXT_CLK */
- IMX_PIN_REG(MX53_PAD_EIM_EB2, 0x45C, 0x114, 3, 0x000, 0), /* MX53_PAD_EIM_EB2__IPU_SER_DISP1_CS */
- IMX_PIN_REG(MX53_PAD_EIM_EB2, 0x45C, 0x114, 4, 0x7A8, 3), /* MX53_PAD_EIM_EB2__ECSPI1_SS0 */
- IMX_PIN_REG(MX53_PAD_EIM_EB2, 0x45C, 0x114, 5, 0x81C, 1), /* MX53_PAD_EIM_EB2__I2C2_SCL */
- IMX_PIN_REG(MX53_PAD_EIM_D16, 0x460, 0x118, 0, 0x000, 0), /* MX53_PAD_EIM_D16__EMI_WEIM_D_16 */
- IMX_PIN_REG(MX53_PAD_EIM_D16, 0x460, 0x118, 1, 0x000, 0), /* MX53_PAD_EIM_D16__GPIO3_16 */
- IMX_PIN_REG(MX53_PAD_EIM_D16, 0x460, 0x118, 2, 0x000, 0), /* MX53_PAD_EIM_D16__IPU_DI0_PIN5 */
- IMX_PIN_REG(MX53_PAD_EIM_D16, 0x460, 0x118, 3, 0x000, 0), /* MX53_PAD_EIM_D16__IPU_DISPB1_SER_CLK */
- IMX_PIN_REG(MX53_PAD_EIM_D16, 0x460, 0x118, 4, 0x79C, 3), /* MX53_PAD_EIM_D16__ECSPI1_SCLK */
- IMX_PIN_REG(MX53_PAD_EIM_D16, 0x460, 0x118, 5, 0x820, 1), /* MX53_PAD_EIM_D16__I2C2_SDA */
- IMX_PIN_REG(MX53_PAD_EIM_D17, 0x464, 0x11C, 0, 0x000, 0), /* MX53_PAD_EIM_D17__EMI_WEIM_D_17 */
- IMX_PIN_REG(MX53_PAD_EIM_D17, 0x464, 0x11C, 1, 0x000, 0), /* MX53_PAD_EIM_D17__GPIO3_17 */
- IMX_PIN_REG(MX53_PAD_EIM_D17, 0x464, 0x11C, 2, 0x000, 0), /* MX53_PAD_EIM_D17__IPU_DI0_PIN6 */
- IMX_PIN_REG(MX53_PAD_EIM_D17, 0x464, 0x11C, 3, 0x830, 0), /* MX53_PAD_EIM_D17__IPU_DISPB1_SER_DIN */
- IMX_PIN_REG(MX53_PAD_EIM_D17, 0x464, 0x11C, 4, 0x7A0, 3), /* MX53_PAD_EIM_D17__ECSPI1_MISO */
- IMX_PIN_REG(MX53_PAD_EIM_D17, 0x464, 0x11C, 5, 0x824, 0), /* MX53_PAD_EIM_D17__I2C3_SCL */
- IMX_PIN_REG(MX53_PAD_EIM_D18, 0x468, 0x120, 0, 0x000, 0), /* MX53_PAD_EIM_D18__EMI_WEIM_D_18 */
- IMX_PIN_REG(MX53_PAD_EIM_D18, 0x468, 0x120, 1, 0x000, 0), /* MX53_PAD_EIM_D18__GPIO3_18 */
- IMX_PIN_REG(MX53_PAD_EIM_D18, 0x468, 0x120, 2, 0x000, 0), /* MX53_PAD_EIM_D18__IPU_DI0_PIN7 */
- IMX_PIN_REG(MX53_PAD_EIM_D18, 0x468, 0x120, 3, 0x830, 1), /* MX53_PAD_EIM_D18__IPU_DISPB1_SER_DIO */
- IMX_PIN_REG(MX53_PAD_EIM_D18, 0x468, 0x120, 4, 0x7A4, 3), /* MX53_PAD_EIM_D18__ECSPI1_MOSI */
- IMX_PIN_REG(MX53_PAD_EIM_D18, 0x468, 0x120, 5, 0x828, 0), /* MX53_PAD_EIM_D18__I2C3_SDA */
- IMX_PIN_REG(MX53_PAD_EIM_D18, 0x468, 0x120, 6, 0x000, 0), /* MX53_PAD_EIM_D18__IPU_DI1_D0_CS */
- IMX_PIN_REG(MX53_PAD_EIM_D19, 0x46C, 0x124, 0, 0x000, 0), /* MX53_PAD_EIM_D19__EMI_WEIM_D_19 */
- IMX_PIN_REG(MX53_PAD_EIM_D19, 0x46C, 0x124, 1, 0x000, 0), /* MX53_PAD_EIM_D19__GPIO3_19 */
- IMX_PIN_REG(MX53_PAD_EIM_D19, 0x46C, 0x124, 2, 0x000, 0), /* MX53_PAD_EIM_D19__IPU_DI0_PIN8 */
- IMX_PIN_REG(MX53_PAD_EIM_D19, 0x46C, 0x124, 3, 0x000, 0), /* MX53_PAD_EIM_D19__IPU_DISPB1_SER_RS */
- IMX_PIN_REG(MX53_PAD_EIM_D19, 0x46C, 0x124, 4, 0x7AC, 2), /* MX53_PAD_EIM_D19__ECSPI1_SS1 */
- IMX_PIN_REG(MX53_PAD_EIM_D19, 0x46C, 0x124, 5, 0x000, 0), /* MX53_PAD_EIM_D19__EPIT1_EPITO */
- IMX_PIN_REG(MX53_PAD_EIM_D19, 0x46C, 0x124, 6, 0x000, 0), /* MX53_PAD_EIM_D19__UART1_CTS */
- IMX_PIN_REG(MX53_PAD_EIM_D19, 0x46C, 0x124, 7, 0x8A4, 0), /* MX53_PAD_EIM_D19__USBOH3_USBH2_OC */
- IMX_PIN_REG(MX53_PAD_EIM_D20, 0x470, 0x128, 0, 0x000, 0), /* MX53_PAD_EIM_D20__EMI_WEIM_D_20 */
- IMX_PIN_REG(MX53_PAD_EIM_D20, 0x470, 0x128, 1, 0x000, 0), /* MX53_PAD_EIM_D20__GPIO3_20 */
- IMX_PIN_REG(MX53_PAD_EIM_D20, 0x470, 0x128, 2, 0x000, 0), /* MX53_PAD_EIM_D20__IPU_DI0_PIN16 */
- IMX_PIN_REG(MX53_PAD_EIM_D20, 0x470, 0x128, 3, 0x000, 0), /* MX53_PAD_EIM_D20__IPU_SER_DISP0_CS */
- IMX_PIN_REG(MX53_PAD_EIM_D20, 0x470, 0x128, 4, 0x78C, 1), /* MX53_PAD_EIM_D20__CSPI_SS0 */
- IMX_PIN_REG(MX53_PAD_EIM_D20, 0x470, 0x128, 5, 0x000, 0), /* MX53_PAD_EIM_D20__EPIT2_EPITO */
- IMX_PIN_REG(MX53_PAD_EIM_D20, 0x470, 0x128, 6, 0x874, 1), /* MX53_PAD_EIM_D20__UART1_RTS */
- IMX_PIN_REG(MX53_PAD_EIM_D20, 0x470, 0x128, 7, 0x000, 0), /* MX53_PAD_EIM_D20__USBOH3_USBH2_PWR */
- IMX_PIN_REG(MX53_PAD_EIM_D21, 0x474, 0x12C, 0, 0x000, 0), /* MX53_PAD_EIM_D21__EMI_WEIM_D_21 */
- IMX_PIN_REG(MX53_PAD_EIM_D21, 0x474, 0x12C, 1, 0x000, 0), /* MX53_PAD_EIM_D21__GPIO3_21 */
- IMX_PIN_REG(MX53_PAD_EIM_D21, 0x474, 0x12C, 2, 0x000, 0), /* MX53_PAD_EIM_D21__IPU_DI0_PIN17 */
- IMX_PIN_REG(MX53_PAD_EIM_D21, 0x474, 0x12C, 3, 0x000, 0), /* MX53_PAD_EIM_D21__IPU_DISPB0_SER_CLK */
- IMX_PIN_REG(MX53_PAD_EIM_D21, 0x474, 0x12C, 4, 0x780, 1), /* MX53_PAD_EIM_D21__CSPI_SCLK */
- IMX_PIN_REG(MX53_PAD_EIM_D21, 0x474, 0x12C, 5, 0x814, 1), /* MX53_PAD_EIM_D21__I2C1_SCL */
- IMX_PIN_REG(MX53_PAD_EIM_D21, 0x474, 0x12C, 6, 0x89C, 1), /* MX53_PAD_EIM_D21__USBOH3_USBOTG_OC */
- IMX_PIN_REG(MX53_PAD_EIM_D22, 0x478, 0x130, 0, 0x000, 0), /* MX53_PAD_EIM_D22__EMI_WEIM_D_22 */
- IMX_PIN_REG(MX53_PAD_EIM_D22, 0x478, 0x130, 1, 0x000, 0), /* MX53_PAD_EIM_D22__GPIO3_22 */
- IMX_PIN_REG(MX53_PAD_EIM_D22, 0x478, 0x130, 2, 0x000, 0), /* MX53_PAD_EIM_D22__IPU_DI0_PIN1 */
- IMX_PIN_REG(MX53_PAD_EIM_D22, 0x478, 0x130, 3, 0x82C, 0), /* MX53_PAD_EIM_D22__IPU_DISPB0_SER_DIN */
- IMX_PIN_REG(MX53_PAD_EIM_D22, 0x478, 0x130, 4, 0x784, 1), /* MX53_PAD_EIM_D22__CSPI_MISO */
- IMX_PIN_REG(MX53_PAD_EIM_D22, 0x478, 0x130, 6, 0x000, 0), /* MX53_PAD_EIM_D22__USBOH3_USBOTG_PWR */
- IMX_PIN_REG(MX53_PAD_EIM_D23, 0x47C, 0x134, 0, 0x000, 0), /* MX53_PAD_EIM_D23__EMI_WEIM_D_23 */
- IMX_PIN_REG(MX53_PAD_EIM_D23, 0x47C, 0x134, 1, 0x000, 0), /* MX53_PAD_EIM_D23__GPIO3_23 */
- IMX_PIN_REG(MX53_PAD_EIM_D23, 0x47C, 0x134, 2, 0x000, 0), /* MX53_PAD_EIM_D23__UART3_CTS */
- IMX_PIN_REG(MX53_PAD_EIM_D23, 0x47C, 0x134, 3, 0x000, 0), /* MX53_PAD_EIM_D23__UART1_DCD */
- IMX_PIN_REG(MX53_PAD_EIM_D23, 0x47C, 0x134, 4, 0x000, 0), /* MX53_PAD_EIM_D23__IPU_DI0_D0_CS */
- IMX_PIN_REG(MX53_PAD_EIM_D23, 0x47C, 0x134, 5, 0x000, 0), /* MX53_PAD_EIM_D23__IPU_DI1_PIN2 */
- IMX_PIN_REG(MX53_PAD_EIM_D23, 0x47C, 0x134, 6, 0x834, 0), /* MX53_PAD_EIM_D23__IPU_CSI1_DATA_EN */
- IMX_PIN_REG(MX53_PAD_EIM_D23, 0x47C, 0x134, 7, 0x000, 0), /* MX53_PAD_EIM_D23__IPU_DI1_PIN14 */
- IMX_PIN_REG(MX53_PAD_EIM_EB3, 0x480, 0x138, 0, 0x000, 0), /* MX53_PAD_EIM_EB3__EMI_WEIM_EB_3 */
- IMX_PIN_REG(MX53_PAD_EIM_EB3, 0x480, 0x138, 1, 0x000, 0), /* MX53_PAD_EIM_EB3__GPIO2_31 */
- IMX_PIN_REG(MX53_PAD_EIM_EB3, 0x480, 0x138, 2, 0x884, 1), /* MX53_PAD_EIM_EB3__UART3_RTS */
- IMX_PIN_REG(MX53_PAD_EIM_EB3, 0x480, 0x138, 3, 0x000, 0), /* MX53_PAD_EIM_EB3__UART1_RI */
- IMX_PIN_REG(MX53_PAD_EIM_EB3, 0x480, 0x138, 5, 0x000, 0), /* MX53_PAD_EIM_EB3__IPU_DI1_PIN3 */
- IMX_PIN_REG(MX53_PAD_EIM_EB3, 0x480, 0x138, 6, 0x838, 0), /* MX53_PAD_EIM_EB3__IPU_CSI1_HSYNC */
- IMX_PIN_REG(MX53_PAD_EIM_EB3, 0x480, 0x138, 7, 0x000, 0), /* MX53_PAD_EIM_EB3__IPU_DI1_PIN16 */
- IMX_PIN_REG(MX53_PAD_EIM_D24, 0x484, 0x13C, 0, 0x000, 0), /* MX53_PAD_EIM_D24__EMI_WEIM_D_24 */
- IMX_PIN_REG(MX53_PAD_EIM_D24, 0x484, 0x13C, 1, 0x000, 0), /* MX53_PAD_EIM_D24__GPIO3_24 */
- IMX_PIN_REG(MX53_PAD_EIM_D24, 0x484, 0x13C, 2, 0x000, 0), /* MX53_PAD_EIM_D24__UART3_TXD_MUX */
- IMX_PIN_REG(MX53_PAD_EIM_D24, 0x484, 0x13C, 3, 0x7B0, 1), /* MX53_PAD_EIM_D24__ECSPI1_SS2 */
- IMX_PIN_REG(MX53_PAD_EIM_D24, 0x484, 0x13C, 4, 0x794, 1), /* MX53_PAD_EIM_D24__CSPI_SS2 */
- IMX_PIN_REG(MX53_PAD_EIM_D24, 0x484, 0x13C, 5, 0x754, 1), /* MX53_PAD_EIM_D24__AUDMUX_AUD5_RXFS */
- IMX_PIN_REG(MX53_PAD_EIM_D24, 0x484, 0x13C, 6, 0x000, 0), /* MX53_PAD_EIM_D24__ECSPI2_SS2 */
- IMX_PIN_REG(MX53_PAD_EIM_D24, 0x484, 0x13C, 7, 0x000, 0), /* MX53_PAD_EIM_D24__UART1_DTR */
- IMX_PIN_REG(MX53_PAD_EIM_D25, 0x488, 0x140, 0, 0x000, 0), /* MX53_PAD_EIM_D25__EMI_WEIM_D_25 */
- IMX_PIN_REG(MX53_PAD_EIM_D25, 0x488, 0x140, 1, 0x000, 0), /* MX53_PAD_EIM_D25__GPIO3_25 */
- IMX_PIN_REG(MX53_PAD_EIM_D25, 0x488, 0x140, 2, 0x888, 1), /* MX53_PAD_EIM_D25__UART3_RXD_MUX */
- IMX_PIN_REG(MX53_PAD_EIM_D25, 0x488, 0x140, 3, 0x7B4, 1), /* MX53_PAD_EIM_D25__ECSPI1_SS3 */
- IMX_PIN_REG(MX53_PAD_EIM_D25, 0x488, 0x140, 4, 0x798, 1), /* MX53_PAD_EIM_D25__CSPI_SS3 */
- IMX_PIN_REG(MX53_PAD_EIM_D25, 0x488, 0x140, 5, 0x750, 1), /* MX53_PAD_EIM_D25__AUDMUX_AUD5_RXC */
- IMX_PIN_REG(MX53_PAD_EIM_D25, 0x488, 0x140, 6, 0x000, 0), /* MX53_PAD_EIM_D25__ECSPI2_SS3 */
- IMX_PIN_REG(MX53_PAD_EIM_D25, 0x488, 0x140, 7, 0x000, 0), /* MX53_PAD_EIM_D25__UART1_DSR */
- IMX_PIN_REG(MX53_PAD_EIM_D26, 0x48C, 0x144, 0, 0x000, 0), /* MX53_PAD_EIM_D26__EMI_WEIM_D_26 */
- IMX_PIN_REG(MX53_PAD_EIM_D26, 0x48C, 0x144, 1, 0x000, 0), /* MX53_PAD_EIM_D26__GPIO3_26 */
- IMX_PIN_REG(MX53_PAD_EIM_D26, 0x48C, 0x144, 2, 0x000, 0), /* MX53_PAD_EIM_D26__UART2_TXD_MUX */
- IMX_PIN_REG(MX53_PAD_EIM_D26, 0x48C, 0x144, 3, 0x80C, 0), /* MX53_PAD_EIM_D26__FIRI_RXD */
- IMX_PIN_REG(MX53_PAD_EIM_D26, 0x48C, 0x144, 4, 0x000, 0), /* MX53_PAD_EIM_D26__IPU_CSI0_D_1 */
- IMX_PIN_REG(MX53_PAD_EIM_D26, 0x48C, 0x144, 5, 0x000, 0), /* MX53_PAD_EIM_D26__IPU_DI1_PIN11 */
- IMX_PIN_REG(MX53_PAD_EIM_D26, 0x48C, 0x144, 6, 0x000, 0), /* MX53_PAD_EIM_D26__IPU_SISG_2 */
- IMX_PIN_REG(MX53_PAD_EIM_D26, 0x48C, 0x144, 7, 0x000, 0), /* MX53_PAD_EIM_D26__IPU_DISP1_DAT_22 */
- IMX_PIN_REG(MX53_PAD_EIM_D27, 0x490, 0x148, 0, 0x000, 0), /* MX53_PAD_EIM_D27__EMI_WEIM_D_27 */
- IMX_PIN_REG(MX53_PAD_EIM_D27, 0x490, 0x148, 1, 0x000, 0), /* MX53_PAD_EIM_D27__GPIO3_27 */
- IMX_PIN_REG(MX53_PAD_EIM_D27, 0x490, 0x148, 2, 0x880, 1), /* MX53_PAD_EIM_D27__UART2_RXD_MUX */
- IMX_PIN_REG(MX53_PAD_EIM_D27, 0x490, 0x148, 3, 0x000, 0), /* MX53_PAD_EIM_D27__FIRI_TXD */
- IMX_PIN_REG(MX53_PAD_EIM_D27, 0x490, 0x148, 4, 0x000, 0), /* MX53_PAD_EIM_D27__IPU_CSI0_D_0 */
- IMX_PIN_REG(MX53_PAD_EIM_D27, 0x490, 0x148, 5, 0x000, 0), /* MX53_PAD_EIM_D27__IPU_DI1_PIN13 */
- IMX_PIN_REG(MX53_PAD_EIM_D27, 0x490, 0x148, 6, 0x000, 0), /* MX53_PAD_EIM_D27__IPU_SISG_3 */
- IMX_PIN_REG(MX53_PAD_EIM_D27, 0x490, 0x148, 7, 0x000, 0), /* MX53_PAD_EIM_D27__IPU_DISP1_DAT_23 */
- IMX_PIN_REG(MX53_PAD_EIM_D28, 0x494, 0x14C, 0, 0x000, 0), /* MX53_PAD_EIM_D28__EMI_WEIM_D_28 */
- IMX_PIN_REG(MX53_PAD_EIM_D28, 0x494, 0x14C, 1, 0x000, 0), /* MX53_PAD_EIM_D28__GPIO3_28 */
- IMX_PIN_REG(MX53_PAD_EIM_D28, 0x494, 0x14C, 2, 0x000, 0), /* MX53_PAD_EIM_D28__UART2_CTS */
- IMX_PIN_REG(MX53_PAD_EIM_D28, 0x494, 0x14C, 3, 0x82C, 1), /* MX53_PAD_EIM_D28__IPU_DISPB0_SER_DIO */
- IMX_PIN_REG(MX53_PAD_EIM_D28, 0x494, 0x14C, 4, 0x788, 1), /* MX53_PAD_EIM_D28__CSPI_MOSI */
- IMX_PIN_REG(MX53_PAD_EIM_D28, 0x494, 0x14C, 5, 0x818, 1), /* MX53_PAD_EIM_D28__I2C1_SDA */
- IMX_PIN_REG(MX53_PAD_EIM_D28, 0x494, 0x14C, 6, 0x000, 0), /* MX53_PAD_EIM_D28__IPU_EXT_TRIG */
- IMX_PIN_REG(MX53_PAD_EIM_D28, 0x494, 0x14C, 7, 0x000, 0), /* MX53_PAD_EIM_D28__IPU_DI0_PIN13 */
- IMX_PIN_REG(MX53_PAD_EIM_D29, 0x498, 0x150, 0, 0x000, 0), /* MX53_PAD_EIM_D29__EMI_WEIM_D_29 */
- IMX_PIN_REG(MX53_PAD_EIM_D29, 0x498, 0x150, 1, 0x000, 0), /* MX53_PAD_EIM_D29__GPIO3_29 */
- IMX_PIN_REG(MX53_PAD_EIM_D29, 0x498, 0x150, 2, 0x87C, 1), /* MX53_PAD_EIM_D29__UART2_RTS */
- IMX_PIN_REG(MX53_PAD_EIM_D29, 0x498, 0x150, 3, 0x000, 0), /* MX53_PAD_EIM_D29__IPU_DISPB0_SER_RS */
- IMX_PIN_REG(MX53_PAD_EIM_D29, 0x498, 0x150, 4, 0x78C, 2), /* MX53_PAD_EIM_D29__CSPI_SS0 */
- IMX_PIN_REG(MX53_PAD_EIM_D29, 0x498, 0x150, 5, 0x000, 0), /* MX53_PAD_EIM_D29__IPU_DI1_PIN15 */
- IMX_PIN_REG(MX53_PAD_EIM_D29, 0x498, 0x150, 6, 0x83C, 0), /* MX53_PAD_EIM_D29__IPU_CSI1_VSYNC */
- IMX_PIN_REG(MX53_PAD_EIM_D29, 0x498, 0x150, 7, 0x000, 0), /* MX53_PAD_EIM_D29__IPU_DI0_PIN14 */
- IMX_PIN_REG(MX53_PAD_EIM_D30, 0x49C, 0x154, 0, 0x000, 0), /* MX53_PAD_EIM_D30__EMI_WEIM_D_30 */
- IMX_PIN_REG(MX53_PAD_EIM_D30, 0x49C, 0x154, 1, 0x000, 0), /* MX53_PAD_EIM_D30__GPIO3_30 */
- IMX_PIN_REG(MX53_PAD_EIM_D30, 0x49C, 0x154, 2, 0x000, 0), /* MX53_PAD_EIM_D30__UART3_CTS */
- IMX_PIN_REG(MX53_PAD_EIM_D30, 0x49C, 0x154, 3, 0x000, 0), /* MX53_PAD_EIM_D30__IPU_CSI0_D_3 */
- IMX_PIN_REG(MX53_PAD_EIM_D30, 0x49C, 0x154, 4, 0x000, 0), /* MX53_PAD_EIM_D30__IPU_DI0_PIN11 */
- IMX_PIN_REG(MX53_PAD_EIM_D30, 0x49C, 0x154, 5, 0x000, 0), /* MX53_PAD_EIM_D30__IPU_DISP1_DAT_21 */
- IMX_PIN_REG(MX53_PAD_EIM_D30, 0x49C, 0x154, 6, 0x8A0, 0), /* MX53_PAD_EIM_D30__USBOH3_USBH1_OC */
- IMX_PIN_REG(MX53_PAD_EIM_D30, 0x49C, 0x154, 7, 0x8A4, 1), /* MX53_PAD_EIM_D30__USBOH3_USBH2_OC */
- IMX_PIN_REG(MX53_PAD_EIM_D31, 0x4A0, 0x158, 0, 0x000, 0), /* MX53_PAD_EIM_D31__EMI_WEIM_D_31 */
- IMX_PIN_REG(MX53_PAD_EIM_D31, 0x4A0, 0x158, 1, 0x000, 0), /* MX53_PAD_EIM_D31__GPIO3_31 */
- IMX_PIN_REG(MX53_PAD_EIM_D31, 0x4A0, 0x158, 2, 0x884, 3), /* MX53_PAD_EIM_D31__UART3_RTS */
- IMX_PIN_REG(MX53_PAD_EIM_D31, 0x4A0, 0x158, 3, 0x000, 0), /* MX53_PAD_EIM_D31__IPU_CSI0_D_2 */
- IMX_PIN_REG(MX53_PAD_EIM_D31, 0x4A0, 0x158, 4, 0x000, 0), /* MX53_PAD_EIM_D31__IPU_DI0_PIN12 */
- IMX_PIN_REG(MX53_PAD_EIM_D31, 0x4A0, 0x158, 5, 0x000, 0), /* MX53_PAD_EIM_D31__IPU_DISP1_DAT_20 */
- IMX_PIN_REG(MX53_PAD_EIM_D31, 0x4A0, 0x158, 6, 0x000, 0), /* MX53_PAD_EIM_D31__USBOH3_USBH1_PWR */
- IMX_PIN_REG(MX53_PAD_EIM_D31, 0x4A0, 0x158, 7, 0x000, 0), /* MX53_PAD_EIM_D31__USBOH3_USBH2_PWR */
- IMX_PIN_REG(MX53_PAD_EIM_A24, 0x4A8, 0x15C, 0, 0x000, 0), /* MX53_PAD_EIM_A24__EMI_WEIM_A_24 */
- IMX_PIN_REG(MX53_PAD_EIM_A24, 0x4A8, 0x15C, 1, 0x000, 0), /* MX53_PAD_EIM_A24__GPIO5_4 */
- IMX_PIN_REG(MX53_PAD_EIM_A24, 0x4A8, 0x15C, 2, 0x000, 0), /* MX53_PAD_EIM_A24__IPU_DISP1_DAT_19 */
- IMX_PIN_REG(MX53_PAD_EIM_A24, 0x4A8, 0x15C, 3, 0x000, 0), /* MX53_PAD_EIM_A24__IPU_CSI1_D_19 */
- IMX_PIN_REG(MX53_PAD_EIM_A24, 0x4A8, 0x15C, 6, 0x000, 0), /* MX53_PAD_EIM_A24__IPU_SISG_2 */
- IMX_PIN_REG(MX53_PAD_EIM_A24, 0x4A8, 0x15C, 7, 0x000, 0), /* MX53_PAD_EIM_A24__USBPHY2_BVALID */
- IMX_PIN_REG(MX53_PAD_EIM_A23, 0x4AC, 0x160, 0, 0x000, 0), /* MX53_PAD_EIM_A23__EMI_WEIM_A_23 */
- IMX_PIN_REG(MX53_PAD_EIM_A23, 0x4AC, 0x160, 1, 0x000, 0), /* MX53_PAD_EIM_A23__GPIO6_6 */
- IMX_PIN_REG(MX53_PAD_EIM_A23, 0x4AC, 0x160, 2, 0x000, 0), /* MX53_PAD_EIM_A23__IPU_DISP1_DAT_18 */
- IMX_PIN_REG(MX53_PAD_EIM_A23, 0x4AC, 0x160, 3, 0x000, 0), /* MX53_PAD_EIM_A23__IPU_CSI1_D_18 */
- IMX_PIN_REG(MX53_PAD_EIM_A23, 0x4AC, 0x160, 6, 0x000, 0), /* MX53_PAD_EIM_A23__IPU_SISG_3 */
- IMX_PIN_REG(MX53_PAD_EIM_A23, 0x4AC, 0x160, 7, 0x000, 0), /* MX53_PAD_EIM_A23__USBPHY2_ENDSESSION */
- IMX_PIN_REG(MX53_PAD_EIM_A22, 0x4B0, 0x164, 0, 0x000, 0), /* MX53_PAD_EIM_A22__EMI_WEIM_A_22 */
- IMX_PIN_REG(MX53_PAD_EIM_A22, 0x4B0, 0x164, 1, 0x000, 0), /* MX53_PAD_EIM_A22__GPIO2_16 */
- IMX_PIN_REG(MX53_PAD_EIM_A22, 0x4B0, 0x164, 2, 0x000, 0), /* MX53_PAD_EIM_A22__IPU_DISP1_DAT_17 */
- IMX_PIN_REG(MX53_PAD_EIM_A22, 0x4B0, 0x164, 3, 0x000, 0), /* MX53_PAD_EIM_A22__IPU_CSI1_D_17 */
- IMX_PIN_REG(MX53_PAD_EIM_A22, 0x4B0, 0x164, 7, 0x000, 0), /* MX53_PAD_EIM_A22__SRC_BT_CFG1_7 */
- IMX_PIN_REG(MX53_PAD_EIM_A21, 0x4B4, 0x168, 0, 0x000, 0), /* MX53_PAD_EIM_A21__EMI_WEIM_A_21 */
- IMX_PIN_REG(MX53_PAD_EIM_A21, 0x4B4, 0x168, 1, 0x000, 0), /* MX53_PAD_EIM_A21__GPIO2_17 */
- IMX_PIN_REG(MX53_PAD_EIM_A21, 0x4B4, 0x168, 2, 0x000, 0), /* MX53_PAD_EIM_A21__IPU_DISP1_DAT_16 */
- IMX_PIN_REG(MX53_PAD_EIM_A21, 0x4B4, 0x168, 3, 0x000, 0), /* MX53_PAD_EIM_A21__IPU_CSI1_D_16 */
- IMX_PIN_REG(MX53_PAD_EIM_A21, 0x4B4, 0x168, 7, 0x000, 0), /* MX53_PAD_EIM_A21__SRC_BT_CFG1_6 */
- IMX_PIN_REG(MX53_PAD_EIM_A20, 0x4B8, 0x16C, 0, 0x000, 0), /* MX53_PAD_EIM_A20__EMI_WEIM_A_20 */
- IMX_PIN_REG(MX53_PAD_EIM_A20, 0x4B8, 0x16C, 1, 0x000, 0), /* MX53_PAD_EIM_A20__GPIO2_18 */
- IMX_PIN_REG(MX53_PAD_EIM_A20, 0x4B8, 0x16C, 2, 0x000, 0), /* MX53_PAD_EIM_A20__IPU_DISP1_DAT_15 */
- IMX_PIN_REG(MX53_PAD_EIM_A20, 0x4B8, 0x16C, 3, 0x000, 0), /* MX53_PAD_EIM_A20__IPU_CSI1_D_15 */
- IMX_PIN_REG(MX53_PAD_EIM_A20, 0x4B8, 0x16C, 7, 0x000, 0), /* MX53_PAD_EIM_A20__SRC_BT_CFG1_5 */
- IMX_PIN_REG(MX53_PAD_EIM_A19, 0x4BC, 0x170, 0, 0x000, 0), /* MX53_PAD_EIM_A19__EMI_WEIM_A_19 */
- IMX_PIN_REG(MX53_PAD_EIM_A19, 0x4BC, 0x170, 1, 0x000, 0), /* MX53_PAD_EIM_A19__GPIO2_19 */
- IMX_PIN_REG(MX53_PAD_EIM_A19, 0x4BC, 0x170, 2, 0x000, 0), /* MX53_PAD_EIM_A19__IPU_DISP1_DAT_14 */
- IMX_PIN_REG(MX53_PAD_EIM_A19, 0x4BC, 0x170, 3, 0x000, 0), /* MX53_PAD_EIM_A19__IPU_CSI1_D_14 */
- IMX_PIN_REG(MX53_PAD_EIM_A19, 0x4BC, 0x170, 7, 0x000, 0), /* MX53_PAD_EIM_A19__SRC_BT_CFG1_4 */
- IMX_PIN_REG(MX53_PAD_EIM_A18, 0x4C0, 0x174, 0, 0x000, 0), /* MX53_PAD_EIM_A18__EMI_WEIM_A_18 */
- IMX_PIN_REG(MX53_PAD_EIM_A18, 0x4C0, 0x174, 1, 0x000, 0), /* MX53_PAD_EIM_A18__GPIO2_20 */
- IMX_PIN_REG(MX53_PAD_EIM_A18, 0x4C0, 0x174, 2, 0x000, 0), /* MX53_PAD_EIM_A18__IPU_DISP1_DAT_13 */
- IMX_PIN_REG(MX53_PAD_EIM_A18, 0x4C0, 0x174, 3, 0x000, 0), /* MX53_PAD_EIM_A18__IPU_CSI1_D_13 */
- IMX_PIN_REG(MX53_PAD_EIM_A18, 0x4C0, 0x174, 7, 0x000, 0), /* MX53_PAD_EIM_A18__SRC_BT_CFG1_3 */
- IMX_PIN_REG(MX53_PAD_EIM_A17, 0x4C4, 0x178, 0, 0x000, 0), /* MX53_PAD_EIM_A17__EMI_WEIM_A_17 */
- IMX_PIN_REG(MX53_PAD_EIM_A17, 0x4C4, 0x178, 1, 0x000, 0), /* MX53_PAD_EIM_A17__GPIO2_21 */
- IMX_PIN_REG(MX53_PAD_EIM_A17, 0x4C4, 0x178, 2, 0x000, 0), /* MX53_PAD_EIM_A17__IPU_DISP1_DAT_12 */
- IMX_PIN_REG(MX53_PAD_EIM_A17, 0x4C4, 0x178, 3, 0x000, 0), /* MX53_PAD_EIM_A17__IPU_CSI1_D_12 */
- IMX_PIN_REG(MX53_PAD_EIM_A17, 0x4C4, 0x178, 7, 0x000, 0), /* MX53_PAD_EIM_A17__SRC_BT_CFG1_2 */
- IMX_PIN_REG(MX53_PAD_EIM_A16, 0x4C8, 0x17C, 0, 0x000, 0), /* MX53_PAD_EIM_A16__EMI_WEIM_A_16 */
- IMX_PIN_REG(MX53_PAD_EIM_A16, 0x4C8, 0x17C, 1, 0x000, 0), /* MX53_PAD_EIM_A16__GPIO2_22 */
- IMX_PIN_REG(MX53_PAD_EIM_A16, 0x4C8, 0x17C, 2, 0x000, 0), /* MX53_PAD_EIM_A16__IPU_DI1_DISP_CLK */
- IMX_PIN_REG(MX53_PAD_EIM_A16, 0x4C8, 0x17C, 3, 0x000, 0), /* MX53_PAD_EIM_A16__IPU_CSI1_PIXCLK */
- IMX_PIN_REG(MX53_PAD_EIM_A16, 0x4C8, 0x17C, 7, 0x000, 0), /* MX53_PAD_EIM_A16__SRC_BT_CFG1_1 */
- IMX_PIN_REG(MX53_PAD_EIM_CS0, 0x4CC, 0x180, 0, 0x000, 0), /* MX53_PAD_EIM_CS0__EMI_WEIM_CS_0 */
- IMX_PIN_REG(MX53_PAD_EIM_CS0, 0x4CC, 0x180, 1, 0x000, 0), /* MX53_PAD_EIM_CS0__GPIO2_23 */
- IMX_PIN_REG(MX53_PAD_EIM_CS0, 0x4CC, 0x180, 2, 0x7B8, 2), /* MX53_PAD_EIM_CS0__ECSPI2_SCLK */
- IMX_PIN_REG(MX53_PAD_EIM_CS0, 0x4CC, 0x180, 3, 0x000, 0), /* MX53_PAD_EIM_CS0__IPU_DI1_PIN5 */
- IMX_PIN_REG(MX53_PAD_EIM_CS1, 0x4D0, 0x184, 0, 0x000, 0), /* MX53_PAD_EIM_CS1__EMI_WEIM_CS_1 */
- IMX_PIN_REG(MX53_PAD_EIM_CS1, 0x4D0, 0x184, 1, 0x000, 0), /* MX53_PAD_EIM_CS1__GPIO2_24 */
- IMX_PIN_REG(MX53_PAD_EIM_CS1, 0x4D0, 0x184, 2, 0x7C0, 2), /* MX53_PAD_EIM_CS1__ECSPI2_MOSI */
- IMX_PIN_REG(MX53_PAD_EIM_CS1, 0x4D0, 0x184, 3, 0x000, 0), /* MX53_PAD_EIM_CS1__IPU_DI1_PIN6 */
- IMX_PIN_REG(MX53_PAD_EIM_OE, 0x4D4, 0x188, 0, 0x000, 0), /* MX53_PAD_EIM_OE__EMI_WEIM_OE */
- IMX_PIN_REG(MX53_PAD_EIM_OE, 0x4D4, 0x188, 1, 0x000, 0), /* MX53_PAD_EIM_OE__GPIO2_25 */
- IMX_PIN_REG(MX53_PAD_EIM_OE, 0x4D4, 0x188, 2, 0x7BC, 2), /* MX53_PAD_EIM_OE__ECSPI2_MISO */
- IMX_PIN_REG(MX53_PAD_EIM_OE, 0x4D4, 0x188, 3, 0x000, 0), /* MX53_PAD_EIM_OE__IPU_DI1_PIN7 */
- IMX_PIN_REG(MX53_PAD_EIM_OE, 0x4D4, 0x188, 7, 0x000, 0), /* MX53_PAD_EIM_OE__USBPHY2_IDDIG */
- IMX_PIN_REG(MX53_PAD_EIM_RW, 0x4D8, 0x18C, 0, 0x000, 0), /* MX53_PAD_EIM_RW__EMI_WEIM_RW */
- IMX_PIN_REG(MX53_PAD_EIM_RW, 0x4D8, 0x18C, 1, 0x000, 0), /* MX53_PAD_EIM_RW__GPIO2_26 */
- IMX_PIN_REG(MX53_PAD_EIM_RW, 0x4D8, 0x18C, 2, 0x7C4, 2), /* MX53_PAD_EIM_RW__ECSPI2_SS0 */
- IMX_PIN_REG(MX53_PAD_EIM_RW, 0x4D8, 0x18C, 3, 0x000, 0), /* MX53_PAD_EIM_RW__IPU_DI1_PIN8 */
- IMX_PIN_REG(MX53_PAD_EIM_RW, 0x4D8, 0x18C, 7, 0x000, 0), /* MX53_PAD_EIM_RW__USBPHY2_HOSTDISCONNECT */
- IMX_PIN_REG(MX53_PAD_EIM_LBA, 0x4DC, 0x190, 0, 0x000, 0), /* MX53_PAD_EIM_LBA__EMI_WEIM_LBA */
- IMX_PIN_REG(MX53_PAD_EIM_LBA, 0x4DC, 0x190, 1, 0x000, 0), /* MX53_PAD_EIM_LBA__GPIO2_27 */
- IMX_PIN_REG(MX53_PAD_EIM_LBA, 0x4DC, 0x190, 2, 0x7C8, 1), /* MX53_PAD_EIM_LBA__ECSPI2_SS1 */
- IMX_PIN_REG(MX53_PAD_EIM_LBA, 0x4DC, 0x190, 3, 0x000, 0), /* MX53_PAD_EIM_LBA__IPU_DI1_PIN17 */
- IMX_PIN_REG(MX53_PAD_EIM_LBA, 0x4DC, 0x190, 7, 0x000, 0), /* MX53_PAD_EIM_LBA__SRC_BT_CFG1_0 */
- IMX_PIN_REG(MX53_PAD_EIM_EB0, 0x4E4, 0x194, 0, 0x000, 0), /* MX53_PAD_EIM_EB0__EMI_WEIM_EB_0 */
- IMX_PIN_REG(MX53_PAD_EIM_EB0, 0x4E4, 0x194, 1, 0x000, 0), /* MX53_PAD_EIM_EB0__GPIO2_28 */
- IMX_PIN_REG(MX53_PAD_EIM_EB0, 0x4E4, 0x194, 3, 0x000, 0), /* MX53_PAD_EIM_EB0__IPU_DISP1_DAT_11 */
- IMX_PIN_REG(MX53_PAD_EIM_EB0, 0x4E4, 0x194, 4, 0x000, 0), /* MX53_PAD_EIM_EB0__IPU_CSI1_D_11 */
- IMX_PIN_REG(MX53_PAD_EIM_EB0, 0x4E4, 0x194, 5, 0x810, 0), /* MX53_PAD_EIM_EB0__GPC_PMIC_RDY */
- IMX_PIN_REG(MX53_PAD_EIM_EB0, 0x4E4, 0x194, 7, 0x000, 0), /* MX53_PAD_EIM_EB0__SRC_BT_CFG2_7 */
- IMX_PIN_REG(MX53_PAD_EIM_EB1, 0x4E8, 0x198, 0, 0x000, 0), /* MX53_PAD_EIM_EB1__EMI_WEIM_EB_1 */
- IMX_PIN_REG(MX53_PAD_EIM_EB1, 0x4E8, 0x198, 1, 0x000, 0), /* MX53_PAD_EIM_EB1__GPIO2_29 */
- IMX_PIN_REG(MX53_PAD_EIM_EB1, 0x4E8, 0x198, 3, 0x000, 0), /* MX53_PAD_EIM_EB1__IPU_DISP1_DAT_10 */
- IMX_PIN_REG(MX53_PAD_EIM_EB1, 0x4E8, 0x198, 4, 0x000, 0), /* MX53_PAD_EIM_EB1__IPU_CSI1_D_10 */
- IMX_PIN_REG(MX53_PAD_EIM_EB1, 0x4E8, 0x198, 7, 0x000, 0), /* MX53_PAD_EIM_EB1__SRC_BT_CFG2_6 */
- IMX_PIN_REG(MX53_PAD_EIM_DA0, 0x4EC, 0x19C, 0, 0x000, 0), /* MX53_PAD_EIM_DA0__EMI_NAND_WEIM_DA_0 */
- IMX_PIN_REG(MX53_PAD_EIM_DA0, 0x4EC, 0x19C, 1, 0x000, 0), /* MX53_PAD_EIM_DA0__GPIO3_0 */
- IMX_PIN_REG(MX53_PAD_EIM_DA0, 0x4EC, 0x19C, 3, 0x000, 0), /* MX53_PAD_EIM_DA0__IPU_DISP1_DAT_9 */
- IMX_PIN_REG(MX53_PAD_EIM_DA0, 0x4EC, 0x19C, 4, 0x000, 0), /* MX53_PAD_EIM_DA0__IPU_CSI1_D_9 */
- IMX_PIN_REG(MX53_PAD_EIM_DA0, 0x4EC, 0x19C, 7, 0x000, 0), /* MX53_PAD_EIM_DA0__SRC_BT_CFG2_5 */
- IMX_PIN_REG(MX53_PAD_EIM_DA1, 0x4F0, 0x1A0, 0, 0x000, 0), /* MX53_PAD_EIM_DA1__EMI_NAND_WEIM_DA_1 */
- IMX_PIN_REG(MX53_PAD_EIM_DA1, 0x4F0, 0x1A0, 1, 0x000, 0), /* MX53_PAD_EIM_DA1__GPIO3_1 */
- IMX_PIN_REG(MX53_PAD_EIM_DA1, 0x4F0, 0x1A0, 3, 0x000, 0), /* MX53_PAD_EIM_DA1__IPU_DISP1_DAT_8 */
- IMX_PIN_REG(MX53_PAD_EIM_DA1, 0x4F0, 0x1A0, 4, 0x000, 0), /* MX53_PAD_EIM_DA1__IPU_CSI1_D_8 */
- IMX_PIN_REG(MX53_PAD_EIM_DA1, 0x4F0, 0x1A0, 7, 0x000, 0), /* MX53_PAD_EIM_DA1__SRC_BT_CFG2_4 */
- IMX_PIN_REG(MX53_PAD_EIM_DA2, 0x4F4, 0x1A4, 0, 0x000, 0), /* MX53_PAD_EIM_DA2__EMI_NAND_WEIM_DA_2 */
- IMX_PIN_REG(MX53_PAD_EIM_DA2, 0x4F4, 0x1A4, 1, 0x000, 0), /* MX53_PAD_EIM_DA2__GPIO3_2 */
- IMX_PIN_REG(MX53_PAD_EIM_DA2, 0x4F4, 0x1A4, 3, 0x000, 0), /* MX53_PAD_EIM_DA2__IPU_DISP1_DAT_7 */
- IMX_PIN_REG(MX53_PAD_EIM_DA2, 0x4F4, 0x1A4, 4, 0x000, 0), /* MX53_PAD_EIM_DA2__IPU_CSI1_D_7 */
- IMX_PIN_REG(MX53_PAD_EIM_DA2, 0x4F4, 0x1A4, 7, 0x000, 0), /* MX53_PAD_EIM_DA2__SRC_BT_CFG2_3 */
- IMX_PIN_REG(MX53_PAD_EIM_DA3, 0x4F8, 0x1A8, 0, 0x000, 0), /* MX53_PAD_EIM_DA3__EMI_NAND_WEIM_DA_3 */
- IMX_PIN_REG(MX53_PAD_EIM_DA3, 0x4F8, 0x1A8, 1, 0x000, 0), /* MX53_PAD_EIM_DA3__GPIO3_3 */
- IMX_PIN_REG(MX53_PAD_EIM_DA3, 0x4F8, 0x1A8, 3, 0x000, 0), /* MX53_PAD_EIM_DA3__IPU_DISP1_DAT_6 */
- IMX_PIN_REG(MX53_PAD_EIM_DA3, 0x4F8, 0x1A8, 4, 0x000, 0), /* MX53_PAD_EIM_DA3__IPU_CSI1_D_6 */
- IMX_PIN_REG(MX53_PAD_EIM_DA3, 0x4F8, 0x1A8, 7, 0x000, 0), /* MX53_PAD_EIM_DA3__SRC_BT_CFG2_2 */
- IMX_PIN_REG(MX53_PAD_EIM_DA4, 0x4FC, 0x1AC, 0, 0x000, 0), /* MX53_PAD_EIM_DA4__EMI_NAND_WEIM_DA_4 */
- IMX_PIN_REG(MX53_PAD_EIM_DA4, 0x4FC, 0x1AC, 1, 0x000, 0), /* MX53_PAD_EIM_DA4__GPIO3_4 */
- IMX_PIN_REG(MX53_PAD_EIM_DA4, 0x4FC, 0x1AC, 3, 0x000, 0), /* MX53_PAD_EIM_DA4__IPU_DISP1_DAT_5 */
- IMX_PIN_REG(MX53_PAD_EIM_DA4, 0x4FC, 0x1AC, 4, 0x000, 0), /* MX53_PAD_EIM_DA4__IPU_CSI1_D_5 */
- IMX_PIN_REG(MX53_PAD_EIM_DA4, 0x4FC, 0x1AC, 7, 0x000, 0), /* MX53_PAD_EIM_DA4__SRC_BT_CFG3_7 */
- IMX_PIN_REG(MX53_PAD_EIM_DA5, 0x500, 0x1B0, 0, 0x000, 0), /* MX53_PAD_EIM_DA5__EMI_NAND_WEIM_DA_5 */
- IMX_PIN_REG(MX53_PAD_EIM_DA5, 0x500, 0x1B0, 1, 0x000, 0), /* MX53_PAD_EIM_DA5__GPIO3_5 */
- IMX_PIN_REG(MX53_PAD_EIM_DA5, 0x500, 0x1B0, 3, 0x000, 0), /* MX53_PAD_EIM_DA5__IPU_DISP1_DAT_4 */
- IMX_PIN_REG(MX53_PAD_EIM_DA5, 0x500, 0x1B0, 4, 0x000, 0), /* MX53_PAD_EIM_DA5__IPU_CSI1_D_4 */
- IMX_PIN_REG(MX53_PAD_EIM_DA5, 0x500, 0x1B0, 7, 0x000, 0), /* MX53_PAD_EIM_DA5__SRC_BT_CFG3_6 */
- IMX_PIN_REG(MX53_PAD_EIM_DA6, 0x504, 0x1B4, 0, 0x000, 0), /* MX53_PAD_EIM_DA6__EMI_NAND_WEIM_DA_6 */
- IMX_PIN_REG(MX53_PAD_EIM_DA6, 0x504, 0x1B4, 1, 0x000, 0), /* MX53_PAD_EIM_DA6__GPIO3_6 */
- IMX_PIN_REG(MX53_PAD_EIM_DA6, 0x504, 0x1B4, 3, 0x000, 0), /* MX53_PAD_EIM_DA6__IPU_DISP1_DAT_3 */
- IMX_PIN_REG(MX53_PAD_EIM_DA6, 0x504, 0x1B4, 4, 0x000, 0), /* MX53_PAD_EIM_DA6__IPU_CSI1_D_3 */
- IMX_PIN_REG(MX53_PAD_EIM_DA6, 0x504, 0x1B4, 7, 0x000, 0), /* MX53_PAD_EIM_DA6__SRC_BT_CFG3_5 */
- IMX_PIN_REG(MX53_PAD_EIM_DA7, 0x508, 0x1B8, 0, 0x000, 0), /* MX53_PAD_EIM_DA7__EMI_NAND_WEIM_DA_7 */
- IMX_PIN_REG(MX53_PAD_EIM_DA7, 0x508, 0x1B8, 1, 0x000, 0), /* MX53_PAD_EIM_DA7__GPIO3_7 */
- IMX_PIN_REG(MX53_PAD_EIM_DA7, 0x508, 0x1B8, 3, 0x000, 0), /* MX53_PAD_EIM_DA7__IPU_DISP1_DAT_2 */
- IMX_PIN_REG(MX53_PAD_EIM_DA7, 0x508, 0x1B8, 4, 0x000, 0), /* MX53_PAD_EIM_DA7__IPU_CSI1_D_2 */
- IMX_PIN_REG(MX53_PAD_EIM_DA7, 0x508, 0x1B8, 7, 0x000, 0), /* MX53_PAD_EIM_DA7__SRC_BT_CFG3_4 */
- IMX_PIN_REG(MX53_PAD_EIM_DA8, 0x50C, 0x1BC, 0, 0x000, 0), /* MX53_PAD_EIM_DA8__EMI_NAND_WEIM_DA_8 */
- IMX_PIN_REG(MX53_PAD_EIM_DA8, 0x50C, 0x1BC, 1, 0x000, 0), /* MX53_PAD_EIM_DA8__GPIO3_8 */
- IMX_PIN_REG(MX53_PAD_EIM_DA8, 0x50C, 0x1BC, 3, 0x000, 0), /* MX53_PAD_EIM_DA8__IPU_DISP1_DAT_1 */
- IMX_PIN_REG(MX53_PAD_EIM_DA8, 0x50C, 0x1BC, 4, 0x000, 0), /* MX53_PAD_EIM_DA8__IPU_CSI1_D_1 */
- IMX_PIN_REG(MX53_PAD_EIM_DA8, 0x50C, 0x1BC, 7, 0x000, 0), /* MX53_PAD_EIM_DA8__SRC_BT_CFG3_3 */
- IMX_PIN_REG(MX53_PAD_EIM_DA9, 0x510, 0x1C0, 0, 0x000, 0), /* MX53_PAD_EIM_DA9__EMI_NAND_WEIM_DA_9 */
- IMX_PIN_REG(MX53_PAD_EIM_DA9, 0x510, 0x1C0, 1, 0x000, 0), /* MX53_PAD_EIM_DA9__GPIO3_9 */
- IMX_PIN_REG(MX53_PAD_EIM_DA9, 0x510, 0x1C0, 3, 0x000, 0), /* MX53_PAD_EIM_DA9__IPU_DISP1_DAT_0 */
- IMX_PIN_REG(MX53_PAD_EIM_DA9, 0x510, 0x1C0, 4, 0x000, 0), /* MX53_PAD_EIM_DA9__IPU_CSI1_D_0 */
- IMX_PIN_REG(MX53_PAD_EIM_DA9, 0x510, 0x1C0, 7, 0x000, 0), /* MX53_PAD_EIM_DA9__SRC_BT_CFG3_2 */
- IMX_PIN_REG(MX53_PAD_EIM_DA10, 0x514, 0x1C4, 0, 0x000, 0), /* MX53_PAD_EIM_DA10__EMI_NAND_WEIM_DA_10 */
- IMX_PIN_REG(MX53_PAD_EIM_DA10, 0x514, 0x1C4, 1, 0x000, 0), /* MX53_PAD_EIM_DA10__GPIO3_10 */
- IMX_PIN_REG(MX53_PAD_EIM_DA10, 0x514, 0x1C4, 3, 0x000, 0), /* MX53_PAD_EIM_DA10__IPU_DI1_PIN15 */
- IMX_PIN_REG(MX53_PAD_EIM_DA10, 0x514, 0x1C4, 4, 0x834, 1), /* MX53_PAD_EIM_DA10__IPU_CSI1_DATA_EN */
- IMX_PIN_REG(MX53_PAD_EIM_DA10, 0x514, 0x1C4, 7, 0x000, 0), /* MX53_PAD_EIM_DA10__SRC_BT_CFG3_1 */
- IMX_PIN_REG(MX53_PAD_EIM_DA11, 0x518, 0x1C8, 0, 0x000, 0), /* MX53_PAD_EIM_DA11__EMI_NAND_WEIM_DA_11 */
- IMX_PIN_REG(MX53_PAD_EIM_DA11, 0x518, 0x1C8, 1, 0x000, 0), /* MX53_PAD_EIM_DA11__GPIO3_11 */
- IMX_PIN_REG(MX53_PAD_EIM_DA11, 0x518, 0x1C8, 3, 0x000, 0), /* MX53_PAD_EIM_DA11__IPU_DI1_PIN2 */
- IMX_PIN_REG(MX53_PAD_EIM_DA11, 0x518, 0x1C8, 4, 0x838, 1), /* MX53_PAD_EIM_DA11__IPU_CSI1_HSYNC */
- IMX_PIN_REG(MX53_PAD_EIM_DA12, 0x51C, 0x1CC, 0, 0x000, 0), /* MX53_PAD_EIM_DA12__EMI_NAND_WEIM_DA_12 */
- IMX_PIN_REG(MX53_PAD_EIM_DA12, 0x51C, 0x1CC, 1, 0x000, 0), /* MX53_PAD_EIM_DA12__GPIO3_12 */
- IMX_PIN_REG(MX53_PAD_EIM_DA12, 0x51C, 0x1CC, 3, 0x000, 0), /* MX53_PAD_EIM_DA12__IPU_DI1_PIN3 */
- IMX_PIN_REG(MX53_PAD_EIM_DA12, 0x51C, 0x1CC, 4, 0x83C, 1), /* MX53_PAD_EIM_DA12__IPU_CSI1_VSYNC */
- IMX_PIN_REG(MX53_PAD_EIM_DA13, 0x520, 0x1D0, 0, 0x000, 0), /* MX53_PAD_EIM_DA13__EMI_NAND_WEIM_DA_13 */
- IMX_PIN_REG(MX53_PAD_EIM_DA13, 0x520, 0x1D0, 1, 0x000, 0), /* MX53_PAD_EIM_DA13__GPIO3_13 */
- IMX_PIN_REG(MX53_PAD_EIM_DA13, 0x520, 0x1D0, 3, 0x000, 0), /* MX53_PAD_EIM_DA13__IPU_DI1_D0_CS */
- IMX_PIN_REG(MX53_PAD_EIM_DA13, 0x520, 0x1D0, 4, 0x76C, 1), /* MX53_PAD_EIM_DA13__CCM_DI1_EXT_CLK */
- IMX_PIN_REG(MX53_PAD_EIM_DA14, 0x524, 0x1D4, 0, 0x000, 0), /* MX53_PAD_EIM_DA14__EMI_NAND_WEIM_DA_14 */
- IMX_PIN_REG(MX53_PAD_EIM_DA14, 0x524, 0x1D4, 1, 0x000, 0), /* MX53_PAD_EIM_DA14__GPIO3_14 */
- IMX_PIN_REG(MX53_PAD_EIM_DA14, 0x524, 0x1D4, 3, 0x000, 0), /* MX53_PAD_EIM_DA14__IPU_DI1_D1_CS */
- IMX_PIN_REG(MX53_PAD_EIM_DA14, 0x524, 0x1D4, 4, 0x000, 0), /* MX53_PAD_EIM_DA14__CCM_DI0_EXT_CLK */
- IMX_PIN_REG(MX53_PAD_EIM_DA15, 0x528, 0x1D8, 0, 0x000, 0), /* MX53_PAD_EIM_DA15__EMI_NAND_WEIM_DA_15 */
- IMX_PIN_REG(MX53_PAD_EIM_DA15, 0x528, 0x1D8, 1, 0x000, 0), /* MX53_PAD_EIM_DA15__GPIO3_15 */
- IMX_PIN_REG(MX53_PAD_EIM_DA15, 0x528, 0x1D8, 3, 0x000, 0), /* MX53_PAD_EIM_DA15__IPU_DI1_PIN1 */
- IMX_PIN_REG(MX53_PAD_EIM_DA15, 0x528, 0x1D8, 4, 0x000, 0), /* MX53_PAD_EIM_DA15__IPU_DI1_PIN4 */
- IMX_PIN_REG(MX53_PAD_NANDF_WE_B, 0x52C, 0x1DC, 0, 0x000, 0), /* MX53_PAD_NANDF_WE_B__EMI_NANDF_WE_B */
- IMX_PIN_REG(MX53_PAD_NANDF_WE_B, 0x52C, 0x1DC, 1, 0x000, 0), /* MX53_PAD_NANDF_WE_B__GPIO6_12 */
- IMX_PIN_REG(MX53_PAD_NANDF_RE_B, 0x530, 0x1E0, 0, 0x000, 0), /* MX53_PAD_NANDF_RE_B__EMI_NANDF_RE_B */
- IMX_PIN_REG(MX53_PAD_NANDF_RE_B, 0x530, 0x1E0, 1, 0x000, 0), /* MX53_PAD_NANDF_RE_B__GPIO6_13 */
- IMX_PIN_REG(MX53_PAD_EIM_WAIT, 0x534, 0x1E4, 0, 0x000, 0), /* MX53_PAD_EIM_WAIT__EMI_WEIM_WAIT */
- IMX_PIN_REG(MX53_PAD_EIM_WAIT, 0x534, 0x1E4, 1, 0x000, 0), /* MX53_PAD_EIM_WAIT__GPIO5_0 */
- IMX_PIN_REG(MX53_PAD_EIM_WAIT, 0x534, 0x1E4, 2, 0x000, 0), /* MX53_PAD_EIM_WAIT__EMI_WEIM_DTACK_B */
- IMX_PIN_REG(MX53_PAD_LVDS1_TX3_P, NO_PAD, 0x1EC, 0, 0x000, 0), /* MX53_PAD_LVDS1_TX3_P__GPIO6_22 */
- IMX_PIN_REG(MX53_PAD_LVDS1_TX3_P, NO_PAD, 0x1EC, 1, 0x000, 0), /* MX53_PAD_LVDS1_TX3_P__LDB_LVDS1_TX3 */
- IMX_PIN_REG(MX53_PAD_LVDS1_TX2_P, NO_PAD, 0x1F0, 0, 0x000, 0), /* MX53_PAD_LVDS1_TX2_P__GPIO6_24 */
- IMX_PIN_REG(MX53_PAD_LVDS1_TX2_P, NO_PAD, 0x1F0, 1, 0x000, 0), /* MX53_PAD_LVDS1_TX2_P__LDB_LVDS1_TX2 */
- IMX_PIN_REG(MX53_PAD_LVDS1_CLK_P, NO_PAD, 0x1F4, 0, 0x000, 0), /* MX53_PAD_LVDS1_CLK_P__GPIO6_26 */
- IMX_PIN_REG(MX53_PAD_LVDS1_CLK_P, NO_PAD, 0x1F4, 1, 0x000, 0), /* MX53_PAD_LVDS1_CLK_P__LDB_LVDS1_CLK */
- IMX_PIN_REG(MX53_PAD_LVDS1_TX1_P, NO_PAD, 0x1F8, 0, 0x000, 0), /* MX53_PAD_LVDS1_TX1_P__GPIO6_28 */
- IMX_PIN_REG(MX53_PAD_LVDS1_TX1_P, NO_PAD, 0x1F8, 1, 0x000, 0), /* MX53_PAD_LVDS1_TX1_P__LDB_LVDS1_TX1 */
- IMX_PIN_REG(MX53_PAD_LVDS1_TX0_P, NO_PAD, 0x1FC, 0, 0x000, 0), /* MX53_PAD_LVDS1_TX0_P__GPIO6_30 */
- IMX_PIN_REG(MX53_PAD_LVDS1_TX0_P, NO_PAD, 0x1FC, 1, 0x000, 0), /* MX53_PAD_LVDS1_TX0_P__LDB_LVDS1_TX0 */
- IMX_PIN_REG(MX53_PAD_LVDS0_TX3_P, NO_PAD, 0x200, 0, 0x000, 0), /* MX53_PAD_LVDS0_TX3_P__GPIO7_22 */
- IMX_PIN_REG(MX53_PAD_LVDS0_TX3_P, NO_PAD, 0x200, 1, 0x000, 0), /* MX53_PAD_LVDS0_TX3_P__LDB_LVDS0_TX3 */
- IMX_PIN_REG(MX53_PAD_LVDS0_CLK_P, NO_PAD, 0x204, 0, 0x000, 0), /* MX53_PAD_LVDS0_CLK_P__GPIO7_24 */
- IMX_PIN_REG(MX53_PAD_LVDS0_CLK_P, NO_PAD, 0x204, 1, 0x000, 0), /* MX53_PAD_LVDS0_CLK_P__LDB_LVDS0_CLK */
- IMX_PIN_REG(MX53_PAD_LVDS0_TX2_P, NO_PAD, 0x208, 0, 0x000, 0), /* MX53_PAD_LVDS0_TX2_P__GPIO7_26 */
- IMX_PIN_REG(MX53_PAD_LVDS0_TX2_P, NO_PAD, 0x208, 1, 0x000, 0), /* MX53_PAD_LVDS0_TX2_P__LDB_LVDS0_TX2 */
- IMX_PIN_REG(MX53_PAD_LVDS0_TX1_P, NO_PAD, 0x20C, 0, 0x000, 0), /* MX53_PAD_LVDS0_TX1_P__GPIO7_28 */
- IMX_PIN_REG(MX53_PAD_LVDS0_TX1_P, NO_PAD, 0x20C, 1, 0x000, 0), /* MX53_PAD_LVDS0_TX1_P__LDB_LVDS0_TX1 */
- IMX_PIN_REG(MX53_PAD_LVDS0_TX0_P, NO_PAD, 0x210, 0, 0x000, 0), /* MX53_PAD_LVDS0_TX0_P__GPIO7_30 */
- IMX_PIN_REG(MX53_PAD_LVDS0_TX0_P, NO_PAD, 0x210, 1, 0x000, 0), /* MX53_PAD_LVDS0_TX0_P__LDB_LVDS0_TX0 */
- IMX_PIN_REG(MX53_PAD_GPIO_10, 0x540, 0x214, 0, 0x000, 0), /* MX53_PAD_GPIO_10__GPIO4_0 */
- IMX_PIN_REG(MX53_PAD_GPIO_10, 0x540, 0x214, 1, 0x000, 0), /* MX53_PAD_GPIO_10__OSC32k_32K_OUT */
- IMX_PIN_REG(MX53_PAD_GPIO_11, 0x544, 0x218, 0, 0x000, 0), /* MX53_PAD_GPIO_11__GPIO4_1 */
- IMX_PIN_REG(MX53_PAD_GPIO_12, 0x548, 0x21C, 0, 0x000, 0), /* MX53_PAD_GPIO_12__GPIO4_2 */
- IMX_PIN_REG(MX53_PAD_GPIO_13, 0x54C, 0x220, 0, 0x000, 0), /* MX53_PAD_GPIO_13__GPIO4_3 */
- IMX_PIN_REG(MX53_PAD_GPIO_14, 0x550, 0x224, 0, 0x000, 0), /* MX53_PAD_GPIO_14__GPIO4_4 */
- IMX_PIN_REG(MX53_PAD_NANDF_CLE, 0x5A0, 0x228, 0, 0x000, 0), /* MX53_PAD_NANDF_CLE__EMI_NANDF_CLE */
- IMX_PIN_REG(MX53_PAD_NANDF_CLE, 0x5A0, 0x228, 1, 0x000, 0), /* MX53_PAD_NANDF_CLE__GPIO6_7 */
- IMX_PIN_REG(MX53_PAD_NANDF_CLE, 0x5A0, 0x228, 7, 0x000, 0), /* MX53_PAD_NANDF_CLE__USBPHY1_VSTATUS_0 */
- IMX_PIN_REG(MX53_PAD_NANDF_ALE, 0x5A4, 0x22C, 0, 0x000, 0), /* MX53_PAD_NANDF_ALE__EMI_NANDF_ALE */
- IMX_PIN_REG(MX53_PAD_NANDF_ALE, 0x5A4, 0x22C, 1, 0x000, 0), /* MX53_PAD_NANDF_ALE__GPIO6_8 */
- IMX_PIN_REG(MX53_PAD_NANDF_ALE, 0x5A4, 0x22C, 7, 0x000, 0), /* MX53_PAD_NANDF_ALE__USBPHY1_VSTATUS_1 */
- IMX_PIN_REG(MX53_PAD_NANDF_WP_B, 0x5A8, 0x230, 0, 0x000, 0), /* MX53_PAD_NANDF_WP_B__EMI_NANDF_WP_B */
- IMX_PIN_REG(MX53_PAD_NANDF_WP_B, 0x5A8, 0x230, 1, 0x000, 0), /* MX53_PAD_NANDF_WP_B__GPIO6_9 */
- IMX_PIN_REG(MX53_PAD_NANDF_WP_B, 0x5A8, 0x230, 7, 0x000, 0), /* MX53_PAD_NANDF_WP_B__USBPHY1_VSTATUS_2 */
- IMX_PIN_REG(MX53_PAD_NANDF_RB0, 0x5AC, 0x234, 0, 0x000, 0), /* MX53_PAD_NANDF_RB0__EMI_NANDF_RB_0 */
- IMX_PIN_REG(MX53_PAD_NANDF_RB0, 0x5AC, 0x234, 1, 0x000, 0), /* MX53_PAD_NANDF_RB0__GPIO6_10 */
- IMX_PIN_REG(MX53_PAD_NANDF_RB0, 0x5AC, 0x234, 7, 0x000, 0), /* MX53_PAD_NANDF_RB0__USBPHY1_VSTATUS_3 */
- IMX_PIN_REG(MX53_PAD_NANDF_CS0, 0x5B0, 0x238, 0, 0x000, 0), /* MX53_PAD_NANDF_CS0__EMI_NANDF_CS_0 */
- IMX_PIN_REG(MX53_PAD_NANDF_CS0, 0x5B0, 0x238, 1, 0x000, 0), /* MX53_PAD_NANDF_CS0__GPIO6_11 */
- IMX_PIN_REG(MX53_PAD_NANDF_CS0, 0x5B0, 0x238, 7, 0x000, 0), /* MX53_PAD_NANDF_CS0__USBPHY1_VSTATUS_4 */
- IMX_PIN_REG(MX53_PAD_NANDF_CS1, 0x5B4, 0x23C, 0, 0x000, 0), /* MX53_PAD_NANDF_CS1__EMI_NANDF_CS_1 */
- IMX_PIN_REG(MX53_PAD_NANDF_CS1, 0x5B4, 0x23C, 1, 0x000, 0), /* MX53_PAD_NANDF_CS1__GPIO6_14 */
- IMX_PIN_REG(MX53_PAD_NANDF_CS1, 0x5B4, 0x23C, 6, 0x858, 0), /* MX53_PAD_NANDF_CS1__MLB_MLBCLK */
- IMX_PIN_REG(MX53_PAD_NANDF_CS1, 0x5B4, 0x23C, 7, 0x000, 0), /* MX53_PAD_NANDF_CS1__USBPHY1_VSTATUS_5 */
- IMX_PIN_REG(MX53_PAD_NANDF_CS2, 0x5B8, 0x240, 0, 0x000, 0), /* MX53_PAD_NANDF_CS2__EMI_NANDF_CS_2 */
- IMX_PIN_REG(MX53_PAD_NANDF_CS2, 0x5B8, 0x240, 1, 0x000, 0), /* MX53_PAD_NANDF_CS2__GPIO6_15 */
- IMX_PIN_REG(MX53_PAD_NANDF_CS2, 0x5B8, 0x240, 2, 0x000, 0), /* MX53_PAD_NANDF_CS2__IPU_SISG_0 */
- IMX_PIN_REG(MX53_PAD_NANDF_CS2, 0x5B8, 0x240, 3, 0x7E4, 0), /* MX53_PAD_NANDF_CS2__ESAI1_TX0 */
- IMX_PIN_REG(MX53_PAD_NANDF_CS2, 0x5B8, 0x240, 4, 0x000, 0), /* MX53_PAD_NANDF_CS2__EMI_WEIM_CRE */
- IMX_PIN_REG(MX53_PAD_NANDF_CS2, 0x5B8, 0x240, 5, 0x000, 0), /* MX53_PAD_NANDF_CS2__CCM_CSI0_MCLK */
- IMX_PIN_REG(MX53_PAD_NANDF_CS2, 0x5B8, 0x240, 6, 0x860, 0), /* MX53_PAD_NANDF_CS2__MLB_MLBSIG */
- IMX_PIN_REG(MX53_PAD_NANDF_CS2, 0x5B8, 0x240, 7, 0x000, 0), /* MX53_PAD_NANDF_CS2__USBPHY1_VSTATUS_6 */
- IMX_PIN_REG(MX53_PAD_NANDF_CS3, 0x5BC, 0x244, 0, 0x000, 0), /* MX53_PAD_NANDF_CS3__EMI_NANDF_CS_3 */
- IMX_PIN_REG(MX53_PAD_NANDF_CS3, 0x5BC, 0x244, 1, 0x000, 0), /* MX53_PAD_NANDF_CS3__GPIO6_16 */
- IMX_PIN_REG(MX53_PAD_NANDF_CS3, 0x5BC, 0x244, 2, 0x000, 0), /* MX53_PAD_NANDF_CS3__IPU_SISG_1 */
- IMX_PIN_REG(MX53_PAD_NANDF_CS3, 0x5BC, 0x244, 3, 0x7E8, 0), /* MX53_PAD_NANDF_CS3__ESAI1_TX1 */
- IMX_PIN_REG(MX53_PAD_NANDF_CS3, 0x5BC, 0x244, 4, 0x000, 0), /* MX53_PAD_NANDF_CS3__EMI_WEIM_A_26 */
- IMX_PIN_REG(MX53_PAD_NANDF_CS3, 0x5BC, 0x244, 6, 0x85C, 0), /* MX53_PAD_NANDF_CS3__MLB_MLBDAT */
- IMX_PIN_REG(MX53_PAD_NANDF_CS3, 0x5BC, 0x244, 7, 0x000, 0), /* MX53_PAD_NANDF_CS3__USBPHY1_VSTATUS_7 */
- IMX_PIN_REG(MX53_PAD_FEC_MDIO, 0x5C4, 0x248, 0, 0x804, 1), /* MX53_PAD_FEC_MDIO__FEC_MDIO */
- IMX_PIN_REG(MX53_PAD_FEC_MDIO, 0x5C4, 0x248, 1, 0x000, 0), /* MX53_PAD_FEC_MDIO__GPIO1_22 */
- IMX_PIN_REG(MX53_PAD_FEC_MDIO, 0x5C4, 0x248, 2, 0x7DC, 0), /* MX53_PAD_FEC_MDIO__ESAI1_SCKR */
- IMX_PIN_REG(MX53_PAD_FEC_MDIO, 0x5C4, 0x248, 3, 0x800, 1), /* MX53_PAD_FEC_MDIO__FEC_COL */
- IMX_PIN_REG(MX53_PAD_FEC_MDIO, 0x5C4, 0x248, 4, 0x000, 0), /* MX53_PAD_FEC_MDIO__RTC_CE_RTC_PS2 */
- IMX_PIN_REG(MX53_PAD_FEC_MDIO, 0x5C4, 0x248, 5, 0x000, 0), /* MX53_PAD_FEC_MDIO__SDMA_DEBUG_BUS_DEVICE_3 */
- IMX_PIN_REG(MX53_PAD_FEC_MDIO, 0x5C4, 0x248, 6, 0x000, 0), /* MX53_PAD_FEC_MDIO__EMI_EMI_DEBUG_49 */
- IMX_PIN_REG(MX53_PAD_FEC_REF_CLK, 0x5C8, 0x24C, 0, 0x000, 0), /* MX53_PAD_FEC_REF_CLK__FEC_TX_CLK */
- IMX_PIN_REG(MX53_PAD_FEC_REF_CLK, 0x5C8, 0x24C, 1, 0x000, 0), /* MX53_PAD_FEC_REF_CLK__GPIO1_23 */
- IMX_PIN_REG(MX53_PAD_FEC_REF_CLK, 0x5C8, 0x24C, 2, 0x7CC, 0), /* MX53_PAD_FEC_REF_CLK__ESAI1_FSR */
- IMX_PIN_REG(MX53_PAD_FEC_REF_CLK, 0x5C8, 0x24C, 5, 0x000, 0), /* MX53_PAD_FEC_REF_CLK__SDMA_DEBUG_BUS_DEVICE_4 */
- IMX_PIN_REG(MX53_PAD_FEC_REF_CLK, 0x5C8, 0x24C, 6, 0x000, 0), /* MX53_PAD_FEC_REF_CLK__EMI_EMI_DEBUG_50 */
- IMX_PIN_REG(MX53_PAD_FEC_RX_ER, 0x5CC, 0x250, 0, 0x000, 0), /* MX53_PAD_FEC_RX_ER__FEC_RX_ER */
- IMX_PIN_REG(MX53_PAD_FEC_RX_ER, 0x5CC, 0x250, 1, 0x000, 0), /* MX53_PAD_FEC_RX_ER__GPIO1_24 */
- IMX_PIN_REG(MX53_PAD_FEC_RX_ER, 0x5CC, 0x250, 2, 0x7D4, 0), /* MX53_PAD_FEC_RX_ER__ESAI1_HCKR */
- IMX_PIN_REG(MX53_PAD_FEC_RX_ER, 0x5CC, 0x250, 3, 0x808, 1), /* MX53_PAD_FEC_RX_ER__FEC_RX_CLK */
- IMX_PIN_REG(MX53_PAD_FEC_RX_ER, 0x5CC, 0x250, 4, 0x000, 0), /* MX53_PAD_FEC_RX_ER__RTC_CE_RTC_PS3 */
- IMX_PIN_REG(MX53_PAD_FEC_CRS_DV, 0x5D0, 0x254, 0, 0x000, 0), /* MX53_PAD_FEC_CRS_DV__FEC_RX_DV */
- IMX_PIN_REG(MX53_PAD_FEC_CRS_DV, 0x5D0, 0x254, 1, 0x000, 0), /* MX53_PAD_FEC_CRS_DV__GPIO1_25 */
- IMX_PIN_REG(MX53_PAD_FEC_CRS_DV, 0x5D0, 0x254, 2, 0x7E0, 0), /* MX53_PAD_FEC_CRS_DV__ESAI1_SCKT */
- IMX_PIN_REG(MX53_PAD_FEC_RXD1, 0x5D4, 0x258, 0, 0x000, 0), /* MX53_PAD_FEC_RXD1__FEC_RDATA_1 */
- IMX_PIN_REG(MX53_PAD_FEC_RXD1, 0x5D4, 0x258, 1, 0x000, 0), /* MX53_PAD_FEC_RXD1__GPIO1_26 */
- IMX_PIN_REG(MX53_PAD_FEC_RXD1, 0x5D4, 0x258, 2, 0x7D0, 0), /* MX53_PAD_FEC_RXD1__ESAI1_FST */
- IMX_PIN_REG(MX53_PAD_FEC_RXD1, 0x5D4, 0x258, 3, 0x860, 1), /* MX53_PAD_FEC_RXD1__MLB_MLBSIG */
- IMX_PIN_REG(MX53_PAD_FEC_RXD1, 0x5D4, 0x258, 4, 0x000, 0), /* MX53_PAD_FEC_RXD1__RTC_CE_RTC_PS1 */
- IMX_PIN_REG(MX53_PAD_FEC_RXD0, 0x5D8, 0x25C, 0, 0x000, 0), /* MX53_PAD_FEC_RXD0__FEC_RDATA_0 */
- IMX_PIN_REG(MX53_PAD_FEC_RXD0, 0x5D8, 0x25C, 1, 0x000, 0), /* MX53_PAD_FEC_RXD0__GPIO1_27 */
- IMX_PIN_REG(MX53_PAD_FEC_RXD0, 0x5D8, 0x25C, 2, 0x7D8, 0), /* MX53_PAD_FEC_RXD0__ESAI1_HCKT */
- IMX_PIN_REG(MX53_PAD_FEC_RXD0, 0x5D8, 0x25C, 3, 0x000, 0), /* MX53_PAD_FEC_RXD0__OSC32k_32K_OUT */
- IMX_PIN_REG(MX53_PAD_FEC_TX_EN, 0x5DC, 0x260, 0, 0x000, 0), /* MX53_PAD_FEC_TX_EN__FEC_TX_EN */
- IMX_PIN_REG(MX53_PAD_FEC_TX_EN, 0x5DC, 0x260, 1, 0x000, 0), /* MX53_PAD_FEC_TX_EN__GPIO1_28 */
- IMX_PIN_REG(MX53_PAD_FEC_TX_EN, 0x5DC, 0x260, 2, 0x7F0, 0), /* MX53_PAD_FEC_TX_EN__ESAI1_TX3_RX2 */
- IMX_PIN_REG(MX53_PAD_FEC_TXD1, 0x5E0, 0x264, 0, 0x000, 0), /* MX53_PAD_FEC_TXD1__FEC_TDATA_1 */
- IMX_PIN_REG(MX53_PAD_FEC_TXD1, 0x5E0, 0x264, 1, 0x000, 0), /* MX53_PAD_FEC_TXD1__GPIO1_29 */
- IMX_PIN_REG(MX53_PAD_FEC_TXD1, 0x5E0, 0x264, 2, 0x7EC, 0), /* MX53_PAD_FEC_TXD1__ESAI1_TX2_RX3 */
- IMX_PIN_REG(MX53_PAD_FEC_TXD1, 0x5E0, 0x264, 3, 0x858, 1), /* MX53_PAD_FEC_TXD1__MLB_MLBCLK */
- IMX_PIN_REG(MX53_PAD_FEC_TXD1, 0x5E0, 0x264, 4, 0x000, 0), /* MX53_PAD_FEC_TXD1__RTC_CE_RTC_PRSC_CLK */
- IMX_PIN_REG(MX53_PAD_FEC_TXD0, 0x5E4, 0x268, 0, 0x000, 0), /* MX53_PAD_FEC_TXD0__FEC_TDATA_0 */
- IMX_PIN_REG(MX53_PAD_FEC_TXD0, 0x5E4, 0x268, 1, 0x000, 0), /* MX53_PAD_FEC_TXD0__GPIO1_30 */
- IMX_PIN_REG(MX53_PAD_FEC_TXD0, 0x5E4, 0x268, 2, 0x7F4, 0), /* MX53_PAD_FEC_TXD0__ESAI1_TX4_RX1 */
- IMX_PIN_REG(MX53_PAD_FEC_TXD0, 0x5E4, 0x268, 7, 0x000, 0), /* MX53_PAD_FEC_TXD0__USBPHY2_DATAOUT_0 */
- IMX_PIN_REG(MX53_PAD_FEC_MDC, 0x5E8, 0x26C, 0, 0x000, 0), /* MX53_PAD_FEC_MDC__FEC_MDC */
- IMX_PIN_REG(MX53_PAD_FEC_MDC, 0x5E8, 0x26C, 1, 0x000, 0), /* MX53_PAD_FEC_MDC__GPIO1_31 */
- IMX_PIN_REG(MX53_PAD_FEC_MDC, 0x5E8, 0x26C, 2, 0x7F8, 0), /* MX53_PAD_FEC_MDC__ESAI1_TX5_RX0 */
- IMX_PIN_REG(MX53_PAD_FEC_MDC, 0x5E8, 0x26C, 3, 0x85C, 1), /* MX53_PAD_FEC_MDC__MLB_MLBDAT */
- IMX_PIN_REG(MX53_PAD_FEC_MDC, 0x5E8, 0x26C, 4, 0x000, 0), /* MX53_PAD_FEC_MDC__RTC_CE_RTC_ALARM1_TRIG */
- IMX_PIN_REG(MX53_PAD_FEC_MDC, 0x5E8, 0x26C, 7, 0x000, 0), /* MX53_PAD_FEC_MDC__USBPHY2_DATAOUT_1 */
- IMX_PIN_REG(MX53_PAD_PATA_DIOW, 0x5F0, 0x270, 0, 0x000, 0), /* MX53_PAD_PATA_DIOW__PATA_DIOW */
- IMX_PIN_REG(MX53_PAD_PATA_DIOW, 0x5F0, 0x270, 1, 0x000, 0), /* MX53_PAD_PATA_DIOW__GPIO6_17 */
- IMX_PIN_REG(MX53_PAD_PATA_DIOW, 0x5F0, 0x270, 3, 0x000, 0), /* MX53_PAD_PATA_DIOW__UART1_TXD_MUX */
- IMX_PIN_REG(MX53_PAD_PATA_DIOW, 0x5F0, 0x270, 7, 0x000, 0), /* MX53_PAD_PATA_DIOW__USBPHY2_DATAOUT_2 */
- IMX_PIN_REG(MX53_PAD_PATA_DMACK, 0x5F4, 0x274, 0, 0x000, 0), /* MX53_PAD_PATA_DMACK__PATA_DMACK */
- IMX_PIN_REG(MX53_PAD_PATA_DMACK, 0x5F4, 0x274, 1, 0x000, 0), /* MX53_PAD_PATA_DMACK__GPIO6_18 */
- IMX_PIN_REG(MX53_PAD_PATA_DMACK, 0x5F4, 0x274, 3, 0x878, 3), /* MX53_PAD_PATA_DMACK__UART1_RXD_MUX */
- IMX_PIN_REG(MX53_PAD_PATA_DMACK, 0x5F4, 0x274, 7, 0x000, 0), /* MX53_PAD_PATA_DMACK__USBPHY2_DATAOUT_3 */
- IMX_PIN_REG(MX53_PAD_PATA_DMARQ, 0x5F8, 0x278, 0, 0x000, 0), /* MX53_PAD_PATA_DMARQ__PATA_DMARQ */
- IMX_PIN_REG(MX53_PAD_PATA_DMARQ, 0x5F8, 0x278, 1, 0x000, 0), /* MX53_PAD_PATA_DMARQ__GPIO7_0 */
- IMX_PIN_REG(MX53_PAD_PATA_DMARQ, 0x5F8, 0x278, 3, 0x000, 0), /* MX53_PAD_PATA_DMARQ__UART2_TXD_MUX */
- IMX_PIN_REG(MX53_PAD_PATA_DMARQ, 0x5F8, 0x278, 5, 0x000, 0), /* MX53_PAD_PATA_DMARQ__CCM_CCM_OUT_0 */
- IMX_PIN_REG(MX53_PAD_PATA_DMARQ, 0x5F8, 0x278, 7, 0x000, 0), /* MX53_PAD_PATA_DMARQ__USBPHY2_DATAOUT_4 */
- IMX_PIN_REG(MX53_PAD_PATA_BUFFER_EN, 0x5FC, 0x27C, 0, 0x000, 0), /* MX53_PAD_PATA_BUFFER_EN__PATA_BUFFER_EN */
- IMX_PIN_REG(MX53_PAD_PATA_BUFFER_EN, 0x5FC, 0x27C, 1, 0x000, 0), /* MX53_PAD_PATA_BUFFER_EN__GPIO7_1 */
- IMX_PIN_REG(MX53_PAD_PATA_BUFFER_EN, 0x5FC, 0x27C, 3, 0x880, 3), /* MX53_PAD_PATA_BUFFER_EN__UART2_RXD_MUX */
- IMX_PIN_REG(MX53_PAD_PATA_BUFFER_EN, 0x5FC, 0x27C, 5, 0x000, 0), /* MX53_PAD_PATA_BUFFER_EN__CCM_CCM_OUT_1 */
- IMX_PIN_REG(MX53_PAD_PATA_BUFFER_EN, 0x5FC, 0x27C, 7, 0x000, 0), /* MX53_PAD_PATA_BUFFER_EN__USBPHY2_DATAOUT_5 */
- IMX_PIN_REG(MX53_PAD_PATA_INTRQ, 0x600, 0x280, 0, 0x000, 0), /* MX53_PAD_PATA_INTRQ__PATA_INTRQ */
- IMX_PIN_REG(MX53_PAD_PATA_INTRQ, 0x600, 0x280, 1, 0x000, 0), /* MX53_PAD_PATA_INTRQ__GPIO7_2 */
- IMX_PIN_REG(MX53_PAD_PATA_INTRQ, 0x600, 0x280, 3, 0x000, 0), /* MX53_PAD_PATA_INTRQ__UART2_CTS */
- IMX_PIN_REG(MX53_PAD_PATA_INTRQ, 0x600, 0x280, 4, 0x000, 0), /* MX53_PAD_PATA_INTRQ__CAN1_TXCAN */
- IMX_PIN_REG(MX53_PAD_PATA_INTRQ, 0x600, 0x280, 5, 0x000, 0), /* MX53_PAD_PATA_INTRQ__CCM_CCM_OUT_2 */
- IMX_PIN_REG(MX53_PAD_PATA_INTRQ, 0x600, 0x280, 7, 0x000, 0), /* MX53_PAD_PATA_INTRQ__USBPHY2_DATAOUT_6 */
- IMX_PIN_REG(MX53_PAD_PATA_DIOR, 0x604, 0x284, 0, 0x000, 0), /* MX53_PAD_PATA_DIOR__PATA_DIOR */
- IMX_PIN_REG(MX53_PAD_PATA_DIOR, 0x604, 0x284, 1, 0x000, 0), /* MX53_PAD_PATA_DIOR__GPIO7_3 */
- IMX_PIN_REG(MX53_PAD_PATA_DIOR, 0x604, 0x284, 3, 0x87C, 3), /* MX53_PAD_PATA_DIOR__UART2_RTS */
- IMX_PIN_REG(MX53_PAD_PATA_DIOR, 0x604, 0x284, 4, 0x760, 1), /* MX53_PAD_PATA_DIOR__CAN1_RXCAN */
- IMX_PIN_REG(MX53_PAD_PATA_DIOR, 0x604, 0x284, 7, 0x000, 0), /* MX53_PAD_PATA_DIOR__USBPHY2_DATAOUT_7 */
- IMX_PIN_REG(MX53_PAD_PATA_RESET_B, 0x608, 0x288, 0, 0x000, 0), /* MX53_PAD_PATA_RESET_B__PATA_PATA_RESET_B */
- IMX_PIN_REG(MX53_PAD_PATA_RESET_B, 0x608, 0x288, 1, 0x000, 0), /* MX53_PAD_PATA_RESET_B__GPIO7_4 */
- IMX_PIN_REG(MX53_PAD_PATA_RESET_B, 0x608, 0x288, 2, 0x000, 0), /* MX53_PAD_PATA_RESET_B__ESDHC3_CMD */
- IMX_PIN_REG(MX53_PAD_PATA_RESET_B, 0x608, 0x288, 3, 0x000, 0), /* MX53_PAD_PATA_RESET_B__UART1_CTS */
- IMX_PIN_REG(MX53_PAD_PATA_RESET_B, 0x608, 0x288, 4, 0x000, 0), /* MX53_PAD_PATA_RESET_B__CAN2_TXCAN */
- IMX_PIN_REG(MX53_PAD_PATA_RESET_B, 0x608, 0x288, 7, 0x000, 0), /* MX53_PAD_PATA_RESET_B__USBPHY1_DATAOUT_0 */
- IMX_PIN_REG(MX53_PAD_PATA_IORDY, 0x60C, 0x28C, 0, 0x000, 0), /* MX53_PAD_PATA_IORDY__PATA_IORDY */
- IMX_PIN_REG(MX53_PAD_PATA_IORDY, 0x60C, 0x28C, 1, 0x000, 0), /* MX53_PAD_PATA_IORDY__GPIO7_5 */
- IMX_PIN_REG(MX53_PAD_PATA_IORDY, 0x60C, 0x28C, 2, 0x000, 0), /* MX53_PAD_PATA_IORDY__ESDHC3_CLK */
- IMX_PIN_REG(MX53_PAD_PATA_IORDY, 0x60C, 0x28C, 3, 0x874, 3), /* MX53_PAD_PATA_IORDY__UART1_RTS */
- IMX_PIN_REG(MX53_PAD_PATA_IORDY, 0x60C, 0x28C, 4, 0x764, 1), /* MX53_PAD_PATA_IORDY__CAN2_RXCAN */
- IMX_PIN_REG(MX53_PAD_PATA_IORDY, 0x60C, 0x28C, 7, 0x000, 0), /* MX53_PAD_PATA_IORDY__USBPHY1_DATAOUT_1 */
- IMX_PIN_REG(MX53_PAD_PATA_DA_0, 0x610, 0x290, 0, 0x000, 0), /* MX53_PAD_PATA_DA_0__PATA_DA_0 */
- IMX_PIN_REG(MX53_PAD_PATA_DA_0, 0x610, 0x290, 1, 0x000, 0), /* MX53_PAD_PATA_DA_0__GPIO7_6 */
- IMX_PIN_REG(MX53_PAD_PATA_DA_0, 0x610, 0x290, 2, 0x000, 0), /* MX53_PAD_PATA_DA_0__ESDHC3_RST */
- IMX_PIN_REG(MX53_PAD_PATA_DA_0, 0x610, 0x290, 4, 0x864, 0), /* MX53_PAD_PATA_DA_0__OWIRE_LINE */
- IMX_PIN_REG(MX53_PAD_PATA_DA_0, 0x610, 0x290, 7, 0x000, 0), /* MX53_PAD_PATA_DA_0__USBPHY1_DATAOUT_2 */
- IMX_PIN_REG(MX53_PAD_PATA_DA_1, 0x614, 0x294, 0, 0x000, 0), /* MX53_PAD_PATA_DA_1__PATA_DA_1 */
- IMX_PIN_REG(MX53_PAD_PATA_DA_1, 0x614, 0x294, 1, 0x000, 0), /* MX53_PAD_PATA_DA_1__GPIO7_7 */
- IMX_PIN_REG(MX53_PAD_PATA_DA_1, 0x614, 0x294, 2, 0x000, 0), /* MX53_PAD_PATA_DA_1__ESDHC4_CMD */
- IMX_PIN_REG(MX53_PAD_PATA_DA_1, 0x614, 0x294, 4, 0x000, 0), /* MX53_PAD_PATA_DA_1__UART3_CTS */
- IMX_PIN_REG(MX53_PAD_PATA_DA_1, 0x614, 0x294, 7, 0x000, 0), /* MX53_PAD_PATA_DA_1__USBPHY1_DATAOUT_3 */
- IMX_PIN_REG(MX53_PAD_PATA_DA_2, 0x618, 0x298, 0, 0x000, 0), /* MX53_PAD_PATA_DA_2__PATA_DA_2 */
- IMX_PIN_REG(MX53_PAD_PATA_DA_2, 0x618, 0x298, 1, 0x000, 0), /* MX53_PAD_PATA_DA_2__GPIO7_8 */
- IMX_PIN_REG(MX53_PAD_PATA_DA_2, 0x618, 0x298, 2, 0x000, 0), /* MX53_PAD_PATA_DA_2__ESDHC4_CLK */
- IMX_PIN_REG(MX53_PAD_PATA_DA_2, 0x618, 0x298, 4, 0x884, 5), /* MX53_PAD_PATA_DA_2__UART3_RTS */
- IMX_PIN_REG(MX53_PAD_PATA_DA_2, 0x618, 0x298, 7, 0x000, 0), /* MX53_PAD_PATA_DA_2__USBPHY1_DATAOUT_4 */
- IMX_PIN_REG(MX53_PAD_PATA_CS_0, 0x61C, 0x29C, 0, 0x000, 0), /* MX53_PAD_PATA_CS_0__PATA_CS_0 */
- IMX_PIN_REG(MX53_PAD_PATA_CS_0, 0x61C, 0x29C, 1, 0x000, 0), /* MX53_PAD_PATA_CS_0__GPIO7_9 */
- IMX_PIN_REG(MX53_PAD_PATA_CS_0, 0x61C, 0x29C, 4, 0x000, 0), /* MX53_PAD_PATA_CS_0__UART3_TXD_MUX */
- IMX_PIN_REG(MX53_PAD_PATA_CS_0, 0x61C, 0x29C, 7, 0x000, 0), /* MX53_PAD_PATA_CS_0__USBPHY1_DATAOUT_5 */
- IMX_PIN_REG(MX53_PAD_PATA_CS_1, 0x620, 0x2A0, 0, 0x000, 0), /* MX53_PAD_PATA_CS_1__PATA_CS_1 */
- IMX_PIN_REG(MX53_PAD_PATA_CS_1, 0x620, 0x2A0, 1, 0x000, 0), /* MX53_PAD_PATA_CS_1__GPIO7_10 */
- IMX_PIN_REG(MX53_PAD_PATA_CS_1, 0x620, 0x2A0, 4, 0x888, 3), /* MX53_PAD_PATA_CS_1__UART3_RXD_MUX */
- IMX_PIN_REG(MX53_PAD_PATA_CS_1, 0x620, 0x2A0, 7, 0x000, 0), /* MX53_PAD_PATA_CS_1__USBPHY1_DATAOUT_6 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA0, 0x628, 0x2A4, 0, 0x000, 0), /* MX53_PAD_PATA_DATA0__PATA_DATA_0 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA0, 0x628, 0x2A4, 1, 0x000, 0), /* MX53_PAD_PATA_DATA0__GPIO2_0 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA0, 0x628, 0x2A4, 3, 0x000, 0), /* MX53_PAD_PATA_DATA0__EMI_NANDF_D_0 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA0, 0x628, 0x2A4, 4, 0x000, 0), /* MX53_PAD_PATA_DATA0__ESDHC3_DAT4 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA0, 0x628, 0x2A4, 5, 0x000, 0), /* MX53_PAD_PATA_DATA0__GPU3d_GPU_DEBUG_OUT_0 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA0, 0x628, 0x2A4, 6, 0x000, 0), /* MX53_PAD_PATA_DATA0__IPU_DIAG_BUS_0 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA0, 0x628, 0x2A4, 7, 0x000, 0), /* MX53_PAD_PATA_DATA0__USBPHY1_DATAOUT_7 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA1, 0x62C, 0x2A8, 0, 0x000, 0), /* MX53_PAD_PATA_DATA1__PATA_DATA_1 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA1, 0x62C, 0x2A8, 1, 0x000, 0), /* MX53_PAD_PATA_DATA1__GPIO2_1 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA1, 0x62C, 0x2A8, 3, 0x000, 0), /* MX53_PAD_PATA_DATA1__EMI_NANDF_D_1 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA1, 0x62C, 0x2A8, 4, 0x000, 0), /* MX53_PAD_PATA_DATA1__ESDHC3_DAT5 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA1, 0x62C, 0x2A8, 5, 0x000, 0), /* MX53_PAD_PATA_DATA1__GPU3d_GPU_DEBUG_OUT_1 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA1, 0x62C, 0x2A8, 6, 0x000, 0), /* MX53_PAD_PATA_DATA1__IPU_DIAG_BUS_1 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA2, 0x630, 0x2AC, 0, 0x000, 0), /* MX53_PAD_PATA_DATA2__PATA_DATA_2 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA2, 0x630, 0x2AC, 1, 0x000, 0), /* MX53_PAD_PATA_DATA2__GPIO2_2 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA2, 0x630, 0x2AC, 3, 0x000, 0), /* MX53_PAD_PATA_DATA2__EMI_NANDF_D_2 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA2, 0x630, 0x2AC, 4, 0x000, 0), /* MX53_PAD_PATA_DATA2__ESDHC3_DAT6 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA2, 0x630, 0x2AC, 5, 0x000, 0), /* MX53_PAD_PATA_DATA2__GPU3d_GPU_DEBUG_OUT_2 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA2, 0x630, 0x2AC, 6, 0x000, 0), /* MX53_PAD_PATA_DATA2__IPU_DIAG_BUS_2 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA3, 0x634, 0x2B0, 0, 0x000, 0), /* MX53_PAD_PATA_DATA3__PATA_DATA_3 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA3, 0x634, 0x2B0, 1, 0x000, 0), /* MX53_PAD_PATA_DATA3__GPIO2_3 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA3, 0x634, 0x2B0, 3, 0x000, 0), /* MX53_PAD_PATA_DATA3__EMI_NANDF_D_3 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA3, 0x634, 0x2B0, 4, 0x000, 0), /* MX53_PAD_PATA_DATA3__ESDHC3_DAT7 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA3, 0x634, 0x2B0, 5, 0x000, 0), /* MX53_PAD_PATA_DATA3__GPU3d_GPU_DEBUG_OUT_3 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA3, 0x634, 0x2B0, 6, 0x000, 0), /* MX53_PAD_PATA_DATA3__IPU_DIAG_BUS_3 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA4, 0x638, 0x2B4, 0, 0x000, 0), /* MX53_PAD_PATA_DATA4__PATA_DATA_4 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA4, 0x638, 0x2B4, 1, 0x000, 0), /* MX53_PAD_PATA_DATA4__GPIO2_4 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA4, 0x638, 0x2B4, 3, 0x000, 0), /* MX53_PAD_PATA_DATA4__EMI_NANDF_D_4 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA4, 0x638, 0x2B4, 4, 0x000, 0), /* MX53_PAD_PATA_DATA4__ESDHC4_DAT4 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA4, 0x638, 0x2B4, 5, 0x000, 0), /* MX53_PAD_PATA_DATA4__GPU3d_GPU_DEBUG_OUT_4 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA4, 0x638, 0x2B4, 6, 0x000, 0), /* MX53_PAD_PATA_DATA4__IPU_DIAG_BUS_4 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA5, 0x63C, 0x2B8, 0, 0x000, 0), /* MX53_PAD_PATA_DATA5__PATA_DATA_5 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA5, 0x63C, 0x2B8, 1, 0x000, 0), /* MX53_PAD_PATA_DATA5__GPIO2_5 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA5, 0x63C, 0x2B8, 3, 0x000, 0), /* MX53_PAD_PATA_DATA5__EMI_NANDF_D_5 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA5, 0x63C, 0x2B8, 4, 0x000, 0), /* MX53_PAD_PATA_DATA5__ESDHC4_DAT5 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA5, 0x63C, 0x2B8, 5, 0x000, 0), /* MX53_PAD_PATA_DATA5__GPU3d_GPU_DEBUG_OUT_5 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA5, 0x63C, 0x2B8, 6, 0x000, 0), /* MX53_PAD_PATA_DATA5__IPU_DIAG_BUS_5 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA6, 0x640, 0x2BC, 0, 0x000, 0), /* MX53_PAD_PATA_DATA6__PATA_DATA_6 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA6, 0x640, 0x2BC, 1, 0x000, 0), /* MX53_PAD_PATA_DATA6__GPIO2_6 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA6, 0x640, 0x2BC, 3, 0x000, 0), /* MX53_PAD_PATA_DATA6__EMI_NANDF_D_6 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA6, 0x640, 0x2BC, 4, 0x000, 0), /* MX53_PAD_PATA_DATA6__ESDHC4_DAT6 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA6, 0x640, 0x2BC, 5, 0x000, 0), /* MX53_PAD_PATA_DATA6__GPU3d_GPU_DEBUG_OUT_6 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA6, 0x640, 0x2BC, 6, 0x000, 0), /* MX53_PAD_PATA_DATA6__IPU_DIAG_BUS_6 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA7, 0x644, 0x2C0, 0, 0x000, 0), /* MX53_PAD_PATA_DATA7__PATA_DATA_7 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA7, 0x644, 0x2C0, 1, 0x000, 0), /* MX53_PAD_PATA_DATA7__GPIO2_7 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA7, 0x644, 0x2C0, 3, 0x000, 0), /* MX53_PAD_PATA_DATA7__EMI_NANDF_D_7 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA7, 0x644, 0x2C0, 4, 0x000, 0), /* MX53_PAD_PATA_DATA7__ESDHC4_DAT7 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA7, 0x644, 0x2C0, 5, 0x000, 0), /* MX53_PAD_PATA_DATA7__GPU3d_GPU_DEBUG_OUT_7 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA7, 0x644, 0x2C0, 6, 0x000, 0), /* MX53_PAD_PATA_DATA7__IPU_DIAG_BUS_7 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA8, 0x648, 0x2C4, 0, 0x000, 0), /* MX53_PAD_PATA_DATA8__PATA_DATA_8 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA8, 0x648, 0x2C4, 1, 0x000, 0), /* MX53_PAD_PATA_DATA8__GPIO2_8 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA8, 0x648, 0x2C4, 2, 0x000, 0), /* MX53_PAD_PATA_DATA8__ESDHC1_DAT4 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA8, 0x648, 0x2C4, 3, 0x000, 0), /* MX53_PAD_PATA_DATA8__EMI_NANDF_D_8 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA8, 0x648, 0x2C4, 4, 0x000, 0), /* MX53_PAD_PATA_DATA8__ESDHC3_DAT0 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA8, 0x648, 0x2C4, 5, 0x000, 0), /* MX53_PAD_PATA_DATA8__GPU3d_GPU_DEBUG_OUT_8 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA8, 0x648, 0x2C4, 6, 0x000, 0), /* MX53_PAD_PATA_DATA8__IPU_DIAG_BUS_8 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA9, 0x64C, 0x2C8, 0, 0x000, 0), /* MX53_PAD_PATA_DATA9__PATA_DATA_9 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA9, 0x64C, 0x2C8, 1, 0x000, 0), /* MX53_PAD_PATA_DATA9__GPIO2_9 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA9, 0x64C, 0x2C8, 2, 0x000, 0), /* MX53_PAD_PATA_DATA9__ESDHC1_DAT5 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA9, 0x64C, 0x2C8, 3, 0x000, 0), /* MX53_PAD_PATA_DATA9__EMI_NANDF_D_9 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA9, 0x64C, 0x2C8, 4, 0x000, 0), /* MX53_PAD_PATA_DATA9__ESDHC3_DAT1 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA9, 0x64C, 0x2C8, 5, 0x000, 0), /* MX53_PAD_PATA_DATA9__GPU3d_GPU_DEBUG_OUT_9 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA9, 0x64C, 0x2C8, 6, 0x000, 0), /* MX53_PAD_PATA_DATA9__IPU_DIAG_BUS_9 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA10, 0x650, 0x2CC, 0, 0x000, 0), /* MX53_PAD_PATA_DATA10__PATA_DATA_10 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA10, 0x650, 0x2CC, 1, 0x000, 0), /* MX53_PAD_PATA_DATA10__GPIO2_10 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA10, 0x650, 0x2CC, 2, 0x000, 0), /* MX53_PAD_PATA_DATA10__ESDHC1_DAT6 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA10, 0x650, 0x2CC, 3, 0x000, 0), /* MX53_PAD_PATA_DATA10__EMI_NANDF_D_10 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA10, 0x650, 0x2CC, 4, 0x000, 0), /* MX53_PAD_PATA_DATA10__ESDHC3_DAT2 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA10, 0x650, 0x2CC, 5, 0x000, 0), /* MX53_PAD_PATA_DATA10__GPU3d_GPU_DEBUG_OUT_10 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA10, 0x650, 0x2CC, 6, 0x000, 0), /* MX53_PAD_PATA_DATA10__IPU_DIAG_BUS_10 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA11, 0x654, 0x2D0, 0, 0x000, 0), /* MX53_PAD_PATA_DATA11__PATA_DATA_11 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA11, 0x654, 0x2D0, 1, 0x000, 0), /* MX53_PAD_PATA_DATA11__GPIO2_11 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA11, 0x654, 0x2D0, 2, 0x000, 0), /* MX53_PAD_PATA_DATA11__ESDHC1_DAT7 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA11, 0x654, 0x2D0, 3, 0x000, 0), /* MX53_PAD_PATA_DATA11__EMI_NANDF_D_11 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA11, 0x654, 0x2D0, 4, 0x000, 0), /* MX53_PAD_PATA_DATA11__ESDHC3_DAT3 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA11, 0x654, 0x2D0, 5, 0x000, 0), /* MX53_PAD_PATA_DATA11__GPU3d_GPU_DEBUG_OUT_11 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA11, 0x654, 0x2D0, 6, 0x000, 0), /* MX53_PAD_PATA_DATA11__IPU_DIAG_BUS_11 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA12, 0x658, 0x2D4, 0, 0x000, 0), /* MX53_PAD_PATA_DATA12__PATA_DATA_12 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA12, 0x658, 0x2D4, 1, 0x000, 0), /* MX53_PAD_PATA_DATA12__GPIO2_12 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA12, 0x658, 0x2D4, 2, 0x000, 0), /* MX53_PAD_PATA_DATA12__ESDHC2_DAT4 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA12, 0x658, 0x2D4, 3, 0x000, 0), /* MX53_PAD_PATA_DATA12__EMI_NANDF_D_12 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA12, 0x658, 0x2D4, 4, 0x000, 0), /* MX53_PAD_PATA_DATA12__ESDHC4_DAT0 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA12, 0x658, 0x2D4, 5, 0x000, 0), /* MX53_PAD_PATA_DATA12__GPU3d_GPU_DEBUG_OUT_12 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA12, 0x658, 0x2D4, 6, 0x000, 0), /* MX53_PAD_PATA_DATA12__IPU_DIAG_BUS_12 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA13, 0x65C, 0x2D8, 0, 0x000, 0), /* MX53_PAD_PATA_DATA13__PATA_DATA_13 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA13, 0x65C, 0x2D8, 1, 0x000, 0), /* MX53_PAD_PATA_DATA13__GPIO2_13 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA13, 0x65C, 0x2D8, 2, 0x000, 0), /* MX53_PAD_PATA_DATA13__ESDHC2_DAT5 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA13, 0x65C, 0x2D8, 3, 0x000, 0), /* MX53_PAD_PATA_DATA13__EMI_NANDF_D_13 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA13, 0x65C, 0x2D8, 4, 0x000, 0), /* MX53_PAD_PATA_DATA13__ESDHC4_DAT1 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA13, 0x65C, 0x2D8, 5, 0x000, 0), /* MX53_PAD_PATA_DATA13__GPU3d_GPU_DEBUG_OUT_13 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA13, 0x65C, 0x2D8, 6, 0x000, 0), /* MX53_PAD_PATA_DATA13__IPU_DIAG_BUS_13 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA14, 0x660, 0x2DC, 0, 0x000, 0), /* MX53_PAD_PATA_DATA14__PATA_DATA_14 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA14, 0x660, 0x2DC, 1, 0x000, 0), /* MX53_PAD_PATA_DATA14__GPIO2_14 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA14, 0x660, 0x2DC, 2, 0x000, 0), /* MX53_PAD_PATA_DATA14__ESDHC2_DAT6 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA14, 0x660, 0x2DC, 3, 0x000, 0), /* MX53_PAD_PATA_DATA14__EMI_NANDF_D_14 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA14, 0x660, 0x2DC, 4, 0x000, 0), /* MX53_PAD_PATA_DATA14__ESDHC4_DAT2 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA14, 0x660, 0x2DC, 5, 0x000, 0), /* MX53_PAD_PATA_DATA14__GPU3d_GPU_DEBUG_OUT_14 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA14, 0x660, 0x2DC, 6, 0x000, 0), /* MX53_PAD_PATA_DATA14__IPU_DIAG_BUS_14 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA15, 0x664, 0x2E0, 0, 0x000, 0), /* MX53_PAD_PATA_DATA15__PATA_DATA_15 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA15, 0x664, 0x2E0, 1, 0x000, 0), /* MX53_PAD_PATA_DATA15__GPIO2_15 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA15, 0x664, 0x2E0, 2, 0x000, 0), /* MX53_PAD_PATA_DATA15__ESDHC2_DAT7 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA15, 0x664, 0x2E0, 3, 0x000, 0), /* MX53_PAD_PATA_DATA15__EMI_NANDF_D_15 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA15, 0x664, 0x2E0, 4, 0x000, 0), /* MX53_PAD_PATA_DATA15__ESDHC4_DAT3 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA15, 0x664, 0x2E0, 5, 0x000, 0), /* MX53_PAD_PATA_DATA15__GPU3d_GPU_DEBUG_OUT_15 */
- IMX_PIN_REG(MX53_PAD_PATA_DATA15, 0x664, 0x2E0, 6, 0x000, 0), /* MX53_PAD_PATA_DATA15__IPU_DIAG_BUS_15 */
- IMX_PIN_REG(MX53_PAD_SD1_DATA0, 0x66C, 0x2E4, 0, 0x000, 0), /* MX53_PAD_SD1_DATA0__ESDHC1_DAT0 */
- IMX_PIN_REG(MX53_PAD_SD1_DATA0, 0x66C, 0x2E4, 1, 0x000, 0), /* MX53_PAD_SD1_DATA0__GPIO1_16 */
- IMX_PIN_REG(MX53_PAD_SD1_DATA0, 0x66C, 0x2E4, 3, 0x000, 0), /* MX53_PAD_SD1_DATA0__GPT_CAPIN1 */
- IMX_PIN_REG(MX53_PAD_SD1_DATA0, 0x66C, 0x2E4, 5, 0x784, 2), /* MX53_PAD_SD1_DATA0__CSPI_MISO */
- IMX_PIN_REG(MX53_PAD_SD1_DATA0, 0x66C, 0x2E4, 7, 0x778, 0), /* MX53_PAD_SD1_DATA0__CCM_PLL3_BYP */
- IMX_PIN_REG(MX53_PAD_SD1_DATA1, 0x670, 0x2E8, 0, 0x000, 0), /* MX53_PAD_SD1_DATA1__ESDHC1_DAT1 */
- IMX_PIN_REG(MX53_PAD_SD1_DATA1, 0x670, 0x2E8, 1, 0x000, 0), /* MX53_PAD_SD1_DATA1__GPIO1_17 */
- IMX_PIN_REG(MX53_PAD_SD1_DATA1, 0x670, 0x2E8, 3, 0x000, 0), /* MX53_PAD_SD1_DATA1__GPT_CAPIN2 */
- IMX_PIN_REG(MX53_PAD_SD1_DATA1, 0x670, 0x2E8, 5, 0x78C, 3), /* MX53_PAD_SD1_DATA1__CSPI_SS0 */
- IMX_PIN_REG(MX53_PAD_SD1_DATA1, 0x670, 0x2E8, 7, 0x77C, 1), /* MX53_PAD_SD1_DATA1__CCM_PLL4_BYP */
- IMX_PIN_REG(MX53_PAD_SD1_CMD, 0x674, 0x2EC, 0, 0x000, 0), /* MX53_PAD_SD1_CMD__ESDHC1_CMD */
- IMX_PIN_REG(MX53_PAD_SD1_CMD, 0x674, 0x2EC, 1, 0x000, 0), /* MX53_PAD_SD1_CMD__GPIO1_18 */
- IMX_PIN_REG(MX53_PAD_SD1_CMD, 0x674, 0x2EC, 3, 0x000, 0), /* MX53_PAD_SD1_CMD__GPT_CMPOUT1 */
- IMX_PIN_REG(MX53_PAD_SD1_CMD, 0x674, 0x2EC, 5, 0x788, 2), /* MX53_PAD_SD1_CMD__CSPI_MOSI */
- IMX_PIN_REG(MX53_PAD_SD1_CMD, 0x674, 0x2EC, 7, 0x770, 0), /* MX53_PAD_SD1_CMD__CCM_PLL1_BYP */
- IMX_PIN_REG(MX53_PAD_SD1_DATA2, 0x678, 0x2F0, 0, 0x000, 0), /* MX53_PAD_SD1_DATA2__ESDHC1_DAT2 */
- IMX_PIN_REG(MX53_PAD_SD1_DATA2, 0x678, 0x2F0, 1, 0x000, 0), /* MX53_PAD_SD1_DATA2__GPIO1_19 */
- IMX_PIN_REG(MX53_PAD_SD1_DATA2, 0x678, 0x2F0, 2, 0x000, 0), /* MX53_PAD_SD1_DATA2__GPT_CMPOUT2 */
- IMX_PIN_REG(MX53_PAD_SD1_DATA2, 0x678, 0x2F0, 3, 0x000, 0), /* MX53_PAD_SD1_DATA2__PWM2_PWMO */
- IMX_PIN_REG(MX53_PAD_SD1_DATA2, 0x678, 0x2F0, 4, 0x000, 0), /* MX53_PAD_SD1_DATA2__WDOG1_WDOG_B */
- IMX_PIN_REG(MX53_PAD_SD1_DATA2, 0x678, 0x2F0, 5, 0x790, 2), /* MX53_PAD_SD1_DATA2__CSPI_SS1 */
- IMX_PIN_REG(MX53_PAD_SD1_DATA2, 0x678, 0x2F0, 6, 0x000, 0), /* MX53_PAD_SD1_DATA2__WDOG1_WDOG_RST_B_DEB */
- IMX_PIN_REG(MX53_PAD_SD1_DATA2, 0x678, 0x2F0, 7, 0x774, 0), /* MX53_PAD_SD1_DATA2__CCM_PLL2_BYP */
- IMX_PIN_REG(MX53_PAD_SD1_CLK, 0x67C, 0x2F4, 0, 0x000, 0), /* MX53_PAD_SD1_CLK__ESDHC1_CLK */
- IMX_PIN_REG(MX53_PAD_SD1_CLK, 0x67C, 0x2F4, 1, 0x000, 0), /* MX53_PAD_SD1_CLK__GPIO1_20 */
- IMX_PIN_REG(MX53_PAD_SD1_CLK, 0x67C, 0x2F4, 2, 0x000, 0), /* MX53_PAD_SD1_CLK__OSC32k_32K_OUT */
- IMX_PIN_REG(MX53_PAD_SD1_CLK, 0x67C, 0x2F4, 3, 0x000, 0), /* MX53_PAD_SD1_CLK__GPT_CLKIN */
- IMX_PIN_REG(MX53_PAD_SD1_CLK, 0x67C, 0x2F4, 5, 0x780, 2), /* MX53_PAD_SD1_CLK__CSPI_SCLK */
- IMX_PIN_REG(MX53_PAD_SD1_CLK, 0x67C, 0x2F4, 7, 0x000, 0), /* MX53_PAD_SD1_CLK__SATA_PHY_DTB_0 */
- IMX_PIN_REG(MX53_PAD_SD1_DATA3, 0x680, 0x2F8, 0, 0x000, 0), /* MX53_PAD_SD1_DATA3__ESDHC1_DAT3 */
- IMX_PIN_REG(MX53_PAD_SD1_DATA3, 0x680, 0x2F8, 1, 0x000, 0), /* MX53_PAD_SD1_DATA3__GPIO1_21 */
- IMX_PIN_REG(MX53_PAD_SD1_DATA3, 0x680, 0x2F8, 2, 0x000, 0), /* MX53_PAD_SD1_DATA3__GPT_CMPOUT3 */
- IMX_PIN_REG(MX53_PAD_SD1_DATA3, 0x680, 0x2F8, 3, 0x000, 0), /* MX53_PAD_SD1_DATA3__PWM1_PWMO */
- IMX_PIN_REG(MX53_PAD_SD1_DATA3, 0x680, 0x2F8, 4, 0x000, 0), /* MX53_PAD_SD1_DATA3__WDOG2_WDOG_B */
- IMX_PIN_REG(MX53_PAD_SD1_DATA3, 0x680, 0x2F8, 5, 0x794, 2), /* MX53_PAD_SD1_DATA3__CSPI_SS2 */
- IMX_PIN_REG(MX53_PAD_SD1_DATA3, 0x680, 0x2F8, 6, 0x000, 0), /* MX53_PAD_SD1_DATA3__WDOG2_WDOG_RST_B_DEB */
- IMX_PIN_REG(MX53_PAD_SD1_DATA3, 0x680, 0x2F8, 7, 0x000, 0), /* MX53_PAD_SD1_DATA3__SATA_PHY_DTB_1 */
- IMX_PIN_REG(MX53_PAD_SD2_CLK, 0x688, 0x2FC, 0, 0x000, 0), /* MX53_PAD_SD2_CLK__ESDHC2_CLK */
- IMX_PIN_REG(MX53_PAD_SD2_CLK, 0x688, 0x2FC, 1, 0x000, 0), /* MX53_PAD_SD2_CLK__GPIO1_10 */
- IMX_PIN_REG(MX53_PAD_SD2_CLK, 0x688, 0x2FC, 2, 0x840, 2), /* MX53_PAD_SD2_CLK__KPP_COL_5 */
- IMX_PIN_REG(MX53_PAD_SD2_CLK, 0x688, 0x2FC, 3, 0x73C, 1), /* MX53_PAD_SD2_CLK__AUDMUX_AUD4_RXFS */
- IMX_PIN_REG(MX53_PAD_SD2_CLK, 0x688, 0x2FC, 5, 0x780, 3), /* MX53_PAD_SD2_CLK__CSPI_SCLK */
- IMX_PIN_REG(MX53_PAD_SD2_CLK, 0x688, 0x2FC, 7, 0x000, 0), /* MX53_PAD_SD2_CLK__SCC_RANDOM_V */
- IMX_PIN_REG(MX53_PAD_SD2_CMD, 0x68C, 0x300, 0, 0x000, 0), /* MX53_PAD_SD2_CMD__ESDHC2_CMD */
- IMX_PIN_REG(MX53_PAD_SD2_CMD, 0x68C, 0x300, 1, 0x000, 0), /* MX53_PAD_SD2_CMD__GPIO1_11 */
- IMX_PIN_REG(MX53_PAD_SD2_CMD, 0x68C, 0x300, 2, 0x84C, 1), /* MX53_PAD_SD2_CMD__KPP_ROW_5 */
- IMX_PIN_REG(MX53_PAD_SD2_CMD, 0x68C, 0x300, 3, 0x738, 1), /* MX53_PAD_SD2_CMD__AUDMUX_AUD4_RXC */
- IMX_PIN_REG(MX53_PAD_SD2_CMD, 0x68C, 0x300, 5, 0x788, 3), /* MX53_PAD_SD2_CMD__CSPI_MOSI */
- IMX_PIN_REG(MX53_PAD_SD2_CMD, 0x68C, 0x300, 7, 0x000, 0), /* MX53_PAD_SD2_CMD__SCC_RANDOM */
- IMX_PIN_REG(MX53_PAD_SD2_DATA3, 0x690, 0x304, 0, 0x000, 0), /* MX53_PAD_SD2_DATA3__ESDHC2_DAT3 */
- IMX_PIN_REG(MX53_PAD_SD2_DATA3, 0x690, 0x304, 1, 0x000, 0), /* MX53_PAD_SD2_DATA3__GPIO1_12 */
- IMX_PIN_REG(MX53_PAD_SD2_DATA3, 0x690, 0x304, 2, 0x844, 1), /* MX53_PAD_SD2_DATA3__KPP_COL_6 */
- IMX_PIN_REG(MX53_PAD_SD2_DATA3, 0x690, 0x304, 3, 0x740, 1), /* MX53_PAD_SD2_DATA3__AUDMUX_AUD4_TXC */
- IMX_PIN_REG(MX53_PAD_SD2_DATA3, 0x690, 0x304, 5, 0x794, 3), /* MX53_PAD_SD2_DATA3__CSPI_SS2 */
- IMX_PIN_REG(MX53_PAD_SD2_DATA3, 0x690, 0x304, 7, 0x000, 0), /* MX53_PAD_SD2_DATA3__SJC_DONE */
- IMX_PIN_REG(MX53_PAD_SD2_DATA2, 0x694, 0x308, 0, 0x000, 0), /* MX53_PAD_SD2_DATA2__ESDHC2_DAT2 */
- IMX_PIN_REG(MX53_PAD_SD2_DATA2, 0x694, 0x308, 1, 0x000, 0), /* MX53_PAD_SD2_DATA2__GPIO1_13 */
- IMX_PIN_REG(MX53_PAD_SD2_DATA2, 0x694, 0x308, 2, 0x850, 1), /* MX53_PAD_SD2_DATA2__KPP_ROW_6 */
- IMX_PIN_REG(MX53_PAD_SD2_DATA2, 0x694, 0x308, 3, 0x734, 1), /* MX53_PAD_SD2_DATA2__AUDMUX_AUD4_TXD */
- IMX_PIN_REG(MX53_PAD_SD2_DATA2, 0x694, 0x308, 5, 0x790, 3), /* MX53_PAD_SD2_DATA2__CSPI_SS1 */
- IMX_PIN_REG(MX53_PAD_SD2_DATA2, 0x694, 0x308, 7, 0x000, 0), /* MX53_PAD_SD2_DATA2__SJC_FAIL */
- IMX_PIN_REG(MX53_PAD_SD2_DATA1, 0x698, 0x30C, 0, 0x000, 0), /* MX53_PAD_SD2_DATA1__ESDHC2_DAT1 */
- IMX_PIN_REG(MX53_PAD_SD2_DATA1, 0x698, 0x30C, 1, 0x000, 0), /* MX53_PAD_SD2_DATA1__GPIO1_14 */
- IMX_PIN_REG(MX53_PAD_SD2_DATA1, 0x698, 0x30C, 2, 0x848, 1), /* MX53_PAD_SD2_DATA1__KPP_COL_7 */
- IMX_PIN_REG(MX53_PAD_SD2_DATA1, 0x698, 0x30C, 3, 0x744, 0), /* MX53_PAD_SD2_DATA1__AUDMUX_AUD4_TXFS */
- IMX_PIN_REG(MX53_PAD_SD2_DATA1, 0x698, 0x30C, 5, 0x78C, 4), /* MX53_PAD_SD2_DATA1__CSPI_SS0 */
- IMX_PIN_REG(MX53_PAD_SD2_DATA1, 0x698, 0x30C, 7, 0x000, 0), /* MX53_PAD_SD2_DATA1__RTIC_SEC_VIO */
- IMX_PIN_REG(MX53_PAD_SD2_DATA0, 0x69C, 0x310, 0, 0x000, 0), /* MX53_PAD_SD2_DATA0__ESDHC2_DAT0 */
- IMX_PIN_REG(MX53_PAD_SD2_DATA0, 0x69C, 0x310, 1, 0x000, 0), /* MX53_PAD_SD2_DATA0__GPIO1_15 */
- IMX_PIN_REG(MX53_PAD_SD2_DATA0, 0x69C, 0x310, 2, 0x854, 1), /* MX53_PAD_SD2_DATA0__KPP_ROW_7 */
- IMX_PIN_REG(MX53_PAD_SD2_DATA0, 0x69C, 0x310, 3, 0x730, 1), /* MX53_PAD_SD2_DATA0__AUDMUX_AUD4_RXD */
- IMX_PIN_REG(MX53_PAD_SD2_DATA0, 0x69C, 0x310, 5, 0x784, 3), /* MX53_PAD_SD2_DATA0__CSPI_MISO */
- IMX_PIN_REG(MX53_PAD_SD2_DATA0, 0x69C, 0x310, 7, 0x000, 0), /* MX53_PAD_SD2_DATA0__RTIC_DONE_INT */
- IMX_PIN_REG(MX53_PAD_GPIO_0, 0x6A4, 0x314, 0, 0x000, 0), /* MX53_PAD_GPIO_0__CCM_CLKO */
- IMX_PIN_REG(MX53_PAD_GPIO_0, 0x6A4, 0x314, 1, 0x000, 0), /* MX53_PAD_GPIO_0__GPIO1_0 */
- IMX_PIN_REG(MX53_PAD_GPIO_0, 0x6A4, 0x314, 2, 0x840, 3), /* MX53_PAD_GPIO_0__KPP_COL_5 */
- IMX_PIN_REG(MX53_PAD_GPIO_0, 0x6A4, 0x314, 3, 0x000, 0), /* MX53_PAD_GPIO_0__CCM_SSI_EXT1_CLK */
- IMX_PIN_REG(MX53_PAD_GPIO_0, 0x6A4, 0x314, 4, 0x000, 0), /* MX53_PAD_GPIO_0__EPIT1_EPITO */
- IMX_PIN_REG(MX53_PAD_GPIO_0, 0x6A4, 0x314, 5, 0x000, 0), /* MX53_PAD_GPIO_0__SRTC_ALARM_DEB */
- IMX_PIN_REG(MX53_PAD_GPIO_0, 0x6A4, 0x314, 6, 0x000, 0), /* MX53_PAD_GPIO_0__USBOH3_USBH1_PWR */
- IMX_PIN_REG(MX53_PAD_GPIO_0, 0x6A4, 0x314, 7, 0x000, 0), /* MX53_PAD_GPIO_0__CSU_TD */
- IMX_PIN_REG(MX53_PAD_GPIO_1, 0x6A8, 0x318, 0, 0x7DC, 1), /* MX53_PAD_GPIO_1__ESAI1_SCKR */
- IMX_PIN_REG(MX53_PAD_GPIO_1, 0x6A8, 0x318, 1, 0x000, 0), /* MX53_PAD_GPIO_1__GPIO1_1 */
- IMX_PIN_REG(MX53_PAD_GPIO_1, 0x6A8, 0x318, 2, 0x84C, 2), /* MX53_PAD_GPIO_1__KPP_ROW_5 */
- IMX_PIN_REG(MX53_PAD_GPIO_1, 0x6A8, 0x318, 3, 0x000, 0), /* MX53_PAD_GPIO_1__CCM_SSI_EXT2_CLK */
- IMX_PIN_REG(MX53_PAD_GPIO_1, 0x6A8, 0x318, 4, 0x000, 0), /* MX53_PAD_GPIO_1__PWM2_PWMO */
- IMX_PIN_REG(MX53_PAD_GPIO_1, 0x6A8, 0x318, 5, 0x000, 0), /* MX53_PAD_GPIO_1__WDOG2_WDOG_B */
- IMX_PIN_REG(MX53_PAD_GPIO_1, 0x6A8, 0x318, 6, 0x000, 0), /* MX53_PAD_GPIO_1__ESDHC1_CD */
- IMX_PIN_REG(MX53_PAD_GPIO_1, 0x6A8, 0x318, 7, 0x000, 0), /* MX53_PAD_GPIO_1__SRC_TESTER_ACK */
- IMX_PIN_REG(MX53_PAD_GPIO_9, 0x6AC, 0x31C, 0, 0x7CC, 1), /* MX53_PAD_GPIO_9__ESAI1_FSR */
- IMX_PIN_REG(MX53_PAD_GPIO_9, 0x6AC, 0x31C, 1, 0x000, 0), /* MX53_PAD_GPIO_9__GPIO1_9 */
- IMX_PIN_REG(MX53_PAD_GPIO_9, 0x6AC, 0x31C, 2, 0x844, 2), /* MX53_PAD_GPIO_9__KPP_COL_6 */
- IMX_PIN_REG(MX53_PAD_GPIO_9, 0x6AC, 0x31C, 3, 0x000, 0), /* MX53_PAD_GPIO_9__CCM_REF_EN_B */
- IMX_PIN_REG(MX53_PAD_GPIO_9, 0x6AC, 0x31C, 4, 0x000, 0), /* MX53_PAD_GPIO_9__PWM1_PWMO */
- IMX_PIN_REG(MX53_PAD_GPIO_9, 0x6AC, 0x31C, 5, 0x000, 0), /* MX53_PAD_GPIO_9__WDOG1_WDOG_B */
- IMX_PIN_REG(MX53_PAD_GPIO_9, 0x6AC, 0x31C, 6, 0x7FC, 1), /* MX53_PAD_GPIO_9__ESDHC1_WP */
- IMX_PIN_REG(MX53_PAD_GPIO_9, 0x6AC, 0x31C, 7, 0x000, 0), /* MX53_PAD_GPIO_9__SCC_FAIL_STATE */
- IMX_PIN_REG(MX53_PAD_GPIO_3, 0x6B0, 0x320, 0, 0x7D4, 1), /* MX53_PAD_GPIO_3__ESAI1_HCKR */
- IMX_PIN_REG(MX53_PAD_GPIO_3, 0x6B0, 0x320, 1, 0x000, 0), /* MX53_PAD_GPIO_3__GPIO1_3 */
- IMX_PIN_REG(MX53_PAD_GPIO_3, 0x6B0, 0x320, 2, 0x824, 1), /* MX53_PAD_GPIO_3__I2C3_SCL */
- IMX_PIN_REG(MX53_PAD_GPIO_3, 0x6B0, 0x320, 3, 0x000, 0), /* MX53_PAD_GPIO_3__DPLLIP1_TOG_EN */
- IMX_PIN_REG(MX53_PAD_GPIO_3, 0x6B0, 0x320, 4, 0x000, 0), /* MX53_PAD_GPIO_3__CCM_CLKO2 */
- IMX_PIN_REG(MX53_PAD_GPIO_3, 0x6B0, 0x320, 5, 0x000, 0), /* MX53_PAD_GPIO_3__OBSERVE_MUX_OBSRV_INT_OUT0 */
- IMX_PIN_REG(MX53_PAD_GPIO_3, 0x6B0, 0x320, 6, 0x8A0, 1), /* MX53_PAD_GPIO_3__USBOH3_USBH1_OC */
- IMX_PIN_REG(MX53_PAD_GPIO_3, 0x6B0, 0x320, 7, 0x858, 2), /* MX53_PAD_GPIO_3__MLB_MLBCLK */
- IMX_PIN_REG(MX53_PAD_GPIO_6, 0x6B4, 0x324, 0, 0x7E0, 1), /* MX53_PAD_GPIO_6__ESAI1_SCKT */
- IMX_PIN_REG(MX53_PAD_GPIO_6, 0x6B4, 0x324, 1, 0x000, 0), /* MX53_PAD_GPIO_6__GPIO1_6 */
- IMX_PIN_REG(MX53_PAD_GPIO_6, 0x6B4, 0x324, 2, 0x828, 1), /* MX53_PAD_GPIO_6__I2C3_SDA */
- IMX_PIN_REG(MX53_PAD_GPIO_6, 0x6B4, 0x324, 3, 0x000, 0), /* MX53_PAD_GPIO_6__CCM_CCM_OUT_0 */
- IMX_PIN_REG(MX53_PAD_GPIO_6, 0x6B4, 0x324, 4, 0x000, 0), /* MX53_PAD_GPIO_6__CSU_CSU_INT_DEB */
- IMX_PIN_REG(MX53_PAD_GPIO_6, 0x6B4, 0x324, 5, 0x000, 0), /* MX53_PAD_GPIO_6__OBSERVE_MUX_OBSRV_INT_OUT1 */
- IMX_PIN_REG(MX53_PAD_GPIO_6, 0x6B4, 0x324, 6, 0x000, 0), /* MX53_PAD_GPIO_6__ESDHC2_LCTL */
- IMX_PIN_REG(MX53_PAD_GPIO_6, 0x6B4, 0x324, 7, 0x860, 2), /* MX53_PAD_GPIO_6__MLB_MLBSIG */
- IMX_PIN_REG(MX53_PAD_GPIO_2, 0x6B8, 0x328, 0, 0x7D0, 1), /* MX53_PAD_GPIO_2__ESAI1_FST */
- IMX_PIN_REG(MX53_PAD_GPIO_2, 0x6B8, 0x328, 1, 0x000, 0), /* MX53_PAD_GPIO_2__GPIO1_2 */
- IMX_PIN_REG(MX53_PAD_GPIO_2, 0x6B8, 0x328, 2, 0x850, 2), /* MX53_PAD_GPIO_2__KPP_ROW_6 */
- IMX_PIN_REG(MX53_PAD_GPIO_2, 0x6B8, 0x328, 3, 0x000, 0), /* MX53_PAD_GPIO_2__CCM_CCM_OUT_1 */
- IMX_PIN_REG(MX53_PAD_GPIO_2, 0x6B8, 0x328, 4, 0x000, 0), /* MX53_PAD_GPIO_2__CSU_CSU_ALARM_AUT_0 */
- IMX_PIN_REG(MX53_PAD_GPIO_2, 0x6B8, 0x328, 5, 0x000, 0), /* MX53_PAD_GPIO_2__OBSERVE_MUX_OBSRV_INT_OUT2 */
- IMX_PIN_REG(MX53_PAD_GPIO_2, 0x6B8, 0x328, 6, 0x000, 0), /* MX53_PAD_GPIO_2__ESDHC2_WP */
- IMX_PIN_REG(MX53_PAD_GPIO_2, 0x6B8, 0x328, 7, 0x85C, 2), /* MX53_PAD_GPIO_2__MLB_MLBDAT */
- IMX_PIN_REG(MX53_PAD_GPIO_4, 0x6BC, 0x32C, 0, 0x7D8, 1), /* MX53_PAD_GPIO_4__ESAI1_HCKT */
- IMX_PIN_REG(MX53_PAD_GPIO_4, 0x6BC, 0x32C, 1, 0x000, 0), /* MX53_PAD_GPIO_4__GPIO1_4 */
- IMX_PIN_REG(MX53_PAD_GPIO_4, 0x6BC, 0x32C, 2, 0x848, 2), /* MX53_PAD_GPIO_4__KPP_COL_7 */
- IMX_PIN_REG(MX53_PAD_GPIO_4, 0x6BC, 0x32C, 3, 0x000, 0), /* MX53_PAD_GPIO_4__CCM_CCM_OUT_2 */
- IMX_PIN_REG(MX53_PAD_GPIO_4, 0x6BC, 0x32C, 4, 0x000, 0), /* MX53_PAD_GPIO_4__CSU_CSU_ALARM_AUT_1 */
- IMX_PIN_REG(MX53_PAD_GPIO_4, 0x6BC, 0x32C, 5, 0x000, 0), /* MX53_PAD_GPIO_4__OBSERVE_MUX_OBSRV_INT_OUT3 */
- IMX_PIN_REG(MX53_PAD_GPIO_4, 0x6BC, 0x32C, 6, 0x000, 0), /* MX53_PAD_GPIO_4__ESDHC2_CD */
- IMX_PIN_REG(MX53_PAD_GPIO_4, 0x6BC, 0x32C, 7, 0x000, 0), /* MX53_PAD_GPIO_4__SCC_SEC_STATE */
- IMX_PIN_REG(MX53_PAD_GPIO_5, 0x6C0, 0x330, 0, 0x7EC, 1), /* MX53_PAD_GPIO_5__ESAI1_TX2_RX3 */
- IMX_PIN_REG(MX53_PAD_GPIO_5, 0x6C0, 0x330, 1, 0x000, 0), /* MX53_PAD_GPIO_5__GPIO1_5 */
- IMX_PIN_REG(MX53_PAD_GPIO_5, 0x6C0, 0x330, 2, 0x854, 2), /* MX53_PAD_GPIO_5__KPP_ROW_7 */
- IMX_PIN_REG(MX53_PAD_GPIO_5, 0x6C0, 0x330, 3, 0x000, 0), /* MX53_PAD_GPIO_5__CCM_CLKO */
- IMX_PIN_REG(MX53_PAD_GPIO_5, 0x6C0, 0x330, 4, 0x000, 0), /* MX53_PAD_GPIO_5__CSU_CSU_ALARM_AUT_2 */
- IMX_PIN_REG(MX53_PAD_GPIO_5, 0x6C0, 0x330, 5, 0x000, 0), /* MX53_PAD_GPIO_5__OBSERVE_MUX_OBSRV_INT_OUT4 */
- IMX_PIN_REG(MX53_PAD_GPIO_5, 0x6C0, 0x330, 6, 0x824, 2), /* MX53_PAD_GPIO_5__I2C3_SCL */
- IMX_PIN_REG(MX53_PAD_GPIO_5, 0x6C0, 0x330, 7, 0x770, 1), /* MX53_PAD_GPIO_5__CCM_PLL1_BYP */
- IMX_PIN_REG(MX53_PAD_GPIO_7, 0x6C4, 0x334, 0, 0x7F4, 1), /* MX53_PAD_GPIO_7__ESAI1_TX4_RX1 */
- IMX_PIN_REG(MX53_PAD_GPIO_7, 0x6C4, 0x334, 1, 0x000, 0), /* MX53_PAD_GPIO_7__GPIO1_7 */
- IMX_PIN_REG(MX53_PAD_GPIO_7, 0x6C4, 0x334, 2, 0x000, 0), /* MX53_PAD_GPIO_7__EPIT1_EPITO */
- IMX_PIN_REG(MX53_PAD_GPIO_7, 0x6C4, 0x334, 3, 0x000, 0), /* MX53_PAD_GPIO_7__CAN1_TXCAN */
- IMX_PIN_REG(MX53_PAD_GPIO_7, 0x6C4, 0x334, 4, 0x000, 0), /* MX53_PAD_GPIO_7__UART2_TXD_MUX */
- IMX_PIN_REG(MX53_PAD_GPIO_7, 0x6C4, 0x334, 5, 0x80C, 1), /* MX53_PAD_GPIO_7__FIRI_RXD */
- IMX_PIN_REG(MX53_PAD_GPIO_7, 0x6C4, 0x334, 6, 0x000, 0), /* MX53_PAD_GPIO_7__SPDIF_PLOCK */
- IMX_PIN_REG(MX53_PAD_GPIO_7, 0x6C4, 0x334, 7, 0x774, 1), /* MX53_PAD_GPIO_7__CCM_PLL2_BYP */
- IMX_PIN_REG(MX53_PAD_GPIO_8, 0x6C8, 0x338, 0, 0x7F8, 1), /* MX53_PAD_GPIO_8__ESAI1_TX5_RX0 */
- IMX_PIN_REG(MX53_PAD_GPIO_8, 0x6C8, 0x338, 1, 0x000, 0), /* MX53_PAD_GPIO_8__GPIO1_8 */
- IMX_PIN_REG(MX53_PAD_GPIO_8, 0x6C8, 0x338, 2, 0x000, 0), /* MX53_PAD_GPIO_8__EPIT2_EPITO */
- IMX_PIN_REG(MX53_PAD_GPIO_8, 0x6C8, 0x338, 3, 0x760, 2), /* MX53_PAD_GPIO_8__CAN1_RXCAN */
- IMX_PIN_REG(MX53_PAD_GPIO_8, 0x6C8, 0x338, 4, 0x880, 5), /* MX53_PAD_GPIO_8__UART2_RXD_MUX */
- IMX_PIN_REG(MX53_PAD_GPIO_8, 0x6C8, 0x338, 5, 0x000, 0), /* MX53_PAD_GPIO_8__FIRI_TXD */
- IMX_PIN_REG(MX53_PAD_GPIO_8, 0x6C8, 0x338, 6, 0x000, 0), /* MX53_PAD_GPIO_8__SPDIF_SRCLK */
- IMX_PIN_REG(MX53_PAD_GPIO_8, 0x6C8, 0x338, 7, 0x778, 1), /* MX53_PAD_GPIO_8__CCM_PLL3_BYP */
- IMX_PIN_REG(MX53_PAD_GPIO_16, 0x6CC, 0x33C, 0, 0x7F0, 1), /* MX53_PAD_GPIO_16__ESAI1_TX3_RX2 */
- IMX_PIN_REG(MX53_PAD_GPIO_16, 0x6CC, 0x33C, 1, 0x000, 0), /* MX53_PAD_GPIO_16__GPIO7_11 */
- IMX_PIN_REG(MX53_PAD_GPIO_16, 0x6CC, 0x33C, 2, 0x000, 0), /* MX53_PAD_GPIO_16__TZIC_PWRFAIL_INT */
- IMX_PIN_REG(MX53_PAD_GPIO_16, 0x6CC, 0x33C, 4, 0x000, 0), /* MX53_PAD_GPIO_16__RTC_CE_RTC_EXT_TRIG1 */
- IMX_PIN_REG(MX53_PAD_GPIO_16, 0x6CC, 0x33C, 5, 0x870, 1), /* MX53_PAD_GPIO_16__SPDIF_IN1 */
- IMX_PIN_REG(MX53_PAD_GPIO_16, 0x6CC, 0x33C, 6, 0x828, 2), /* MX53_PAD_GPIO_16__I2C3_SDA */
- IMX_PIN_REG(MX53_PAD_GPIO_16, 0x6CC, 0x33C, 7, 0x000, 0), /* MX53_PAD_GPIO_16__SJC_DE_B */
- IMX_PIN_REG(MX53_PAD_GPIO_17, 0x6D0, 0x340, 0, 0x7E4, 1), /* MX53_PAD_GPIO_17__ESAI1_TX0 */
- IMX_PIN_REG(MX53_PAD_GPIO_17, 0x6D0, 0x340, 1, 0x000, 0), /* MX53_PAD_GPIO_17__GPIO7_12 */
- IMX_PIN_REG(MX53_PAD_GPIO_17, 0x6D0, 0x340, 2, 0x868, 1), /* MX53_PAD_GPIO_17__SDMA_EXT_EVENT_0 */
- IMX_PIN_REG(MX53_PAD_GPIO_17, 0x6D0, 0x340, 3, 0x810, 1), /* MX53_PAD_GPIO_17__GPC_PMIC_RDY */
- IMX_PIN_REG(MX53_PAD_GPIO_17, 0x6D0, 0x340, 4, 0x000, 0), /* MX53_PAD_GPIO_17__RTC_CE_RTC_FSV_TRIG */
- IMX_PIN_REG(MX53_PAD_GPIO_17, 0x6D0, 0x340, 5, 0x000, 0), /* MX53_PAD_GPIO_17__SPDIF_OUT1 */
- IMX_PIN_REG(MX53_PAD_GPIO_17, 0x6D0, 0x340, 6, 0x000, 0), /* MX53_PAD_GPIO_17__IPU_SNOOP2 */
- IMX_PIN_REG(MX53_PAD_GPIO_17, 0x6D0, 0x340, 7, 0x000, 0), /* MX53_PAD_GPIO_17__SJC_JTAG_ACT */
- IMX_PIN_REG(MX53_PAD_GPIO_18, 0x6D4, 0x344, 0, 0x7E8, 1), /* MX53_PAD_GPIO_18__ESAI1_TX1 */
- IMX_PIN_REG(MX53_PAD_GPIO_18, 0x6D4, 0x344, 1, 0x000, 0), /* MX53_PAD_GPIO_18__GPIO7_13 */
- IMX_PIN_REG(MX53_PAD_GPIO_18, 0x6D4, 0x344, 2, 0x86C, 1), /* MX53_PAD_GPIO_18__SDMA_EXT_EVENT_1 */
- IMX_PIN_REG(MX53_PAD_GPIO_18, 0x6D4, 0x344, 3, 0x864, 1), /* MX53_PAD_GPIO_18__OWIRE_LINE */
- IMX_PIN_REG(MX53_PAD_GPIO_18, 0x6D4, 0x344, 4, 0x000, 0), /* MX53_PAD_GPIO_18__RTC_CE_RTC_ALARM2_TRIG */
- IMX_PIN_REG(MX53_PAD_GPIO_18, 0x6D4, 0x344, 5, 0x768, 1), /* MX53_PAD_GPIO_18__CCM_ASRC_EXT_CLK */
- IMX_PIN_REG(MX53_PAD_GPIO_18, 0x6D4, 0x344, 6, 0x000, 0), /* MX53_PAD_GPIO_18__ESDHC1_LCTL */
- IMX_PIN_REG(MX53_PAD_GPIO_18, 0x6D4, 0x344, 7, 0x000, 0), /* MX53_PAD_GPIO_18__SRC_SYSTEM_RST */
+ MX53_PAD_RESERVE0 = 0,
+ MX53_PAD_RESERVE1 = 1,
+ MX53_PAD_RESERVE2 = 2,
+ MX53_PAD_RESERVE3 = 3,
+ MX53_PAD_RESERVE4 = 4,
+ MX53_PAD_RESERVE5 = 5,
+ MX53_PAD_RESERVE6 = 6,
+ MX53_PAD_RESERVE7 = 7,
+ MX53_PAD_GPIO_19 = 8,
+ MX53_PAD_KEY_COL0 = 9,
+ MX53_PAD_KEY_ROW0 = 10,
+ MX53_PAD_KEY_COL1 = 11,
+ MX53_PAD_KEY_ROW1 = 12,
+ MX53_PAD_KEY_COL2 = 13,
+ MX53_PAD_KEY_ROW2 = 14,
+ MX53_PAD_KEY_COL3 = 15,
+ MX53_PAD_KEY_ROW3 = 16,
+ MX53_PAD_KEY_COL4 = 17,
+ MX53_PAD_KEY_ROW4 = 18,
+ MX53_PAD_DI0_DISP_CLK = 19,
+ MX53_PAD_DI0_PIN15 = 20,
+ MX53_PAD_DI0_PIN2 = 21,
+ MX53_PAD_DI0_PIN3 = 22,
+ MX53_PAD_DI0_PIN4 = 23,
+ MX53_PAD_DISP0_DAT0 = 24,
+ MX53_PAD_DISP0_DAT1 = 25,
+ MX53_PAD_DISP0_DAT2 = 26,
+ MX53_PAD_DISP0_DAT3 = 27,
+ MX53_PAD_DISP0_DAT4 = 28,
+ MX53_PAD_DISP0_DAT5 = 29,
+ MX53_PAD_DISP0_DAT6 = 30,
+ MX53_PAD_DISP0_DAT7 = 31,
+ MX53_PAD_DISP0_DAT8 = 32,
+ MX53_PAD_DISP0_DAT9 = 33,
+ MX53_PAD_DISP0_DAT10 = 34,
+ MX53_PAD_DISP0_DAT11 = 35,
+ MX53_PAD_DISP0_DAT12 = 36,
+ MX53_PAD_DISP0_DAT13 = 37,
+ MX53_PAD_DISP0_DAT14 = 38,
+ MX53_PAD_DISP0_DAT15 = 39,
+ MX53_PAD_DISP0_DAT16 = 40,
+ MX53_PAD_DISP0_DAT17 = 41,
+ MX53_PAD_DISP0_DAT18 = 42,
+ MX53_PAD_DISP0_DAT19 = 43,
+ MX53_PAD_DISP0_DAT20 = 44,
+ MX53_PAD_DISP0_DAT21 = 45,
+ MX53_PAD_DISP0_DAT22 = 46,
+ MX53_PAD_DISP0_DAT23 = 47,
+ MX53_PAD_CSI0_PIXCLK = 48,
+ MX53_PAD_CSI0_MCLK = 49,
+ MX53_PAD_CSI0_DATA_EN = 50,
+ MX53_PAD_CSI0_VSYNC = 51,
+ MX53_PAD_CSI0_DAT4 = 52,
+ MX53_PAD_CSI0_DAT5 = 53,
+ MX53_PAD_CSI0_DAT6 = 54,
+ MX53_PAD_CSI0_DAT7 = 55,
+ MX53_PAD_CSI0_DAT8 = 56,
+ MX53_PAD_CSI0_DAT9 = 57,
+ MX53_PAD_CSI0_DAT10 = 58,
+ MX53_PAD_CSI0_DAT11 = 59,
+ MX53_PAD_CSI0_DAT12 = 60,
+ MX53_PAD_CSI0_DAT13 = 61,
+ MX53_PAD_CSI0_DAT14 = 62,
+ MX53_PAD_CSI0_DAT15 = 63,
+ MX53_PAD_CSI0_DAT16 = 64,
+ MX53_PAD_CSI0_DAT17 = 65,
+ MX53_PAD_CSI0_DAT18 = 66,
+ MX53_PAD_CSI0_DAT19 = 67,
+ MX53_PAD_EIM_A25 = 68,
+ MX53_PAD_EIM_EB2 = 69,
+ MX53_PAD_EIM_D16 = 70,
+ MX53_PAD_EIM_D17 = 71,
+ MX53_PAD_EIM_D18 = 72,
+ MX53_PAD_EIM_D19 = 73,
+ MX53_PAD_EIM_D20 = 74,
+ MX53_PAD_EIM_D21 = 75,
+ MX53_PAD_EIM_D22 = 76,
+ MX53_PAD_EIM_D23 = 77,
+ MX53_PAD_EIM_EB3 = 78,
+ MX53_PAD_EIM_D24 = 79,
+ MX53_PAD_EIM_D25 = 80,
+ MX53_PAD_EIM_D26 = 81,
+ MX53_PAD_EIM_D27 = 82,
+ MX53_PAD_EIM_D28 = 83,
+ MX53_PAD_EIM_D29 = 84,
+ MX53_PAD_EIM_D30 = 85,
+ MX53_PAD_EIM_D31 = 86,
+ MX53_PAD_EIM_A24 = 87,
+ MX53_PAD_EIM_A23 = 88,
+ MX53_PAD_EIM_A22 = 89,
+ MX53_PAD_EIM_A21 = 90,
+ MX53_PAD_EIM_A20 = 91,
+ MX53_PAD_EIM_A19 = 92,
+ MX53_PAD_EIM_A18 = 93,
+ MX53_PAD_EIM_A17 = 94,
+ MX53_PAD_EIM_A16 = 95,
+ MX53_PAD_EIM_CS0 = 96,
+ MX53_PAD_EIM_CS1 = 97,
+ MX53_PAD_EIM_OE = 98,
+ MX53_PAD_EIM_RW = 99,
+ MX53_PAD_EIM_LBA = 100,
+ MX53_PAD_EIM_EB0 = 101,
+ MX53_PAD_EIM_EB1 = 102,
+ MX53_PAD_EIM_DA0 = 103,
+ MX53_PAD_EIM_DA1 = 104,
+ MX53_PAD_EIM_DA2 = 105,
+ MX53_PAD_EIM_DA3 = 106,
+ MX53_PAD_EIM_DA4 = 107,
+ MX53_PAD_EIM_DA5 = 108,
+ MX53_PAD_EIM_DA6 = 109,
+ MX53_PAD_EIM_DA7 = 110,
+ MX53_PAD_EIM_DA8 = 111,
+ MX53_PAD_EIM_DA9 = 112,
+ MX53_PAD_EIM_DA10 = 113,
+ MX53_PAD_EIM_DA11 = 114,
+ MX53_PAD_EIM_DA12 = 115,
+ MX53_PAD_EIM_DA13 = 116,
+ MX53_PAD_EIM_DA14 = 117,
+ MX53_PAD_EIM_DA15 = 118,
+ MX53_PAD_NANDF_WE_B = 119,
+ MX53_PAD_NANDF_RE_B = 120,
+ MX53_PAD_EIM_WAIT = 121,
+ MX53_PAD_RESERVE8 = 122,
+ MX53_PAD_LVDS1_TX3_P = 123,
+ MX53_PAD_LVDS1_TX2_P = 124,
+ MX53_PAD_LVDS1_CLK_P = 125,
+ MX53_PAD_LVDS1_TX1_P = 126,
+ MX53_PAD_LVDS1_TX0_P = 127,
+ MX53_PAD_LVDS0_TX3_P = 128,
+ MX53_PAD_LVDS0_CLK_P = 129,
+ MX53_PAD_LVDS0_TX2_P = 130,
+ MX53_PAD_LVDS0_TX1_P = 131,
+ MX53_PAD_LVDS0_TX0_P = 132,
+ MX53_PAD_GPIO_10 = 133,
+ MX53_PAD_GPIO_11 = 134,
+ MX53_PAD_GPIO_12 = 135,
+ MX53_PAD_GPIO_13 = 136,
+ MX53_PAD_GPIO_14 = 137,
+ MX53_PAD_NANDF_CLE = 138,
+ MX53_PAD_NANDF_ALE = 139,
+ MX53_PAD_NANDF_WP_B = 140,
+ MX53_PAD_NANDF_RB0 = 141,
+ MX53_PAD_NANDF_CS0 = 142,
+ MX53_PAD_NANDF_CS1 = 143,
+ MX53_PAD_NANDF_CS2 = 144,
+ MX53_PAD_NANDF_CS3 = 145,
+ MX53_PAD_FEC_MDIO = 146,
+ MX53_PAD_FEC_REF_CLK = 147,
+ MX53_PAD_FEC_RX_ER = 148,
+ MX53_PAD_FEC_CRS_DV = 149,
+ MX53_PAD_FEC_RXD1 = 150,
+ MX53_PAD_FEC_RXD0 = 151,
+ MX53_PAD_FEC_TX_EN = 152,
+ MX53_PAD_FEC_TXD1 = 153,
+ MX53_PAD_FEC_TXD0 = 154,
+ MX53_PAD_FEC_MDC = 155,
+ MX53_PAD_PATA_DIOW = 156,
+ MX53_PAD_PATA_DMACK = 157,
+ MX53_PAD_PATA_DMARQ = 158,
+ MX53_PAD_PATA_BUFFER_EN = 159,
+ MX53_PAD_PATA_INTRQ = 160,
+ MX53_PAD_PATA_DIOR = 161,
+ MX53_PAD_PATA_RESET_B = 162,
+ MX53_PAD_PATA_IORDY = 163,
+ MX53_PAD_PATA_DA_0 = 164,
+ MX53_PAD_PATA_DA_1 = 165,
+ MX53_PAD_PATA_DA_2 = 166,
+ MX53_PAD_PATA_CS_0 = 167,
+ MX53_PAD_PATA_CS_1 = 168,
+ MX53_PAD_PATA_DATA0 = 169,
+ MX53_PAD_PATA_DATA1 = 170,
+ MX53_PAD_PATA_DATA2 = 171,
+ MX53_PAD_PATA_DATA3 = 172,
+ MX53_PAD_PATA_DATA4 = 173,
+ MX53_PAD_PATA_DATA5 = 174,
+ MX53_PAD_PATA_DATA6 = 175,
+ MX53_PAD_PATA_DATA7 = 176,
+ MX53_PAD_PATA_DATA8 = 177,
+ MX53_PAD_PATA_DATA9 = 178,
+ MX53_PAD_PATA_DATA10 = 179,
+ MX53_PAD_PATA_DATA11 = 180,
+ MX53_PAD_PATA_DATA12 = 181,
+ MX53_PAD_PATA_DATA13 = 182,
+ MX53_PAD_PATA_DATA14 = 183,
+ MX53_PAD_PATA_DATA15 = 184,
+ MX53_PAD_SD1_DATA0 = 185,
+ MX53_PAD_SD1_DATA1 = 186,
+ MX53_PAD_SD1_CMD = 187,
+ MX53_PAD_SD1_DATA2 = 188,
+ MX53_PAD_SD1_CLK = 189,
+ MX53_PAD_SD1_DATA3 = 190,
+ MX53_PAD_SD2_CLK = 191,
+ MX53_PAD_SD2_CMD = 192,
+ MX53_PAD_SD2_DATA3 = 193,
+ MX53_PAD_SD2_DATA2 = 194,
+ MX53_PAD_SD2_DATA1 = 195,
+ MX53_PAD_SD2_DATA0 = 196,
+ MX53_PAD_GPIO_0 = 197,
+ MX53_PAD_GPIO_1 = 198,
+ MX53_PAD_GPIO_9 = 199,
+ MX53_PAD_GPIO_3 = 200,
+ MX53_PAD_GPIO_6 = 201,
+ MX53_PAD_GPIO_2 = 202,
+ MX53_PAD_GPIO_4 = 203,
+ MX53_PAD_GPIO_5 = 204,
+ MX53_PAD_GPIO_7 = 205,
+ MX53_PAD_GPIO_8 = 206,
+ MX53_PAD_GPIO_16 = 207,
+ MX53_PAD_GPIO_17 = 208,
+ MX53_PAD_GPIO_18 = 209,
};
/* Pad names for the pinmux subsystem */
static const struct pinctrl_pin_desc imx53_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(MX53_PAD_RESERVE0),
+ IMX_PINCTRL_PIN(MX53_PAD_RESERVE1),
+ IMX_PINCTRL_PIN(MX53_PAD_RESERVE2),
+ IMX_PINCTRL_PIN(MX53_PAD_RESERVE3),
+ IMX_PINCTRL_PIN(MX53_PAD_RESERVE4),
+ IMX_PINCTRL_PIN(MX53_PAD_RESERVE5),
+ IMX_PINCTRL_PIN(MX53_PAD_RESERVE6),
+ IMX_PINCTRL_PIN(MX53_PAD_RESERVE7),
IMX_PINCTRL_PIN(MX53_PAD_GPIO_19),
IMX_PINCTRL_PIN(MX53_PAD_KEY_COL0),
IMX_PINCTRL_PIN(MX53_PAD_KEY_ROW0),
@@ -1517,6 +359,7 @@ static const struct pinctrl_pin_desc imx53_pinctrl_pads[] = {
IMX_PINCTRL_PIN(MX53_PAD_NANDF_WE_B),
IMX_PINCTRL_PIN(MX53_PAD_NANDF_RE_B),
IMX_PINCTRL_PIN(MX53_PAD_EIM_WAIT),
+ IMX_PINCTRL_PIN(MX53_PAD_RESERVE8),
IMX_PINCTRL_PIN(MX53_PAD_LVDS1_TX3_P),
IMX_PINCTRL_PIN(MX53_PAD_LVDS1_TX2_P),
IMX_PINCTRL_PIN(MX53_PAD_LVDS1_CLK_P),
@@ -1609,8 +452,6 @@ static const struct pinctrl_pin_desc imx53_pinctrl_pads[] = {
static struct imx_pinctrl_soc_info imx53_pinctrl_info = {
.pins = imx53_pinctrl_pads,
.npins = ARRAY_SIZE(imx53_pinctrl_pads),
- .pin_regs = imx53_pin_regs,
- .npin_regs = ARRAY_SIZE(imx53_pin_regs),
};
static struct of_device_id imx53_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/pinctrl-imx6dl.c b/drivers/pinctrl/pinctrl-imx6dl.c
new file mode 100644
index 0000000..a76b724
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-imx6dl.c
@@ -0,0 +1,497 @@
+/*
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-imx.h"
+
+enum imx6dl_pads {
+ MX6DL_PAD_RESERVE0 = 0,
+ MX6DL_PAD_RESERVE1 = 1,
+ MX6DL_PAD_RESERVE2 = 2,
+ MX6DL_PAD_RESERVE3 = 3,
+ MX6DL_PAD_RESERVE4 = 4,
+ MX6DL_PAD_RESERVE5 = 5,
+ MX6DL_PAD_RESERVE6 = 6,
+ MX6DL_PAD_RESERVE7 = 7,
+ MX6DL_PAD_RESERVE8 = 8,
+ MX6DL_PAD_RESERVE9 = 9,
+ MX6DL_PAD_RESERVE10 = 10,
+ MX6DL_PAD_RESERVE11 = 11,
+ MX6DL_PAD_RESERVE12 = 12,
+ MX6DL_PAD_RESERVE13 = 13,
+ MX6DL_PAD_RESERVE14 = 14,
+ MX6DL_PAD_RESERVE15 = 15,
+ MX6DL_PAD_RESERVE16 = 16,
+ MX6DL_PAD_RESERVE17 = 17,
+ MX6DL_PAD_RESERVE18 = 18,
+ MX6DL_PAD_CSI0_DAT10 = 19,
+ MX6DL_PAD_CSI0_DAT11 = 20,
+ MX6DL_PAD_CSI0_DAT12 = 21,
+ MX6DL_PAD_CSI0_DAT13 = 22,
+ MX6DL_PAD_CSI0_DAT14 = 23,
+ MX6DL_PAD_CSI0_DAT15 = 24,
+ MX6DL_PAD_CSI0_DAT16 = 25,
+ MX6DL_PAD_CSI0_DAT17 = 26,
+ MX6DL_PAD_CSI0_DAT18 = 27,
+ MX6DL_PAD_CSI0_DAT19 = 28,
+ MX6DL_PAD_CSI0_DAT4 = 29,
+ MX6DL_PAD_CSI0_DAT5 = 30,
+ MX6DL_PAD_CSI0_DAT6 = 31,
+ MX6DL_PAD_CSI0_DAT7 = 32,
+ MX6DL_PAD_CSI0_DAT8 = 33,
+ MX6DL_PAD_CSI0_DAT9 = 34,
+ MX6DL_PAD_CSI0_DATA_EN = 35,
+ MX6DL_PAD_CSI0_MCLK = 36,
+ MX6DL_PAD_CSI0_PIXCLK = 37,
+ MX6DL_PAD_CSI0_VSYNC = 38,
+ MX6DL_PAD_DI0_DISP_CLK = 39,
+ MX6DL_PAD_DI0_PIN15 = 40,
+ MX6DL_PAD_DI0_PIN2 = 41,
+ MX6DL_PAD_DI0_PIN3 = 42,
+ MX6DL_PAD_DI0_PIN4 = 43,
+ MX6DL_PAD_DISP0_DAT0 = 44,
+ MX6DL_PAD_DISP0_DAT1 = 45,
+ MX6DL_PAD_DISP0_DAT10 = 46,
+ MX6DL_PAD_DISP0_DAT11 = 47,
+ MX6DL_PAD_DISP0_DAT12 = 48,
+ MX6DL_PAD_DISP0_DAT13 = 49,
+ MX6DL_PAD_DISP0_DAT14 = 50,
+ MX6DL_PAD_DISP0_DAT15 = 51,
+ MX6DL_PAD_DISP0_DAT16 = 52,
+ MX6DL_PAD_DISP0_DAT17 = 53,
+ MX6DL_PAD_DISP0_DAT18 = 54,
+ MX6DL_PAD_DISP0_DAT19 = 55,
+ MX6DL_PAD_DISP0_DAT2 = 56,
+ MX6DL_PAD_DISP0_DAT20 = 57,
+ MX6DL_PAD_DISP0_DAT21 = 58,
+ MX6DL_PAD_DISP0_DAT22 = 59,
+ MX6DL_PAD_DISP0_DAT23 = 60,
+ MX6DL_PAD_DISP0_DAT3 = 61,
+ MX6DL_PAD_DISP0_DAT4 = 62,
+ MX6DL_PAD_DISP0_DAT5 = 63,
+ MX6DL_PAD_DISP0_DAT6 = 64,
+ MX6DL_PAD_DISP0_DAT7 = 65,
+ MX6DL_PAD_DISP0_DAT8 = 66,
+ MX6DL_PAD_DISP0_DAT9 = 67,
+ MX6DL_PAD_EIM_A16 = 68,
+ MX6DL_PAD_EIM_A17 = 69,
+ MX6DL_PAD_EIM_A18 = 70,
+ MX6DL_PAD_EIM_A19 = 71,
+ MX6DL_PAD_EIM_A20 = 72,
+ MX6DL_PAD_EIM_A21 = 73,
+ MX6DL_PAD_EIM_A22 = 74,
+ MX6DL_PAD_EIM_A23 = 75,
+ MX6DL_PAD_EIM_A24 = 76,
+ MX6DL_PAD_EIM_A25 = 77,
+ MX6DL_PAD_EIM_BCLK = 78,
+ MX6DL_PAD_EIM_CS0 = 79,
+ MX6DL_PAD_EIM_CS1 = 80,
+ MX6DL_PAD_EIM_D16 = 81,
+ MX6DL_PAD_EIM_D17 = 82,
+ MX6DL_PAD_EIM_D18 = 83,
+ MX6DL_PAD_EIM_D19 = 84,
+ MX6DL_PAD_EIM_D20 = 85,
+ MX6DL_PAD_EIM_D21 = 86,
+ MX6DL_PAD_EIM_D22 = 87,
+ MX6DL_PAD_EIM_D23 = 88,
+ MX6DL_PAD_EIM_D24 = 89,
+ MX6DL_PAD_EIM_D25 = 90,
+ MX6DL_PAD_EIM_D26 = 91,
+ MX6DL_PAD_EIM_D27 = 92,
+ MX6DL_PAD_EIM_D28 = 93,
+ MX6DL_PAD_EIM_D29 = 94,
+ MX6DL_PAD_EIM_D30 = 95,
+ MX6DL_PAD_EIM_D31 = 96,
+ MX6DL_PAD_EIM_DA0 = 97,
+ MX6DL_PAD_EIM_DA1 = 98,
+ MX6DL_PAD_EIM_DA10 = 99,
+ MX6DL_PAD_EIM_DA11 = 100,
+ MX6DL_PAD_EIM_DA12 = 101,
+ MX6DL_PAD_EIM_DA13 = 102,
+ MX6DL_PAD_EIM_DA14 = 103,
+ MX6DL_PAD_EIM_DA15 = 104,
+ MX6DL_PAD_EIM_DA2 = 105,
+ MX6DL_PAD_EIM_DA3 = 106,
+ MX6DL_PAD_EIM_DA4 = 107,
+ MX6DL_PAD_EIM_DA5 = 108,
+ MX6DL_PAD_EIM_DA6 = 109,
+ MX6DL_PAD_EIM_DA7 = 110,
+ MX6DL_PAD_EIM_DA8 = 111,
+ MX6DL_PAD_EIM_DA9 = 112,
+ MX6DL_PAD_EIM_EB0 = 113,
+ MX6DL_PAD_EIM_EB1 = 114,
+ MX6DL_PAD_EIM_EB2 = 115,
+ MX6DL_PAD_EIM_EB3 = 116,
+ MX6DL_PAD_EIM_LBA = 117,
+ MX6DL_PAD_EIM_OE = 118,
+ MX6DL_PAD_EIM_RW = 119,
+ MX6DL_PAD_EIM_WAIT = 120,
+ MX6DL_PAD_ENET_CRS_DV = 121,
+ MX6DL_PAD_ENET_MDC = 122,
+ MX6DL_PAD_ENET_MDIO = 123,
+ MX6DL_PAD_ENET_REF_CLK = 124,
+ MX6DL_PAD_ENET_RX_ER = 125,
+ MX6DL_PAD_ENET_RXD0 = 126,
+ MX6DL_PAD_ENET_RXD1 = 127,
+ MX6DL_PAD_ENET_TX_EN = 128,
+ MX6DL_PAD_ENET_TXD0 = 129,
+ MX6DL_PAD_ENET_TXD1 = 130,
+ MX6DL_PAD_GPIO_0 = 131,
+ MX6DL_PAD_GPIO_1 = 132,
+ MX6DL_PAD_GPIO_16 = 133,
+ MX6DL_PAD_GPIO_17 = 134,
+ MX6DL_PAD_GPIO_18 = 135,
+ MX6DL_PAD_GPIO_19 = 136,
+ MX6DL_PAD_GPIO_2 = 137,
+ MX6DL_PAD_GPIO_3 = 138,
+ MX6DL_PAD_GPIO_4 = 139,
+ MX6DL_PAD_GPIO_5 = 140,
+ MX6DL_PAD_GPIO_6 = 141,
+ MX6DL_PAD_GPIO_7 = 142,
+ MX6DL_PAD_GPIO_8 = 143,
+ MX6DL_PAD_GPIO_9 = 144,
+ MX6DL_PAD_KEY_COL0 = 145,
+ MX6DL_PAD_KEY_COL1 = 146,
+ MX6DL_PAD_KEY_COL2 = 147,
+ MX6DL_PAD_KEY_COL3 = 148,
+ MX6DL_PAD_KEY_COL4 = 149,
+ MX6DL_PAD_KEY_ROW0 = 150,
+ MX6DL_PAD_KEY_ROW1 = 151,
+ MX6DL_PAD_KEY_ROW2 = 152,
+ MX6DL_PAD_KEY_ROW3 = 153,
+ MX6DL_PAD_KEY_ROW4 = 154,
+ MX6DL_PAD_NANDF_ALE = 155,
+ MX6DL_PAD_NANDF_CLE = 156,
+ MX6DL_PAD_NANDF_CS0 = 157,
+ MX6DL_PAD_NANDF_CS1 = 158,
+ MX6DL_PAD_NANDF_CS2 = 159,
+ MX6DL_PAD_NANDF_CS3 = 160,
+ MX6DL_PAD_NANDF_D0 = 161,
+ MX6DL_PAD_NANDF_D1 = 162,
+ MX6DL_PAD_NANDF_D2 = 163,
+ MX6DL_PAD_NANDF_D3 = 164,
+ MX6DL_PAD_NANDF_D4 = 165,
+ MX6DL_PAD_NANDF_D5 = 166,
+ MX6DL_PAD_NANDF_D6 = 167,
+ MX6DL_PAD_NANDF_D7 = 168,
+ MX6DL_PAD_NANDF_RB0 = 169,
+ MX6DL_PAD_NANDF_WP_B = 170,
+ MX6DL_PAD_RGMII_RD0 = 171,
+ MX6DL_PAD_RGMII_RD1 = 172,
+ MX6DL_PAD_RGMII_RD2 = 173,
+ MX6DL_PAD_RGMII_RD3 = 174,
+ MX6DL_PAD_RGMII_RX_CTL = 175,
+ MX6DL_PAD_RGMII_RXC = 176,
+ MX6DL_PAD_RGMII_TD0 = 177,
+ MX6DL_PAD_RGMII_TD1 = 178,
+ MX6DL_PAD_RGMII_TD2 = 179,
+ MX6DL_PAD_RGMII_TD3 = 180,
+ MX6DL_PAD_RGMII_TX_CTL = 181,
+ MX6DL_PAD_RGMII_TXC = 182,
+ MX6DL_PAD_SD1_CLK = 183,
+ MX6DL_PAD_SD1_CMD = 184,
+ MX6DL_PAD_SD1_DAT0 = 185,
+ MX6DL_PAD_SD1_DAT1 = 186,
+ MX6DL_PAD_SD1_DAT2 = 187,
+ MX6DL_PAD_SD1_DAT3 = 188,
+ MX6DL_PAD_SD2_CLK = 189,
+ MX6DL_PAD_SD2_CMD = 190,
+ MX6DL_PAD_SD2_DAT0 = 191,
+ MX6DL_PAD_SD2_DAT1 = 192,
+ MX6DL_PAD_SD2_DAT2 = 193,
+ MX6DL_PAD_SD2_DAT3 = 194,
+ MX6DL_PAD_SD3_CLK = 195,
+ MX6DL_PAD_SD3_CMD = 196,
+ MX6DL_PAD_SD3_DAT0 = 197,
+ MX6DL_PAD_SD3_DAT1 = 198,
+ MX6DL_PAD_SD3_DAT2 = 199,
+ MX6DL_PAD_SD3_DAT3 = 200,
+ MX6DL_PAD_SD3_DAT4 = 201,
+ MX6DL_PAD_SD3_DAT5 = 202,
+ MX6DL_PAD_SD3_DAT6 = 203,
+ MX6DL_PAD_SD3_DAT7 = 204,
+ MX6DL_PAD_SD3_RST = 205,
+ MX6DL_PAD_SD4_CLK = 206,
+ MX6DL_PAD_SD4_CMD = 207,
+ MX6DL_PAD_SD4_DAT0 = 208,
+ MX6DL_PAD_SD4_DAT1 = 209,
+ MX6DL_PAD_SD4_DAT2 = 210,
+ MX6DL_PAD_SD4_DAT3 = 211,
+ MX6DL_PAD_SD4_DAT4 = 212,
+ MX6DL_PAD_SD4_DAT5 = 213,
+ MX6DL_PAD_SD4_DAT6 = 214,
+ MX6DL_PAD_SD4_DAT7 = 215,
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imx6dl_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(MX6DL_PAD_RESERVE0),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RESERVE1),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RESERVE2),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RESERVE3),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RESERVE4),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RESERVE5),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RESERVE6),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RESERVE7),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RESERVE8),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RESERVE9),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RESERVE10),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RESERVE11),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RESERVE12),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RESERVE13),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RESERVE14),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RESERVE15),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RESERVE16),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RESERVE17),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RESERVE18),
+ IMX_PINCTRL_PIN(MX6DL_PAD_CSI0_DAT10),
+ IMX_PINCTRL_PIN(MX6DL_PAD_CSI0_DAT11),
+ IMX_PINCTRL_PIN(MX6DL_PAD_CSI0_DAT12),
+ IMX_PINCTRL_PIN(MX6DL_PAD_CSI0_DAT13),
+ IMX_PINCTRL_PIN(MX6DL_PAD_CSI0_DAT14),
+ IMX_PINCTRL_PIN(MX6DL_PAD_CSI0_DAT15),
+ IMX_PINCTRL_PIN(MX6DL_PAD_CSI0_DAT16),
+ IMX_PINCTRL_PIN(MX6DL_PAD_CSI0_DAT17),
+ IMX_PINCTRL_PIN(MX6DL_PAD_CSI0_DAT18),
+ IMX_PINCTRL_PIN(MX6DL_PAD_CSI0_DAT19),
+ IMX_PINCTRL_PIN(MX6DL_PAD_CSI0_DAT4),
+ IMX_PINCTRL_PIN(MX6DL_PAD_CSI0_DAT5),
+ IMX_PINCTRL_PIN(MX6DL_PAD_CSI0_DAT6),
+ IMX_PINCTRL_PIN(MX6DL_PAD_CSI0_DAT7),
+ IMX_PINCTRL_PIN(MX6DL_PAD_CSI0_DAT8),
+ IMX_PINCTRL_PIN(MX6DL_PAD_CSI0_DAT9),
+ IMX_PINCTRL_PIN(MX6DL_PAD_CSI0_DATA_EN),
+ IMX_PINCTRL_PIN(MX6DL_PAD_CSI0_MCLK),
+ IMX_PINCTRL_PIN(MX6DL_PAD_CSI0_PIXCLK),
+ IMX_PINCTRL_PIN(MX6DL_PAD_CSI0_VSYNC),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DI0_DISP_CLK),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DI0_PIN15),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DI0_PIN2),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DI0_PIN3),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DI0_PIN4),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DISP0_DAT0),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DISP0_DAT1),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DISP0_DAT10),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DISP0_DAT11),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DISP0_DAT12),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DISP0_DAT13),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DISP0_DAT14),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DISP0_DAT15),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DISP0_DAT16),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DISP0_DAT17),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DISP0_DAT18),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DISP0_DAT19),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DISP0_DAT2),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DISP0_DAT20),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DISP0_DAT21),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DISP0_DAT22),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DISP0_DAT23),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DISP0_DAT3),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DISP0_DAT4),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DISP0_DAT5),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DISP0_DAT6),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DISP0_DAT7),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DISP0_DAT8),
+ IMX_PINCTRL_PIN(MX6DL_PAD_DISP0_DAT9),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_A16),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_A17),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_A18),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_A19),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_A20),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_A21),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_A22),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_A23),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_A24),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_A25),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_BCLK),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_CS0),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_CS1),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_D16),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_D17),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_D18),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_D19),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_D20),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_D21),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_D22),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_D23),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_D24),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_D25),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_D26),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_D27),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_D28),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_D29),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_D30),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_D31),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_DA0),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_DA1),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_DA10),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_DA11),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_DA12),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_DA13),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_DA14),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_DA15),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_DA2),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_DA3),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_DA4),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_DA5),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_DA6),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_DA7),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_DA8),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_DA9),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_EB0),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_EB1),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_EB2),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_EB3),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_LBA),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_OE),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_RW),
+ IMX_PINCTRL_PIN(MX6DL_PAD_EIM_WAIT),
+ IMX_PINCTRL_PIN(MX6DL_PAD_ENET_CRS_DV),
+ IMX_PINCTRL_PIN(MX6DL_PAD_ENET_MDC),
+ IMX_PINCTRL_PIN(MX6DL_PAD_ENET_MDIO),
+ IMX_PINCTRL_PIN(MX6DL_PAD_ENET_REF_CLK),
+ IMX_PINCTRL_PIN(MX6DL_PAD_ENET_RX_ER),
+ IMX_PINCTRL_PIN(MX6DL_PAD_ENET_RXD0),
+ IMX_PINCTRL_PIN(MX6DL_PAD_ENET_RXD1),
+ IMX_PINCTRL_PIN(MX6DL_PAD_ENET_TX_EN),
+ IMX_PINCTRL_PIN(MX6DL_PAD_ENET_TXD0),
+ IMX_PINCTRL_PIN(MX6DL_PAD_ENET_TXD1),
+ IMX_PINCTRL_PIN(MX6DL_PAD_GPIO_0),
+ IMX_PINCTRL_PIN(MX6DL_PAD_GPIO_1),
+ IMX_PINCTRL_PIN(MX6DL_PAD_GPIO_16),
+ IMX_PINCTRL_PIN(MX6DL_PAD_GPIO_17),
+ IMX_PINCTRL_PIN(MX6DL_PAD_GPIO_18),
+ IMX_PINCTRL_PIN(MX6DL_PAD_GPIO_19),
+ IMX_PINCTRL_PIN(MX6DL_PAD_GPIO_2),
+ IMX_PINCTRL_PIN(MX6DL_PAD_GPIO_3),
+ IMX_PINCTRL_PIN(MX6DL_PAD_GPIO_4),
+ IMX_PINCTRL_PIN(MX6DL_PAD_GPIO_5),
+ IMX_PINCTRL_PIN(MX6DL_PAD_GPIO_6),
+ IMX_PINCTRL_PIN(MX6DL_PAD_GPIO_7),
+ IMX_PINCTRL_PIN(MX6DL_PAD_GPIO_8),
+ IMX_PINCTRL_PIN(MX6DL_PAD_GPIO_9),
+ IMX_PINCTRL_PIN(MX6DL_PAD_KEY_COL0),
+ IMX_PINCTRL_PIN(MX6DL_PAD_KEY_COL1),
+ IMX_PINCTRL_PIN(MX6DL_PAD_KEY_COL2),
+ IMX_PINCTRL_PIN(MX6DL_PAD_KEY_COL3),
+ IMX_PINCTRL_PIN(MX6DL_PAD_KEY_COL4),
+ IMX_PINCTRL_PIN(MX6DL_PAD_KEY_ROW0),
+ IMX_PINCTRL_PIN(MX6DL_PAD_KEY_ROW1),
+ IMX_PINCTRL_PIN(MX6DL_PAD_KEY_ROW2),
+ IMX_PINCTRL_PIN(MX6DL_PAD_KEY_ROW3),
+ IMX_PINCTRL_PIN(MX6DL_PAD_KEY_ROW4),
+ IMX_PINCTRL_PIN(MX6DL_PAD_NANDF_ALE),
+ IMX_PINCTRL_PIN(MX6DL_PAD_NANDF_CLE),
+ IMX_PINCTRL_PIN(MX6DL_PAD_NANDF_CS0),
+ IMX_PINCTRL_PIN(MX6DL_PAD_NANDF_CS1),
+ IMX_PINCTRL_PIN(MX6DL_PAD_NANDF_CS2),
+ IMX_PINCTRL_PIN(MX6DL_PAD_NANDF_CS3),
+ IMX_PINCTRL_PIN(MX6DL_PAD_NANDF_D0),
+ IMX_PINCTRL_PIN(MX6DL_PAD_NANDF_D1),
+ IMX_PINCTRL_PIN(MX6DL_PAD_NANDF_D2),
+ IMX_PINCTRL_PIN(MX6DL_PAD_NANDF_D3),
+ IMX_PINCTRL_PIN(MX6DL_PAD_NANDF_D4),
+ IMX_PINCTRL_PIN(MX6DL_PAD_NANDF_D5),
+ IMX_PINCTRL_PIN(MX6DL_PAD_NANDF_D6),
+ IMX_PINCTRL_PIN(MX6DL_PAD_NANDF_D7),
+ IMX_PINCTRL_PIN(MX6DL_PAD_NANDF_RB0),
+ IMX_PINCTRL_PIN(MX6DL_PAD_NANDF_WP_B),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RGMII_RD0),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RGMII_RD1),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RGMII_RD2),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RGMII_RD3),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RGMII_RX_CTL),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RGMII_RXC),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RGMII_TD0),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RGMII_TD1),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RGMII_TD2),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RGMII_TD3),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RGMII_TX_CTL),
+ IMX_PINCTRL_PIN(MX6DL_PAD_RGMII_TXC),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD1_CLK),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD1_CMD),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD1_DAT0),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD1_DAT1),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD1_DAT2),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD1_DAT3),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD2_CLK),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD2_CMD),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD2_DAT0),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD2_DAT1),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD2_DAT2),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD2_DAT3),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD3_CLK),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD3_CMD),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD3_DAT0),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD3_DAT1),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD3_DAT2),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD3_DAT3),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD3_DAT4),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD3_DAT5),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD3_DAT6),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD3_DAT7),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD3_RST),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD4_CLK),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD4_CMD),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD4_DAT0),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD4_DAT1),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD4_DAT2),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD4_DAT3),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD4_DAT4),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD4_DAT5),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD4_DAT6),
+ IMX_PINCTRL_PIN(MX6DL_PAD_SD4_DAT7),
+};
+
+static struct imx_pinctrl_soc_info imx6dl_pinctrl_info = {
+ .pins = imx6dl_pinctrl_pads,
+ .npins = ARRAY_SIZE(imx6dl_pinctrl_pads),
+};
+
+static struct of_device_id imx6dl_pinctrl_of_match[] = {
+ { .compatible = "fsl,imx6dl-iomuxc", },
+ { /* sentinel */ }
+};
+
+static int imx6dl_pinctrl_probe(struct platform_device *pdev)
+{
+ return imx_pinctrl_probe(pdev, &imx6dl_pinctrl_info);
+}
+
+static struct platform_driver imx6dl_pinctrl_driver = {
+ .driver = {
+ .name = "imx6dl-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(imx6dl_pinctrl_of_match),
+ },
+ .probe = imx6dl_pinctrl_probe,
+ .remove = imx_pinctrl_remove,
+};
+
+static int __init imx6dl_pinctrl_init(void)
+{
+ return platform_driver_register(&imx6dl_pinctrl_driver);
+}
+arch_initcall(imx6dl_pinctrl_init);
+
+static void __exit imx6dl_pinctrl_exit(void)
+{
+ platform_driver_unregister(&imx6dl_pinctrl_driver);
+}
+module_exit(imx6dl_pinctrl_exit);
+
+MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+MODULE_DESCRIPTION("Freescale imx6dl pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-imx6q.c b/drivers/pinctrl/pinctrl-imx6q.c
index 663346b..76dd9c4 100644
--- a/drivers/pinctrl/pinctrl-imx6q.c
+++ b/drivers/pinctrl/pinctrl-imx6q.c
@@ -23,1939 +23,245 @@
#include "pinctrl-imx.h"
enum imx6q_pads {
- MX6Q_PAD_SD2_DAT1 = 0,
- MX6Q_PAD_SD2_DAT2 = 1,
- MX6Q_PAD_SD2_DAT0 = 2,
- MX6Q_PAD_RGMII_TXC = 3,
- MX6Q_PAD_RGMII_TD0 = 4,
- MX6Q_PAD_RGMII_TD1 = 5,
- MX6Q_PAD_RGMII_TD2 = 6,
- MX6Q_PAD_RGMII_TD3 = 7,
- MX6Q_PAD_RGMII_RX_CTL = 8,
- MX6Q_PAD_RGMII_RD0 = 9,
- MX6Q_PAD_RGMII_TX_CTL = 10,
- MX6Q_PAD_RGMII_RD1 = 11,
- MX6Q_PAD_RGMII_RD2 = 12,
- MX6Q_PAD_RGMII_RD3 = 13,
- MX6Q_PAD_RGMII_RXC = 14,
- MX6Q_PAD_EIM_A25 = 15,
- MX6Q_PAD_EIM_EB2 = 16,
- MX6Q_PAD_EIM_D16 = 17,
- MX6Q_PAD_EIM_D17 = 18,
- MX6Q_PAD_EIM_D18 = 19,
- MX6Q_PAD_EIM_D19 = 20,
- MX6Q_PAD_EIM_D20 = 21,
- MX6Q_PAD_EIM_D21 = 22,
- MX6Q_PAD_EIM_D22 = 23,
- MX6Q_PAD_EIM_D23 = 24,
- MX6Q_PAD_EIM_EB3 = 25,
- MX6Q_PAD_EIM_D24 = 26,
- MX6Q_PAD_EIM_D25 = 27,
- MX6Q_PAD_EIM_D26 = 28,
- MX6Q_PAD_EIM_D27 = 29,
- MX6Q_PAD_EIM_D28 = 30,
- MX6Q_PAD_EIM_D29 = 31,
- MX6Q_PAD_EIM_D30 = 32,
- MX6Q_PAD_EIM_D31 = 33,
- MX6Q_PAD_EIM_A24 = 34,
- MX6Q_PAD_EIM_A23 = 35,
- MX6Q_PAD_EIM_A22 = 36,
- MX6Q_PAD_EIM_A21 = 37,
- MX6Q_PAD_EIM_A20 = 38,
- MX6Q_PAD_EIM_A19 = 39,
- MX6Q_PAD_EIM_A18 = 40,
- MX6Q_PAD_EIM_A17 = 41,
- MX6Q_PAD_EIM_A16 = 42,
- MX6Q_PAD_EIM_CS0 = 43,
- MX6Q_PAD_EIM_CS1 = 44,
- MX6Q_PAD_EIM_OE = 45,
- MX6Q_PAD_EIM_RW = 46,
- MX6Q_PAD_EIM_LBA = 47,
- MX6Q_PAD_EIM_EB0 = 48,
- MX6Q_PAD_EIM_EB1 = 49,
- MX6Q_PAD_EIM_DA0 = 50,
- MX6Q_PAD_EIM_DA1 = 51,
- MX6Q_PAD_EIM_DA2 = 52,
- MX6Q_PAD_EIM_DA3 = 53,
- MX6Q_PAD_EIM_DA4 = 54,
- MX6Q_PAD_EIM_DA5 = 55,
- MX6Q_PAD_EIM_DA6 = 56,
- MX6Q_PAD_EIM_DA7 = 57,
- MX6Q_PAD_EIM_DA8 = 58,
- MX6Q_PAD_EIM_DA9 = 59,
- MX6Q_PAD_EIM_DA10 = 60,
- MX6Q_PAD_EIM_DA11 = 61,
- MX6Q_PAD_EIM_DA12 = 62,
- MX6Q_PAD_EIM_DA13 = 63,
- MX6Q_PAD_EIM_DA14 = 64,
- MX6Q_PAD_EIM_DA15 = 65,
- MX6Q_PAD_EIM_WAIT = 66,
- MX6Q_PAD_EIM_BCLK = 67,
- MX6Q_PAD_DI0_DISP_CLK = 68,
- MX6Q_PAD_DI0_PIN15 = 69,
- MX6Q_PAD_DI0_PIN2 = 70,
- MX6Q_PAD_DI0_PIN3 = 71,
- MX6Q_PAD_DI0_PIN4 = 72,
- MX6Q_PAD_DISP0_DAT0 = 73,
- MX6Q_PAD_DISP0_DAT1 = 74,
- MX6Q_PAD_DISP0_DAT2 = 75,
- MX6Q_PAD_DISP0_DAT3 = 76,
- MX6Q_PAD_DISP0_DAT4 = 77,
- MX6Q_PAD_DISP0_DAT5 = 78,
- MX6Q_PAD_DISP0_DAT6 = 79,
- MX6Q_PAD_DISP0_DAT7 = 80,
- MX6Q_PAD_DISP0_DAT8 = 81,
- MX6Q_PAD_DISP0_DAT9 = 82,
- MX6Q_PAD_DISP0_DAT10 = 83,
- MX6Q_PAD_DISP0_DAT11 = 84,
- MX6Q_PAD_DISP0_DAT12 = 85,
- MX6Q_PAD_DISP0_DAT13 = 86,
- MX6Q_PAD_DISP0_DAT14 = 87,
- MX6Q_PAD_DISP0_DAT15 = 88,
- MX6Q_PAD_DISP0_DAT16 = 89,
- MX6Q_PAD_DISP0_DAT17 = 90,
- MX6Q_PAD_DISP0_DAT18 = 91,
- MX6Q_PAD_DISP0_DAT19 = 92,
- MX6Q_PAD_DISP0_DAT20 = 93,
- MX6Q_PAD_DISP0_DAT21 = 94,
- MX6Q_PAD_DISP0_DAT22 = 95,
- MX6Q_PAD_DISP0_DAT23 = 96,
- MX6Q_PAD_ENET_MDIO = 97,
- MX6Q_PAD_ENET_REF_CLK = 98,
- MX6Q_PAD_ENET_RX_ER = 99,
- MX6Q_PAD_ENET_CRS_DV = 100,
- MX6Q_PAD_ENET_RXD1 = 101,
- MX6Q_PAD_ENET_RXD0 = 102,
- MX6Q_PAD_ENET_TX_EN = 103,
- MX6Q_PAD_ENET_TXD1 = 104,
- MX6Q_PAD_ENET_TXD0 = 105,
- MX6Q_PAD_ENET_MDC = 106,
- MX6Q_PAD_DRAM_D40 = 107,
- MX6Q_PAD_DRAM_D41 = 108,
- MX6Q_PAD_DRAM_D42 = 109,
- MX6Q_PAD_DRAM_D43 = 110,
- MX6Q_PAD_DRAM_D44 = 111,
- MX6Q_PAD_DRAM_D45 = 112,
- MX6Q_PAD_DRAM_D46 = 113,
- MX6Q_PAD_DRAM_D47 = 114,
- MX6Q_PAD_DRAM_SDQS5 = 115,
- MX6Q_PAD_DRAM_DQM5 = 116,
- MX6Q_PAD_DRAM_D32 = 117,
- MX6Q_PAD_DRAM_D33 = 118,
- MX6Q_PAD_DRAM_D34 = 119,
- MX6Q_PAD_DRAM_D35 = 120,
- MX6Q_PAD_DRAM_D36 = 121,
- MX6Q_PAD_DRAM_D37 = 122,
- MX6Q_PAD_DRAM_D38 = 123,
- MX6Q_PAD_DRAM_D39 = 124,
- MX6Q_PAD_DRAM_DQM4 = 125,
- MX6Q_PAD_DRAM_SDQS4 = 126,
- MX6Q_PAD_DRAM_D24 = 127,
- MX6Q_PAD_DRAM_D25 = 128,
- MX6Q_PAD_DRAM_D26 = 129,
- MX6Q_PAD_DRAM_D27 = 130,
- MX6Q_PAD_DRAM_D28 = 131,
- MX6Q_PAD_DRAM_D29 = 132,
- MX6Q_PAD_DRAM_SDQS3 = 133,
- MX6Q_PAD_DRAM_D30 = 134,
- MX6Q_PAD_DRAM_D31 = 135,
- MX6Q_PAD_DRAM_DQM3 = 136,
- MX6Q_PAD_DRAM_D16 = 137,
- MX6Q_PAD_DRAM_D17 = 138,
- MX6Q_PAD_DRAM_D18 = 139,
- MX6Q_PAD_DRAM_D19 = 140,
- MX6Q_PAD_DRAM_D20 = 141,
- MX6Q_PAD_DRAM_D21 = 142,
- MX6Q_PAD_DRAM_D22 = 143,
- MX6Q_PAD_DRAM_SDQS2 = 144,
- MX6Q_PAD_DRAM_D23 = 145,
- MX6Q_PAD_DRAM_DQM2 = 146,
- MX6Q_PAD_DRAM_A0 = 147,
- MX6Q_PAD_DRAM_A1 = 148,
- MX6Q_PAD_DRAM_A2 = 149,
- MX6Q_PAD_DRAM_A3 = 150,
- MX6Q_PAD_DRAM_A4 = 151,
- MX6Q_PAD_DRAM_A5 = 152,
- MX6Q_PAD_DRAM_A6 = 153,
- MX6Q_PAD_DRAM_A7 = 154,
- MX6Q_PAD_DRAM_A8 = 155,
- MX6Q_PAD_DRAM_A9 = 156,
- MX6Q_PAD_DRAM_A10 = 157,
- MX6Q_PAD_DRAM_A11 = 158,
- MX6Q_PAD_DRAM_A12 = 159,
- MX6Q_PAD_DRAM_A13 = 160,
- MX6Q_PAD_DRAM_A14 = 161,
- MX6Q_PAD_DRAM_A15 = 162,
- MX6Q_PAD_DRAM_CAS = 163,
- MX6Q_PAD_DRAM_CS0 = 164,
- MX6Q_PAD_DRAM_CS1 = 165,
- MX6Q_PAD_DRAM_RAS = 166,
- MX6Q_PAD_DRAM_RESET = 167,
- MX6Q_PAD_DRAM_SDBA0 = 168,
- MX6Q_PAD_DRAM_SDBA1 = 169,
- MX6Q_PAD_DRAM_SDCLK_0 = 170,
- MX6Q_PAD_DRAM_SDBA2 = 171,
- MX6Q_PAD_DRAM_SDCKE0 = 172,
- MX6Q_PAD_DRAM_SDCLK_1 = 173,
- MX6Q_PAD_DRAM_SDCKE1 = 174,
- MX6Q_PAD_DRAM_SDODT0 = 175,
- MX6Q_PAD_DRAM_SDODT1 = 176,
- MX6Q_PAD_DRAM_SDWE = 177,
- MX6Q_PAD_DRAM_D0 = 178,
- MX6Q_PAD_DRAM_D1 = 179,
- MX6Q_PAD_DRAM_D2 = 180,
- MX6Q_PAD_DRAM_D3 = 181,
- MX6Q_PAD_DRAM_D4 = 182,
- MX6Q_PAD_DRAM_D5 = 183,
- MX6Q_PAD_DRAM_SDQS0 = 184,
- MX6Q_PAD_DRAM_D6 = 185,
- MX6Q_PAD_DRAM_D7 = 186,
- MX6Q_PAD_DRAM_DQM0 = 187,
- MX6Q_PAD_DRAM_D8 = 188,
- MX6Q_PAD_DRAM_D9 = 189,
- MX6Q_PAD_DRAM_D10 = 190,
- MX6Q_PAD_DRAM_D11 = 191,
- MX6Q_PAD_DRAM_D12 = 192,
- MX6Q_PAD_DRAM_D13 = 193,
- MX6Q_PAD_DRAM_D14 = 194,
- MX6Q_PAD_DRAM_SDQS1 = 195,
- MX6Q_PAD_DRAM_D15 = 196,
- MX6Q_PAD_DRAM_DQM1 = 197,
- MX6Q_PAD_DRAM_D48 = 198,
- MX6Q_PAD_DRAM_D49 = 199,
- MX6Q_PAD_DRAM_D50 = 200,
- MX6Q_PAD_DRAM_D51 = 201,
- MX6Q_PAD_DRAM_D52 = 202,
- MX6Q_PAD_DRAM_D53 = 203,
- MX6Q_PAD_DRAM_D54 = 204,
- MX6Q_PAD_DRAM_D55 = 205,
- MX6Q_PAD_DRAM_SDQS6 = 206,
- MX6Q_PAD_DRAM_DQM6 = 207,
- MX6Q_PAD_DRAM_D56 = 208,
- MX6Q_PAD_DRAM_SDQS7 = 209,
- MX6Q_PAD_DRAM_D57 = 210,
- MX6Q_PAD_DRAM_D58 = 211,
- MX6Q_PAD_DRAM_D59 = 212,
- MX6Q_PAD_DRAM_D60 = 213,
- MX6Q_PAD_DRAM_DQM7 = 214,
- MX6Q_PAD_DRAM_D61 = 215,
- MX6Q_PAD_DRAM_D62 = 216,
- MX6Q_PAD_DRAM_D63 = 217,
- MX6Q_PAD_KEY_COL0 = 218,
- MX6Q_PAD_KEY_ROW0 = 219,
- MX6Q_PAD_KEY_COL1 = 220,
- MX6Q_PAD_KEY_ROW1 = 221,
- MX6Q_PAD_KEY_COL2 = 222,
- MX6Q_PAD_KEY_ROW2 = 223,
- MX6Q_PAD_KEY_COL3 = 224,
- MX6Q_PAD_KEY_ROW3 = 225,
- MX6Q_PAD_KEY_COL4 = 226,
- MX6Q_PAD_KEY_ROW4 = 227,
- MX6Q_PAD_GPIO_0 = 228,
- MX6Q_PAD_GPIO_1 = 229,
- MX6Q_PAD_GPIO_9 = 230,
- MX6Q_PAD_GPIO_3 = 231,
- MX6Q_PAD_GPIO_6 = 232,
- MX6Q_PAD_GPIO_2 = 233,
- MX6Q_PAD_GPIO_4 = 234,
- MX6Q_PAD_GPIO_5 = 235,
- MX6Q_PAD_GPIO_7 = 236,
- MX6Q_PAD_GPIO_8 = 237,
- MX6Q_PAD_GPIO_16 = 238,
- MX6Q_PAD_GPIO_17 = 239,
- MX6Q_PAD_GPIO_18 = 240,
- MX6Q_PAD_GPIO_19 = 241,
- MX6Q_PAD_CSI0_PIXCLK = 242,
- MX6Q_PAD_CSI0_MCLK = 243,
- MX6Q_PAD_CSI0_DATA_EN = 244,
- MX6Q_PAD_CSI0_VSYNC = 245,
- MX6Q_PAD_CSI0_DAT4 = 246,
- MX6Q_PAD_CSI0_DAT5 = 247,
- MX6Q_PAD_CSI0_DAT6 = 248,
- MX6Q_PAD_CSI0_DAT7 = 249,
- MX6Q_PAD_CSI0_DAT8 = 250,
- MX6Q_PAD_CSI0_DAT9 = 251,
- MX6Q_PAD_CSI0_DAT10 = 252,
- MX6Q_PAD_CSI0_DAT11 = 253,
- MX6Q_PAD_CSI0_DAT12 = 254,
- MX6Q_PAD_CSI0_DAT13 = 255,
- MX6Q_PAD_CSI0_DAT14 = 256,
- MX6Q_PAD_CSI0_DAT15 = 257,
- MX6Q_PAD_CSI0_DAT16 = 258,
- MX6Q_PAD_CSI0_DAT17 = 259,
- MX6Q_PAD_CSI0_DAT18 = 260,
- MX6Q_PAD_CSI0_DAT19 = 261,
- MX6Q_PAD_JTAG_TMS = 262,
- MX6Q_PAD_JTAG_MOD = 263,
- MX6Q_PAD_JTAG_TRSTB = 264,
- MX6Q_PAD_JTAG_TDI = 265,
- MX6Q_PAD_JTAG_TCK = 266,
- MX6Q_PAD_JTAG_TDO = 267,
- MX6Q_PAD_LVDS1_TX3_P = 268,
- MX6Q_PAD_LVDS1_TX2_P = 269,
- MX6Q_PAD_LVDS1_CLK_P = 270,
- MX6Q_PAD_LVDS1_TX1_P = 271,
- MX6Q_PAD_LVDS1_TX0_P = 272,
- MX6Q_PAD_LVDS0_TX3_P = 273,
- MX6Q_PAD_LVDS0_CLK_P = 274,
- MX6Q_PAD_LVDS0_TX2_P = 275,
- MX6Q_PAD_LVDS0_TX1_P = 276,
- MX6Q_PAD_LVDS0_TX0_P = 277,
- MX6Q_PAD_TAMPER = 278,
- MX6Q_PAD_PMIC_ON_REQ = 279,
- MX6Q_PAD_PMIC_STBY_REQ = 280,
- MX6Q_PAD_POR_B = 281,
- MX6Q_PAD_BOOT_MODE1 = 282,
- MX6Q_PAD_RESET_IN_B = 283,
- MX6Q_PAD_BOOT_MODE0 = 284,
- MX6Q_PAD_TEST_MODE = 285,
- MX6Q_PAD_SD3_DAT7 = 286,
- MX6Q_PAD_SD3_DAT6 = 287,
- MX6Q_PAD_SD3_DAT5 = 288,
- MX6Q_PAD_SD3_DAT4 = 289,
- MX6Q_PAD_SD3_CMD = 290,
- MX6Q_PAD_SD3_CLK = 291,
- MX6Q_PAD_SD3_DAT0 = 292,
- MX6Q_PAD_SD3_DAT1 = 293,
- MX6Q_PAD_SD3_DAT2 = 294,
- MX6Q_PAD_SD3_DAT3 = 295,
- MX6Q_PAD_SD3_RST = 296,
- MX6Q_PAD_NANDF_CLE = 297,
- MX6Q_PAD_NANDF_ALE = 298,
- MX6Q_PAD_NANDF_WP_B = 299,
- MX6Q_PAD_NANDF_RB0 = 300,
- MX6Q_PAD_NANDF_CS0 = 301,
- MX6Q_PAD_NANDF_CS1 = 302,
- MX6Q_PAD_NANDF_CS2 = 303,
- MX6Q_PAD_NANDF_CS3 = 304,
- MX6Q_PAD_SD4_CMD = 305,
- MX6Q_PAD_SD4_CLK = 306,
- MX6Q_PAD_NANDF_D0 = 307,
- MX6Q_PAD_NANDF_D1 = 308,
- MX6Q_PAD_NANDF_D2 = 309,
- MX6Q_PAD_NANDF_D3 = 310,
- MX6Q_PAD_NANDF_D4 = 311,
- MX6Q_PAD_NANDF_D5 = 312,
- MX6Q_PAD_NANDF_D6 = 313,
- MX6Q_PAD_NANDF_D7 = 314,
- MX6Q_PAD_SD4_DAT0 = 315,
- MX6Q_PAD_SD4_DAT1 = 316,
- MX6Q_PAD_SD4_DAT2 = 317,
- MX6Q_PAD_SD4_DAT3 = 318,
- MX6Q_PAD_SD4_DAT4 = 319,
- MX6Q_PAD_SD4_DAT5 = 320,
- MX6Q_PAD_SD4_DAT6 = 321,
- MX6Q_PAD_SD4_DAT7 = 322,
- MX6Q_PAD_SD1_DAT1 = 323,
- MX6Q_PAD_SD1_DAT0 = 324,
- MX6Q_PAD_SD1_DAT3 = 325,
- MX6Q_PAD_SD1_CMD = 326,
- MX6Q_PAD_SD1_DAT2 = 327,
- MX6Q_PAD_SD1_CLK = 328,
- MX6Q_PAD_SD2_CLK = 329,
- MX6Q_PAD_SD2_CMD = 330,
- MX6Q_PAD_SD2_DAT3 = 331,
-};
-
-/* imx6q register maps */
-static struct imx_pin_reg imx6q_pin_regs[] = {
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT1, 0x0360, 0x004C, 0, 0x0000, 0), /* MX6Q_PAD_SD2_DAT1__USDHC2_DAT1 */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT1, 0x0360, 0x004C, 1, 0x0834, 0), /* MX6Q_PAD_SD2_DAT1__ECSPI5_SS0 */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT1, 0x0360, 0x004C, 2, 0x0000, 0), /* MX6Q_PAD_SD2_DAT1__WEIM_WEIM_CS_2 */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT1, 0x0360, 0x004C, 3, 0x07C8, 0), /* MX6Q_PAD_SD2_DAT1__AUDMUX_AUD4_TXFS */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT1, 0x0360, 0x004C, 4, 0x08F0, 0), /* MX6Q_PAD_SD2_DAT1__KPP_COL_7 */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT1, 0x0360, 0x004C, 5, 0x0000, 0), /* MX6Q_PAD_SD2_DAT1__GPIO_1_14 */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT1, 0x0360, 0x004C, 6, 0x0000, 0), /* MX6Q_PAD_SD2_DAT1__CCM_WAIT */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT1, 0x0360, 0x004C, 7, 0x0000, 0), /* MX6Q_PAD_SD2_DAT1__ANATOP_TESTO_0 */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT2, 0x0364, 0x0050, 0, 0x0000, 0), /* MX6Q_PAD_SD2_DAT2__USDHC2_DAT2 */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT2, 0x0364, 0x0050, 1, 0x0838, 0), /* MX6Q_PAD_SD2_DAT2__ECSPI5_SS1 */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT2, 0x0364, 0x0050, 2, 0x0000, 0), /* MX6Q_PAD_SD2_DAT2__WEIM_WEIM_CS_3 */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT2, 0x0364, 0x0050, 3, 0x07B8, 0), /* MX6Q_PAD_SD2_DAT2__AUDMUX_AUD4_TXD */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT2, 0x0364, 0x0050, 4, 0x08F8, 0), /* MX6Q_PAD_SD2_DAT2__KPP_ROW_6 */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT2, 0x0364, 0x0050, 5, 0x0000, 0), /* MX6Q_PAD_SD2_DAT2__GPIO_1_13 */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT2, 0x0364, 0x0050, 6, 0x0000, 0), /* MX6Q_PAD_SD2_DAT2__CCM_STOP */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT2, 0x0364, 0x0050, 7, 0x0000, 0), /* MX6Q_PAD_SD2_DAT2__ANATOP_TESTO_1 */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT0, 0x0368, 0x0054, 0, 0x0000, 0), /* MX6Q_PAD_SD2_DAT0__USDHC2_DAT0 */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT0, 0x0368, 0x0054, 1, 0x082C, 0), /* MX6Q_PAD_SD2_DAT0__ECSPI5_MISO */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT0, 0x0368, 0x0054, 3, 0x07B4, 0), /* MX6Q_PAD_SD2_DAT0__AUDMUX_AUD4_RXD */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT0, 0x0368, 0x0054, 4, 0x08FC, 0), /* MX6Q_PAD_SD2_DAT0__KPP_ROW_7 */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT0, 0x0368, 0x0054, 5, 0x0000, 0), /* MX6Q_PAD_SD2_DAT0__GPIO_1_15 */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT0, 0x0368, 0x0054, 6, 0x0000, 0), /* MX6Q_PAD_SD2_DAT0__DCIC2_DCIC_OUT */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT0, 0x0368, 0x0054, 7, 0x0000, 0), /* MX6Q_PAD_SD2_DAT0__TESTO_2 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TXC, 0x036C, 0x0058, 0, 0x0000, 0), /* MX6Q_PAD_RGMII_TXC__USBOH3_H2_DATA */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TXC, 0x036C, 0x0058, 1, 0x0000, 0), /* MX6Q_PAD_RGMII_TXC__ENET_RGMII_TXC */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TXC, 0x036C, 0x0058, 2, 0x0918, 0), /* MX6Q_PAD_RGMII_TXC__SPDIF_SPDIF_EXTCLK */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TXC, 0x036C, 0x0058, 5, 0x0000, 0), /* MX6Q_PAD_RGMII_TXC__GPIO_6_19 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TXC, 0x036C, 0x0058, 6, 0x0000, 0), /* MX6Q_PAD_RGMII_TXC__MIPI_CORE_DPHY_IN_0 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TXC, 0x036C, 0x0058, 7, 0x0000, 0), /* MX6Q_PAD_RGMII_TXC__ANATOP_24M_OUT */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TD0, 0x0370, 0x005C, 0, 0x0000, 0), /* MX6Q_PAD_RGMII_TD0__MIPI_HSI_CRL_TX_RDY */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TD0, 0x0370, 0x005C, 1, 0x0000, 0), /* MX6Q_PAD_RGMII_TD0__ENET_RGMII_TD0 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TD0, 0x0370, 0x005C, 5, 0x0000, 0), /* MX6Q_PAD_RGMII_TD0__GPIO_6_20 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TD0, 0x0370, 0x005C, 6, 0x0000, 0), /* MX6Q_PAD_RGMII_TD0__MIPI_CORE_DPHY_IN_1 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TD1, 0x0374, 0x0060, 0, 0x0000, 0), /* MX6Q_PAD_RGMII_TD1__MIPI_HSI_CRL_RX_FLG */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TD1, 0x0374, 0x0060, 1, 0x0000, 0), /* MX6Q_PAD_RGMII_TD1__ENET_RGMII_TD1 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TD1, 0x0374, 0x0060, 5, 0x0000, 0), /* MX6Q_PAD_RGMII_TD1__GPIO_6_21 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TD1, 0x0374, 0x0060, 6, 0x0000, 0), /* MX6Q_PAD_RGMII_TD1__MIPI_CORE_DPHY_IN_2 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TD1, 0x0374, 0x0060, 7, 0x0000, 0), /* MX6Q_PAD_RGMII_TD1__CCM_PLL3_BYP */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TD2, 0x0378, 0x0064, 0, 0x0000, 0), /* MX6Q_PAD_RGMII_TD2__MIPI_HSI_CRL_RX_DTA */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TD2, 0x0378, 0x0064, 1, 0x0000, 0), /* MX6Q_PAD_RGMII_TD2__ENET_RGMII_TD2 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TD2, 0x0378, 0x0064, 5, 0x0000, 0), /* MX6Q_PAD_RGMII_TD2__GPIO_6_22 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TD2, 0x0378, 0x0064, 6, 0x0000, 0), /* MX6Q_PAD_RGMII_TD2__MIPI_CORE_DPHY_IN_3 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TD2, 0x0378, 0x0064, 7, 0x0000, 0), /* MX6Q_PAD_RGMII_TD2__CCM_PLL2_BYP */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TD3, 0x037C, 0x0068, 0, 0x0000, 0), /* MX6Q_PAD_RGMII_TD3__MIPI_HSI_CRL_RX_WAK */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TD3, 0x037C, 0x0068, 1, 0x0000, 0), /* MX6Q_PAD_RGMII_TD3__ENET_RGMII_TD3 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TD3, 0x037C, 0x0068, 5, 0x0000, 0), /* MX6Q_PAD_RGMII_TD3__GPIO_6_23 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TD3, 0x037C, 0x0068, 6, 0x0000, 0), /* MX6Q_PAD_RGMII_TD3__MIPI_CORE_DPHY_IN_4 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RX_CTL, 0x0380, 0x006C, 0, 0x0000, 0), /* MX6Q_PAD_RGMII_RX_CTL__USBOH3_H3_DATA */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RX_CTL, 0x0380, 0x006C, 1, 0x0858, 0), /* MX6Q_PAD_RGMII_RX_CTL__RGMII_RX_CTL */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RX_CTL, 0x0380, 0x006C, 5, 0x0000, 0), /* MX6Q_PAD_RGMII_RX_CTL__GPIO_6_24 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RX_CTL, 0x0380, 0x006C, 6, 0x0000, 0), /* MX6Q_PAD_RGMII_RX_CTL__MIPI_DPHY_IN_5 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RD0, 0x0384, 0x0070, 0, 0x0000, 0), /* MX6Q_PAD_RGMII_RD0__MIPI_HSI_CRL_RX_RDY */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RD0, 0x0384, 0x0070, 1, 0x0848, 0), /* MX6Q_PAD_RGMII_RD0__ENET_RGMII_RD0 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RD0, 0x0384, 0x0070, 5, 0x0000, 0), /* MX6Q_PAD_RGMII_RD0__GPIO_6_25 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RD0, 0x0384, 0x0070, 6, 0x0000, 0), /* MX6Q_PAD_RGMII_RD0__MIPI_CORE_DPHY_IN_6 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TX_CTL, 0x0388, 0x0074, 0, 0x0000, 0), /* MX6Q_PAD_RGMII_TX_CTL__USBOH3_H2_STROBE */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TX_CTL, 0x0388, 0x0074, 1, 0x0000, 0), /* MX6Q_PAD_RGMII_TX_CTL__RGMII_TX_CTL */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TX_CTL, 0x0388, 0x0074, 5, 0x0000, 0), /* MX6Q_PAD_RGMII_TX_CTL__GPIO_6_26 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TX_CTL, 0x0388, 0x0074, 6, 0x0000, 0), /* MX6Q_PAD_RGMII_TX_CTL__CORE_DPHY_IN_7 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_TX_CTL, 0x0388, 0x0074, 7, 0x083C, 0), /* MX6Q_PAD_RGMII_TX_CTL__ANATOP_REF_OUT */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RD1, 0x038C, 0x0078, 0, 0x0000, 0), /* MX6Q_PAD_RGMII_RD1__MIPI_HSI_CTRL_TX_FL */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RD1, 0x038C, 0x0078, 1, 0x084C, 0), /* MX6Q_PAD_RGMII_RD1__ENET_RGMII_RD1 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RD1, 0x038C, 0x0078, 5, 0x0000, 0), /* MX6Q_PAD_RGMII_RD1__GPIO_6_27 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RD1, 0x038C, 0x0078, 6, 0x0000, 0), /* MX6Q_PAD_RGMII_RD1__CORE_DPHY_TEST_IN_8 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RD1, 0x038C, 0x0078, 7, 0x0000, 0), /* MX6Q_PAD_RGMII_RD1__SJC_FAIL */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RD2, 0x0390, 0x007C, 0, 0x0000, 0), /* MX6Q_PAD_RGMII_RD2__MIPI_HSI_CRL_TX_DTA */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RD2, 0x0390, 0x007C, 1, 0x0850, 0), /* MX6Q_PAD_RGMII_RD2__ENET_RGMII_RD2 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RD2, 0x0390, 0x007C, 5, 0x0000, 0), /* MX6Q_PAD_RGMII_RD2__GPIO_6_28 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RD2, 0x0390, 0x007C, 6, 0x0000, 0), /* MX6Q_PAD_RGMII_RD2__MIPI_CORE_DPHY_IN_9 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RD3, 0x0394, 0x0080, 0, 0x0000, 0), /* MX6Q_PAD_RGMII_RD3__MIPI_HSI_CRL_TX_WAK */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RD3, 0x0394, 0x0080, 1, 0x0854, 0), /* MX6Q_PAD_RGMII_RD3__ENET_RGMII_RD3 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RD3, 0x0394, 0x0080, 5, 0x0000, 0), /* MX6Q_PAD_RGMII_RD3__GPIO_6_29 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RD3, 0x0394, 0x0080, 6, 0x0000, 0), /* MX6Q_PAD_RGMII_RD3__MIPI_CORE_DPHY_IN10 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RXC, 0x0398, 0x0084, 0, 0x0000, 0), /* MX6Q_PAD_RGMII_RXC__USBOH3_H3_STROBE */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RXC, 0x0398, 0x0084, 1, 0x0844, 0), /* MX6Q_PAD_RGMII_RXC__ENET_RGMII_RXC */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RXC, 0x0398, 0x0084, 5, 0x0000, 0), /* MX6Q_PAD_RGMII_RXC__GPIO_6_30 */
- IMX_PIN_REG(MX6Q_PAD_RGMII_RXC, 0x0398, 0x0084, 6, 0x0000, 0), /* MX6Q_PAD_RGMII_RXC__MIPI_CORE_DPHY_IN11 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A25, 0x039C, 0x0088, 0, 0x0000, 0), /* MX6Q_PAD_EIM_A25__WEIM_WEIM_A_25 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A25, 0x039C, 0x0088, 1, 0x0000, 0), /* MX6Q_PAD_EIM_A25__ECSPI4_SS1 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A25, 0x039C, 0x0088, 2, 0x0000, 0), /* MX6Q_PAD_EIM_A25__ECSPI2_RDY */
- IMX_PIN_REG(MX6Q_PAD_EIM_A25, 0x039C, 0x0088, 3, 0x0000, 0), /* MX6Q_PAD_EIM_A25__IPU1_DI1_PIN12 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A25, 0x039C, 0x0088, 4, 0x0000, 0), /* MX6Q_PAD_EIM_A25__IPU1_DI0_D1_CS */
- IMX_PIN_REG(MX6Q_PAD_EIM_A25, 0x039C, 0x0088, 5, 0x0000, 0), /* MX6Q_PAD_EIM_A25__GPIO_5_2 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A25, 0x039C, 0x0088, 6, 0x088C, 0), /* MX6Q_PAD_EIM_A25__HDMI_TX_CEC_LINE */
- IMX_PIN_REG(MX6Q_PAD_EIM_A25, 0x039C, 0x0088, 7, 0x0000, 0), /* MX6Q_PAD_EIM_A25__PL301_PER1_HBURST_0 */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB2, 0x03A0, 0x008C, 0, 0x0000, 0), /* MX6Q_PAD_EIM_EB2__WEIM_WEIM_EB_2 */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB2, 0x03A0, 0x008C, 1, 0x0800, 0), /* MX6Q_PAD_EIM_EB2__ECSPI1_SS0 */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB2, 0x03A0, 0x008C, 2, 0x07EC, 0), /* MX6Q_PAD_EIM_EB2__CCM_DI1_EXT_CLK */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB2, 0x03A0, 0x008C, 3, 0x08D4, 0), /* MX6Q_PAD_EIM_EB2__IPU2_CSI1_D_19 */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB2, 0x03A0, 0x008C, 4, 0x0890, 0), /* MX6Q_PAD_EIM_EB2__HDMI_TX_DDC_SCL */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB2, 0x03A0, 0x008C, 5, 0x0000, 0), /* MX6Q_PAD_EIM_EB2__GPIO_2_30 */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB2, 0x03A0, 0x008C, 6, 0x08A0, 0), /* MX6Q_PAD_EIM_EB2__I2C2_SCL */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB2, 0x03A0, 0x008C, 7, 0x0000, 0), /* MX6Q_PAD_EIM_EB2__SRC_BT_CFG_30 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D16, 0x03A4, 0x0090, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D16__WEIM_WEIM_D_16 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D16, 0x03A4, 0x0090, 1, 0x07F4, 0), /* MX6Q_PAD_EIM_D16__ECSPI1_SCLK */
- IMX_PIN_REG(MX6Q_PAD_EIM_D16, 0x03A4, 0x0090, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D16__IPU1_DI0_PIN5 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D16, 0x03A4, 0x0090, 3, 0x08D0, 0), /* MX6Q_PAD_EIM_D16__IPU2_CSI1_D_18 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D16, 0x03A4, 0x0090, 4, 0x0894, 0), /* MX6Q_PAD_EIM_D16__HDMI_TX_DDC_SDA */
- IMX_PIN_REG(MX6Q_PAD_EIM_D16, 0x03A4, 0x0090, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D16__GPIO_3_16 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D16, 0x03A4, 0x0090, 6, 0x08A4, 0), /* MX6Q_PAD_EIM_D16__I2C2_SDA */
- IMX_PIN_REG(MX6Q_PAD_EIM_D17, 0x03A8, 0x0094, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D17__WEIM_WEIM_D_17 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D17, 0x03A8, 0x0094, 1, 0x07F8, 0), /* MX6Q_PAD_EIM_D17__ECSPI1_MISO */
- IMX_PIN_REG(MX6Q_PAD_EIM_D17, 0x03A8, 0x0094, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D17__IPU1_DI0_PIN6 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D17, 0x03A8, 0x0094, 3, 0x08E0, 0), /* MX6Q_PAD_EIM_D17__IPU2_CSI1_PIXCLK */
- IMX_PIN_REG(MX6Q_PAD_EIM_D17, 0x03A8, 0x0094, 4, 0x0000, 0), /* MX6Q_PAD_EIM_D17__DCIC1_DCIC_OUT */
- IMX_PIN_REG(MX6Q_PAD_EIM_D17, 0x03A8, 0x0094, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D17__GPIO_3_17 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D17, 0x03A8, 0x0094, 6, 0x08A8, 0), /* MX6Q_PAD_EIM_D17__I2C3_SCL */
- IMX_PIN_REG(MX6Q_PAD_EIM_D17, 0x03A8, 0x0094, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D17__PL301_PER1_HBURST_1 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D18, 0x03AC, 0x0098, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D18__WEIM_WEIM_D_18 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D18, 0x03AC, 0x0098, 1, 0x07FC, 0), /* MX6Q_PAD_EIM_D18__ECSPI1_MOSI */
- IMX_PIN_REG(MX6Q_PAD_EIM_D18, 0x03AC, 0x0098, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D18__IPU1_DI0_PIN7 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D18, 0x03AC, 0x0098, 3, 0x08CC, 0), /* MX6Q_PAD_EIM_D18__IPU2_CSI1_D_17 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D18, 0x03AC, 0x0098, 4, 0x0000, 0), /* MX6Q_PAD_EIM_D18__IPU1_DI1_D0_CS */
- IMX_PIN_REG(MX6Q_PAD_EIM_D18, 0x03AC, 0x0098, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D18__GPIO_3_18 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D18, 0x03AC, 0x0098, 6, 0x08AC, 0), /* MX6Q_PAD_EIM_D18__I2C3_SDA */
- IMX_PIN_REG(MX6Q_PAD_EIM_D18, 0x03AC, 0x0098, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D18__PL301_PER1_HBURST_2 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D19, 0x03B0, 0x009C, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D19__WEIM_WEIM_D_19 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D19, 0x03B0, 0x009C, 1, 0x0804, 0), /* MX6Q_PAD_EIM_D19__ECSPI1_SS1 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D19, 0x03B0, 0x009C, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D19__IPU1_DI0_PIN8 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D19, 0x03B0, 0x009C, 3, 0x08C8, 0), /* MX6Q_PAD_EIM_D19__IPU2_CSI1_D_16 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D19, 0x03B0, 0x009C, 4, 0x091C, 0), /* MX6Q_PAD_EIM_D19__UART1_CTS */
- IMX_PIN_REG(MX6Q_PAD_EIM_D19, 0x03B0, 0x009C, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D19__GPIO_3_19 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D19, 0x03B0, 0x009C, 6, 0x0000, 0), /* MX6Q_PAD_EIM_D19__EPIT1_EPITO */
- IMX_PIN_REG(MX6Q_PAD_EIM_D19, 0x03B0, 0x009C, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D19__PL301_PER1_HRESP */
- IMX_PIN_REG(MX6Q_PAD_EIM_D20, 0x03B4, 0x00A0, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D20__WEIM_WEIM_D_20 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D20, 0x03B4, 0x00A0, 1, 0x0824, 0), /* MX6Q_PAD_EIM_D20__ECSPI4_SS0 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D20, 0x03B4, 0x00A0, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D20__IPU1_DI0_PIN16 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D20, 0x03B4, 0x00A0, 3, 0x08C4, 0), /* MX6Q_PAD_EIM_D20__IPU2_CSI1_D_15 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D20, 0x03B4, 0x00A0, 4, 0x091C, 1), /* MX6Q_PAD_EIM_D20__UART1_RTS */
- IMX_PIN_REG(MX6Q_PAD_EIM_D20, 0x03B4, 0x00A0, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D20__GPIO_3_20 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D20, 0x03B4, 0x00A0, 6, 0x0000, 0), /* MX6Q_PAD_EIM_D20__EPIT2_EPITO */
- IMX_PIN_REG(MX6Q_PAD_EIM_D21, 0x03B8, 0x00A4, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D21__WEIM_WEIM_D_21 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D21, 0x03B8, 0x00A4, 1, 0x0000, 0), /* MX6Q_PAD_EIM_D21__ECSPI4_SCLK */
- IMX_PIN_REG(MX6Q_PAD_EIM_D21, 0x03B8, 0x00A4, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D21__IPU1_DI0_PIN17 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D21, 0x03B8, 0x00A4, 3, 0x08B4, 0), /* MX6Q_PAD_EIM_D21__IPU2_CSI1_D_11 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D21, 0x03B8, 0x00A4, 4, 0x0944, 0), /* MX6Q_PAD_EIM_D21__USBOH3_USBOTG_OC */
- IMX_PIN_REG(MX6Q_PAD_EIM_D21, 0x03B8, 0x00A4, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D21__GPIO_3_21 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D21, 0x03B8, 0x00A4, 6, 0x0898, 0), /* MX6Q_PAD_EIM_D21__I2C1_SCL */
- IMX_PIN_REG(MX6Q_PAD_EIM_D21, 0x03B8, 0x00A4, 7, 0x0914, 0), /* MX6Q_PAD_EIM_D21__SPDIF_IN1 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D22, 0x03BC, 0x00A8, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D22__WEIM_WEIM_D_22 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D22, 0x03BC, 0x00A8, 1, 0x0000, 0), /* MX6Q_PAD_EIM_D22__ECSPI4_MISO */
- IMX_PIN_REG(MX6Q_PAD_EIM_D22, 0x03BC, 0x00A8, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D22__IPU1_DI0_PIN1 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D22, 0x03BC, 0x00A8, 3, 0x08B0, 0), /* MX6Q_PAD_EIM_D22__IPU2_CSI1_D_10 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D22, 0x03BC, 0x00A8, 4, 0x0000, 0), /* MX6Q_PAD_EIM_D22__USBOH3_USBOTG_PWR */
- IMX_PIN_REG(MX6Q_PAD_EIM_D22, 0x03BC, 0x00A8, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D22__GPIO_3_22 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D22, 0x03BC, 0x00A8, 6, 0x0000, 0), /* MX6Q_PAD_EIM_D22__SPDIF_OUT1 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D22, 0x03BC, 0x00A8, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D22__PL301_PER1_HWRITE */
- IMX_PIN_REG(MX6Q_PAD_EIM_D23, 0x03C0, 0x00AC, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D23__WEIM_WEIM_D_23 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D23, 0x03C0, 0x00AC, 1, 0x0000, 0), /* MX6Q_PAD_EIM_D23__IPU1_DI0_D0_CS */
- IMX_PIN_REG(MX6Q_PAD_EIM_D23, 0x03C0, 0x00AC, 2, 0x092C, 0), /* MX6Q_PAD_EIM_D23__UART3_CTS */
- IMX_PIN_REG(MX6Q_PAD_EIM_D23, 0x03C0, 0x00AC, 3, 0x0000, 0), /* MX6Q_PAD_EIM_D23__UART1_DCD */
- IMX_PIN_REG(MX6Q_PAD_EIM_D23, 0x03C0, 0x00AC, 4, 0x08D8, 0), /* MX6Q_PAD_EIM_D23__IPU2_CSI1_DATA_EN */
- IMX_PIN_REG(MX6Q_PAD_EIM_D23, 0x03C0, 0x00AC, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D23__GPIO_3_23 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D23, 0x03C0, 0x00AC, 6, 0x0000, 0), /* MX6Q_PAD_EIM_D23__IPU1_DI1_PIN2 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D23, 0x03C0, 0x00AC, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D23__IPU1_DI1_PIN14 */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB3, 0x03C4, 0x00B0, 0, 0x0000, 0), /* MX6Q_PAD_EIM_EB3__WEIM_WEIM_EB_3 */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB3, 0x03C4, 0x00B0, 1, 0x0000, 0), /* MX6Q_PAD_EIM_EB3__ECSPI4_RDY */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB3, 0x03C4, 0x00B0, 2, 0x092C, 1), /* MX6Q_PAD_EIM_EB3__UART3_RTS */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB3, 0x03C4, 0x00B0, 3, 0x0000, 0), /* MX6Q_PAD_EIM_EB3__UART1_RI */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB3, 0x03C4, 0x00B0, 4, 0x08DC, 0), /* MX6Q_PAD_EIM_EB3__IPU2_CSI1_HSYNC */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB3, 0x03C4, 0x00B0, 5, 0x0000, 0), /* MX6Q_PAD_EIM_EB3__GPIO_2_31 */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB3, 0x03C4, 0x00B0, 6, 0x0000, 0), /* MX6Q_PAD_EIM_EB3__IPU1_DI1_PIN3 */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB3, 0x03C4, 0x00B0, 7, 0x0000, 0), /* MX6Q_PAD_EIM_EB3__SRC_BT_CFG_31 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D24, 0x03C8, 0x00B4, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D24__WEIM_WEIM_D_24 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D24, 0x03C8, 0x00B4, 1, 0x0000, 0), /* MX6Q_PAD_EIM_D24__ECSPI4_SS2 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D24, 0x03C8, 0x00B4, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D24__UART3_TXD */
- IMX_PIN_REG(MX6Q_PAD_EIM_D24, 0x03C8, 0x00B4, 3, 0x0808, 0), /* MX6Q_PAD_EIM_D24__ECSPI1_SS2 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D24, 0x03C8, 0x00B4, 4, 0x0000, 0), /* MX6Q_PAD_EIM_D24__ECSPI2_SS2 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D24, 0x03C8, 0x00B4, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D24__GPIO_3_24 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D24, 0x03C8, 0x00B4, 6, 0x07D8, 0), /* MX6Q_PAD_EIM_D24__AUDMUX_AUD5_RXFS */
- IMX_PIN_REG(MX6Q_PAD_EIM_D24, 0x03C8, 0x00B4, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D24__UART1_DTR */
- IMX_PIN_REG(MX6Q_PAD_EIM_D25, 0x03CC, 0x00B8, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D25__WEIM_WEIM_D_25 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D25, 0x03CC, 0x00B8, 1, 0x0000, 0), /* MX6Q_PAD_EIM_D25__ECSPI4_SS3 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D25, 0x03CC, 0x00B8, 2, 0x0930, 1), /* MX6Q_PAD_EIM_D25__UART3_RXD */
- IMX_PIN_REG(MX6Q_PAD_EIM_D25, 0x03CC, 0x00B8, 3, 0x080C, 0), /* MX6Q_PAD_EIM_D25__ECSPI1_SS3 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D25, 0x03CC, 0x00B8, 4, 0x0000, 0), /* MX6Q_PAD_EIM_D25__ECSPI2_SS3 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D25, 0x03CC, 0x00B8, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D25__GPIO_3_25 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D25, 0x03CC, 0x00B8, 6, 0x07D4, 0), /* MX6Q_PAD_EIM_D25__AUDMUX_AUD5_RXC */
- IMX_PIN_REG(MX6Q_PAD_EIM_D25, 0x03CC, 0x00B8, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D25__UART1_DSR */
- IMX_PIN_REG(MX6Q_PAD_EIM_D26, 0x03D0, 0x00BC, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D26__WEIM_WEIM_D_26 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D26, 0x03D0, 0x00BC, 1, 0x0000, 0), /* MX6Q_PAD_EIM_D26__IPU1_DI1_PIN11 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D26, 0x03D0, 0x00BC, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D26__IPU1_CSI0_D_1 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D26, 0x03D0, 0x00BC, 3, 0x08C0, 0), /* MX6Q_PAD_EIM_D26__IPU2_CSI1_D_14 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D26, 0x03D0, 0x00BC, 4, 0x0000, 0), /* MX6Q_PAD_EIM_D26__UART2_TXD */
- IMX_PIN_REG(MX6Q_PAD_EIM_D26, 0x03D0, 0x00BC, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D26__GPIO_3_26 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D26, 0x03D0, 0x00BC, 6, 0x0000, 0), /* MX6Q_PAD_EIM_D26__IPU1_SISG_2 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D26, 0x03D0, 0x00BC, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D26__IPU1_DISP1_DAT_22 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D27, 0x03D4, 0x00C0, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D27__WEIM_WEIM_D_27 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D27, 0x03D4, 0x00C0, 1, 0x0000, 0), /* MX6Q_PAD_EIM_D27__IPU1_DI1_PIN13 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D27, 0x03D4, 0x00C0, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D27__IPU1_CSI0_D_0 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D27, 0x03D4, 0x00C0, 3, 0x08BC, 0), /* MX6Q_PAD_EIM_D27__IPU2_CSI1_D_13 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D27, 0x03D4, 0x00C0, 4, 0x0928, 1), /* MX6Q_PAD_EIM_D27__UART2_RXD */
- IMX_PIN_REG(MX6Q_PAD_EIM_D27, 0x03D4, 0x00C0, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D27__GPIO_3_27 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D27, 0x03D4, 0x00C0, 6, 0x0000, 0), /* MX6Q_PAD_EIM_D27__IPU1_SISG_3 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D27, 0x03D4, 0x00C0, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D27__IPU1_DISP1_DAT_23 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D28, 0x03D8, 0x00C4, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D28__WEIM_WEIM_D_28 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D28, 0x03D8, 0x00C4, 1, 0x089C, 0), /* MX6Q_PAD_EIM_D28__I2C1_SDA */
- IMX_PIN_REG(MX6Q_PAD_EIM_D28, 0x03D8, 0x00C4, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D28__ECSPI4_MOSI */
- IMX_PIN_REG(MX6Q_PAD_EIM_D28, 0x03D8, 0x00C4, 3, 0x08B8, 0), /* MX6Q_PAD_EIM_D28__IPU2_CSI1_D_12 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D28, 0x03D8, 0x00C4, 4, 0x0924, 0), /* MX6Q_PAD_EIM_D28__UART2_CTS */
- IMX_PIN_REG(MX6Q_PAD_EIM_D28, 0x03D8, 0x00C4, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D28__GPIO_3_28 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D28, 0x03D8, 0x00C4, 6, 0x0000, 0), /* MX6Q_PAD_EIM_D28__IPU1_EXT_TRIG */
- IMX_PIN_REG(MX6Q_PAD_EIM_D28, 0x03D8, 0x00C4, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D28__IPU1_DI0_PIN13 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D29, 0x03DC, 0x00C8, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D29__WEIM_WEIM_D_29 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D29, 0x03DC, 0x00C8, 1, 0x0000, 0), /* MX6Q_PAD_EIM_D29__IPU1_DI1_PIN15 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D29, 0x03DC, 0x00C8, 2, 0x0824, 1), /* MX6Q_PAD_EIM_D29__ECSPI4_SS0 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D29, 0x03DC, 0x00C8, 4, 0x0924, 1), /* MX6Q_PAD_EIM_D29__UART2_RTS */
- IMX_PIN_REG(MX6Q_PAD_EIM_D29, 0x03DC, 0x00C8, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D29__GPIO_3_29 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D29, 0x03DC, 0x00C8, 6, 0x08E4, 0), /* MX6Q_PAD_EIM_D29__IPU2_CSI1_VSYNC */
- IMX_PIN_REG(MX6Q_PAD_EIM_D29, 0x03DC, 0x00C8, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D29__IPU1_DI0_PIN14 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D30, 0x03E0, 0x00CC, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D30__WEIM_WEIM_D_30 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D30, 0x03E0, 0x00CC, 1, 0x0000, 0), /* MX6Q_PAD_EIM_D30__IPU1_DISP1_DAT_21 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D30, 0x03E0, 0x00CC, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D30__IPU1_DI0_PIN11 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D30, 0x03E0, 0x00CC, 3, 0x0000, 0), /* MX6Q_PAD_EIM_D30__IPU1_CSI0_D_3 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D30, 0x03E0, 0x00CC, 4, 0x092C, 2), /* MX6Q_PAD_EIM_D30__UART3_CTS */
- IMX_PIN_REG(MX6Q_PAD_EIM_D30, 0x03E0, 0x00CC, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D30__GPIO_3_30 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D30, 0x03E0, 0x00CC, 6, 0x0948, 0), /* MX6Q_PAD_EIM_D30__USBOH3_USBH1_OC */
- IMX_PIN_REG(MX6Q_PAD_EIM_D30, 0x03E0, 0x00CC, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D30__PL301_PER1_HPROT_0 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D31, 0x03E4, 0x00D0, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D31__WEIM_WEIM_D_31 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D31, 0x03E4, 0x00D0, 1, 0x0000, 0), /* MX6Q_PAD_EIM_D31__IPU1_DISP1_DAT_20 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D31, 0x03E4, 0x00D0, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D31__IPU1_DI0_PIN12 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D31, 0x03E4, 0x00D0, 3, 0x0000, 0), /* MX6Q_PAD_EIM_D31__IPU1_CSI0_D_2 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D31, 0x03E4, 0x00D0, 4, 0x092C, 3), /* MX6Q_PAD_EIM_D31__UART3_RTS */
- IMX_PIN_REG(MX6Q_PAD_EIM_D31, 0x03E4, 0x00D0, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D31__GPIO_3_31 */
- IMX_PIN_REG(MX6Q_PAD_EIM_D31, 0x03E4, 0x00D0, 6, 0x0000, 0), /* MX6Q_PAD_EIM_D31__USBOH3_USBH1_PWR */
- IMX_PIN_REG(MX6Q_PAD_EIM_D31, 0x03E4, 0x00D0, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D31__PL301_PER1_HPROT_1 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A24, 0x03E8, 0x00D4, 0, 0x0000, 0), /* MX6Q_PAD_EIM_A24__WEIM_WEIM_A_24 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A24, 0x03E8, 0x00D4, 1, 0x0000, 0), /* MX6Q_PAD_EIM_A24__IPU1_DISP1_DAT_19 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A24, 0x03E8, 0x00D4, 2, 0x08D4, 1), /* MX6Q_PAD_EIM_A24__IPU2_CSI1_D_19 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A24, 0x03E8, 0x00D4, 3, 0x0000, 0), /* MX6Q_PAD_EIM_A24__IPU2_SISG_2 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A24, 0x03E8, 0x00D4, 4, 0x0000, 0), /* MX6Q_PAD_EIM_A24__IPU1_SISG_2 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A24, 0x03E8, 0x00D4, 5, 0x0000, 0), /* MX6Q_PAD_EIM_A24__GPIO_5_4 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A24, 0x03E8, 0x00D4, 6, 0x0000, 0), /* MX6Q_PAD_EIM_A24__PL301_PER1_HPROT_2 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A24, 0x03E8, 0x00D4, 7, 0x0000, 0), /* MX6Q_PAD_EIM_A24__SRC_BT_CFG_24 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A23, 0x03EC, 0x00D8, 0, 0x0000, 0), /* MX6Q_PAD_EIM_A23__WEIM_WEIM_A_23 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A23, 0x03EC, 0x00D8, 1, 0x0000, 0), /* MX6Q_PAD_EIM_A23__IPU1_DISP1_DAT_18 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A23, 0x03EC, 0x00D8, 2, 0x08D0, 1), /* MX6Q_PAD_EIM_A23__IPU2_CSI1_D_18 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A23, 0x03EC, 0x00D8, 3, 0x0000, 0), /* MX6Q_PAD_EIM_A23__IPU2_SISG_3 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A23, 0x03EC, 0x00D8, 4, 0x0000, 0), /* MX6Q_PAD_EIM_A23__IPU1_SISG_3 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A23, 0x03EC, 0x00D8, 5, 0x0000, 0), /* MX6Q_PAD_EIM_A23__GPIO_6_6 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A23, 0x03EC, 0x00D8, 6, 0x0000, 0), /* MX6Q_PAD_EIM_A23__PL301_PER1_HPROT_3 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A23, 0x03EC, 0x00D8, 7, 0x0000, 0), /* MX6Q_PAD_EIM_A23__SRC_BT_CFG_23 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A22, 0x03F0, 0x00DC, 0, 0x0000, 0), /* MX6Q_PAD_EIM_A22__WEIM_WEIM_A_22 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A22, 0x03F0, 0x00DC, 1, 0x0000, 0), /* MX6Q_PAD_EIM_A22__IPU1_DISP1_DAT_17 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A22, 0x03F0, 0x00DC, 2, 0x08CC, 1), /* MX6Q_PAD_EIM_A22__IPU2_CSI1_D_17 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A22, 0x03F0, 0x00DC, 5, 0x0000, 0), /* MX6Q_PAD_EIM_A22__GPIO_2_16 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A22, 0x03F0, 0x00DC, 6, 0x0000, 0), /* MX6Q_PAD_EIM_A22__TPSMP_HDATA_0 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A22, 0x03F0, 0x00DC, 7, 0x0000, 0), /* MX6Q_PAD_EIM_A22__SRC_BT_CFG_22 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A21, 0x03F4, 0x00E0, 0, 0x0000, 0), /* MX6Q_PAD_EIM_A21__WEIM_WEIM_A_21 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A21, 0x03F4, 0x00E0, 1, 0x0000, 0), /* MX6Q_PAD_EIM_A21__IPU1_DISP1_DAT_16 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A21, 0x03F4, 0x00E0, 2, 0x08C8, 1), /* MX6Q_PAD_EIM_A21__IPU2_CSI1_D_16 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A21, 0x03F4, 0x00E0, 3, 0x0000, 0), /* MX6Q_PAD_EIM_A21__RESERVED_RESERVED */
- IMX_PIN_REG(MX6Q_PAD_EIM_A21, 0x03F4, 0x00E0, 4, 0x0000, 0), /* MX6Q_PAD_EIM_A21__MIPI_CORE_DPHY_OUT_18 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A21, 0x03F4, 0x00E0, 5, 0x0000, 0), /* MX6Q_PAD_EIM_A21__GPIO_2_17 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A21, 0x03F4, 0x00E0, 6, 0x0000, 0), /* MX6Q_PAD_EIM_A21__TPSMP_HDATA_1 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A21, 0x03F4, 0x00E0, 7, 0x0000, 0), /* MX6Q_PAD_EIM_A21__SRC_BT_CFG_21 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A20, 0x03F8, 0x00E4, 0, 0x0000, 0), /* MX6Q_PAD_EIM_A20__WEIM_WEIM_A_20 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A20, 0x03F8, 0x00E4, 1, 0x0000, 0), /* MX6Q_PAD_EIM_A20__IPU1_DISP1_DAT_15 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A20, 0x03F8, 0x00E4, 2, 0x08C4, 1), /* MX6Q_PAD_EIM_A20__IPU2_CSI1_D_15 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A20, 0x03F8, 0x00E4, 3, 0x0000, 0), /* MX6Q_PAD_EIM_A20__RESERVED_RESERVED */
- IMX_PIN_REG(MX6Q_PAD_EIM_A20, 0x03F8, 0x00E4, 4, 0x0000, 0), /* MX6Q_PAD_EIM_A20__MIPI_CORE_DPHY_OUT_19 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A20, 0x03F8, 0x00E4, 5, 0x0000, 0), /* MX6Q_PAD_EIM_A20__GPIO_2_18 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A20, 0x03F8, 0x00E4, 6, 0x0000, 0), /* MX6Q_PAD_EIM_A20__TPSMP_HDATA_2 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A20, 0x03F8, 0x00E4, 7, 0x0000, 0), /* MX6Q_PAD_EIM_A20__SRC_BT_CFG_20 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A19, 0x03FC, 0x00E8, 0, 0x0000, 0), /* MX6Q_PAD_EIM_A19__WEIM_WEIM_A_19 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A19, 0x03FC, 0x00E8, 1, 0x0000, 0), /* MX6Q_PAD_EIM_A19__IPU1_DISP1_DAT_14 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A19, 0x03FC, 0x00E8, 2, 0x08C0, 1), /* MX6Q_PAD_EIM_A19__IPU2_CSI1_D_14 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A19, 0x03FC, 0x00E8, 3, 0x0000, 0), /* MX6Q_PAD_EIM_A19__RESERVED_RESERVED */
- IMX_PIN_REG(MX6Q_PAD_EIM_A19, 0x03FC, 0x00E8, 4, 0x0000, 0), /* MX6Q_PAD_EIM_A19__MIPI_CORE_DPHY_OUT_20 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A19, 0x03FC, 0x00E8, 5, 0x0000, 0), /* MX6Q_PAD_EIM_A19__GPIO_2_19 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A19, 0x03FC, 0x00E8, 6, 0x0000, 0), /* MX6Q_PAD_EIM_A19__TPSMP_HDATA_3 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A19, 0x03FC, 0x00E8, 7, 0x0000, 0), /* MX6Q_PAD_EIM_A19__SRC_BT_CFG_19 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A18, 0x0400, 0x00EC, 0, 0x0000, 0), /* MX6Q_PAD_EIM_A18__WEIM_WEIM_A_18 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A18, 0x0400, 0x00EC, 1, 0x0000, 0), /* MX6Q_PAD_EIM_A18__IPU1_DISP1_DAT_13 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A18, 0x0400, 0x00EC, 2, 0x08BC, 1), /* MX6Q_PAD_EIM_A18__IPU2_CSI1_D_13 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A18, 0x0400, 0x00EC, 3, 0x0000, 0), /* MX6Q_PAD_EIM_A18__RESERVED_RESERVED */
- IMX_PIN_REG(MX6Q_PAD_EIM_A18, 0x0400, 0x00EC, 4, 0x0000, 0), /* MX6Q_PAD_EIM_A18__MIPI_CORE_DPHY_OUT_21 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A18, 0x0400, 0x00EC, 5, 0x0000, 0), /* MX6Q_PAD_EIM_A18__GPIO_2_20 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A18, 0x0400, 0x00EC, 6, 0x0000, 0), /* MX6Q_PAD_EIM_A18__TPSMP_HDATA_4 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A18, 0x0400, 0x00EC, 7, 0x0000, 0), /* MX6Q_PAD_EIM_A18__SRC_BT_CFG_18 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A17, 0x0404, 0x00F0, 0, 0x0000, 0), /* MX6Q_PAD_EIM_A17__WEIM_WEIM_A_17 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A17, 0x0404, 0x00F0, 1, 0x0000, 0), /* MX6Q_PAD_EIM_A17__IPU1_DISP1_DAT_12 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A17, 0x0404, 0x00F0, 2, 0x08B8, 1), /* MX6Q_PAD_EIM_A17__IPU2_CSI1_D_12 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A17, 0x0404, 0x00F0, 3, 0x0000, 0), /* MX6Q_PAD_EIM_A17__RESERVED_RESERVED */
- IMX_PIN_REG(MX6Q_PAD_EIM_A17, 0x0404, 0x00F0, 4, 0x0000, 0), /* MX6Q_PAD_EIM_A17__MIPI_CORE_DPHY_OUT_22 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A17, 0x0404, 0x00F0, 5, 0x0000, 0), /* MX6Q_PAD_EIM_A17__GPIO_2_21 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A17, 0x0404, 0x00F0, 6, 0x0000, 0), /* MX6Q_PAD_EIM_A17__TPSMP_HDATA_5 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A17, 0x0404, 0x00F0, 7, 0x0000, 0), /* MX6Q_PAD_EIM_A17__SRC_BT_CFG_17 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A16, 0x0408, 0x00F4, 0, 0x0000, 0), /* MX6Q_PAD_EIM_A16__WEIM_WEIM_A_16 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A16, 0x0408, 0x00F4, 1, 0x0000, 0), /* MX6Q_PAD_EIM_A16__IPU1_DI1_DISP_CLK */
- IMX_PIN_REG(MX6Q_PAD_EIM_A16, 0x0408, 0x00F4, 2, 0x08E0, 1), /* MX6Q_PAD_EIM_A16__IPU2_CSI1_PIXCLK */
- IMX_PIN_REG(MX6Q_PAD_EIM_A16, 0x0408, 0x00F4, 4, 0x0000, 0), /* MX6Q_PAD_EIM_A16__MIPI_CORE_DPHY_OUT_23 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A16, 0x0408, 0x00F4, 5, 0x0000, 0), /* MX6Q_PAD_EIM_A16__GPIO_2_22 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A16, 0x0408, 0x00F4, 6, 0x0000, 0), /* MX6Q_PAD_EIM_A16__TPSMP_HDATA_6 */
- IMX_PIN_REG(MX6Q_PAD_EIM_A16, 0x0408, 0x00F4, 7, 0x0000, 0), /* MX6Q_PAD_EIM_A16__SRC_BT_CFG_16 */
- IMX_PIN_REG(MX6Q_PAD_EIM_CS0, 0x040C, 0x00F8, 0, 0x0000, 0), /* MX6Q_PAD_EIM_CS0__WEIM_WEIM_CS_0 */
- IMX_PIN_REG(MX6Q_PAD_EIM_CS0, 0x040C, 0x00F8, 1, 0x0000, 0), /* MX6Q_PAD_EIM_CS0__IPU1_DI1_PIN5 */
- IMX_PIN_REG(MX6Q_PAD_EIM_CS0, 0x040C, 0x00F8, 2, 0x0810, 0), /* MX6Q_PAD_EIM_CS0__ECSPI2_SCLK */
- IMX_PIN_REG(MX6Q_PAD_EIM_CS0, 0x040C, 0x00F8, 4, 0x0000, 0), /* MX6Q_PAD_EIM_CS0__MIPI_CORE_DPHY_OUT_24 */
- IMX_PIN_REG(MX6Q_PAD_EIM_CS0, 0x040C, 0x00F8, 5, 0x0000, 0), /* MX6Q_PAD_EIM_CS0__GPIO_2_23 */
- IMX_PIN_REG(MX6Q_PAD_EIM_CS0, 0x040C, 0x00F8, 6, 0x0000, 0), /* MX6Q_PAD_EIM_CS0__TPSMP_HDATA_7 */
- IMX_PIN_REG(MX6Q_PAD_EIM_CS1, 0x0410, 0x00FC, 0, 0x0000, 0), /* MX6Q_PAD_EIM_CS1__WEIM_WEIM_CS_1 */
- IMX_PIN_REG(MX6Q_PAD_EIM_CS1, 0x0410, 0x00FC, 1, 0x0000, 0), /* MX6Q_PAD_EIM_CS1__IPU1_DI1_PIN6 */
- IMX_PIN_REG(MX6Q_PAD_EIM_CS1, 0x0410, 0x00FC, 2, 0x0818, 0), /* MX6Q_PAD_EIM_CS1__ECSPI2_MOSI */
- IMX_PIN_REG(MX6Q_PAD_EIM_CS1, 0x0410, 0x00FC, 4, 0x0000, 0), /* MX6Q_PAD_EIM_CS1__MIPI_CORE_DPHY_OUT_25 */
- IMX_PIN_REG(MX6Q_PAD_EIM_CS1, 0x0410, 0x00FC, 5, 0x0000, 0), /* MX6Q_PAD_EIM_CS1__GPIO_2_24 */
- IMX_PIN_REG(MX6Q_PAD_EIM_CS1, 0x0410, 0x00FC, 6, 0x0000, 0), /* MX6Q_PAD_EIM_CS1__TPSMP_HDATA_8 */
- IMX_PIN_REG(MX6Q_PAD_EIM_OE, 0x0414, 0x0100, 0, 0x0000, 0), /* MX6Q_PAD_EIM_OE__WEIM_WEIM_OE */
- IMX_PIN_REG(MX6Q_PAD_EIM_OE, 0x0414, 0x0100, 1, 0x0000, 0), /* MX6Q_PAD_EIM_OE__IPU1_DI1_PIN7 */
- IMX_PIN_REG(MX6Q_PAD_EIM_OE, 0x0414, 0x0100, 2, 0x0814, 0), /* MX6Q_PAD_EIM_OE__ECSPI2_MISO */
- IMX_PIN_REG(MX6Q_PAD_EIM_OE, 0x0414, 0x0100, 4, 0x0000, 0), /* MX6Q_PAD_EIM_OE__MIPI_CORE_DPHY_OUT_26 */
- IMX_PIN_REG(MX6Q_PAD_EIM_OE, 0x0414, 0x0100, 5, 0x0000, 0), /* MX6Q_PAD_EIM_OE__GPIO_2_25 */
- IMX_PIN_REG(MX6Q_PAD_EIM_OE, 0x0414, 0x0100, 6, 0x0000, 0), /* MX6Q_PAD_EIM_OE__TPSMP_HDATA_9 */
- IMX_PIN_REG(MX6Q_PAD_EIM_RW, 0x0418, 0x0104, 0, 0x0000, 0), /* MX6Q_PAD_EIM_RW__WEIM_WEIM_RW */
- IMX_PIN_REG(MX6Q_PAD_EIM_RW, 0x0418, 0x0104, 1, 0x0000, 0), /* MX6Q_PAD_EIM_RW__IPU1_DI1_PIN8 */
- IMX_PIN_REG(MX6Q_PAD_EIM_RW, 0x0418, 0x0104, 2, 0x081C, 0), /* MX6Q_PAD_EIM_RW__ECSPI2_SS0 */
- IMX_PIN_REG(MX6Q_PAD_EIM_RW, 0x0418, 0x0104, 4, 0x0000, 0), /* MX6Q_PAD_EIM_RW__MIPI_CORE_DPHY_OUT_27 */
- IMX_PIN_REG(MX6Q_PAD_EIM_RW, 0x0418, 0x0104, 5, 0x0000, 0), /* MX6Q_PAD_EIM_RW__GPIO_2_26 */
- IMX_PIN_REG(MX6Q_PAD_EIM_RW, 0x0418, 0x0104, 6, 0x0000, 0), /* MX6Q_PAD_EIM_RW__TPSMP_HDATA_10 */
- IMX_PIN_REG(MX6Q_PAD_EIM_RW, 0x0418, 0x0104, 7, 0x0000, 0), /* MX6Q_PAD_EIM_RW__SRC_BT_CFG_29 */
- IMX_PIN_REG(MX6Q_PAD_EIM_LBA, 0x041C, 0x0108, 0, 0x0000, 0), /* MX6Q_PAD_EIM_LBA__WEIM_WEIM_LBA */
- IMX_PIN_REG(MX6Q_PAD_EIM_LBA, 0x041C, 0x0108, 1, 0x0000, 0), /* MX6Q_PAD_EIM_LBA__IPU1_DI1_PIN17 */
- IMX_PIN_REG(MX6Q_PAD_EIM_LBA, 0x041C, 0x0108, 2, 0x0820, 0), /* MX6Q_PAD_EIM_LBA__ECSPI2_SS1 */
- IMX_PIN_REG(MX6Q_PAD_EIM_LBA, 0x041C, 0x0108, 5, 0x0000, 0), /* MX6Q_PAD_EIM_LBA__GPIO_2_27 */
- IMX_PIN_REG(MX6Q_PAD_EIM_LBA, 0x041C, 0x0108, 6, 0x0000, 0), /* MX6Q_PAD_EIM_LBA__TPSMP_HDATA_11 */
- IMX_PIN_REG(MX6Q_PAD_EIM_LBA, 0x041C, 0x0108, 7, 0x0000, 0), /* MX6Q_PAD_EIM_LBA__SRC_BT_CFG_26 */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB0, 0x0420, 0x010C, 0, 0x0000, 0), /* MX6Q_PAD_EIM_EB0__WEIM_WEIM_EB_0 */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB0, 0x0420, 0x010C, 1, 0x0000, 0), /* MX6Q_PAD_EIM_EB0__IPU1_DISP1_DAT_11 */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB0, 0x0420, 0x010C, 2, 0x08B4, 1), /* MX6Q_PAD_EIM_EB0__IPU2_CSI1_D_11 */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB0, 0x0420, 0x010C, 3, 0x0000, 0), /* MX6Q_PAD_EIM_EB0__MIPI_CORE_DPHY_OUT_0 */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB0, 0x0420, 0x010C, 4, 0x07F0, 0), /* MX6Q_PAD_EIM_EB0__CCM_PMIC_RDY */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB0, 0x0420, 0x010C, 5, 0x0000, 0), /* MX6Q_PAD_EIM_EB0__GPIO_2_28 */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB0, 0x0420, 0x010C, 6, 0x0000, 0), /* MX6Q_PAD_EIM_EB0__TPSMP_HDATA_12 */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB0, 0x0420, 0x010C, 7, 0x0000, 0), /* MX6Q_PAD_EIM_EB0__SRC_BT_CFG_27 */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB1, 0x0424, 0x0110, 0, 0x0000, 0), /* MX6Q_PAD_EIM_EB1__WEIM_WEIM_EB_1 */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB1, 0x0424, 0x0110, 1, 0x0000, 0), /* MX6Q_PAD_EIM_EB1__IPU1_DISP1_DAT_10 */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB1, 0x0424, 0x0110, 2, 0x08B0, 1), /* MX6Q_PAD_EIM_EB1__IPU2_CSI1_D_10 */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB1, 0x0424, 0x0110, 3, 0x0000, 0), /* MX6Q_PAD_EIM_EB1__MIPI_CORE_DPHY__OUT_1 */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB1, 0x0424, 0x0110, 5, 0x0000, 0), /* MX6Q_PAD_EIM_EB1__GPIO_2_29 */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB1, 0x0424, 0x0110, 6, 0x0000, 0), /* MX6Q_PAD_EIM_EB1__TPSMP_HDATA_13 */
- IMX_PIN_REG(MX6Q_PAD_EIM_EB1, 0x0424, 0x0110, 7, 0x0000, 0), /* MX6Q_PAD_EIM_EB1__SRC_BT_CFG_28 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA0, 0x0428, 0x0114, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA0__WEIM_WEIM_DA_A_0 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA0, 0x0428, 0x0114, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA0__IPU1_DISP1_DAT_9 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA0, 0x0428, 0x0114, 2, 0x0000, 0), /* MX6Q_PAD_EIM_DA0__IPU2_CSI1_D_9 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA0, 0x0428, 0x0114, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA0__MIPI_CORE_DPHY__OUT_2 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA0, 0x0428, 0x0114, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA0__GPIO_3_0 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA0, 0x0428, 0x0114, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA0__TPSMP_HDATA_14 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA0, 0x0428, 0x0114, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA0__SRC_BT_CFG_0 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA1, 0x042C, 0x0118, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA1__WEIM_WEIM_DA_A_1 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA1, 0x042C, 0x0118, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA1__IPU1_DISP1_DAT_8 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA1, 0x042C, 0x0118, 2, 0x0000, 0), /* MX6Q_PAD_EIM_DA1__IPU2_CSI1_D_8 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA1, 0x042C, 0x0118, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA1__MIPI_CORE_DPHY_OUT_3 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA1, 0x042C, 0x0118, 4, 0x0000, 0), /* MX6Q_PAD_EIM_DA1__USBPHY1_TX_LS_MODE */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA1, 0x042C, 0x0118, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA1__GPIO_3_1 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA1, 0x042C, 0x0118, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA1__TPSMP_HDATA_15 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA1, 0x042C, 0x0118, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA1__SRC_BT_CFG_1 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA2, 0x0430, 0x011C, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA2__WEIM_WEIM_DA_A_2 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA2, 0x0430, 0x011C, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA2__IPU1_DISP1_DAT_7 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA2, 0x0430, 0x011C, 2, 0x0000, 0), /* MX6Q_PAD_EIM_DA2__IPU2_CSI1_D_7 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA2, 0x0430, 0x011C, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA2__MIPI_CORE_DPHY_OUT_4 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA2, 0x0430, 0x011C, 4, 0x0000, 0), /* MX6Q_PAD_EIM_DA2__USBPHY1_TX_HS_MODE */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA2, 0x0430, 0x011C, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA2__GPIO_3_2 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA2, 0x0430, 0x011C, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA2__TPSMP_HDATA_16 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA2, 0x0430, 0x011C, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA2__SRC_BT_CFG_2 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA3, 0x0434, 0x0120, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA3__WEIM_WEIM_DA_A_3 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA3, 0x0434, 0x0120, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA3__IPU1_DISP1_DAT_6 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA3, 0x0434, 0x0120, 2, 0x0000, 0), /* MX6Q_PAD_EIM_DA3__IPU2_CSI1_D_6 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA3, 0x0434, 0x0120, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA3__MIPI_CORE_DPHY_OUT_5 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA3, 0x0434, 0x0120, 4, 0x0000, 0), /* MX6Q_PAD_EIM_DA3__USBPHY1_TX_HIZ */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA3, 0x0434, 0x0120, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA3__GPIO_3_3 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA3, 0x0434, 0x0120, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA3__TPSMP_HDATA_17 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA3, 0x0434, 0x0120, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA3__SRC_BT_CFG_3 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA4, 0x0438, 0x0124, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA4__WEIM_WEIM_DA_A_4 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA4, 0x0438, 0x0124, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA4__IPU1_DISP1_DAT_5 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA4, 0x0438, 0x0124, 2, 0x0000, 0), /* MX6Q_PAD_EIM_DA4__IPU2_CSI1_D_5 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA4, 0x0438, 0x0124, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA4__MIPI_CORE_DPHY_OUT_6 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA4, 0x0438, 0x0124, 4, 0x0000, 0), /* MX6Q_PAD_EIM_DA4__ANATOP_USBPHY1_TX_EN */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA4, 0x0438, 0x0124, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA4__GPIO_3_4 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA4, 0x0438, 0x0124, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA4__TPSMP_HDATA_18 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA4, 0x0438, 0x0124, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA4__SRC_BT_CFG_4 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA5, 0x043C, 0x0128, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA5__WEIM_WEIM_DA_A_5 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA5, 0x043C, 0x0128, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA5__IPU1_DISP1_DAT_4 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA5, 0x043C, 0x0128, 2, 0x0000, 0), /* MX6Q_PAD_EIM_DA5__IPU2_CSI1_D_4 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA5, 0x043C, 0x0128, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA5__MIPI_CORE_DPHY_OUT_7 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA5, 0x043C, 0x0128, 4, 0x0000, 0), /* MX6Q_PAD_EIM_DA5__ANATOP_USBPHY1_TX_DP */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA5, 0x043C, 0x0128, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA5__GPIO_3_5 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA5, 0x043C, 0x0128, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA5__TPSMP_HDATA_19 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA5, 0x043C, 0x0128, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA5__SRC_BT_CFG_5 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA6, 0x0440, 0x012C, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA6__WEIM_WEIM_DA_A_6 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA6, 0x0440, 0x012C, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA6__IPU1_DISP1_DAT_3 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA6, 0x0440, 0x012C, 2, 0x0000, 0), /* MX6Q_PAD_EIM_DA6__IPU2_CSI1_D_3 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA6, 0x0440, 0x012C, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA6__MIPI_CORE_DPHY_OUT_8 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA6, 0x0440, 0x012C, 4, 0x0000, 0), /* MX6Q_PAD_EIM_DA6__ANATOP_USBPHY1_TX_DN */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA6, 0x0440, 0x012C, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA6__GPIO_3_6 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA6, 0x0440, 0x012C, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA6__TPSMP_HDATA_20 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA6, 0x0440, 0x012C, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA6__SRC_BT_CFG_6 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA7, 0x0444, 0x0130, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA7__WEIM_WEIM_DA_A_7 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA7, 0x0444, 0x0130, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA7__IPU1_DISP1_DAT_2 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA7, 0x0444, 0x0130, 2, 0x0000, 0), /* MX6Q_PAD_EIM_DA7__IPU2_CSI1_D_2 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA7, 0x0444, 0x0130, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA7__MIPI_CORE_DPHY_OUT_9 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA7, 0x0444, 0x0130, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA7__GPIO_3_7 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA7, 0x0444, 0x0130, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA7__TPSMP_HDATA_21 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA7, 0x0444, 0x0130, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA7__SRC_BT_CFG_7 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA8, 0x0448, 0x0134, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA8__WEIM_WEIM_DA_A_8 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA8, 0x0448, 0x0134, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA8__IPU1_DISP1_DAT_1 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA8, 0x0448, 0x0134, 2, 0x0000, 0), /* MX6Q_PAD_EIM_DA8__IPU2_CSI1_D_1 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA8, 0x0448, 0x0134, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA8__MIPI_CORE_DPHY_OUT_10 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA8, 0x0448, 0x0134, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA8__GPIO_3_8 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA8, 0x0448, 0x0134, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA8__TPSMP_HDATA_22 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA8, 0x0448, 0x0134, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA8__SRC_BT_CFG_8 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA9, 0x044C, 0x0138, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA9__WEIM_WEIM_DA_A_9 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA9, 0x044C, 0x0138, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA9__IPU1_DISP1_DAT_0 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA9, 0x044C, 0x0138, 2, 0x0000, 0), /* MX6Q_PAD_EIM_DA9__IPU2_CSI1_D_0 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA9, 0x044C, 0x0138, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA9__MIPI_CORE_DPHY_OUT_11 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA9, 0x044C, 0x0138, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA9__GPIO_3_9 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA9, 0x044C, 0x0138, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA9__TPSMP_HDATA_23 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA9, 0x044C, 0x0138, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA9__SRC_BT_CFG_9 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA10, 0x0450, 0x013C, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA10__WEIM_WEIM_DA_A_10 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA10, 0x0450, 0x013C, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA10__IPU1_DI1_PIN15 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA10, 0x0450, 0x013C, 2, 0x08D8, 1), /* MX6Q_PAD_EIM_DA10__IPU2_CSI1_DATA_EN */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA10, 0x0450, 0x013C, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA10__MIPI_CORE_DPHY_OUT12 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA10, 0x0450, 0x013C, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA10__GPIO_3_10 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA10, 0x0450, 0x013C, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA10__TPSMP_HDATA_24 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA10, 0x0450, 0x013C, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA10__SRC_BT_CFG_10 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA11, 0x0454, 0x0140, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA11__WEIM_WEIM_DA_A_11 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA11, 0x0454, 0x0140, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA11__IPU1_DI1_PIN2 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA11, 0x0454, 0x0140, 2, 0x08DC, 1), /* MX6Q_PAD_EIM_DA11__IPU2_CSI1_HSYNC */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA11, 0x0454, 0x0140, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA11__MIPI_CORE_DPHY_OUT13 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA11, 0x0454, 0x0140, 4, 0x0000, 0), /* MX6Q_PAD_EIM_DA11__SDMA_DBG_EVT_CHN_6 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA11, 0x0454, 0x0140, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA11__GPIO_3_11 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA11, 0x0454, 0x0140, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA11__TPSMP_HDATA_25 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA11, 0x0454, 0x0140, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA11__SRC_BT_CFG_11 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA12, 0x0458, 0x0144, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA12__WEIM_WEIM_DA_A_12 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA12, 0x0458, 0x0144, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA12__IPU1_DI1_PIN3 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA12, 0x0458, 0x0144, 2, 0x08E4, 1), /* MX6Q_PAD_EIM_DA12__IPU2_CSI1_VSYNC */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA12, 0x0458, 0x0144, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA12__MIPI_CORE_DPHY_OUT14 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA12, 0x0458, 0x0144, 4, 0x0000, 0), /* MX6Q_PAD_EIM_DA12__SDMA_DEBUG_EVT_CHN_3 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA12, 0x0458, 0x0144, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA12__GPIO_3_12 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA12, 0x0458, 0x0144, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA12__TPSMP_HDATA_26 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA12, 0x0458, 0x0144, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA12__SRC_BT_CFG_12 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA13, 0x045C, 0x0148, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA13__WEIM_WEIM_DA_A_13 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA13, 0x045C, 0x0148, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA13__IPU1_DI1_D0_CS */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA13, 0x045C, 0x0148, 2, 0x07EC, 1), /* MX6Q_PAD_EIM_DA13__CCM_DI1_EXT_CLK */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA13, 0x045C, 0x0148, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA13__MIPI_CORE_DPHY_OUT15 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA13, 0x045C, 0x0148, 4, 0x0000, 0), /* MX6Q_PAD_EIM_DA13__SDMA_DEBUG_EVT_CHN_4 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA13, 0x045C, 0x0148, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA13__GPIO_3_13 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA13, 0x045C, 0x0148, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA13__TPSMP_HDATA_27 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA13, 0x045C, 0x0148, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA13__SRC_BT_CFG_13 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA14, 0x0460, 0x014C, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA14__WEIM_WEIM_DA_A_14 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA14, 0x0460, 0x014C, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA14__IPU1_DI1_D1_CS */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA14, 0x0460, 0x014C, 2, 0x0000, 0), /* MX6Q_PAD_EIM_DA14__CCM_DI0_EXT_CLK */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA14, 0x0460, 0x014C, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA14__MIPI_CORE_DPHY_OUT16 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA14, 0x0460, 0x014C, 4, 0x0000, 0), /* MX6Q_PAD_EIM_DA14__SDMA_DEBUG_EVT_CHN_5 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA14, 0x0460, 0x014C, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA14__GPIO_3_14 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA14, 0x0460, 0x014C, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA14__TPSMP_HDATA_28 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA14, 0x0460, 0x014C, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA14__SRC_BT_CFG_14 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA15, 0x0464, 0x0150, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA15__WEIM_WEIM_DA_A_15 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA15, 0x0464, 0x0150, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA15__IPU1_DI1_PIN1 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA15, 0x0464, 0x0150, 2, 0x0000, 0), /* MX6Q_PAD_EIM_DA15__IPU1_DI1_PIN4 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA15, 0x0464, 0x0150, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA15__MIPI_CORE_DPHY_OUT17 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA15, 0x0464, 0x0150, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA15__GPIO_3_15 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA15, 0x0464, 0x0150, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA15__TPSMP_HDATA_29 */
- IMX_PIN_REG(MX6Q_PAD_EIM_DA15, 0x0464, 0x0150, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA15__SRC_BT_CFG_15 */
- IMX_PIN_REG(MX6Q_PAD_EIM_WAIT, 0x0468, 0x0154, 0, 0x0000, 0), /* MX6Q_PAD_EIM_WAIT__WEIM_WEIM_WAIT */
- IMX_PIN_REG(MX6Q_PAD_EIM_WAIT, 0x0468, 0x0154, 1, 0x0000, 0), /* MX6Q_PAD_EIM_WAIT__WEIM_WEIM_DTACK_B */
- IMX_PIN_REG(MX6Q_PAD_EIM_WAIT, 0x0468, 0x0154, 5, 0x0000, 0), /* MX6Q_PAD_EIM_WAIT__GPIO_5_0 */
- IMX_PIN_REG(MX6Q_PAD_EIM_WAIT, 0x0468, 0x0154, 6, 0x0000, 0), /* MX6Q_PAD_EIM_WAIT__TPSMP_HDATA_30 */
- IMX_PIN_REG(MX6Q_PAD_EIM_WAIT, 0x0468, 0x0154, 7, 0x0000, 0), /* MX6Q_PAD_EIM_WAIT__SRC_BT_CFG_25 */
- IMX_PIN_REG(MX6Q_PAD_EIM_BCLK, 0x046C, 0x0158, 0, 0x0000, 0), /* MX6Q_PAD_EIM_BCLK__WEIM_WEIM_BCLK */
- IMX_PIN_REG(MX6Q_PAD_EIM_BCLK, 0x046C, 0x0158, 1, 0x0000, 0), /* MX6Q_PAD_EIM_BCLK__IPU1_DI1_PIN16 */
- IMX_PIN_REG(MX6Q_PAD_EIM_BCLK, 0x046C, 0x0158, 5, 0x0000, 0), /* MX6Q_PAD_EIM_BCLK__GPIO_6_31 */
- IMX_PIN_REG(MX6Q_PAD_EIM_BCLK, 0x046C, 0x0158, 6, 0x0000, 0), /* MX6Q_PAD_EIM_BCLK__TPSMP_HDATA_31 */
- IMX_PIN_REG(MX6Q_PAD_DI0_DISP_CLK, 0x0470, 0x015C, 0, 0x0000, 0), /* MX6Q_PAD_DI0_DISP_CLK__IPU1_DI0_DSP_CLK */
- IMX_PIN_REG(MX6Q_PAD_DI0_DISP_CLK, 0x0470, 0x015C, 1, 0x0000, 0), /* MX6Q_PAD_DI0_DISP_CLK__IPU2_DI0_DSP_CLK */
- IMX_PIN_REG(MX6Q_PAD_DI0_DISP_CLK, 0x0470, 0x015C, 3, 0x0000, 0), /* MX6Q_PAD_DI0_DISP_CLK__MIPI_CR_DPY_OT28 */
- IMX_PIN_REG(MX6Q_PAD_DI0_DISP_CLK, 0x0470, 0x015C, 4, 0x0000, 0), /* MX6Q_PAD_DI0_DISP_CLK__SDMA_DBG_CR_STA0 */
- IMX_PIN_REG(MX6Q_PAD_DI0_DISP_CLK, 0x0470, 0x015C, 5, 0x0000, 0), /* MX6Q_PAD_DI0_DISP_CLK__GPIO_4_16 */
- IMX_PIN_REG(MX6Q_PAD_DI0_DISP_CLK, 0x0470, 0x015C, 6, 0x0000, 0), /* MX6Q_PAD_DI0_DISP_CLK__MMDC_DEBUG_0 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN15, 0x0474, 0x0160, 0, 0x0000, 0), /* MX6Q_PAD_DI0_PIN15__IPU1_DI0_PIN15 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN15, 0x0474, 0x0160, 1, 0x0000, 0), /* MX6Q_PAD_DI0_PIN15__IPU2_DI0_PIN15 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN15, 0x0474, 0x0160, 2, 0x0000, 0), /* MX6Q_PAD_DI0_PIN15__AUDMUX_AUD6_TXC */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN15, 0x0474, 0x0160, 3, 0x0000, 0), /* MX6Q_PAD_DI0_PIN15__MIPI_CR_DPHY_OUT_29 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN15, 0x0474, 0x0160, 4, 0x0000, 0), /* MX6Q_PAD_DI0_PIN15__SDMA_DBG_CORE_STA_1 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN15, 0x0474, 0x0160, 5, 0x0000, 0), /* MX6Q_PAD_DI0_PIN15__GPIO_4_17 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN15, 0x0474, 0x0160, 6, 0x0000, 0), /* MX6Q_PAD_DI0_PIN15__MMDC_MMDC_DEBUG_1 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN2, 0x0478, 0x0164, 0, 0x0000, 0), /* MX6Q_PAD_DI0_PIN2__IPU1_DI0_PIN2 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN2, 0x0478, 0x0164, 1, 0x0000, 0), /* MX6Q_PAD_DI0_PIN2__IPU2_DI0_PIN2 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN2, 0x0478, 0x0164, 2, 0x0000, 0), /* MX6Q_PAD_DI0_PIN2__AUDMUX_AUD6_TXD */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN2, 0x0478, 0x0164, 3, 0x0000, 0), /* MX6Q_PAD_DI0_PIN2__MIPI_CR_DPHY_OUT_30 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN2, 0x0478, 0x0164, 4, 0x0000, 0), /* MX6Q_PAD_DI0_PIN2__SDMA_DBG_CORE_STA_2 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN2, 0x0478, 0x0164, 5, 0x0000, 0), /* MX6Q_PAD_DI0_PIN2__GPIO_4_18 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN2, 0x0478, 0x0164, 6, 0x0000, 0), /* MX6Q_PAD_DI0_PIN2__MMDC_DEBUG_2 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN2, 0x0478, 0x0164, 7, 0x0000, 0), /* MX6Q_PAD_DI0_PIN2__PL301_PER1_HADDR_9 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN3, 0x047C, 0x0168, 0, 0x0000, 0), /* MX6Q_PAD_DI0_PIN3__IPU1_DI0_PIN3 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN3, 0x047C, 0x0168, 1, 0x0000, 0), /* MX6Q_PAD_DI0_PIN3__IPU2_DI0_PIN3 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN3, 0x047C, 0x0168, 2, 0x0000, 0), /* MX6Q_PAD_DI0_PIN3__AUDMUX_AUD6_TXFS */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN3, 0x047C, 0x0168, 3, 0x0000, 0), /* MX6Q_PAD_DI0_PIN3__MIPI_CORE_DPHY_OUT31 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN3, 0x047C, 0x0168, 4, 0x0000, 0), /* MX6Q_PAD_DI0_PIN3__SDMA_DBG_CORE_STA_3 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN3, 0x047C, 0x0168, 5, 0x0000, 0), /* MX6Q_PAD_DI0_PIN3__GPIO_4_19 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN3, 0x047C, 0x0168, 6, 0x0000, 0), /* MX6Q_PAD_DI0_PIN3__MMDC_MMDC_DEBUG_3 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN3, 0x047C, 0x0168, 7, 0x0000, 0), /* MX6Q_PAD_DI0_PIN3__PL301_PER1_HADDR_10 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN4, 0x0480, 0x016C, 0, 0x0000, 0), /* MX6Q_PAD_DI0_PIN4__IPU1_DI0_PIN4 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN4, 0x0480, 0x016C, 1, 0x0000, 0), /* MX6Q_PAD_DI0_PIN4__IPU2_DI0_PIN4 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN4, 0x0480, 0x016C, 2, 0x0000, 0), /* MX6Q_PAD_DI0_PIN4__AUDMUX_AUD6_RXD */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN4, 0x0480, 0x016C, 3, 0x094C, 0), /* MX6Q_PAD_DI0_PIN4__USDHC1_WP */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN4, 0x0480, 0x016C, 4, 0x0000, 0), /* MX6Q_PAD_DI0_PIN4__SDMA_DEBUG_YIELD */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN4, 0x0480, 0x016C, 5, 0x0000, 0), /* MX6Q_PAD_DI0_PIN4__GPIO_4_20 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN4, 0x0480, 0x016C, 6, 0x0000, 0), /* MX6Q_PAD_DI0_PIN4__MMDC_MMDC_DEBUG_4 */
- IMX_PIN_REG(MX6Q_PAD_DI0_PIN4, 0x0480, 0x016C, 7, 0x0000, 0), /* MX6Q_PAD_DI0_PIN4__PL301_PER1_HADDR_11 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT0, 0x0484, 0x0170, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT0__IPU1_DISP0_DAT_0 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT0, 0x0484, 0x0170, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT0__IPU2_DISP0_DAT_0 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT0, 0x0484, 0x0170, 2, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT0__ECSPI3_SCLK */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT0, 0x0484, 0x0170, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT0__USDHC1_USDHC_DBG_0 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT0, 0x0484, 0x0170, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT0__SDMA_DBG_CORE_RUN */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT0, 0x0484, 0x0170, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT0__GPIO_4_21 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT0, 0x0484, 0x0170, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT0__MMDC_MMDC_DEBUG_5 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT1, 0x0488, 0x0174, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT1__IPU1_DISP0_DAT_1 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT1, 0x0488, 0x0174, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT1__IPU2_DISP0_DAT_1 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT1, 0x0488, 0x0174, 2, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT1__ECSPI3_MOSI */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT1, 0x0488, 0x0174, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT1__USDHC1_USDHC_DBG_1 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT1, 0x0488, 0x0174, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT1__SDMA_DBG_EVT_CHNSL */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT1, 0x0488, 0x0174, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT1__GPIO_4_22 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT1, 0x0488, 0x0174, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT1__MMDC_DEBUG_6 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT1, 0x0488, 0x0174, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT1__PL301_PER1_HADR_12 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT2, 0x048C, 0x0178, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT2__IPU1_DISP0_DAT_2 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT2, 0x048C, 0x0178, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT2__IPU2_DISP0_DAT_2 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT2, 0x048C, 0x0178, 2, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT2__ECSPI3_MISO */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT2, 0x048C, 0x0178, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT2__USDHC1_USDHC_DBG_2 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT2, 0x048C, 0x0178, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT2__SDMA_DEBUG_MODE */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT2, 0x048C, 0x0178, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT2__GPIO_4_23 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT2, 0x048C, 0x0178, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT2__MMDC_DEBUG_7 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT2, 0x048C, 0x0178, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT2__PL301_PER1_HADR_13 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT3, 0x0490, 0x017C, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT3__IPU1_DISP0_DAT_3 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT3, 0x0490, 0x017C, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT3__IPU2_DISP0_DAT_3 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT3, 0x0490, 0x017C, 2, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT3__ECSPI3_SS0 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT3, 0x0490, 0x017C, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT3__USDHC1_USDHC_DBG_3 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT3, 0x0490, 0x017C, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT3__SDMA_DBG_BUS_ERROR */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT3, 0x0490, 0x017C, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT3__GPIO_4_24 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT3, 0x0490, 0x017C, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT3__MMDC_MMDC_DBG_8 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT3, 0x0490, 0x017C, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT3__PL301_PER1_HADR_14 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT4, 0x0494, 0x0180, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT4__IPU1_DISP0_DAT_4 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT4, 0x0494, 0x0180, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT4__IPU2_DISP0_DAT_4 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT4, 0x0494, 0x0180, 2, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT4__ECSPI3_SS1 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT4, 0x0494, 0x0180, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT4__USDHC1_USDHC_DBG_4 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT4, 0x0494, 0x0180, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT4__SDMA_DEBUG_BUS_RWB */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT4, 0x0494, 0x0180, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT4__GPIO_4_25 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT4, 0x0494, 0x0180, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT4__MMDC_MMDC_DEBUG_9 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT4, 0x0494, 0x0180, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT4__PL301_PER1_HADR_15 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT5, 0x0498, 0x0184, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT5__IPU1_DISP0_DAT_5 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT5, 0x0498, 0x0184, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT5__IPU2_DISP0_DAT_5 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT5, 0x0498, 0x0184, 2, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT5__ECSPI3_SS2 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT5, 0x0498, 0x0184, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT5__AUDMUX_AUD6_RXFS */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT5, 0x0498, 0x0184, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT5__SDMA_DBG_MCH_DMBUS */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT5, 0x0498, 0x0184, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT5__GPIO_4_26 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT5, 0x0498, 0x0184, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT5__MMDC_DEBUG_10 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT5, 0x0498, 0x0184, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT5__PL301_PER1_HADR_16 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT6, 0x049C, 0x0188, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT6__IPU1_DISP0_DAT_6 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT6, 0x049C, 0x0188, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT6__IPU2_DISP0_DAT_6 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT6, 0x049C, 0x0188, 2, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT6__ECSPI3_SS3 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT6, 0x049C, 0x0188, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT6__AUDMUX_AUD6_RXC */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT6, 0x049C, 0x0188, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT6__SDMA_DBG_RTBUF_WRT */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT6, 0x049C, 0x0188, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT6__GPIO_4_27 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT6, 0x049C, 0x0188, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT6__MMDC_DEBUG_11 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT6, 0x049C, 0x0188, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT6__PL301_PER1_HADR_17 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT7, 0x04A0, 0x018C, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT7__IPU1_DISP0_DAT_7 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT7, 0x04A0, 0x018C, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT7__IPU2_DISP0_DAT_7 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT7, 0x04A0, 0x018C, 2, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT7__ECSPI3_RDY */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT7, 0x04A0, 0x018C, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT7__USDHC1_USDHC_DBG_5 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT7, 0x04A0, 0x018C, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT7__SDMA_DBG_EVT_CHN_0 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT7, 0x04A0, 0x018C, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT7__GPIO_4_28 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT7, 0x04A0, 0x018C, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT7__MMDC_DEBUG_12 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT7, 0x04A0, 0x018C, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT7__PL301_PER1_HADR_18 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT8, 0x04A4, 0x0190, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT8__IPU1_DISP0_DAT_8 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT8, 0x04A4, 0x0190, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT8__IPU2_DISP0_DAT_8 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT8, 0x04A4, 0x0190, 2, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT8__PWM1_PWMO */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT8, 0x04A4, 0x0190, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT8__WDOG1_WDOG_B */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT8, 0x04A4, 0x0190, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT8__SDMA_DBG_EVT_CHN_1 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT8, 0x04A4, 0x0190, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT8__GPIO_4_29 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT8, 0x04A4, 0x0190, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT8__MMDC_DEBUG_13 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT8, 0x04A4, 0x0190, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT8__PL301_PER1_HADR_19 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT9, 0x04A8, 0x0194, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT9__IPU1_DISP0_DAT_9 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT9, 0x04A8, 0x0194, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT9__IPU2_DISP0_DAT_9 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT9, 0x04A8, 0x0194, 2, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT9__PWM2_PWMO */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT9, 0x04A8, 0x0194, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT9__WDOG2_WDOG_B */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT9, 0x04A8, 0x0194, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT9__SDMA_DBG_EVT_CHN_2 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT9, 0x04A8, 0x0194, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT9__GPIO_4_30 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT9, 0x04A8, 0x0194, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT9__MMDC_DEBUG_14 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT9, 0x04A8, 0x0194, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT9__PL301_PER1_HADR_20 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT10, 0x04AC, 0x0198, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT10__IPU1_DISP0_DAT_10 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT10, 0x04AC, 0x0198, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT10__IPU2_DISP0_DAT_10 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT10, 0x04AC, 0x0198, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT10__USDHC1_DBG_6 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT10, 0x04AC, 0x0198, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT10__SDMA_DBG_EVT_CHN3 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT10, 0x04AC, 0x0198, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT10__GPIO_4_31 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT10, 0x04AC, 0x0198, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT10__MMDC_DEBUG_15 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT10, 0x04AC, 0x0198, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT10__PL301_PER1_HADR21 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT11, 0x04B0, 0x019C, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT11__IPU1_DISP0_DAT_11 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT11, 0x04B0, 0x019C, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT11__IPU2_DISP0_DAT_11 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT11, 0x04B0, 0x019C, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT11__USDHC1_USDHC_DBG7 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT11, 0x04B0, 0x019C, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT11__SDMA_DBG_EVT_CHN4 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT11, 0x04B0, 0x019C, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT11__GPIO_5_5 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT11, 0x04B0, 0x019C, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT11__MMDC_DEBUG_16 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT11, 0x04B0, 0x019C, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT11__PL301_PER1_HADR22 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT12, 0x04B4, 0x01A0, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT12__IPU1_DISP0_DAT_12 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT12, 0x04B4, 0x01A0, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT12__IPU2_DISP0_DAT_12 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT12, 0x04B4, 0x01A0, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT12__RESERVED_RESERVED */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT12, 0x04B4, 0x01A0, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT12__SDMA_DBG_EVT_CHN5 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT12, 0x04B4, 0x01A0, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT12__GPIO_5_6 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT12, 0x04B4, 0x01A0, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT12__MMDC_DEBUG_17 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT12, 0x04B4, 0x01A0, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT12__PL301_PER1_HADR23 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT13, 0x04B8, 0x01A4, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT13__IPU1_DISP0_DAT_13 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT13, 0x04B8, 0x01A4, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT13__IPU2_DISP0_DAT_13 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT13, 0x04B8, 0x01A4, 3, 0x07D8, 1), /* MX6Q_PAD_DISP0_DAT13__AUDMUX_AUD5_RXFS */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT13, 0x04B8, 0x01A4, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT13__SDMA_DBG_EVT_CHN0 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT13, 0x04B8, 0x01A4, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT13__GPIO_5_7 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT13, 0x04B8, 0x01A4, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT13__MMDC_DEBUG_18 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT13, 0x04B8, 0x01A4, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT13__PL301_PER1_HADR24 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT14, 0x04BC, 0x01A8, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT14__IPU1_DISP0_DAT_14 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT14, 0x04BC, 0x01A8, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT14__IPU2_DISP0_DAT_14 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT14, 0x04BC, 0x01A8, 3, 0x07D4, 1), /* MX6Q_PAD_DISP0_DAT14__AUDMUX_AUD5_RXC */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT14, 0x04BC, 0x01A8, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT14__SDMA_DBG_EVT_CHN1 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT14, 0x04BC, 0x01A8, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT14__GPIO_5_8 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT14, 0x04BC, 0x01A8, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT14__MMDC_DEBUG_19 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT15, 0x04C0, 0x01AC, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT15__IPU1_DISP0_DAT_15 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT15, 0x04C0, 0x01AC, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT15__IPU2_DISP0_DAT_15 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT15, 0x04C0, 0x01AC, 2, 0x0804, 1), /* MX6Q_PAD_DISP0_DAT15__ECSPI1_SS1 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT15, 0x04C0, 0x01AC, 3, 0x0820, 1), /* MX6Q_PAD_DISP0_DAT15__ECSPI2_SS1 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT15, 0x04C0, 0x01AC, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT15__SDMA_DBG_EVT_CHN2 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT15, 0x04C0, 0x01AC, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT15__GPIO_5_9 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT15, 0x04C0, 0x01AC, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT15__MMDC_DEBUG_20 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT15, 0x04C0, 0x01AC, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT15__PL301_PER1_HADR25 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT16, 0x04C4, 0x01B0, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT16__IPU1_DISP0_DAT_16 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT16, 0x04C4, 0x01B0, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT16__IPU2_DISP0_DAT_16 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT16, 0x04C4, 0x01B0, 2, 0x0818, 1), /* MX6Q_PAD_DISP0_DAT16__ECSPI2_MOSI */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT16, 0x04C4, 0x01B0, 3, 0x07DC, 0), /* MX6Q_PAD_DISP0_DAT16__AUDMUX_AUD5_TXC */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT16, 0x04C4, 0x01B0, 4, 0x090C, 0), /* MX6Q_PAD_DISP0_DAT16__SDMA_EXT_EVENT_0 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT16, 0x04C4, 0x01B0, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT16__GPIO_5_10 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT16, 0x04C4, 0x01B0, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT16__MMDC_DEBUG_21 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT16, 0x04C4, 0x01B0, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT16__PL301_PER1_HADR26 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT17, 0x04C8, 0x01B4, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT17__IPU1_DISP0_DAT_17 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT17, 0x04C8, 0x01B4, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT17__IPU2_DISP0_DAT_17 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT17, 0x04C8, 0x01B4, 2, 0x0814, 1), /* MX6Q_PAD_DISP0_DAT17__ECSPI2_MISO */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT17, 0x04C8, 0x01B4, 3, 0x07D0, 0), /* MX6Q_PAD_DISP0_DAT17__AUDMUX_AUD5_TXD */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT17, 0x04C8, 0x01B4, 4, 0x0910, 0), /* MX6Q_PAD_DISP0_DAT17__SDMA_EXT_EVENT_1 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT17, 0x04C8, 0x01B4, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT17__GPIO_5_11 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT17, 0x04C8, 0x01B4, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT17__MMDC_DEBUG_22 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT17, 0x04C8, 0x01B4, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT17__PL301_PER1_HADR27 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT18, 0x04CC, 0x01B8, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT18__IPU1_DISP0_DAT_18 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT18, 0x04CC, 0x01B8, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT18__IPU2_DISP0_DAT_18 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT18, 0x04CC, 0x01B8, 2, 0x081C, 1), /* MX6Q_PAD_DISP0_DAT18__ECSPI2_SS0 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT18, 0x04CC, 0x01B8, 3, 0x07E0, 0), /* MX6Q_PAD_DISP0_DAT18__AUDMUX_AUD5_TXFS */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT18, 0x04CC, 0x01B8, 4, 0x07C0, 0), /* MX6Q_PAD_DISP0_DAT18__AUDMUX_AUD4_RXFS */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT18, 0x04CC, 0x01B8, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT18__GPIO_5_12 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT18, 0x04CC, 0x01B8, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT18__MMDC_DEBUG_23 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT18, 0x04CC, 0x01B8, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT18__WEIM_WEIM_CS_2 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT19, 0x04D0, 0x01BC, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT19__IPU1_DISP0_DAT_19 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT19, 0x04D0, 0x01BC, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT19__IPU2_DISP0_DAT_19 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT19, 0x04D0, 0x01BC, 2, 0x0810, 1), /* MX6Q_PAD_DISP0_DAT19__ECSPI2_SCLK */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT19, 0x04D0, 0x01BC, 3, 0x07CC, 0), /* MX6Q_PAD_DISP0_DAT19__AUDMUX_AUD5_RXD */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT19, 0x04D0, 0x01BC, 4, 0x07BC, 0), /* MX6Q_PAD_DISP0_DAT19__AUDMUX_AUD4_RXC */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT19, 0x04D0, 0x01BC, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT19__GPIO_5_13 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT19, 0x04D0, 0x01BC, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT19__MMDC_DEBUG_24 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT19, 0x04D0, 0x01BC, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT19__WEIM_WEIM_CS_3 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT20, 0x04D4, 0x01C0, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT20__IPU1_DISP0_DAT_20 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT20, 0x04D4, 0x01C0, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT20__IPU2_DISP0_DAT_20 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT20, 0x04D4, 0x01C0, 2, 0x07F4, 1), /* MX6Q_PAD_DISP0_DAT20__ECSPI1_SCLK */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT20, 0x04D4, 0x01C0, 3, 0x07C4, 0), /* MX6Q_PAD_DISP0_DAT20__AUDMUX_AUD4_TXC */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT20, 0x04D4, 0x01C0, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT20__SDMA_DBG_EVT_CHN7 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT20, 0x04D4, 0x01C0, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT20__GPIO_5_14 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT20, 0x04D4, 0x01C0, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT20__MMDC_DEBUG_25 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT20, 0x04D4, 0x01C0, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT20__PL301_PER1_HADR28 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT21, 0x04D8, 0x01C4, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT21__IPU1_DISP0_DAT_21 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT21, 0x04D8, 0x01C4, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT21__IPU2_DISP0_DAT_21 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT21, 0x04D8, 0x01C4, 2, 0x07FC, 1), /* MX6Q_PAD_DISP0_DAT21__ECSPI1_MOSI */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT21, 0x04D8, 0x01C4, 3, 0x07B8, 1), /* MX6Q_PAD_DISP0_DAT21__AUDMUX_AUD4_TXD */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT21, 0x04D8, 0x01C4, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT21__SDMA_DBG_BUS_DEV0 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT21, 0x04D8, 0x01C4, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT21__GPIO_5_15 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT21, 0x04D8, 0x01C4, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT21__MMDC_DEBUG_26 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT21, 0x04D8, 0x01C4, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT21__PL301_PER1_HADR29 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT22, 0x04DC, 0x01C8, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT22__IPU1_DISP0_DAT_22 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT22, 0x04DC, 0x01C8, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT22__IPU2_DISP0_DAT_22 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT22, 0x04DC, 0x01C8, 2, 0x07F8, 1), /* MX6Q_PAD_DISP0_DAT22__ECSPI1_MISO */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT22, 0x04DC, 0x01C8, 3, 0x07C8, 1), /* MX6Q_PAD_DISP0_DAT22__AUDMUX_AUD4_TXFS */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT22, 0x04DC, 0x01C8, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT22__SDMA_DBG_BUS_DEV1 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT22, 0x04DC, 0x01C8, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT22__GPIO_5_16 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT22, 0x04DC, 0x01C8, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT22__MMDC_DEBUG_27 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT22, 0x04DC, 0x01C8, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT22__PL301_PER1_HADR30 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT23, 0x04E0, 0x01CC, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT23__IPU1_DISP0_DAT_23 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT23, 0x04E0, 0x01CC, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT23__IPU2_DISP0_DAT_23 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT23, 0x04E0, 0x01CC, 2, 0x0800, 1), /* MX6Q_PAD_DISP0_DAT23__ECSPI1_SS0 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT23, 0x04E0, 0x01CC, 3, 0x07B4, 1), /* MX6Q_PAD_DISP0_DAT23__AUDMUX_AUD4_RXD */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT23, 0x04E0, 0x01CC, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT23__SDMA_DBG_BUS_DEV2 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT23, 0x04E0, 0x01CC, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT23__GPIO_5_17 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT23, 0x04E0, 0x01CC, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT23__MMDC_DEBUG_28 */
- IMX_PIN_REG(MX6Q_PAD_DISP0_DAT23, 0x04E0, 0x01CC, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT23__PL301_PER1_HADR31 */
- IMX_PIN_REG(MX6Q_PAD_ENET_MDIO, 0x04E4, 0x01D0, 0, 0x0000, 0), /* MX6Q_PAD_ENET_MDIO__RESERVED_RESERVED */
- IMX_PIN_REG(MX6Q_PAD_ENET_MDIO, 0x04E4, 0x01D0, 1, 0x0840, 0), /* MX6Q_PAD_ENET_MDIO__ENET_MDIO */
- IMX_PIN_REG(MX6Q_PAD_ENET_MDIO, 0x04E4, 0x01D0, 2, 0x086C, 0), /* MX6Q_PAD_ENET_MDIO__ESAI1_SCKR */
- IMX_PIN_REG(MX6Q_PAD_ENET_MDIO, 0x04E4, 0x01D0, 3, 0x0000, 0), /* MX6Q_PAD_ENET_MDIO__SDMA_DEBUG_BUS_DEV3 */
- IMX_PIN_REG(MX6Q_PAD_ENET_MDIO, 0x04E4, 0x01D0, 4, 0x0000, 0), /* MX6Q_PAD_ENET_MDIO__ENET_1588_EVT1_OUT */
- IMX_PIN_REG(MX6Q_PAD_ENET_MDIO, 0x04E4, 0x01D0, 5, 0x0000, 0), /* MX6Q_PAD_ENET_MDIO__GPIO_1_22 */
- IMX_PIN_REG(MX6Q_PAD_ENET_MDIO, 0x04E4, 0x01D0, 6, 0x0000, 0), /* MX6Q_PAD_ENET_MDIO__SPDIF_PLOCK */
- IMX_PIN_REG(MX6Q_PAD_ENET_REF_CLK, 0x04E8, 0x01D4, 0, 0x0000, 0), /* MX6Q_PAD_ENET_REF_CLK__RESERVED_RSRVED */
- IMX_PIN_REG(MX6Q_PAD_ENET_REF_CLK, 0x04E8, 0x01D4, 1, 0x0000, 0), /* MX6Q_PAD_ENET_REF_CLK__ENET_TX_CLK */
- IMX_PIN_REG(MX6Q_PAD_ENET_REF_CLK, 0x04E8, 0x01D4, 2, 0x085C, 0), /* MX6Q_PAD_ENET_REF_CLK__ESAI1_FSR */
- IMX_PIN_REG(MX6Q_PAD_ENET_REF_CLK, 0x04E8, 0x01D4, 3, 0x0000, 0), /* MX6Q_PAD_ENET_REF_CLK__SDMA_DBGBUS_DEV4 */
- IMX_PIN_REG(MX6Q_PAD_ENET_REF_CLK, 0x04E8, 0x01D4, 5, 0x0000, 0), /* MX6Q_PAD_ENET_REF_CLK__GPIO_1_23 */
- IMX_PIN_REG(MX6Q_PAD_ENET_REF_CLK, 0x04E8, 0x01D4, 6, 0x0000, 0), /* MX6Q_PAD_ENET_REF_CLK__SPDIF_SRCLK */
- IMX_PIN_REG(MX6Q_PAD_ENET_REF_CLK, 0x04E8, 0x01D4, 7, 0x0000, 0), /* MX6Q_PAD_ENET_REF_CLK__USBPHY1_RX_SQH */
- IMX_PIN_REG(MX6Q_PAD_ENET_RX_ER, 0x04EC, 0x01D8, 1, 0x0000, 0), /* MX6Q_PAD_ENET_RX_ER__ENET_RX_ER */
- IMX_PIN_REG(MX6Q_PAD_ENET_RX_ER, 0x04EC, 0x01D8, 2, 0x0864, 0), /* MX6Q_PAD_ENET_RX_ER__ESAI1_HCKR */
- IMX_PIN_REG(MX6Q_PAD_ENET_RX_ER, 0x04EC, 0x01D8, 3, 0x0914, 1), /* MX6Q_PAD_ENET_RX_ER__SPDIF_IN1 */
- IMX_PIN_REG(MX6Q_PAD_ENET_RX_ER, 0x04EC, 0x01D8, 4, 0x0000, 0), /* MX6Q_PAD_ENET_RX_ER__ENET_1588_EVT2_OUT */
- IMX_PIN_REG(MX6Q_PAD_ENET_RX_ER, 0x04EC, 0x01D8, 5, 0x0000, 0), /* MX6Q_PAD_ENET_RX_ER__GPIO_1_24 */
- IMX_PIN_REG(MX6Q_PAD_ENET_RX_ER, 0x04EC, 0x01D8, 6, 0x0000, 0), /* MX6Q_PAD_ENET_RX_ER__PHY_TDI */
- IMX_PIN_REG(MX6Q_PAD_ENET_RX_ER, 0x04EC, 0x01D8, 7, 0x0000, 0), /* MX6Q_PAD_ENET_RX_ER__USBPHY1_RX_HS_RXD */
- IMX_PIN_REG(MX6Q_PAD_ENET_CRS_DV, 0x04F0, 0x01DC, 0, 0x0000, 0), /* MX6Q_PAD_ENET_CRS_DV__RESERVED_RSRVED */
- IMX_PIN_REG(MX6Q_PAD_ENET_CRS_DV, 0x04F0, 0x01DC, 1, 0x0858, 1), /* MX6Q_PAD_ENET_CRS_DV__ENET_RX_EN */
- IMX_PIN_REG(MX6Q_PAD_ENET_CRS_DV, 0x04F0, 0x01DC, 2, 0x0870, 0), /* MX6Q_PAD_ENET_CRS_DV__ESAI1_SCKT */
- IMX_PIN_REG(MX6Q_PAD_ENET_CRS_DV, 0x04F0, 0x01DC, 3, 0x0918, 1), /* MX6Q_PAD_ENET_CRS_DV__SPDIF_EXTCLK */
- IMX_PIN_REG(MX6Q_PAD_ENET_CRS_DV, 0x04F0, 0x01DC, 5, 0x0000, 0), /* MX6Q_PAD_ENET_CRS_DV__GPIO_1_25 */
- IMX_PIN_REG(MX6Q_PAD_ENET_CRS_DV, 0x04F0, 0x01DC, 6, 0x0000, 0), /* MX6Q_PAD_ENET_CRS_DV__PHY_TDO */
- IMX_PIN_REG(MX6Q_PAD_ENET_CRS_DV, 0x04F0, 0x01DC, 7, 0x0000, 0), /* MX6Q_PAD_ENET_CRS_DV__USBPHY1_RX_FS_RXD */
- IMX_PIN_REG(MX6Q_PAD_ENET_RXD1, 0x04F4, 0x01E0, 0, 0x0908, 0), /* MX6Q_PAD_ENET_RXD1__MLB_MLBSIG */
- IMX_PIN_REG(MX6Q_PAD_ENET_RXD1, 0x04F4, 0x01E0, 1, 0x084C, 1), /* MX6Q_PAD_ENET_RXD1__ENET_RDATA_1 */
- IMX_PIN_REG(MX6Q_PAD_ENET_RXD1, 0x04F4, 0x01E0, 2, 0x0860, 0), /* MX6Q_PAD_ENET_RXD1__ESAI1_FST */
- IMX_PIN_REG(MX6Q_PAD_ENET_RXD1, 0x04F4, 0x01E0, 4, 0x0000, 0), /* MX6Q_PAD_ENET_RXD1__ENET_1588_EVT3_OUT */
- IMX_PIN_REG(MX6Q_PAD_ENET_RXD1, 0x04F4, 0x01E0, 5, 0x0000, 0), /* MX6Q_PAD_ENET_RXD1__GPIO_1_26 */
- IMX_PIN_REG(MX6Q_PAD_ENET_RXD1, 0x04F4, 0x01E0, 6, 0x0000, 0), /* MX6Q_PAD_ENET_RXD1__PHY_TCK */
- IMX_PIN_REG(MX6Q_PAD_ENET_RXD1, 0x04F4, 0x01E0, 7, 0x0000, 0), /* MX6Q_PAD_ENET_RXD1__USBPHY1_RX_DISCON */
- IMX_PIN_REG(MX6Q_PAD_ENET_RXD0, 0x04F8, 0x01E4, 0, 0x0000, 0), /* MX6Q_PAD_ENET_RXD0__OSC32K_32K_OUT */
- IMX_PIN_REG(MX6Q_PAD_ENET_RXD0, 0x04F8, 0x01E4, 1, 0x0848, 1), /* MX6Q_PAD_ENET_RXD0__ENET_RDATA_0 */
- IMX_PIN_REG(MX6Q_PAD_ENET_RXD0, 0x04F8, 0x01E4, 2, 0x0868, 0), /* MX6Q_PAD_ENET_RXD0__ESAI1_HCKT */
- IMX_PIN_REG(MX6Q_PAD_ENET_RXD0, 0x04F8, 0x01E4, 3, 0x0000, 0), /* MX6Q_PAD_ENET_RXD0__SPDIF_OUT1 */
- IMX_PIN_REG(MX6Q_PAD_ENET_RXD0, 0x04F8, 0x01E4, 5, 0x0000, 0), /* MX6Q_PAD_ENET_RXD0__GPIO_1_27 */
- IMX_PIN_REG(MX6Q_PAD_ENET_RXD0, 0x04F8, 0x01E4, 6, 0x0000, 0), /* MX6Q_PAD_ENET_RXD0__PHY_TMS */
- IMX_PIN_REG(MX6Q_PAD_ENET_RXD0, 0x04F8, 0x01E4, 7, 0x0000, 0), /* MX6Q_PAD_ENET_RXD0__USBPHY1_PLL_CK20DIV */
- IMX_PIN_REG(MX6Q_PAD_ENET_TX_EN, 0x04FC, 0x01E8, 0, 0x0000, 0), /* MX6Q_PAD_ENET_TX_EN__RESERVED_RSRVED */
- IMX_PIN_REG(MX6Q_PAD_ENET_TX_EN, 0x04FC, 0x01E8, 1, 0x0000, 0), /* MX6Q_PAD_ENET_TX_EN__ENET_TX_EN */
- IMX_PIN_REG(MX6Q_PAD_ENET_TX_EN, 0x04FC, 0x01E8, 2, 0x0880, 0), /* MX6Q_PAD_ENET_TX_EN__ESAI1_TX3_RX2 */
- IMX_PIN_REG(MX6Q_PAD_ENET_TX_EN, 0x04FC, 0x01E8, 5, 0x0000, 0), /* MX6Q_PAD_ENET_TX_EN__GPIO_1_28 */
- IMX_PIN_REG(MX6Q_PAD_ENET_TX_EN, 0x04FC, 0x01E8, 6, 0x0000, 0), /* MX6Q_PAD_ENET_TX_EN__SATA_PHY_TDI */
- IMX_PIN_REG(MX6Q_PAD_ENET_TX_EN, 0x04FC, 0x01E8, 7, 0x0000, 0), /* MX6Q_PAD_ENET_TX_EN__USBPHY2_RX_SQH */
- IMX_PIN_REG(MX6Q_PAD_ENET_TXD1, 0x0500, 0x01EC, 0, 0x0900, 0), /* MX6Q_PAD_ENET_TXD1__MLB_MLBCLK */
- IMX_PIN_REG(MX6Q_PAD_ENET_TXD1, 0x0500, 0x01EC, 1, 0x0000, 0), /* MX6Q_PAD_ENET_TXD1__ENET_TDATA_1 */
- IMX_PIN_REG(MX6Q_PAD_ENET_TXD1, 0x0500, 0x01EC, 2, 0x087C, 0), /* MX6Q_PAD_ENET_TXD1__ESAI1_TX2_RX3 */
- IMX_PIN_REG(MX6Q_PAD_ENET_TXD1, 0x0500, 0x01EC, 4, 0x0000, 0), /* MX6Q_PAD_ENET_TXD1__ENET_1588_EVENT0_IN */
- IMX_PIN_REG(MX6Q_PAD_ENET_TXD1, 0x0500, 0x01EC, 5, 0x0000, 0), /* MX6Q_PAD_ENET_TXD1__GPIO_1_29 */
- IMX_PIN_REG(MX6Q_PAD_ENET_TXD1, 0x0500, 0x01EC, 6, 0x0000, 0), /* MX6Q_PAD_ENET_TXD1__SATA_PHY_TDO */
- IMX_PIN_REG(MX6Q_PAD_ENET_TXD1, 0x0500, 0x01EC, 7, 0x0000, 0), /* MX6Q_PAD_ENET_TXD1__USBPHY2_RX_HS_RXD */
- IMX_PIN_REG(MX6Q_PAD_ENET_TXD0, 0x0504, 0x01F0, 0, 0x0000, 0), /* MX6Q_PAD_ENET_TXD0__RESERVED_RSRVED */
- IMX_PIN_REG(MX6Q_PAD_ENET_TXD0, 0x0504, 0x01F0, 1, 0x0000, 0), /* MX6Q_PAD_ENET_TXD0__ENET_TDATA_0 */
- IMX_PIN_REG(MX6Q_PAD_ENET_TXD0, 0x0504, 0x01F0, 2, 0x0884, 0), /* MX6Q_PAD_ENET_TXD0__ESAI1_TX4_RX1 */
- IMX_PIN_REG(MX6Q_PAD_ENET_TXD0, 0x0504, 0x01F0, 5, 0x0000, 0), /* MX6Q_PAD_ENET_TXD0__GPIO_1_30 */
- IMX_PIN_REG(MX6Q_PAD_ENET_TXD0, 0x0504, 0x01F0, 6, 0x0000, 0), /* MX6Q_PAD_ENET_TXD0__SATA_PHY_TCK */
- IMX_PIN_REG(MX6Q_PAD_ENET_TXD0, 0x0504, 0x01F0, 7, 0x0000, 0), /* MX6Q_PAD_ENET_TXD0__USBPHY2_RX_FS_RXD */
- IMX_PIN_REG(MX6Q_PAD_ENET_MDC, 0x0508, 0x01F4, 0, 0x0904, 0), /* MX6Q_PAD_ENET_MDC__MLB_MLBDAT */
- IMX_PIN_REG(MX6Q_PAD_ENET_MDC, 0x0508, 0x01F4, 1, 0x0000, 0), /* MX6Q_PAD_ENET_MDC__ENET_MDC */
- IMX_PIN_REG(MX6Q_PAD_ENET_MDC, 0x0508, 0x01F4, 2, 0x0888, 0), /* MX6Q_PAD_ENET_MDC__ESAI1_TX5_RX0 */
- IMX_PIN_REG(MX6Q_PAD_ENET_MDC, 0x0508, 0x01F4, 4, 0x0000, 0), /* MX6Q_PAD_ENET_MDC__ENET_1588_EVENT1_IN */
- IMX_PIN_REG(MX6Q_PAD_ENET_MDC, 0x0508, 0x01F4, 5, 0x0000, 0), /* MX6Q_PAD_ENET_MDC__GPIO_1_31 */
- IMX_PIN_REG(MX6Q_PAD_ENET_MDC, 0x0508, 0x01F4, 6, 0x0000, 0), /* MX6Q_PAD_ENET_MDC__SATA_PHY_TMS */
- IMX_PIN_REG(MX6Q_PAD_ENET_MDC, 0x0508, 0x01F4, 7, 0x0000, 0), /* MX6Q_PAD_ENET_MDC__USBPHY2_RX_DISCON */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D40, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D40__MMDC_DRAM_D_40 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D41, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D41__MMDC_DRAM_D_41 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D42, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D42__MMDC_DRAM_D_42 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D43, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D43__MMDC_DRAM_D_43 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D44, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D44__MMDC_DRAM_D_44 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D45, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D45__MMDC_DRAM_D_45 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D46, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D46__MMDC_DRAM_D_46 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D47, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D47__MMDC_DRAM_D_47 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_SDQS5, 0x050C, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDQS5__MMDC_DRAM_SDQS_5 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_DQM5, 0x0510, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_DQM5__MMDC_DRAM_DQM_5 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D32, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D32__MMDC_DRAM_D_32 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D33, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D33__MMDC_DRAM_D_33 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D34, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D34__MMDC_DRAM_D_34 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D35, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D35__MMDC_DRAM_D_35 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D36, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D36__MMDC_DRAM_D_36 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D37, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D37__MMDC_DRAM_D_37 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D38, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D38__MMDC_DRAM_D_38 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D39, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D39__MMDC_DRAM_D_39 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_DQM4, 0x0514, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_DQM4__MMDC_DRAM_DQM_4 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_SDQS4, 0x0518, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDQS4__MMDC_DRAM_SDQS_4 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D24, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D24__MMDC_DRAM_D_24 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D25, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D25__MMDC_DRAM_D_25 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D26, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D26__MMDC_DRAM_D_26 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D27, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D27__MMDC_DRAM_D_27 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D28, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D28__MMDC_DRAM_D_28 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D29, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D29__MMDC_DRAM_D_29 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_SDQS3, 0x051C, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDQS3__MMDC_DRAM_SDQS_3 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D30, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D30__MMDC_DRAM_D_30 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D31, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D31__MMDC_DRAM_D_31 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_DQM3, 0x0520, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_DQM3__MMDC_DRAM_DQM_3 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D16, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D16__MMDC_DRAM_D_16 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D17, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D17__MMDC_DRAM_D_17 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D18, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D18__MMDC_DRAM_D_18 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D19, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D19__MMDC_DRAM_D_19 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D20, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D20__MMDC_DRAM_D_20 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D21, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D21__MMDC_DRAM_D_21 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D22, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D22__MMDC_DRAM_D_22 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_SDQS2, 0x0524, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDQS2__MMDC_DRAM_SDQS_2 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D23, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D23__MMDC_DRAM_D_23 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_DQM2, 0x0528, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_DQM2__MMDC_DRAM_DQM_2 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_A0, 0x052C, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A0__MMDC_DRAM_A_0 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_A1, 0x0530, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A1__MMDC_DRAM_A_1 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_A2, 0x0534, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A2__MMDC_DRAM_A_2 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_A3, 0x0538, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A3__MMDC_DRAM_A_3 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_A4, 0x053C, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A4__MMDC_DRAM_A_4 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_A5, 0x0540, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A5__MMDC_DRAM_A_5 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_A6, 0x0544, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A6__MMDC_DRAM_A_6 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_A7, 0x0548, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A7__MMDC_DRAM_A_7 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_A8, 0x054C, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A8__MMDC_DRAM_A_8 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_A9, 0x0550, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A9__MMDC_DRAM_A_9 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_A10, 0x0554, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A10__MMDC_DRAM_A_10 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_A11, 0x0558, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A11__MMDC_DRAM_A_11 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_A12, 0x055C, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A12__MMDC_DRAM_A_12 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_A13, 0x0560, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A13__MMDC_DRAM_A_13 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_A14, 0x0564, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A14__MMDC_DRAM_A_14 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_A15, 0x0568, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A15__MMDC_DRAM_A_15 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_CAS, 0x056C, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_CAS__MMDC_DRAM_CAS */
- IMX_PIN_REG(MX6Q_PAD_DRAM_CS0, 0x0570, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_CS0__MMDC_DRAM_CS_0 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_CS1, 0x0574, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_CS1__MMDC_DRAM_CS_1 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_RAS, 0x0578, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_RAS__MMDC_DRAM_RAS */
- IMX_PIN_REG(MX6Q_PAD_DRAM_RESET, 0x057C, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_RESET__MMDC_DRAM_RESET */
- IMX_PIN_REG(MX6Q_PAD_DRAM_SDBA0, 0x0580, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDBA0__MMDC_DRAM_SDBA_0 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_SDBA1, 0x0584, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDBA1__MMDC_DRAM_SDBA_1 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_SDCLK_0, 0x0588, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDCLK_0__MMDC_DRAM_SDCLK0 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_SDBA2, 0x058C, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDBA2__MMDC_DRAM_SDBA_2 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_SDCKE0, 0x0590, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDCKE0__MMDC_DRAM_SDCKE_0 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_SDCLK_1, 0x0594, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDCLK_1__MMDC_DRAM_SDCLK1 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_SDCKE1, 0x0598, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDCKE1__MMDC_DRAM_SDCKE_1 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_SDODT0, 0x059C, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDODT0__MMDC_DRAM_ODT_0 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_SDODT1, 0x05A0, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDODT1__MMDC_DRAM_ODT_1 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_SDWE, 0x05A4, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDWE__MMDC_DRAM_SDWE */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D0, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D0__MMDC_DRAM_D_0 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D1, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D1__MMDC_DRAM_D_1 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D2, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D2__MMDC_DRAM_D_2 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D3, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D3__MMDC_DRAM_D_3 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D4, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D4__MMDC_DRAM_D_4 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D5, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D5__MMDC_DRAM_D_5 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_SDQS0, 0x05A8, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDQS0__MMDC_DRAM_SDQS_0 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D6, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D6__MMDC_DRAM_D_6 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D7, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D7__MMDC_DRAM_D_7 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_DQM0, 0x05AC, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_DQM0__MMDC_DRAM_DQM_0 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D8, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D8__MMDC_DRAM_D_8 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D9, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D9__MMDC_DRAM_D_9 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D10, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D10__MMDC_DRAM_D_10 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D11, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D11__MMDC_DRAM_D_11 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D12, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D12__MMDC_DRAM_D_12 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D13, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D13__MMDC_DRAM_D_13 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D14, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D14__MMDC_DRAM_D_14 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_SDQS1, 0x05B0, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDQS1__MMDC_DRAM_SDQS_1 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D15, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D15__MMDC_DRAM_D_15 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_DQM1, 0x05B4, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_DQM1__MMDC_DRAM_DQM_1 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D48, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D48__MMDC_DRAM_D_48 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D49, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D49__MMDC_DRAM_D_49 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D50, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D50__MMDC_DRAM_D_50 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D51, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D51__MMDC_DRAM_D_51 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D52, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D52__MMDC_DRAM_D_52 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D53, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D53__MMDC_DRAM_D_53 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D54, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D54__MMDC_DRAM_D_54 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D55, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D55__MMDC_DRAM_D_55 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_SDQS6, 0x05B8, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDQS6__MMDC_DRAM_SDQS_6 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_DQM6, 0x05BC, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_DQM6__MMDC_DRAM_DQM_6 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D56, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D56__MMDC_DRAM_D_56 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_SDQS7, 0x05C0, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDQS7__MMDC_DRAM_SDQS_7 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D57, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D57__MMDC_DRAM_D_57 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D58, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D58__MMDC_DRAM_D_58 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D59, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D59__MMDC_DRAM_D_59 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D60, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D60__MMDC_DRAM_D_60 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_DQM7, 0x05C4, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_DQM7__MMDC_DRAM_DQM_7 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D61, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D61__MMDC_DRAM_D_61 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D62, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D62__MMDC_DRAM_D_62 */
- IMX_PIN_REG(MX6Q_PAD_DRAM_D63, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D63__MMDC_DRAM_D_63 */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL0, 0x05C8, 0x01F8, 0, 0x07F4, 2), /* MX6Q_PAD_KEY_COL0__ECSPI1_SCLK */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL0, 0x05C8, 0x01F8, 1, 0x0854, 1), /* MX6Q_PAD_KEY_COL0__ENET_RDATA_3 */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL0, 0x05C8, 0x01F8, 2, 0x07DC, 1), /* MX6Q_PAD_KEY_COL0__AUDMUX_AUD5_TXC */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL0, 0x05C8, 0x01F8, 3, 0x0000, 0), /* MX6Q_PAD_KEY_COL0__KPP_COL_0 */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL0, 0x05C8, 0x01F8, 4, 0x0000, 0), /* MX6Q_PAD_KEY_COL0__UART4_TXD */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL0, 0x05C8, 0x01F8, 5, 0x0000, 0), /* MX6Q_PAD_KEY_COL0__GPIO_4_6 */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL0, 0x05C8, 0x01F8, 6, 0x0000, 0), /* MX6Q_PAD_KEY_COL0__DCIC1_DCIC_OUT */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL0, 0x05C8, 0x01F8, 7, 0x0000, 0), /* MX6Q_PAD_KEY_COL0__SRC_ANY_PU_RST */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW0, 0x05CC, 0x01FC, 0, 0x07FC, 2), /* MX6Q_PAD_KEY_ROW0__ECSPI1_MOSI */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW0, 0x05CC, 0x01FC, 1, 0x0000, 0), /* MX6Q_PAD_KEY_ROW0__ENET_TDATA_3 */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW0, 0x05CC, 0x01FC, 2, 0x07D0, 1), /* MX6Q_PAD_KEY_ROW0__AUDMUX_AUD5_TXD */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW0, 0x05CC, 0x01FC, 3, 0x0000, 0), /* MX6Q_PAD_KEY_ROW0__KPP_ROW_0 */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW0, 0x05CC, 0x01FC, 4, 0x0938, 1), /* MX6Q_PAD_KEY_ROW0__UART4_RXD */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW0, 0x05CC, 0x01FC, 5, 0x0000, 0), /* MX6Q_PAD_KEY_ROW0__GPIO_4_7 */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW0, 0x05CC, 0x01FC, 6, 0x0000, 0), /* MX6Q_PAD_KEY_ROW0__DCIC2_DCIC_OUT */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW0, 0x05CC, 0x01FC, 7, 0x0000, 0), /* MX6Q_PAD_KEY_ROW0__PL301_PER1_HADR_0 */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL1, 0x05D0, 0x0200, 0, 0x07F8, 2), /* MX6Q_PAD_KEY_COL1__ECSPI1_MISO */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL1, 0x05D0, 0x0200, 1, 0x0840, 1), /* MX6Q_PAD_KEY_COL1__ENET_MDIO */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL1, 0x05D0, 0x0200, 2, 0x07E0, 1), /* MX6Q_PAD_KEY_COL1__AUDMUX_AUD5_TXFS */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL1, 0x05D0, 0x0200, 3, 0x0000, 0), /* MX6Q_PAD_KEY_COL1__KPP_COL_1 */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL1, 0x05D0, 0x0200, 4, 0x0000, 0), /* MX6Q_PAD_KEY_COL1__UART5_TXD */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL1, 0x05D0, 0x0200, 5, 0x0000, 0), /* MX6Q_PAD_KEY_COL1__GPIO_4_8 */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL1, 0x05D0, 0x0200, 6, 0x0000, 0), /* MX6Q_PAD_KEY_COL1__USDHC1_VSELECT */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL1, 0x05D0, 0x0200, 7, 0x0000, 0), /* MX6Q_PAD_KEY_COL1__PL301MX_PER1_HADR_1 */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW1, 0x05D4, 0x0204, 0, 0x0800, 2), /* MX6Q_PAD_KEY_ROW1__ECSPI1_SS0 */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW1, 0x05D4, 0x0204, 1, 0x0000, 0), /* MX6Q_PAD_KEY_ROW1__ENET_COL */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW1, 0x05D4, 0x0204, 2, 0x07CC, 1), /* MX6Q_PAD_KEY_ROW1__AUDMUX_AUD5_RXD */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW1, 0x05D4, 0x0204, 3, 0x0000, 0), /* MX6Q_PAD_KEY_ROW1__KPP_ROW_1 */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW1, 0x05D4, 0x0204, 4, 0x0940, 1), /* MX6Q_PAD_KEY_ROW1__UART5_RXD */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW1, 0x05D4, 0x0204, 5, 0x0000, 0), /* MX6Q_PAD_KEY_ROW1__GPIO_4_9 */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW1, 0x05D4, 0x0204, 6, 0x0000, 0), /* MX6Q_PAD_KEY_ROW1__USDHC2_VSELECT */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW1, 0x05D4, 0x0204, 7, 0x0000, 0), /* MX6Q_PAD_KEY_ROW1__PL301_PER1_HADDR_2 */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL2, 0x05D8, 0x0208, 0, 0x0804, 2), /* MX6Q_PAD_KEY_COL2__ECSPI1_SS1 */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL2, 0x05D8, 0x0208, 1, 0x0850, 1), /* MX6Q_PAD_KEY_COL2__ENET_RDATA_2 */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL2, 0x05D8, 0x0208, 2, 0x0000, 0), /* MX6Q_PAD_KEY_COL2__CAN1_TXCAN */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL2, 0x05D8, 0x0208, 3, 0x0000, 0), /* MX6Q_PAD_KEY_COL2__KPP_COL_2 */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL2, 0x05D8, 0x0208, 4, 0x0000, 0), /* MX6Q_PAD_KEY_COL2__ENET_MDC */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL2, 0x05D8, 0x0208, 5, 0x0000, 0), /* MX6Q_PAD_KEY_COL2__GPIO_4_10 */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL2, 0x05D8, 0x0208, 6, 0x0000, 0), /* MX6Q_PAD_KEY_COL2__USBOH3_H1_PWRCTL_WKP */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL2, 0x05D8, 0x0208, 7, 0x0000, 0), /* MX6Q_PAD_KEY_COL2__PL301_PER1_HADDR_3 */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW2, 0x05DC, 0x020C, 0, 0x0808, 1), /* MX6Q_PAD_KEY_ROW2__ECSPI1_SS2 */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW2, 0x05DC, 0x020C, 1, 0x0000, 0), /* MX6Q_PAD_KEY_ROW2__ENET_TDATA_2 */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW2, 0x05DC, 0x020C, 2, 0x07E4, 0), /* MX6Q_PAD_KEY_ROW2__CAN1_RXCAN */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW2, 0x05DC, 0x020C, 3, 0x0000, 0), /* MX6Q_PAD_KEY_ROW2__KPP_ROW_2 */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW2, 0x05DC, 0x020C, 4, 0x0000, 0), /* MX6Q_PAD_KEY_ROW2__USDHC2_VSELECT */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW2, 0x05DC, 0x020C, 5, 0x0000, 0), /* MX6Q_PAD_KEY_ROW2__GPIO_4_11 */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW2, 0x05DC, 0x020C, 6, 0x088C, 1), /* MX6Q_PAD_KEY_ROW2__HDMI_TX_CEC_LINE */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW2, 0x05DC, 0x020C, 7, 0x0000, 0), /* MX6Q_PAD_KEY_ROW2__PL301_PER1_HADR_4 */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL3, 0x05E0, 0x0210, 0, 0x080C, 1), /* MX6Q_PAD_KEY_COL3__ECSPI1_SS3 */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL3, 0x05E0, 0x0210, 1, 0x0000, 0), /* MX6Q_PAD_KEY_COL3__ENET_CRS */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL3, 0x05E0, 0x0210, 2, 0x0890, 1), /* MX6Q_PAD_KEY_COL3__HDMI_TX_DDC_SCL */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL3, 0x05E0, 0x0210, 3, 0x0000, 0), /* MX6Q_PAD_KEY_COL3__KPP_COL_3 */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL3, 0x05E0, 0x0210, 4, 0x08A0, 1), /* MX6Q_PAD_KEY_COL3__I2C2_SCL */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL3, 0x05E0, 0x0210, 5, 0x0000, 0), /* MX6Q_PAD_KEY_COL3__GPIO_4_12 */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL3, 0x05E0, 0x0210, 6, 0x0914, 2), /* MX6Q_PAD_KEY_COL3__SPDIF_IN1 */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL3, 0x05E0, 0x0210, 7, 0x0000, 0), /* MX6Q_PAD_KEY_COL3__PL301_PER1_HADR_5 */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW3, 0x05E4, 0x0214, 0, 0x0000, 0), /* MX6Q_PAD_KEY_ROW3__OSC32K_32K_OUT */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW3, 0x05E4, 0x0214, 1, 0x07B0, 0), /* MX6Q_PAD_KEY_ROW3__ASRC_ASRC_EXT_CLK */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW3, 0x05E4, 0x0214, 2, 0x0894, 1), /* MX6Q_PAD_KEY_ROW3__HDMI_TX_DDC_SDA */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW3, 0x05E4, 0x0214, 3, 0x0000, 0), /* MX6Q_PAD_KEY_ROW3__KPP_ROW_3 */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW3, 0x05E4, 0x0214, 4, 0x08A4, 1), /* MX6Q_PAD_KEY_ROW3__I2C2_SDA */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW3, 0x05E4, 0x0214, 5, 0x0000, 0), /* MX6Q_PAD_KEY_ROW3__GPIO_4_13 */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW3, 0x05E4, 0x0214, 6, 0x0000, 0), /* MX6Q_PAD_KEY_ROW3__USDHC1_VSELECT */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW3, 0x05E4, 0x0214, 7, 0x0000, 0), /* MX6Q_PAD_KEY_ROW3__PL301_PER1_HADR_6 */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL4, 0x05E8, 0x0218, 0, 0x0000, 0), /* MX6Q_PAD_KEY_COL4__CAN2_TXCAN */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL4, 0x05E8, 0x0218, 1, 0x0000, 0), /* MX6Q_PAD_KEY_COL4__IPU1_SISG_4 */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL4, 0x05E8, 0x0218, 2, 0x0944, 1), /* MX6Q_PAD_KEY_COL4__USBOH3_USBOTG_OC */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL4, 0x05E8, 0x0218, 3, 0x0000, 0), /* MX6Q_PAD_KEY_COL4__KPP_COL_4 */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL4, 0x05E8, 0x0218, 4, 0x093C, 0), /* MX6Q_PAD_KEY_COL4__UART5_RTS */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL4, 0x05E8, 0x0218, 5, 0x0000, 0), /* MX6Q_PAD_KEY_COL4__GPIO_4_14 */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL4, 0x05E8, 0x0218, 6, 0x0000, 0), /* MX6Q_PAD_KEY_COL4__MMDC_DEBUG_49 */
- IMX_PIN_REG(MX6Q_PAD_KEY_COL4, 0x05E8, 0x0218, 7, 0x0000, 0), /* MX6Q_PAD_KEY_COL4__PL301_PER1_HADDR_7 */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW4, 0x05EC, 0x021C, 0, 0x07E8, 0), /* MX6Q_PAD_KEY_ROW4__CAN2_RXCAN */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW4, 0x05EC, 0x021C, 1, 0x0000, 0), /* MX6Q_PAD_KEY_ROW4__IPU1_SISG_5 */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW4, 0x05EC, 0x021C, 2, 0x0000, 0), /* MX6Q_PAD_KEY_ROW4__USBOH3_USBOTG_PWR */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW4, 0x05EC, 0x021C, 3, 0x0000, 0), /* MX6Q_PAD_KEY_ROW4__KPP_ROW_4 */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW4, 0x05EC, 0x021C, 4, 0x093C, 1), /* MX6Q_PAD_KEY_ROW4__UART5_CTS */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW4, 0x05EC, 0x021C, 5, 0x0000, 0), /* MX6Q_PAD_KEY_ROW4__GPIO_4_15 */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW4, 0x05EC, 0x021C, 6, 0x0000, 0), /* MX6Q_PAD_KEY_ROW4__MMDC_DEBUG_50 */
- IMX_PIN_REG(MX6Q_PAD_KEY_ROW4, 0x05EC, 0x021C, 7, 0x0000, 0), /* MX6Q_PAD_KEY_ROW4__PL301_PER1_HADR_8 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_0, 0x05F0, 0x0220, 0, 0x0000, 0), /* MX6Q_PAD_GPIO_0__CCM_CLKO */
- IMX_PIN_REG(MX6Q_PAD_GPIO_0, 0x05F0, 0x0220, 2, 0x08E8, 0), /* MX6Q_PAD_GPIO_0__KPP_COL_5 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_0, 0x05F0, 0x0220, 3, 0x07B0, 1), /* MX6Q_PAD_GPIO_0__ASRC_ASRC_EXT_CLK */
- IMX_PIN_REG(MX6Q_PAD_GPIO_0, 0x05F0, 0x0220, 4, 0x0000, 0), /* MX6Q_PAD_GPIO_0__EPIT1_EPITO */
- IMX_PIN_REG(MX6Q_PAD_GPIO_0, 0x05F0, 0x0220, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_0__GPIO_1_0 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_0, 0x05F0, 0x0220, 6, 0x0000, 0), /* MX6Q_PAD_GPIO_0__USBOH3_USBH1_PWR */
- IMX_PIN_REG(MX6Q_PAD_GPIO_0, 0x05F0, 0x0220, 7, 0x0000, 0), /* MX6Q_PAD_GPIO_0__SNVS_HP_WRAP_SNVS_VIO5 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_1, 0x05F4, 0x0224, 0, 0x086C, 1), /* MX6Q_PAD_GPIO_1__ESAI1_SCKR */
- IMX_PIN_REG(MX6Q_PAD_GPIO_1, 0x05F4, 0x0224, 1, 0x0000, 0), /* MX6Q_PAD_GPIO_1__WDOG2_WDOG_B */
- IMX_PIN_REG(MX6Q_PAD_GPIO_1, 0x05F4, 0x0224, 2, 0x08F4, 0), /* MX6Q_PAD_GPIO_1__KPP_ROW_5 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_1, 0x05F4, 0x0224, 4, 0x0000, 0), /* MX6Q_PAD_GPIO_1__PWM2_PWMO */
- IMX_PIN_REG(MX6Q_PAD_GPIO_1, 0x05F4, 0x0224, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_1__GPIO_1_1 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_1, 0x05F4, 0x0224, 6, 0x0000, 0), /* MX6Q_PAD_GPIO_1__USDHC1_CD */
- IMX_PIN_REG(MX6Q_PAD_GPIO_1, 0x05F4, 0x0224, 7, 0x0000, 0), /* MX6Q_PAD_GPIO_1__SRC_TESTER_ACK */
- IMX_PIN_REG(MX6Q_PAD_GPIO_9, 0x05F8, 0x0228, 0, 0x085C, 1), /* MX6Q_PAD_GPIO_9__ESAI1_FSR */
- IMX_PIN_REG(MX6Q_PAD_GPIO_9, 0x05F8, 0x0228, 1, 0x0000, 0), /* MX6Q_PAD_GPIO_9__WDOG1_WDOG_B */
- IMX_PIN_REG(MX6Q_PAD_GPIO_9, 0x05F8, 0x0228, 2, 0x08EC, 0), /* MX6Q_PAD_GPIO_9__KPP_COL_6 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_9, 0x05F8, 0x0228, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_9__CCM_REF_EN_B */
- IMX_PIN_REG(MX6Q_PAD_GPIO_9, 0x05F8, 0x0228, 4, 0x0000, 0), /* MX6Q_PAD_GPIO_9__PWM1_PWMO */
- IMX_PIN_REG(MX6Q_PAD_GPIO_9, 0x05F8, 0x0228, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_9__GPIO_1_9 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_9, 0x05F8, 0x0228, 6, 0x094C, 1), /* MX6Q_PAD_GPIO_9__USDHC1_WP */
- IMX_PIN_REG(MX6Q_PAD_GPIO_9, 0x05F8, 0x0228, 7, 0x0000, 0), /* MX6Q_PAD_GPIO_9__SRC_EARLY_RST */
- IMX_PIN_REG(MX6Q_PAD_GPIO_3, 0x05FC, 0x022C, 0, 0x0864, 1), /* MX6Q_PAD_GPIO_3__ESAI1_HCKR */
- IMX_PIN_REG(MX6Q_PAD_GPIO_3, 0x05FC, 0x022C, 1, 0x0000, 0), /* MX6Q_PAD_GPIO_3__OBSERVE_MUX_INT_OUT0 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_3, 0x05FC, 0x022C, 2, 0x08A8, 1), /* MX6Q_PAD_GPIO_3__I2C3_SCL */
- IMX_PIN_REG(MX6Q_PAD_GPIO_3, 0x05FC, 0x022C, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_3__ANATOP_24M_OUT */
- IMX_PIN_REG(MX6Q_PAD_GPIO_3, 0x05FC, 0x022C, 4, 0x0000, 0), /* MX6Q_PAD_GPIO_3__CCM_CLKO2 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_3, 0x05FC, 0x022C, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_3__GPIO_1_3 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_3, 0x05FC, 0x022C, 6, 0x0948, 1), /* MX6Q_PAD_GPIO_3__USBOH3_USBH1_OC */
- IMX_PIN_REG(MX6Q_PAD_GPIO_3, 0x05FC, 0x022C, 7, 0x0900, 1), /* MX6Q_PAD_GPIO_3__MLB_MLBCLK */
- IMX_PIN_REG(MX6Q_PAD_GPIO_6, 0x0600, 0x0230, 0, 0x0870, 1), /* MX6Q_PAD_GPIO_6__ESAI1_SCKT */
- IMX_PIN_REG(MX6Q_PAD_GPIO_6, 0x0600, 0x0230, 1, 0x0000, 0), /* MX6Q_PAD_GPIO_6__OBSERVE_MUX_INT_OUT1 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_6, 0x0600, 0x0230, 2, 0x08AC, 1), /* MX6Q_PAD_GPIO_6__I2C3_SDA */
- IMX_PIN_REG(MX6Q_PAD_GPIO_6, 0x0600, 0x0230, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_6__CCM_CCM_OUT_0 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_6, 0x0600, 0x0230, 4, 0x0000, 0), /* MX6Q_PAD_GPIO_6__CSU_CSU_INT_DEB */
- IMX_PIN_REG(MX6Q_PAD_GPIO_6, 0x0600, 0x0230, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_6__GPIO_1_6 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_6, 0x0600, 0x0230, 6, 0x0000, 0), /* MX6Q_PAD_GPIO_6__USDHC2_LCTL */
- IMX_PIN_REG(MX6Q_PAD_GPIO_6, 0x0600, 0x0230, 7, 0x0908, 1), /* MX6Q_PAD_GPIO_6__MLB_MLBSIG */
- IMX_PIN_REG(MX6Q_PAD_GPIO_2, 0x0604, 0x0234, 0, 0x0860, 1), /* MX6Q_PAD_GPIO_2__ESAI1_FST */
- IMX_PIN_REG(MX6Q_PAD_GPIO_2, 0x0604, 0x0234, 1, 0x0000, 0), /* MX6Q_PAD_GPIO_2__OBSERVE_MUX_INT_OUT2 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_2, 0x0604, 0x0234, 2, 0x08F8, 1), /* MX6Q_PAD_GPIO_2__KPP_ROW_6 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_2, 0x0604, 0x0234, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_2__CCM_CCM_OUT_1 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_2, 0x0604, 0x0234, 4, 0x0000, 0), /* MX6Q_PAD_GPIO_2__CSU_CSU_ALARM_AUT_0 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_2, 0x0604, 0x0234, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_2__GPIO_1_2 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_2, 0x0604, 0x0234, 6, 0x0000, 0), /* MX6Q_PAD_GPIO_2__USDHC2_WP */
- IMX_PIN_REG(MX6Q_PAD_GPIO_2, 0x0604, 0x0234, 7, 0x0904, 1), /* MX6Q_PAD_GPIO_2__MLB_MLBDAT */
- IMX_PIN_REG(MX6Q_PAD_GPIO_4, 0x0608, 0x0238, 0, 0x0868, 1), /* MX6Q_PAD_GPIO_4__ESAI1_HCKT */
- IMX_PIN_REG(MX6Q_PAD_GPIO_4, 0x0608, 0x0238, 1, 0x0000, 0), /* MX6Q_PAD_GPIO_4__OBSERVE_MUX_INT_OUT3 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_4, 0x0608, 0x0238, 2, 0x08F0, 1), /* MX6Q_PAD_GPIO_4__KPP_COL_7 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_4, 0x0608, 0x0238, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_4__CCM_CCM_OUT_2 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_4, 0x0608, 0x0238, 4, 0x0000, 0), /* MX6Q_PAD_GPIO_4__CSU_CSU_ALARM_AUT_1 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_4, 0x0608, 0x0238, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_4__GPIO_1_4 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_4, 0x0608, 0x0238, 6, 0x0000, 0), /* MX6Q_PAD_GPIO_4__USDHC2_CD */
- IMX_PIN_REG(MX6Q_PAD_GPIO_4, 0x0608, 0x0238, 7, 0x0000, 0), /* MX6Q_PAD_GPIO_4__OCOTP_CRL_WRAR_FUSE_LA */
- IMX_PIN_REG(MX6Q_PAD_GPIO_5, 0x060C, 0x023C, 0, 0x087C, 1), /* MX6Q_PAD_GPIO_5__ESAI1_TX2_RX3 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_5, 0x060C, 0x023C, 1, 0x0000, 0), /* MX6Q_PAD_GPIO_5__OBSERVE_MUX_INT_OUT4 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_5, 0x060C, 0x023C, 2, 0x08FC, 1), /* MX6Q_PAD_GPIO_5__KPP_ROW_7 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_5, 0x060C, 0x023C, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_5__CCM_CLKO */
- IMX_PIN_REG(MX6Q_PAD_GPIO_5, 0x060C, 0x023C, 4, 0x0000, 0), /* MX6Q_PAD_GPIO_5__CSU_CSU_ALARM_AUT_2 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_5, 0x060C, 0x023C, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_5__GPIO_1_5 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_5, 0x060C, 0x023C, 6, 0x08A8, 2), /* MX6Q_PAD_GPIO_5__I2C3_SCL */
- IMX_PIN_REG(MX6Q_PAD_GPIO_5, 0x060C, 0x023C, 7, 0x0000, 0), /* MX6Q_PAD_GPIO_5__CHEETAH_EVENTI */
- IMX_PIN_REG(MX6Q_PAD_GPIO_7, 0x0610, 0x0240, 0, 0x0884, 1), /* MX6Q_PAD_GPIO_7__ESAI1_TX4_RX1 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_7, 0x0610, 0x0240, 1, 0x0000, 0), /* MX6Q_PAD_GPIO_7__ECSPI5_RDY */
- IMX_PIN_REG(MX6Q_PAD_GPIO_7, 0x0610, 0x0240, 2, 0x0000, 0), /* MX6Q_PAD_GPIO_7__EPIT1_EPITO */
- IMX_PIN_REG(MX6Q_PAD_GPIO_7, 0x0610, 0x0240, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_7__CAN1_TXCAN */
- IMX_PIN_REG(MX6Q_PAD_GPIO_7, 0x0610, 0x0240, 4, 0x0000, 0), /* MX6Q_PAD_GPIO_7__UART2_TXD */
- IMX_PIN_REG(MX6Q_PAD_GPIO_7, 0x0610, 0x0240, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_7__GPIO_1_7 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_7, 0x0610, 0x0240, 6, 0x0000, 0), /* MX6Q_PAD_GPIO_7__SPDIF_PLOCK */
- IMX_PIN_REG(MX6Q_PAD_GPIO_7, 0x0610, 0x0240, 7, 0x0000, 0), /* MX6Q_PAD_GPIO_7__USBOH3_OTGUSB_HST_MODE */
- IMX_PIN_REG(MX6Q_PAD_GPIO_8, 0x0614, 0x0244, 0, 0x0888, 1), /* MX6Q_PAD_GPIO_8__ESAI1_TX5_RX0 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_8, 0x0614, 0x0244, 1, 0x0000, 0), /* MX6Q_PAD_GPIO_8__ANATOP_ANATOP_32K_OUT */
- IMX_PIN_REG(MX6Q_PAD_GPIO_8, 0x0614, 0x0244, 2, 0x0000, 0), /* MX6Q_PAD_GPIO_8__EPIT2_EPITO */
- IMX_PIN_REG(MX6Q_PAD_GPIO_8, 0x0614, 0x0244, 3, 0x07E4, 1), /* MX6Q_PAD_GPIO_8__CAN1_RXCAN */
- IMX_PIN_REG(MX6Q_PAD_GPIO_8, 0x0614, 0x0244, 4, 0x0928, 3), /* MX6Q_PAD_GPIO_8__UART2_RXD */
- IMX_PIN_REG(MX6Q_PAD_GPIO_8, 0x0614, 0x0244, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_8__GPIO_1_8 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_8, 0x0614, 0x0244, 6, 0x0000, 0), /* MX6Q_PAD_GPIO_8__SPDIF_SRCLK */
- IMX_PIN_REG(MX6Q_PAD_GPIO_8, 0x0614, 0x0244, 7, 0x0000, 0), /* MX6Q_PAD_GPIO_8__USBOH3_OTG_PWRCTL_WAK */
- IMX_PIN_REG(MX6Q_PAD_GPIO_16, 0x0618, 0x0248, 0, 0x0880, 1), /* MX6Q_PAD_GPIO_16__ESAI1_TX3_RX2 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_16, 0x0618, 0x0248, 1, 0x0000, 0), /* MX6Q_PAD_GPIO_16__ENET_1588_EVENT2_IN */
- IMX_PIN_REG(MX6Q_PAD_GPIO_16, 0x0618, 0x0248, 2, 0x083C, 1), /* MX6Q_PAD_GPIO_16__ENET_ETHERNET_REF_OUT */
- IMX_PIN_REG(MX6Q_PAD_GPIO_16, 0x0618, 0x0248, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_16__USDHC1_LCTL */
- IMX_PIN_REG(MX6Q_PAD_GPIO_16, 0x0618, 0x0248, 4, 0x0914, 3), /* MX6Q_PAD_GPIO_16__SPDIF_IN1 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_16, 0x0618, 0x0248, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_16__GPIO_7_11 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_16, 0x0618, 0x0248, 6, 0x08AC, 2), /* MX6Q_PAD_GPIO_16__I2C3_SDA */
- IMX_PIN_REG(MX6Q_PAD_GPIO_16, 0x0618, 0x0248, 7, 0x0000, 0), /* MX6Q_PAD_GPIO_16__SJC_DE_B */
- IMX_PIN_REG(MX6Q_PAD_GPIO_17, 0x061C, 0x024C, 0, 0x0874, 0), /* MX6Q_PAD_GPIO_17__ESAI1_TX0 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_17, 0x061C, 0x024C, 1, 0x0000, 0), /* MX6Q_PAD_GPIO_17__ENET_1588_EVENT3_IN */
- IMX_PIN_REG(MX6Q_PAD_GPIO_17, 0x061C, 0x024C, 2, 0x07F0, 1), /* MX6Q_PAD_GPIO_17__CCM_PMIC_RDY */
- IMX_PIN_REG(MX6Q_PAD_GPIO_17, 0x061C, 0x024C, 3, 0x090C, 1), /* MX6Q_PAD_GPIO_17__SDMA_SDMA_EXT_EVENT_0 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_17, 0x061C, 0x024C, 4, 0x0000, 0), /* MX6Q_PAD_GPIO_17__SPDIF_OUT1 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_17, 0x061C, 0x024C, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_17__GPIO_7_12 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_17, 0x061C, 0x024C, 7, 0x0000, 0), /* MX6Q_PAD_GPIO_17__SJC_JTAG_ACT */
- IMX_PIN_REG(MX6Q_PAD_GPIO_18, 0x0620, 0x0250, 0, 0x0878, 0), /* MX6Q_PAD_GPIO_18__ESAI1_TX1 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_18, 0x0620, 0x0250, 1, 0x0844, 1), /* MX6Q_PAD_GPIO_18__ENET_RX_CLK */
- IMX_PIN_REG(MX6Q_PAD_GPIO_18, 0x0620, 0x0250, 2, 0x0000, 0), /* MX6Q_PAD_GPIO_18__USDHC3_VSELECT */
- IMX_PIN_REG(MX6Q_PAD_GPIO_18, 0x0620, 0x0250, 3, 0x0910, 1), /* MX6Q_PAD_GPIO_18__SDMA_SDMA_EXT_EVENT_1 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_18, 0x0620, 0x0250, 4, 0x07B0, 2), /* MX6Q_PAD_GPIO_18__ASRC_ASRC_EXT_CLK */
- IMX_PIN_REG(MX6Q_PAD_GPIO_18, 0x0620, 0x0250, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_18__GPIO_7_13 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_18, 0x0620, 0x0250, 6, 0x0000, 0), /* MX6Q_PAD_GPIO_18__SNVS_HP_WRA_SNVS_VIO5 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_18, 0x0620, 0x0250, 7, 0x0000, 0), /* MX6Q_PAD_GPIO_18__SRC_SYSTEM_RST */
- IMX_PIN_REG(MX6Q_PAD_GPIO_19, 0x0624, 0x0254, 0, 0x08E8, 1), /* MX6Q_PAD_GPIO_19__KPP_COL_5 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_19, 0x0624, 0x0254, 1, 0x0000, 0), /* MX6Q_PAD_GPIO_19__ENET_1588_EVENT0_OUT */
- IMX_PIN_REG(MX6Q_PAD_GPIO_19, 0x0624, 0x0254, 2, 0x0000, 0), /* MX6Q_PAD_GPIO_19__SPDIF_OUT1 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_19, 0x0624, 0x0254, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_19__CCM_CLKO */
- IMX_PIN_REG(MX6Q_PAD_GPIO_19, 0x0624, 0x0254, 4, 0x0000, 0), /* MX6Q_PAD_GPIO_19__ECSPI1_RDY */
- IMX_PIN_REG(MX6Q_PAD_GPIO_19, 0x0624, 0x0254, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_19__GPIO_4_5 */
- IMX_PIN_REG(MX6Q_PAD_GPIO_19, 0x0624, 0x0254, 6, 0x0000, 0), /* MX6Q_PAD_GPIO_19__ENET_TX_ER */
- IMX_PIN_REG(MX6Q_PAD_GPIO_19, 0x0624, 0x0254, 7, 0x0000, 0), /* MX6Q_PAD_GPIO_19__SRC_INT_BOOT */
- IMX_PIN_REG(MX6Q_PAD_CSI0_PIXCLK, 0x0628, 0x0258, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_PIXCLK__IPU1_CSI0_PIXCLK */
- IMX_PIN_REG(MX6Q_PAD_CSI0_PIXCLK, 0x0628, 0x0258, 2, 0x0000, 0), /* MX6Q_PAD_CSI0_PIXCLK__PCIE_CTRL_MUX_12 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_PIXCLK, 0x0628, 0x0258, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_PIXCLK__SDMA_DEBUG_PC_0 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_PIXCLK, 0x0628, 0x0258, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_PIXCLK__GPIO_5_18 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_PIXCLK, 0x0628, 0x0258, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_PIXCLK___MMDC_DEBUG_29 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_PIXCLK, 0x0628, 0x0258, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_PIXCLK__CHEETAH_EVENTO */
- IMX_PIN_REG(MX6Q_PAD_CSI0_MCLK, 0x062C, 0x025C, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_MCLK__IPU1_CSI0_HSYNC */
- IMX_PIN_REG(MX6Q_PAD_CSI0_MCLK, 0x062C, 0x025C, 2, 0x0000, 0), /* MX6Q_PAD_CSI0_MCLK__PCIE_CTRL_MUX_13 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_MCLK, 0x062C, 0x025C, 3, 0x0000, 0), /* MX6Q_PAD_CSI0_MCLK__CCM_CLKO */
- IMX_PIN_REG(MX6Q_PAD_CSI0_MCLK, 0x062C, 0x025C, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_MCLK__SDMA_DEBUG_PC_1 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_MCLK, 0x062C, 0x025C, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_MCLK__GPIO_5_19 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_MCLK, 0x062C, 0x025C, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_MCLK__MMDC_MMDC_DEBUG_30 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_MCLK, 0x062C, 0x025C, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_MCLK__CHEETAH_TRCTL */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DATA_EN, 0x0630, 0x0260, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DATA_EN__IPU1_CSI0_DA_EN */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DATA_EN, 0x0630, 0x0260, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DATA_EN__WEIM_WEIM_D_0 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DATA_EN, 0x0630, 0x0260, 2, 0x0000, 0), /* MX6Q_PAD_CSI0_DATA_EN__PCIE_CTRL_MUX_14 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DATA_EN, 0x0630, 0x0260, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DATA_EN__SDMA_DEBUG_PC_2 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DATA_EN, 0x0630, 0x0260, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DATA_EN__GPIO_5_20 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DATA_EN, 0x0630, 0x0260, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DATA_EN__MMDC_DEBUG_31 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DATA_EN, 0x0630, 0x0260, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DATA_EN__CHEETAH_TRCLK */
- IMX_PIN_REG(MX6Q_PAD_CSI0_VSYNC, 0x0634, 0x0264, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_VSYNC__IPU1_CSI0_VSYNC */
- IMX_PIN_REG(MX6Q_PAD_CSI0_VSYNC, 0x0634, 0x0264, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_VSYNC__WEIM_WEIM_D_1 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_VSYNC, 0x0634, 0x0264, 2, 0x0000, 0), /* MX6Q_PAD_CSI0_VSYNC__PCIE_CTRL_MUX_15 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_VSYNC, 0x0634, 0x0264, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_VSYNC__SDMA_DEBUG_PC_3 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_VSYNC, 0x0634, 0x0264, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_VSYNC__GPIO_5_21 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_VSYNC, 0x0634, 0x0264, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_VSYNC__MMDC_DEBUG_32 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_VSYNC, 0x0634, 0x0264, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_VSYNC__CHEETAH_TRACE_0 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT4, 0x0638, 0x0268, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT4__IPU1_CSI0_D_4 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT4, 0x0638, 0x0268, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT4__WEIM_WEIM_D_2 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT4, 0x0638, 0x0268, 2, 0x07F4, 3), /* MX6Q_PAD_CSI0_DAT4__ECSPI1_SCLK */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT4, 0x0638, 0x0268, 3, 0x08E8, 2), /* MX6Q_PAD_CSI0_DAT4__KPP_COL_5 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT4, 0x0638, 0x0268, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT4__AUDMUX_AUD3_TXC */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT4, 0x0638, 0x0268, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT4__GPIO_5_22 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT4, 0x0638, 0x0268, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT4__MMDC_DEBUG_43 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT4, 0x0638, 0x0268, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT4__CHEETAH_TRACE_1 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT5, 0x063C, 0x026C, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT5__IPU1_CSI0_D_5 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT5, 0x063C, 0x026C, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT5__WEIM_WEIM_D_3 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT5, 0x063C, 0x026C, 2, 0x07FC, 3), /* MX6Q_PAD_CSI0_DAT5__ECSPI1_MOSI */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT5, 0x063C, 0x026C, 3, 0x08F4, 1), /* MX6Q_PAD_CSI0_DAT5__KPP_ROW_5 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT5, 0x063C, 0x026C, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT5__AUDMUX_AUD3_TXD */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT5, 0x063C, 0x026C, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT5__GPIO_5_23 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT5, 0x063C, 0x026C, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT5__MMDC_MMDC_DEBUG_44 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT5, 0x063C, 0x026C, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT5__CHEETAH_TRACE_2 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT6, 0x0640, 0x0270, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT6__IPU1_CSI0_D_6 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT6, 0x0640, 0x0270, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT6__WEIM_WEIM_D_4 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT6, 0x0640, 0x0270, 2, 0x07F8, 3), /* MX6Q_PAD_CSI0_DAT6__ECSPI1_MISO */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT6, 0x0640, 0x0270, 3, 0x08EC, 1), /* MX6Q_PAD_CSI0_DAT6__KPP_COL_6 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT6, 0x0640, 0x0270, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT6__AUDMUX_AUD3_TXFS */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT6, 0x0640, 0x0270, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT6__GPIO_5_24 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT6, 0x0640, 0x0270, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT6__MMDC_MMDC_DEBUG_45 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT6, 0x0640, 0x0270, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT6__CHEETAH_TRACE_3 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT7, 0x0644, 0x0274, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT7__IPU1_CSI0_D_7 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT7, 0x0644, 0x0274, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT7__WEIM_WEIM_D_5 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT7, 0x0644, 0x0274, 2, 0x0800, 3), /* MX6Q_PAD_CSI0_DAT7__ECSPI1_SS0 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT7, 0x0644, 0x0274, 3, 0x08F8, 2), /* MX6Q_PAD_CSI0_DAT7__KPP_ROW_6 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT7, 0x0644, 0x0274, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT7__AUDMUX_AUD3_RXD */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT7, 0x0644, 0x0274, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT7__GPIO_5_25 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT7, 0x0644, 0x0274, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT7__MMDC_MMDC_DEBUG_46 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT7, 0x0644, 0x0274, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT7__CHEETAH_TRACE_4 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT8, 0x0648, 0x0278, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT8__IPU1_CSI0_D_8 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT8, 0x0648, 0x0278, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT8__WEIM_WEIM_D_6 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT8, 0x0648, 0x0278, 2, 0x0810, 2), /* MX6Q_PAD_CSI0_DAT8__ECSPI2_SCLK */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT8, 0x0648, 0x0278, 3, 0x08F0, 2), /* MX6Q_PAD_CSI0_DAT8__KPP_COL_7 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT8, 0x0648, 0x0278, 4, 0x089C, 1), /* MX6Q_PAD_CSI0_DAT8__I2C1_SDA */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT8, 0x0648, 0x0278, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT8__GPIO_5_26 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT8, 0x0648, 0x0278, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT8__MMDC_MMDC_DEBUG_47 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT8, 0x0648, 0x0278, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT8__CHEETAH_TRACE_5 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT9, 0x064C, 0x027C, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT9__IPU1_CSI0_D_9 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT9, 0x064C, 0x027C, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT9__WEIM_WEIM_D_7 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT9, 0x064C, 0x027C, 2, 0x0818, 2), /* MX6Q_PAD_CSI0_DAT9__ECSPI2_MOSI */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT9, 0x064C, 0x027C, 3, 0x08FC, 2), /* MX6Q_PAD_CSI0_DAT9__KPP_ROW_7 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT9, 0x064C, 0x027C, 4, 0x0898, 1), /* MX6Q_PAD_CSI0_DAT9__I2C1_SCL */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT9, 0x064C, 0x027C, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT9__GPIO_5_27 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT9, 0x064C, 0x027C, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT9__MMDC_MMDC_DEBUG_48 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT9, 0x064C, 0x027C, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT9__CHEETAH_TRACE_6 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT10, 0x0650, 0x0280, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT10__IPU1_CSI0_D_10 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT10, 0x0650, 0x0280, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT10__AUDMUX_AUD3_RXC */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT10, 0x0650, 0x0280, 2, 0x0814, 2), /* MX6Q_PAD_CSI0_DAT10__ECSPI2_MISO */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT10, 0x0650, 0x0280, 3, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT10__UART1_TXD */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT10, 0x0650, 0x0280, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT10__SDMA_DEBUG_PC_4 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT10, 0x0650, 0x0280, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT10__GPIO_5_28 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT10, 0x0650, 0x0280, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT10__MMDC_MMDC_DEBUG_33 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT10, 0x0650, 0x0280, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT10__CHEETAH_TRACE_7 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT11, 0x0654, 0x0284, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT11__IPU1_CSI0_D_11 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT11, 0x0654, 0x0284, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT11__AUDMUX_AUD3_RXFS */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT11, 0x0654, 0x0284, 2, 0x081C, 2), /* MX6Q_PAD_CSI0_DAT11__ECSPI2_SS0 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT11, 0x0654, 0x0284, 3, 0x0920, 1), /* MX6Q_PAD_CSI0_DAT11__UART1_RXD */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT11, 0x0654, 0x0284, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT11__SDMA_DEBUG_PC_5 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT11, 0x0654, 0x0284, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT11__GPIO_5_29 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT11, 0x0654, 0x0284, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT11__MMDC_MMDC_DEBUG_34 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT11, 0x0654, 0x0284, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT11__CHEETAH_TRACE_8 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT12, 0x0658, 0x0288, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT12__IPU1_CSI0_D_12 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT12, 0x0658, 0x0288, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT12__WEIM_WEIM_D_8 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT12, 0x0658, 0x0288, 2, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT12__PCIE_CTRL_MUX_16 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT12, 0x0658, 0x0288, 3, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT12__UART4_TXD */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT12, 0x0658, 0x0288, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT12__SDMA_DEBUG_PC_6 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT12, 0x0658, 0x0288, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT12__GPIO_5_30 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT12, 0x0658, 0x0288, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT12__MMDC_MMDC_DEBUG_35 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT12, 0x0658, 0x0288, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT12__CHEETAH_TRACE_9 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT13, 0x065C, 0x028C, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT13__IPU1_CSI0_D_13 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT13, 0x065C, 0x028C, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT13__WEIM_WEIM_D_9 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT13, 0x065C, 0x028C, 2, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT13__PCIE_CTRL_MUX_17 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT13, 0x065C, 0x028C, 3, 0x0938, 3), /* MX6Q_PAD_CSI0_DAT13__UART4_RXD */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT13, 0x065C, 0x028C, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT13__SDMA_DEBUG_PC_7 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT13, 0x065C, 0x028C, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT13__GPIO_5_31 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT13, 0x065C, 0x028C, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT13__MMDC_MMDC_DEBUG_36 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT13, 0x065C, 0x028C, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT13__CHEETAH_TRACE_10 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT14, 0x0660, 0x0290, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT14__IPU1_CSI0_D_14 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT14, 0x0660, 0x0290, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT14__WEIM_WEIM_D_10 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT14, 0x0660, 0x0290, 2, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT14__PCIE_CTRL_MUX_18 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT14, 0x0660, 0x0290, 3, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT14__UART5_TXD */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT14, 0x0660, 0x0290, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT14__SDMA_DEBUG_PC_8 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT14, 0x0660, 0x0290, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT14__GPIO_6_0 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT14, 0x0660, 0x0290, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT14__MMDC_MMDC_DEBUG_37 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT14, 0x0660, 0x0290, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT14__CHEETAH_TRACE_11 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT15, 0x0664, 0x0294, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT15__IPU1_CSI0_D_15 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT15, 0x0664, 0x0294, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT15__WEIM_WEIM_D_11 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT15, 0x0664, 0x0294, 2, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT15__PCIE_CTRL_MUX_19 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT15, 0x0664, 0x0294, 3, 0x0940, 3), /* MX6Q_PAD_CSI0_DAT15__UART5_RXD */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT15, 0x0664, 0x0294, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT15__SDMA_DEBUG_PC_9 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT15, 0x0664, 0x0294, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT15__GPIO_6_1 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT15, 0x0664, 0x0294, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT15__MMDC_MMDC_DEBUG_38 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT15, 0x0664, 0x0294, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT15__CHEETAH_TRACE_12 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT16, 0x0668, 0x0298, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT16__IPU1_CSI0_D_16 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT16, 0x0668, 0x0298, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT16__WEIM_WEIM_D_12 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT16, 0x0668, 0x0298, 2, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT16__PCIE_CTRL_MUX_20 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT16, 0x0668, 0x0298, 3, 0x0934, 0), /* MX6Q_PAD_CSI0_DAT16__UART4_RTS */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT16, 0x0668, 0x0298, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT16__SDMA_DEBUG_PC_10 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT16, 0x0668, 0x0298, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT16__GPIO_6_2 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT16, 0x0668, 0x0298, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT16__MMDC_MMDC_DEBUG_39 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT16, 0x0668, 0x0298, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT16__CHEETAH_TRACE_13 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT17, 0x066C, 0x029C, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT17__IPU1_CSI0_D_17 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT17, 0x066C, 0x029C, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT17__WEIM_WEIM_D_13 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT17, 0x066C, 0x029C, 2, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT17__PCIE_CTRL_MUX_21 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT17, 0x066C, 0x029C, 3, 0x0934, 1), /* MX6Q_PAD_CSI0_DAT17__UART4_CTS */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT17, 0x066C, 0x029C, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT17__SDMA_DEBUG_PC_11 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT17, 0x066C, 0x029C, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT17__GPIO_6_3 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT17, 0x066C, 0x029C, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT17__MMDC_MMDC_DEBUG_40 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT17, 0x066C, 0x029C, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT17__CHEETAH_TRACE_14 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT18, 0x0670, 0x02A0, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT18__IPU1_CSI0_D_18 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT18, 0x0670, 0x02A0, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT18__WEIM_WEIM_D_14 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT18, 0x0670, 0x02A0, 2, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT18__PCIE_CTRL_MUX_22 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT18, 0x0670, 0x02A0, 3, 0x093C, 2), /* MX6Q_PAD_CSI0_DAT18__UART5_RTS */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT18, 0x0670, 0x02A0, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT18__SDMA_DEBUG_PC_12 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT18, 0x0670, 0x02A0, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT18__GPIO_6_4 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT18, 0x0670, 0x02A0, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT18__MMDC_MMDC_DEBUG_41 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT18, 0x0670, 0x02A0, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT18__CHEETAH_TRACE_15 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT19, 0x0674, 0x02A4, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT19__IPU1_CSI0_D_19 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT19, 0x0674, 0x02A4, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT19__WEIM_WEIM_D_15 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT19, 0x0674, 0x02A4, 2, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT19__PCIE_CTRL_MUX_23 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT19, 0x0674, 0x02A4, 3, 0x093C, 3), /* MX6Q_PAD_CSI0_DAT19__UART5_CTS */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT19, 0x0674, 0x02A4, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT19__SDMA_DEBUG_PC_13 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT19, 0x0674, 0x02A4, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT19__GPIO_6_5 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT19, 0x0674, 0x02A4, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT19__MMDC_MMDC_DEBUG_42 */
- IMX_PIN_REG(MX6Q_PAD_CSI0_DAT19, 0x0674, 0x02A4, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT19__ANATOP_TESTO_9 */
- IMX_PIN_REG(MX6Q_PAD_JTAG_TMS, 0x0678, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_JTAG_TMS__SJC_TMS */
- IMX_PIN_REG(MX6Q_PAD_JTAG_MOD, 0x067C, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_JTAG_MOD__SJC_MOD */
- IMX_PIN_REG(MX6Q_PAD_JTAG_TRSTB, 0x0680, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_JTAG_TRSTB__SJC_TRSTB */
- IMX_PIN_REG(MX6Q_PAD_JTAG_TDI, 0x0684, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_JTAG_TDI__SJC_TDI */
- IMX_PIN_REG(MX6Q_PAD_JTAG_TCK, 0x0688, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_JTAG_TCK__SJC_TCK */
- IMX_PIN_REG(MX6Q_PAD_JTAG_TDO, 0x068C, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_JTAG_TDO__SJC_TDO */
- IMX_PIN_REG(MX6Q_PAD_LVDS1_TX3_P, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_LVDS1_TX3_P__LDB_LVDS1_TX3 */
- IMX_PIN_REG(MX6Q_PAD_LVDS1_TX2_P, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_LVDS1_TX2_P__LDB_LVDS1_TX2 */
- IMX_PIN_REG(MX6Q_PAD_LVDS1_CLK_P, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_LVDS1_CLK_P__LDB_LVDS1_CLK */
- IMX_PIN_REG(MX6Q_PAD_LVDS1_TX1_P, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_LVDS1_TX1_P__LDB_LVDS1_TX1 */
- IMX_PIN_REG(MX6Q_PAD_LVDS1_TX0_P, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_LVDS1_TX0_P__LDB_LVDS1_TX0 */
- IMX_PIN_REG(MX6Q_PAD_LVDS0_TX3_P, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_LVDS0_TX3_P__LDB_LVDS0_TX3 */
- IMX_PIN_REG(MX6Q_PAD_LVDS0_CLK_P, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_LVDS0_CLK_P__LDB_LVDS0_CLK */
- IMX_PIN_REG(MX6Q_PAD_LVDS0_TX2_P, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_LVDS0_TX2_P__LDB_LVDS0_TX2 */
- IMX_PIN_REG(MX6Q_PAD_LVDS0_TX1_P, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_LVDS0_TX1_P__LDB_LVDS0_TX1 */
- IMX_PIN_REG(MX6Q_PAD_LVDS0_TX0_P, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_LVDS0_TX0_P__LDB_LVDS0_TX0 */
- IMX_PIN_REG(MX6Q_PAD_TAMPER, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_TAMPER__SNVS_LP_WRAP_SNVS_TD1 */
- IMX_PIN_REG(MX6Q_PAD_PMIC_ON_REQ, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_PMIC_ON_REQ__SNVS_LPWRAP_WKALM */
- IMX_PIN_REG(MX6Q_PAD_PMIC_STBY_REQ, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_PMIC_STBY_REQ__CCM_PMIC_STBYRQ */
- IMX_PIN_REG(MX6Q_PAD_POR_B, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_POR_B__SRC_POR_B */
- IMX_PIN_REG(MX6Q_PAD_BOOT_MODE1, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_BOOT_MODE1__SRC_BOOT_MODE_1 */
- IMX_PIN_REG(MX6Q_PAD_RESET_IN_B, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_RESET_IN_B__SRC_RESET_B */
- IMX_PIN_REG(MX6Q_PAD_BOOT_MODE0, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_BOOT_MODE0__SRC_BOOT_MODE_0 */
- IMX_PIN_REG(MX6Q_PAD_TEST_MODE, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_TEST_MODE__TCU_TEST_MODE */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT7, 0x0690, 0x02A8, 0, 0x0000, 0), /* MX6Q_PAD_SD3_DAT7__USDHC3_DAT7 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT7, 0x0690, 0x02A8, 1, 0x0000, 0), /* MX6Q_PAD_SD3_DAT7__UART1_TXD */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT7, 0x0690, 0x02A8, 2, 0x0000, 0), /* MX6Q_PAD_SD3_DAT7__PCIE_CTRL_MUX_24 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT7, 0x0690, 0x02A8, 3, 0x0000, 0), /* MX6Q_PAD_SD3_DAT7__USBOH3_UH3_DFD_OUT_0 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT7, 0x0690, 0x02A8, 4, 0x0000, 0), /* MX6Q_PAD_SD3_DAT7__USBOH3_UH2_DFD_OUT_0 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT7, 0x0690, 0x02A8, 5, 0x0000, 0), /* MX6Q_PAD_SD3_DAT7__GPIO_6_17 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT7, 0x0690, 0x02A8, 6, 0x0000, 0), /* MX6Q_PAD_SD3_DAT7__MIPI_CORE_DPHY_IN_12 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT7, 0x0690, 0x02A8, 7, 0x0000, 0), /* MX6Q_PAD_SD3_DAT7__USBPHY2_CLK20DIV */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT6, 0x0694, 0x02AC, 0, 0x0000, 0), /* MX6Q_PAD_SD3_DAT6__USDHC3_DAT6 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT6, 0x0694, 0x02AC, 1, 0x0920, 3), /* MX6Q_PAD_SD3_DAT6__UART1_RXD */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT6, 0x0694, 0x02AC, 2, 0x0000, 0), /* MX6Q_PAD_SD3_DAT6__PCIE_CTRL_MUX_25 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT6, 0x0694, 0x02AC, 3, 0x0000, 0), /* MX6Q_PAD_SD3_DAT6__USBOH3_UH3_DFD_OUT_1 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT6, 0x0694, 0x02AC, 4, 0x0000, 0), /* MX6Q_PAD_SD3_DAT6__USBOH3_UH2_DFD_OUT_1 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT6, 0x0694, 0x02AC, 5, 0x0000, 0), /* MX6Q_PAD_SD3_DAT6__GPIO_6_18 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT6, 0x0694, 0x02AC, 6, 0x0000, 0), /* MX6Q_PAD_SD3_DAT6__MIPI_CORE_DPHY_IN_13 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT6, 0x0694, 0x02AC, 7, 0x0000, 0), /* MX6Q_PAD_SD3_DAT6__ANATOP_TESTO_10 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT5, 0x0698, 0x02B0, 0, 0x0000, 0), /* MX6Q_PAD_SD3_DAT5__USDHC3_DAT5 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT5, 0x0698, 0x02B0, 1, 0x0000, 0), /* MX6Q_PAD_SD3_DAT5__UART2_TXD */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT5, 0x0698, 0x02B0, 2, 0x0000, 0), /* MX6Q_PAD_SD3_DAT5__PCIE_CTRL_MUX_26 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT5, 0x0698, 0x02B0, 3, 0x0000, 0), /* MX6Q_PAD_SD3_DAT5__USBOH3_UH3_DFD_OUT_2 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT5, 0x0698, 0x02B0, 4, 0x0000, 0), /* MX6Q_PAD_SD3_DAT5__USBOH3_UH2_DFD_OUT_2 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT5, 0x0698, 0x02B0, 5, 0x0000, 0), /* MX6Q_PAD_SD3_DAT5__GPIO_7_0 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT5, 0x0698, 0x02B0, 6, 0x0000, 0), /* MX6Q_PAD_SD3_DAT5__MIPI_CORE_DPHY_IN_14 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT5, 0x0698, 0x02B0, 7, 0x0000, 0), /* MX6Q_PAD_SD3_DAT5__ANATOP_TESTO_11 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT4, 0x069C, 0x02B4, 0, 0x0000, 0), /* MX6Q_PAD_SD3_DAT4__USDHC3_DAT4 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT4, 0x069C, 0x02B4, 1, 0x0928, 5), /* MX6Q_PAD_SD3_DAT4__UART2_RXD */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT4, 0x069C, 0x02B4, 2, 0x0000, 0), /* MX6Q_PAD_SD3_DAT4__PCIE_CTRL_MUX_27 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT4, 0x069C, 0x02B4, 3, 0x0000, 0), /* MX6Q_PAD_SD3_DAT4__USBOH3_UH3_DFD_OUT_3 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT4, 0x069C, 0x02B4, 4, 0x0000, 0), /* MX6Q_PAD_SD3_DAT4__USBOH3_UH2_DFD_OUT_3 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT4, 0x069C, 0x02B4, 5, 0x0000, 0), /* MX6Q_PAD_SD3_DAT4__GPIO_7_1 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT4, 0x069C, 0x02B4, 6, 0x0000, 0), /* MX6Q_PAD_SD3_DAT4__MIPI_CORE_DPHY_IN_15 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT4, 0x069C, 0x02B4, 7, 0x0000, 0), /* MX6Q_PAD_SD3_DAT4__ANATOP_TESTO_12 */
- IMX_PIN_REG(MX6Q_PAD_SD3_CMD, 0x06A0, 0x02B8, 0, 0x0000, 0), /* MX6Q_PAD_SD3_CMD__USDHC3_CMD */
- IMX_PIN_REG(MX6Q_PAD_SD3_CMD, 0x06A0, 0x02B8, 1, 0x0924, 2), /* MX6Q_PAD_SD3_CMD__UART2_CTS */
- IMX_PIN_REG(MX6Q_PAD_SD3_CMD, 0x06A0, 0x02B8, 2, 0x0000, 0), /* MX6Q_PAD_SD3_CMD__CAN1_TXCAN */
- IMX_PIN_REG(MX6Q_PAD_SD3_CMD, 0x06A0, 0x02B8, 3, 0x0000, 0), /* MX6Q_PAD_SD3_CMD__USBOH3_UH3_DFD_OUT_4 */
- IMX_PIN_REG(MX6Q_PAD_SD3_CMD, 0x06A0, 0x02B8, 4, 0x0000, 0), /* MX6Q_PAD_SD3_CMD__USBOH3_UH2_DFD_OUT_4 */
- IMX_PIN_REG(MX6Q_PAD_SD3_CMD, 0x06A0, 0x02B8, 5, 0x0000, 0), /* MX6Q_PAD_SD3_CMD__GPIO_7_2 */
- IMX_PIN_REG(MX6Q_PAD_SD3_CMD, 0x06A0, 0x02B8, 6, 0x0000, 0), /* MX6Q_PAD_SD3_CMD__MIPI_CORE_DPHY_IN_16 */
- IMX_PIN_REG(MX6Q_PAD_SD3_CMD, 0x06A0, 0x02B8, 7, 0x0000, 0), /* MX6Q_PAD_SD3_CMD__ANATOP_TESTO_13 */
- IMX_PIN_REG(MX6Q_PAD_SD3_CLK, 0x06A4, 0x02BC, 0, 0x0000, 0), /* MX6Q_PAD_SD3_CLK__USDHC3_CLK */
- IMX_PIN_REG(MX6Q_PAD_SD3_CLK, 0x06A4, 0x02BC, 1, 0x0924, 3), /* MX6Q_PAD_SD3_CLK__UART2_RTS */
- IMX_PIN_REG(MX6Q_PAD_SD3_CLK, 0x06A4, 0x02BC, 2, 0x07E4, 2), /* MX6Q_PAD_SD3_CLK__CAN1_RXCAN */
- IMX_PIN_REG(MX6Q_PAD_SD3_CLK, 0x06A4, 0x02BC, 3, 0x0000, 0), /* MX6Q_PAD_SD3_CLK__USBOH3_UH3_DFD_OUT_5 */
- IMX_PIN_REG(MX6Q_PAD_SD3_CLK, 0x06A4, 0x02BC, 4, 0x0000, 0), /* MX6Q_PAD_SD3_CLK__USBOH3_UH2_DFD_OUT_5 */
- IMX_PIN_REG(MX6Q_PAD_SD3_CLK, 0x06A4, 0x02BC, 5, 0x0000, 0), /* MX6Q_PAD_SD3_CLK__GPIO_7_3 */
- IMX_PIN_REG(MX6Q_PAD_SD3_CLK, 0x06A4, 0x02BC, 6, 0x0000, 0), /* MX6Q_PAD_SD3_CLK__MIPI_CORE_DPHY_IN_17 */
- IMX_PIN_REG(MX6Q_PAD_SD3_CLK, 0x06A4, 0x02BC, 7, 0x0000, 0), /* MX6Q_PAD_SD3_CLK__ANATOP_TESTO_14 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT0, 0x06A8, 0x02C0, 0, 0x0000, 0), /* MX6Q_PAD_SD3_DAT0__USDHC3_DAT0 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT0, 0x06A8, 0x02C0, 1, 0x091C, 2), /* MX6Q_PAD_SD3_DAT0__UART1_CTS */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT0, 0x06A8, 0x02C0, 2, 0x0000, 0), /* MX6Q_PAD_SD3_DAT0__CAN2_TXCAN */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT0, 0x06A8, 0x02C0, 3, 0x0000, 0), /* MX6Q_PAD_SD3_DAT0__USBOH3_UH3_DFD_OUT_6 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT0, 0x06A8, 0x02C0, 4, 0x0000, 0), /* MX6Q_PAD_SD3_DAT0__USBOH3_UH2_DFD_OUT_6 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT0, 0x06A8, 0x02C0, 5, 0x0000, 0), /* MX6Q_PAD_SD3_DAT0__GPIO_7_4 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT0, 0x06A8, 0x02C0, 6, 0x0000, 0), /* MX6Q_PAD_SD3_DAT0__MIPI_CORE_DPHY_IN_18 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT0, 0x06A8, 0x02C0, 7, 0x0000, 0), /* MX6Q_PAD_SD3_DAT0__ANATOP_TESTO_15 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT1, 0x06AC, 0x02C4, 0, 0x0000, 0), /* MX6Q_PAD_SD3_DAT1__USDHC3_DAT1 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT1, 0x06AC, 0x02C4, 1, 0x091C, 3), /* MX6Q_PAD_SD3_DAT1__UART1_RTS */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT1, 0x06AC, 0x02C4, 2, 0x07E8, 1), /* MX6Q_PAD_SD3_DAT1__CAN2_RXCAN */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT1, 0x06AC, 0x02C4, 3, 0x0000, 0), /* MX6Q_PAD_SD3_DAT1__USBOH3_UH3_DFD_OUT_7 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT1, 0x06AC, 0x02C4, 4, 0x0000, 0), /* MX6Q_PAD_SD3_DAT1__USBOH3_UH2_DFD_OUT_7 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT1, 0x06AC, 0x02C4, 5, 0x0000, 0), /* MX6Q_PAD_SD3_DAT1__GPIO_7_5 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT1, 0x06AC, 0x02C4, 6, 0x0000, 0), /* MX6Q_PAD_SD3_DAT1__MIPI_CORE_DPHY_IN_19 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT1, 0x06AC, 0x02C4, 7, 0x0000, 0), /* MX6Q_PAD_SD3_DAT1__ANATOP_TESTI_0 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT2, 0x06B0, 0x02C8, 0, 0x0000, 0), /* MX6Q_PAD_SD3_DAT2__USDHC3_DAT2 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT2, 0x06B0, 0x02C8, 2, 0x0000, 0), /* MX6Q_PAD_SD3_DAT2__PCIE_CTRL_MUX_28 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT2, 0x06B0, 0x02C8, 3, 0x0000, 0), /* MX6Q_PAD_SD3_DAT2__USBOH3_UH3_DFD_OUT_8 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT2, 0x06B0, 0x02C8, 4, 0x0000, 0), /* MX6Q_PAD_SD3_DAT2__USBOH3_UH2_DFD_OUT_8 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT2, 0x06B0, 0x02C8, 5, 0x0000, 0), /* MX6Q_PAD_SD3_DAT2__GPIO_7_6 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT2, 0x06B0, 0x02C8, 6, 0x0000, 0), /* MX6Q_PAD_SD3_DAT2__MIPI_CORE_DPHY_IN_20 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT2, 0x06B0, 0x02C8, 7, 0x0000, 0), /* MX6Q_PAD_SD3_DAT2__ANATOP_TESTI_1 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT3, 0x06B4, 0x02CC, 0, 0x0000, 0), /* MX6Q_PAD_SD3_DAT3__USDHC3_DAT3 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT3, 0x06B4, 0x02CC, 1, 0x092C, 4), /* MX6Q_PAD_SD3_DAT3__UART3_CTS */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT3, 0x06B4, 0x02CC, 2, 0x0000, 0), /* MX6Q_PAD_SD3_DAT3__PCIE_CTRL_MUX_29 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT3, 0x06B4, 0x02CC, 3, 0x0000, 0), /* MX6Q_PAD_SD3_DAT3__USBOH3_UH3_DFD_OUT_9 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT3, 0x06B4, 0x02CC, 4, 0x0000, 0), /* MX6Q_PAD_SD3_DAT3__USBOH3_UH2_DFD_OUT_9 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT3, 0x06B4, 0x02CC, 5, 0x0000, 0), /* MX6Q_PAD_SD3_DAT3__GPIO_7_7 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT3, 0x06B4, 0x02CC, 6, 0x0000, 0), /* MX6Q_PAD_SD3_DAT3__MIPI_CORE_DPHY_IN_21 */
- IMX_PIN_REG(MX6Q_PAD_SD3_DAT3, 0x06B4, 0x02CC, 7, 0x0000, 0), /* MX6Q_PAD_SD3_DAT3__ANATOP_TESTI_2 */
- IMX_PIN_REG(MX6Q_PAD_SD3_RST, 0x06B8, 0x02D0, 0, 0x0000, 0), /* MX6Q_PAD_SD3_RST__USDHC3_RST */
- IMX_PIN_REG(MX6Q_PAD_SD3_RST, 0x06B8, 0x02D0, 1, 0x092C, 5), /* MX6Q_PAD_SD3_RST__UART3_RTS */
- IMX_PIN_REG(MX6Q_PAD_SD3_RST, 0x06B8, 0x02D0, 2, 0x0000, 0), /* MX6Q_PAD_SD3_RST__PCIE_CTRL_MUX_30 */
- IMX_PIN_REG(MX6Q_PAD_SD3_RST, 0x06B8, 0x02D0, 3, 0x0000, 0), /* MX6Q_PAD_SD3_RST__USBOH3_UH3_DFD_OUT_10 */
- IMX_PIN_REG(MX6Q_PAD_SD3_RST, 0x06B8, 0x02D0, 4, 0x0000, 0), /* MX6Q_PAD_SD3_RST__USBOH3_UH2_DFD_OUT_10 */
- IMX_PIN_REG(MX6Q_PAD_SD3_RST, 0x06B8, 0x02D0, 5, 0x0000, 0), /* MX6Q_PAD_SD3_RST__GPIO_7_8 */
- IMX_PIN_REG(MX6Q_PAD_SD3_RST, 0x06B8, 0x02D0, 6, 0x0000, 0), /* MX6Q_PAD_SD3_RST__MIPI_CORE_DPHY_IN_22 */
- IMX_PIN_REG(MX6Q_PAD_SD3_RST, 0x06B8, 0x02D0, 7, 0x0000, 0), /* MX6Q_PAD_SD3_RST__ANATOP_ANATOP_TESTI_3 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CLE, 0x06BC, 0x02D4, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_CLE__RAWNAND_CLE */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CLE, 0x06BC, 0x02D4, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_CLE__IPU2_SISG_4 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CLE, 0x06BC, 0x02D4, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_CLE__PCIE_CTRL_MUX_31 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CLE, 0x06BC, 0x02D4, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_CLE__USBOH3_UH3_DFD_OT11 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CLE, 0x06BC, 0x02D4, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_CLE__USBOH3_UH2_DFD_OT11 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CLE, 0x06BC, 0x02D4, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_CLE__GPIO_6_7 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CLE, 0x06BC, 0x02D4, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_CLE__MIPI_CORE_DPHY_IN23 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CLE, 0x06BC, 0x02D4, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_CLE__TPSMP_HTRANS_0 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_ALE, 0x06C0, 0x02D8, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_ALE__RAWNAND_ALE */
- IMX_PIN_REG(MX6Q_PAD_NANDF_ALE, 0x06C0, 0x02D8, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_ALE__USDHC4_RST */
- IMX_PIN_REG(MX6Q_PAD_NANDF_ALE, 0x06C0, 0x02D8, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_ALE__PCIE_CTRL_MUX_0 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_ALE, 0x06C0, 0x02D8, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_ALE__USBOH3_UH3_DFD_OT12 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_ALE, 0x06C0, 0x02D8, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_ALE__USBOH3_UH2_DFD_OT12 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_ALE, 0x06C0, 0x02D8, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_ALE__GPIO_6_8 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_ALE, 0x06C0, 0x02D8, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_ALE__MIPI_CR_DPHY_IN_24 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_ALE, 0x06C0, 0x02D8, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_ALE__TPSMP_HTRANS_1 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_WP_B, 0x06C4, 0x02DC, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_WP_B__RAWNAND_RESETN */
- IMX_PIN_REG(MX6Q_PAD_NANDF_WP_B, 0x06C4, 0x02DC, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_WP_B__IPU2_SISG_5 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_WP_B, 0x06C4, 0x02DC, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_WP_B__PCIE_CTRL__MUX_1 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_WP_B, 0x06C4, 0x02DC, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_WP_B__USBOH3_UH3_DFDOT13 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_WP_B, 0x06C4, 0x02DC, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_WP_B__USBOH3_UH2_DFDOT13 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_WP_B, 0x06C4, 0x02DC, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_WP_B__GPIO_6_9 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_WP_B, 0x06C4, 0x02DC, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_WP_B__MIPI_CR_DPHY_OUT32 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_WP_B, 0x06C4, 0x02DC, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_WP_B__PL301_PER1_HSIZE_0 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_RB0, 0x06C8, 0x02E0, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_RB0__RAWNAND_READY0 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_RB0, 0x06C8, 0x02E0, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_RB0__IPU2_DI0_PIN1 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_RB0, 0x06C8, 0x02E0, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_RB0__PCIE_CTRL_MUX_2 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_RB0, 0x06C8, 0x02E0, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_RB0__USBOH3_UH3_DFD_OT14 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_RB0, 0x06C8, 0x02E0, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_RB0__USBOH3_UH2_DFD_OT14 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_RB0, 0x06C8, 0x02E0, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_RB0__GPIO_6_10 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_RB0, 0x06C8, 0x02E0, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_RB0__MIPI_CR_DPHY_OUT_33 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_RB0, 0x06C8, 0x02E0, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_RB0__PL301_PER1_HSIZE_1 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS0, 0x06CC, 0x02E4, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_CS0__RAWNAND_CE0N */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS0, 0x06CC, 0x02E4, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_CS0__USBOH3_UH3_DFD_OT15 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS0, 0x06CC, 0x02E4, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_CS0__USBOH3_UH2_DFD_OT15 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS0, 0x06CC, 0x02E4, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_CS0__GPIO_6_11 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS0, 0x06CC, 0x02E4, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_CS0__PL301_PER1_HSIZE_2 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS1, 0x06D0, 0x02E8, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_CS1__RAWNAND_CE1N */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS1, 0x06D0, 0x02E8, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_CS1__USDHC4_VSELECT */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS1, 0x06D0, 0x02E8, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_CS1__USDHC3_VSELECT */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS1, 0x06D0, 0x02E8, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_CS1__PCIE_CTRL_MUX_3 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS1, 0x06D0, 0x02E8, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_CS1__GPIO_6_14 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS1, 0x06D0, 0x02E8, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_CS1__PL301_PER1_HRDYOUT */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS2, 0x06D4, 0x02EC, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_CS2__RAWNAND_CE2N */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS2, 0x06D4, 0x02EC, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_CS2__IPU1_SISG_0 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS2, 0x06D4, 0x02EC, 2, 0x0874, 1), /* MX6Q_PAD_NANDF_CS2__ESAI1_TX0 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS2, 0x06D4, 0x02EC, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_CS2__WEIM_WEIM_CRE */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS2, 0x06D4, 0x02EC, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_CS2__CCM_CLKO2 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS2, 0x06D4, 0x02EC, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_CS2__GPIO_6_15 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS2, 0x06D4, 0x02EC, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_CS2__IPU2_SISG_0 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS3, 0x06D8, 0x02F0, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_CS3__RAWNAND_CE3N */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS3, 0x06D8, 0x02F0, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_CS3__IPU1_SISG_1 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS3, 0x06D8, 0x02F0, 2, 0x0878, 1), /* MX6Q_PAD_NANDF_CS3__ESAI1_TX1 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS3, 0x06D8, 0x02F0, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_CS3__WEIM_WEIM_A_26 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS3, 0x06D8, 0x02F0, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_CS3__PCIE_CTRL_MUX_4 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS3, 0x06D8, 0x02F0, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_CS3__GPIO_6_16 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS3, 0x06D8, 0x02F0, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_CS3__IPU2_SISG_1 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_CS3, 0x06D8, 0x02F0, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_CS3__TPSMP_CLK */
- IMX_PIN_REG(MX6Q_PAD_SD4_CMD, 0x06DC, 0x02F4, 0, 0x0000, 0), /* MX6Q_PAD_SD4_CMD__USDHC4_CMD */
- IMX_PIN_REG(MX6Q_PAD_SD4_CMD, 0x06DC, 0x02F4, 1, 0x0000, 0), /* MX6Q_PAD_SD4_CMD__RAWNAND_RDN */
- IMX_PIN_REG(MX6Q_PAD_SD4_CMD, 0x06DC, 0x02F4, 2, 0x0000, 0), /* MX6Q_PAD_SD4_CMD__UART3_TXD */
- IMX_PIN_REG(MX6Q_PAD_SD4_CMD, 0x06DC, 0x02F4, 4, 0x0000, 0), /* MX6Q_PAD_SD4_CMD__PCIE_CTRL_MUX_5 */
- IMX_PIN_REG(MX6Q_PAD_SD4_CMD, 0x06DC, 0x02F4, 5, 0x0000, 0), /* MX6Q_PAD_SD4_CMD__GPIO_7_9 */
- IMX_PIN_REG(MX6Q_PAD_SD4_CMD, 0x06DC, 0x02F4, 7, 0x0000, 0), /* MX6Q_PAD_SD4_CMD__TPSMP_HDATA_DIR */
- IMX_PIN_REG(MX6Q_PAD_SD4_CLK, 0x06E0, 0x02F8, 0, 0x0000, 0), /* MX6Q_PAD_SD4_CLK__USDHC4_CLK */
- IMX_PIN_REG(MX6Q_PAD_SD4_CLK, 0x06E0, 0x02F8, 1, 0x0000, 0), /* MX6Q_PAD_SD4_CLK__RAWNAND_WRN */
- IMX_PIN_REG(MX6Q_PAD_SD4_CLK, 0x06E0, 0x02F8, 2, 0x0930, 3), /* MX6Q_PAD_SD4_CLK__UART3_RXD */
- IMX_PIN_REG(MX6Q_PAD_SD4_CLK, 0x06E0, 0x02F8, 4, 0x0000, 0), /* MX6Q_PAD_SD4_CLK__PCIE_CTRL_MUX_6 */
- IMX_PIN_REG(MX6Q_PAD_SD4_CLK, 0x06E0, 0x02F8, 5, 0x0000, 0), /* MX6Q_PAD_SD4_CLK__GPIO_7_10 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D0, 0x06E4, 0x02FC, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_D0__RAWNAND_D0 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D0, 0x06E4, 0x02FC, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_D0__USDHC1_DAT4 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D0, 0x06E4, 0x02FC, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_D0__GPU3D_GPU_DBG_OUT_0 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D0, 0x06E4, 0x02FC, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_D0__USBOH3_UH2_DFD_OUT16 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D0, 0x06E4, 0x02FC, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_D0__USBOH3_UH3_DFD_OUT16 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D0, 0x06E4, 0x02FC, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_D0__GPIO_2_0 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D0, 0x06E4, 0x02FC, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_D0__IPU1_IPU_DIAG_BUS_0 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D0, 0x06E4, 0x02FC, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_D0__IPU2_IPU_DIAG_BUS_0 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D1, 0x06E8, 0x0300, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_D1__RAWNAND_D1 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D1, 0x06E8, 0x0300, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_D1__USDHC1_DAT5 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D1, 0x06E8, 0x0300, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_D1__GPU3D_GPU_DEBUG_OUT1 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D1, 0x06E8, 0x0300, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_D1__USBOH3_UH2_DFD_OUT17 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D1, 0x06E8, 0x0300, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_D1__USBOH3_UH3_DFD_OUT17 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D1, 0x06E8, 0x0300, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_D1__GPIO_2_1 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D1, 0x06E8, 0x0300, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_D1__IPU1_IPU_DIAG_BUS_1 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D1, 0x06E8, 0x0300, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_D1__IPU2_IPU_DIAG_BUS_1 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D2, 0x06EC, 0x0304, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_D2__RAWNAND_D2 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D2, 0x06EC, 0x0304, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_D2__USDHC1_DAT6 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D2, 0x06EC, 0x0304, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_D2__GPU3D_GPU_DBG_OUT_2 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D2, 0x06EC, 0x0304, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_D2__USBOH3_UH2_DFD_OUT18 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D2, 0x06EC, 0x0304, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_D2__USBOH3_UH3_DFD_OUT18 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D2, 0x06EC, 0x0304, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_D2__GPIO_2_2 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D2, 0x06EC, 0x0304, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_D2__IPU1_IPU_DIAG_BUS_2 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D2, 0x06EC, 0x0304, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_D2__IPU2_IPU_DIAG_BUS_2 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D3, 0x06F0, 0x0308, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_D3__RAWNAND_D3 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D3, 0x06F0, 0x0308, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_D3__USDHC1_DAT7 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D3, 0x06F0, 0x0308, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_D3__GPU3D_GPU_DBG_OUT_3 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D3, 0x06F0, 0x0308, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_D3__USBOH3_UH2_DFD_OUT19 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D3, 0x06F0, 0x0308, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_D3__USBOH3_UH3_DFD_OUT19 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D3, 0x06F0, 0x0308, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_D3__GPIO_2_3 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D3, 0x06F0, 0x0308, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_D3__IPU1_IPU_DIAG_BUS_3 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D3, 0x06F0, 0x0308, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_D3__IPU2_IPU_DIAG_BUS_3 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D4, 0x06F4, 0x030C, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_D4__RAWNAND_D4 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D4, 0x06F4, 0x030C, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_D4__USDHC2_DAT4 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D4, 0x06F4, 0x030C, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_D4__GPU3D_GPU_DBG_OUT_4 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D4, 0x06F4, 0x030C, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_D4__USBOH3_UH2_DFD_OUT20 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D4, 0x06F4, 0x030C, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_D4__USBOH3_UH3_DFD_OUT20 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D4, 0x06F4, 0x030C, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_D4__GPIO_2_4 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D4, 0x06F4, 0x030C, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_D4__IPU1_IPU_DIAG_BUS_4 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D4, 0x06F4, 0x030C, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_D4__IPU2_IPU_DIAG_BUS_4 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D5, 0x06F8, 0x0310, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_D5__RAWNAND_D5 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D5, 0x06F8, 0x0310, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_D5__USDHC2_DAT5 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D5, 0x06F8, 0x0310, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_D5__GPU3D_GPU_DBG_OUT_5 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D5, 0x06F8, 0x0310, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_D5__USBOH3_UH2_DFD_OUT21 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D5, 0x06F8, 0x0310, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_D5__USBOH3_UH3_DFD_OUT21 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D5, 0x06F8, 0x0310, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_D5__GPIO_2_5 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D5, 0x06F8, 0x0310, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_D5__IPU1_IPU_DIAG_BUS_5 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D5, 0x06F8, 0x0310, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_D5__IPU2_IPU_DIAG_BUS_5 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D6, 0x06FC, 0x0314, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_D6__RAWNAND_D6 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D6, 0x06FC, 0x0314, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_D6__USDHC2_DAT6 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D6, 0x06FC, 0x0314, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_D6__GPU3D_GPU_DBG_OUT_6 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D6, 0x06FC, 0x0314, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_D6__USBOH3_UH2_DFD_OUT22 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D6, 0x06FC, 0x0314, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_D6__USBOH3_UH3_DFD_OUT22 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D6, 0x06FC, 0x0314, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_D6__GPIO_2_6 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D6, 0x06FC, 0x0314, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_D6__IPU1_IPU_DIAG_BUS_6 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D6, 0x06FC, 0x0314, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_D6__IPU2_IPU_DIAG_BUS_6 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D7, 0x0700, 0x0318, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_D7__RAWNAND_D7 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D7, 0x0700, 0x0318, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_D7__USDHC2_DAT7 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D7, 0x0700, 0x0318, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_D7__GPU3D_GPU_DBG_OUT_7 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D7, 0x0700, 0x0318, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_D7__USBOH3_UH2_DFD_OUT23 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D7, 0x0700, 0x0318, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_D7__USBOH3_UH3_DFD_OUT23 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D7, 0x0700, 0x0318, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_D7__GPIO_2_7 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D7, 0x0700, 0x0318, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_D7__IPU1_IPU_DIAG_BUS_7 */
- IMX_PIN_REG(MX6Q_PAD_NANDF_D7, 0x0700, 0x0318, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_D7__IPU2_IPU_DIAG_BUS_7 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT0, 0x0704, 0x031C, 0, 0x0000, 0), /* MX6Q_PAD_SD4_DAT0__RAWNAND_D8 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT0, 0x0704, 0x031C, 1, 0x0000, 0), /* MX6Q_PAD_SD4_DAT0__USDHC4_DAT0 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT0, 0x0704, 0x031C, 2, 0x0000, 0), /* MX6Q_PAD_SD4_DAT0__RAWNAND_DQS */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT0, 0x0704, 0x031C, 3, 0x0000, 0), /* MX6Q_PAD_SD4_DAT0__USBOH3_UH2_DFD_OUT24 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT0, 0x0704, 0x031C, 4, 0x0000, 0), /* MX6Q_PAD_SD4_DAT0__USBOH3_UH3_DFD_OUT24 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT0, 0x0704, 0x031C, 5, 0x0000, 0), /* MX6Q_PAD_SD4_DAT0__GPIO_2_8 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT0, 0x0704, 0x031C, 6, 0x0000, 0), /* MX6Q_PAD_SD4_DAT0__IPU1_IPU_DIAG_BUS_8 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT0, 0x0704, 0x031C, 7, 0x0000, 0), /* MX6Q_PAD_SD4_DAT0__IPU2_IPU_DIAG_BUS_8 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT1, 0x0708, 0x0320, 0, 0x0000, 0), /* MX6Q_PAD_SD4_DAT1__RAWNAND_D9 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT1, 0x0708, 0x0320, 1, 0x0000, 0), /* MX6Q_PAD_SD4_DAT1__USDHC4_DAT1 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT1, 0x0708, 0x0320, 2, 0x0000, 0), /* MX6Q_PAD_SD4_DAT1__PWM3_PWMO */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT1, 0x0708, 0x0320, 3, 0x0000, 0), /* MX6Q_PAD_SD4_DAT1__USBOH3_UH2_DFD_OUT25 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT1, 0x0708, 0x0320, 4, 0x0000, 0), /* MX6Q_PAD_SD4_DAT1__USBOH3_UH3_DFD_OUT25 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT1, 0x0708, 0x0320, 5, 0x0000, 0), /* MX6Q_PAD_SD4_DAT1__GPIO_2_9 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT1, 0x0708, 0x0320, 6, 0x0000, 0), /* MX6Q_PAD_SD4_DAT1__IPU1_IPU_DIAG_BUS_9 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT1, 0x0708, 0x0320, 7, 0x0000, 0), /* MX6Q_PAD_SD4_DAT1__IPU2_IPU_DIAG_BUS_9 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT2, 0x070C, 0x0324, 0, 0x0000, 0), /* MX6Q_PAD_SD4_DAT2__RAWNAND_D10 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT2, 0x070C, 0x0324, 1, 0x0000, 0), /* MX6Q_PAD_SD4_DAT2__USDHC4_DAT2 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT2, 0x070C, 0x0324, 2, 0x0000, 0), /* MX6Q_PAD_SD4_DAT2__PWM4_PWMO */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT2, 0x070C, 0x0324, 3, 0x0000, 0), /* MX6Q_PAD_SD4_DAT2__USBOH3_UH2_DFD_OUT26 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT2, 0x070C, 0x0324, 4, 0x0000, 0), /* MX6Q_PAD_SD4_DAT2__USBOH3_UH3_DFD_OUT26 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT2, 0x070C, 0x0324, 5, 0x0000, 0), /* MX6Q_PAD_SD4_DAT2__GPIO_2_10 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT2, 0x070C, 0x0324, 6, 0x0000, 0), /* MX6Q_PAD_SD4_DAT2__IPU1_IPU_DIAG_BUS_10 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT2, 0x070C, 0x0324, 7, 0x0000, 0), /* MX6Q_PAD_SD4_DAT2__IPU2_IPU_DIAG_BUS_10 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT3, 0x0710, 0x0328, 0, 0x0000, 0), /* MX6Q_PAD_SD4_DAT3__RAWNAND_D11 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT3, 0x0710, 0x0328, 1, 0x0000, 0), /* MX6Q_PAD_SD4_DAT3__USDHC4_DAT3 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT3, 0x0710, 0x0328, 3, 0x0000, 0), /* MX6Q_PAD_SD4_DAT3__USBOH3_UH2_DFD_OUT27 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT3, 0x0710, 0x0328, 4, 0x0000, 0), /* MX6Q_PAD_SD4_DAT3__USBOH3_UH3_DFD_OUT27 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT3, 0x0710, 0x0328, 5, 0x0000, 0), /* MX6Q_PAD_SD4_DAT3__GPIO_2_11 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT3, 0x0710, 0x0328, 6, 0x0000, 0), /* MX6Q_PAD_SD4_DAT3__IPU1_IPU_DIAG_BUS_11 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT3, 0x0710, 0x0328, 7, 0x0000, 0), /* MX6Q_PAD_SD4_DAT3__IPU2_IPU_DIAG_BUS_11 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT4, 0x0714, 0x032C, 0, 0x0000, 0), /* MX6Q_PAD_SD4_DAT4__RAWNAND_D12 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT4, 0x0714, 0x032C, 1, 0x0000, 0), /* MX6Q_PAD_SD4_DAT4__USDHC4_DAT4 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT4, 0x0714, 0x032C, 2, 0x0928, 6), /* MX6Q_PAD_SD4_DAT4__UART2_RXD */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT4, 0x0714, 0x032C, 3, 0x0000, 0), /* MX6Q_PAD_SD4_DAT4__USBOH3_UH2_DFD_OUT28 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT4, 0x0714, 0x032C, 4, 0x0000, 0), /* MX6Q_PAD_SD4_DAT4__USBOH3_UH3_DFD_OUT28 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT4, 0x0714, 0x032C, 5, 0x0000, 0), /* MX6Q_PAD_SD4_DAT4__GPIO_2_12 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT4, 0x0714, 0x032C, 6, 0x0000, 0), /* MX6Q_PAD_SD4_DAT4__IPU1_IPU_DIAG_BUS_12 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT4, 0x0714, 0x032C, 7, 0x0000, 0), /* MX6Q_PAD_SD4_DAT4__IPU2_IPU_DIAG_BUS_12 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT5, 0x0718, 0x0330, 0, 0x0000, 0), /* MX6Q_PAD_SD4_DAT5__RAWNAND_D13 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT5, 0x0718, 0x0330, 1, 0x0000, 0), /* MX6Q_PAD_SD4_DAT5__USDHC4_DAT5 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT5, 0x0718, 0x0330, 2, 0x0924, 4), /* MX6Q_PAD_SD4_DAT5__UART2_RTS */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT5, 0x0718, 0x0330, 3, 0x0000, 0), /* MX6Q_PAD_SD4_DAT5__USBOH3_UH2_DFD_OUT29 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT5, 0x0718, 0x0330, 4, 0x0000, 0), /* MX6Q_PAD_SD4_DAT5__USBOH3_UH3_DFD_OUT29 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT5, 0x0718, 0x0330, 5, 0x0000, 0), /* MX6Q_PAD_SD4_DAT5__GPIO_2_13 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT5, 0x0718, 0x0330, 6, 0x0000, 0), /* MX6Q_PAD_SD4_DAT5__IPU1_IPU_DIAG_BUS_13 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT5, 0x0718, 0x0330, 7, 0x0000, 0), /* MX6Q_PAD_SD4_DAT5__IPU2_IPU_DIAG_BUS_13 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT6, 0x071C, 0x0334, 0, 0x0000, 0), /* MX6Q_PAD_SD4_DAT6__RAWNAND_D14 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT6, 0x071C, 0x0334, 1, 0x0000, 0), /* MX6Q_PAD_SD4_DAT6__USDHC4_DAT6 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT6, 0x071C, 0x0334, 2, 0x0924, 5), /* MX6Q_PAD_SD4_DAT6__UART2_CTS */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT6, 0x071C, 0x0334, 3, 0x0000, 0), /* MX6Q_PAD_SD4_DAT6__USBOH3_UH2_DFD_OUT30 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT6, 0x071C, 0x0334, 4, 0x0000, 0), /* MX6Q_PAD_SD4_DAT6__USBOH3_UH3_DFD_OUT30 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT6, 0x071C, 0x0334, 5, 0x0000, 0), /* MX6Q_PAD_SD4_DAT6__GPIO_2_14 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT6, 0x071C, 0x0334, 6, 0x0000, 0), /* MX6Q_PAD_SD4_DAT6__IPU1_IPU_DIAG_BUS_14 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT6, 0x071C, 0x0334, 7, 0x0000, 0), /* MX6Q_PAD_SD4_DAT6__IPU2_IPU_DIAG_BUS_14 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT7, 0x0720, 0x0338, 0, 0x0000, 0), /* MX6Q_PAD_SD4_DAT7__RAWNAND_D15 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT7, 0x0720, 0x0338, 1, 0x0000, 0), /* MX6Q_PAD_SD4_DAT7__USDHC4_DAT7 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT7, 0x0720, 0x0338, 2, 0x0000, 0), /* MX6Q_PAD_SD4_DAT7__UART2_TXD */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT7, 0x0720, 0x0338, 3, 0x0000, 0), /* MX6Q_PAD_SD4_DAT7__USBOH3_UH2_DFD_OUT31 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT7, 0x0720, 0x0338, 4, 0x0000, 0), /* MX6Q_PAD_SD4_DAT7__USBOH3_UH3_DFD_OUT31 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT7, 0x0720, 0x0338, 5, 0x0000, 0), /* MX6Q_PAD_SD4_DAT7__GPIO_2_15 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT7, 0x0720, 0x0338, 6, 0x0000, 0), /* MX6Q_PAD_SD4_DAT7__IPU1_IPU_DIAG_BUS_15 */
- IMX_PIN_REG(MX6Q_PAD_SD4_DAT7, 0x0720, 0x0338, 7, 0x0000, 0), /* MX6Q_PAD_SD4_DAT7__IPU2_IPU_DIAG_BUS_15 */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT1, 0x0724, 0x033C, 0, 0x0000, 0), /* MX6Q_PAD_SD1_DAT1__USDHC1_DAT1 */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT1, 0x0724, 0x033C, 1, 0x0834, 1), /* MX6Q_PAD_SD1_DAT1__ECSPI5_SS0 */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT1, 0x0724, 0x033C, 2, 0x0000, 0), /* MX6Q_PAD_SD1_DAT1__PWM3_PWMO */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT1, 0x0724, 0x033C, 3, 0x0000, 0), /* MX6Q_PAD_SD1_DAT1__GPT_CAPIN2 */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT1, 0x0724, 0x033C, 4, 0x0000, 0), /* MX6Q_PAD_SD1_DAT1__PCIE_CTRL_MUX_7 */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT1, 0x0724, 0x033C, 5, 0x0000, 0), /* MX6Q_PAD_SD1_DAT1__GPIO_1_17 */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT1, 0x0724, 0x033C, 6, 0x0000, 0), /* MX6Q_PAD_SD1_DAT1__HDMI_TX_OPHYDTB_0 */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT1, 0x0724, 0x033C, 7, 0x0000, 0), /* MX6Q_PAD_SD1_DAT1__ANATOP_TESTO_8 */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT0, 0x0728, 0x0340, 0, 0x0000, 0), /* MX6Q_PAD_SD1_DAT0__USDHC1_DAT0 */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT0, 0x0728, 0x0340, 1, 0x082C, 1), /* MX6Q_PAD_SD1_DAT0__ECSPI5_MISO */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT0, 0x0728, 0x0340, 2, 0x0000, 0), /* MX6Q_PAD_SD1_DAT0__CAAM_WRAP_RNG_OSCOBS */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT0, 0x0728, 0x0340, 3, 0x0000, 0), /* MX6Q_PAD_SD1_DAT0__GPT_CAPIN1 */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT0, 0x0728, 0x0340, 4, 0x0000, 0), /* MX6Q_PAD_SD1_DAT0__PCIE_CTRL_MUX_8 */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT0, 0x0728, 0x0340, 5, 0x0000, 0), /* MX6Q_PAD_SD1_DAT0__GPIO_1_16 */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT0, 0x0728, 0x0340, 6, 0x0000, 0), /* MX6Q_PAD_SD1_DAT0__HDMI_TX_OPHYDTB_1 */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT0, 0x0728, 0x0340, 7, 0x0000, 0), /* MX6Q_PAD_SD1_DAT0__ANATOP_TESTO_7 */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT3, 0x072C, 0x0344, 0, 0x0000, 0), /* MX6Q_PAD_SD1_DAT3__USDHC1_DAT3 */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT3, 0x072C, 0x0344, 1, 0x0000, 0), /* MX6Q_PAD_SD1_DAT3__ECSPI5_SS2 */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT3, 0x072C, 0x0344, 2, 0x0000, 0), /* MX6Q_PAD_SD1_DAT3__GPT_CMPOUT3 */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT3, 0x072C, 0x0344, 3, 0x0000, 0), /* MX6Q_PAD_SD1_DAT3__PWM1_PWMO */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT3, 0x072C, 0x0344, 4, 0x0000, 0), /* MX6Q_PAD_SD1_DAT3__WDOG2_WDOG_B */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT3, 0x072C, 0x0344, 5, 0x0000, 0), /* MX6Q_PAD_SD1_DAT3__GPIO_1_21 */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT3, 0x072C, 0x0344, 6, 0x0000, 0), /* MX6Q_PAD_SD1_DAT3__WDOG2_WDOG_RST_B_DEB */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT3, 0x072C, 0x0344, 7, 0x0000, 0), /* MX6Q_PAD_SD1_DAT3__ANATOP_TESTO_6 */
- IMX_PIN_REG(MX6Q_PAD_SD1_CMD, 0x0730, 0x0348, 0, 0x0000, 0), /* MX6Q_PAD_SD1_CMD__USDHC1_CMD */
- IMX_PIN_REG(MX6Q_PAD_SD1_CMD, 0x0730, 0x0348, 1, 0x0830, 0), /* MX6Q_PAD_SD1_CMD__ECSPI5_MOSI */
- IMX_PIN_REG(MX6Q_PAD_SD1_CMD, 0x0730, 0x0348, 2, 0x0000, 0), /* MX6Q_PAD_SD1_CMD__PWM4_PWMO */
- IMX_PIN_REG(MX6Q_PAD_SD1_CMD, 0x0730, 0x0348, 3, 0x0000, 0), /* MX6Q_PAD_SD1_CMD__GPT_CMPOUT1 */
- IMX_PIN_REG(MX6Q_PAD_SD1_CMD, 0x0730, 0x0348, 5, 0x0000, 0), /* MX6Q_PAD_SD1_CMD__GPIO_1_18 */
- IMX_PIN_REG(MX6Q_PAD_SD1_CMD, 0x0730, 0x0348, 7, 0x0000, 0), /* MX6Q_PAD_SD1_CMD__ANATOP_TESTO_5 */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT2, 0x0734, 0x034C, 0, 0x0000, 0), /* MX6Q_PAD_SD1_DAT2__USDHC1_DAT2 */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT2, 0x0734, 0x034C, 1, 0x0838, 1), /* MX6Q_PAD_SD1_DAT2__ECSPI5_SS1 */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT2, 0x0734, 0x034C, 2, 0x0000, 0), /* MX6Q_PAD_SD1_DAT2__GPT_CMPOUT2 */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT2, 0x0734, 0x034C, 3, 0x0000, 0), /* MX6Q_PAD_SD1_DAT2__PWM2_PWMO */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT2, 0x0734, 0x034C, 4, 0x0000, 0), /* MX6Q_PAD_SD1_DAT2__WDOG1_WDOG_B */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT2, 0x0734, 0x034C, 5, 0x0000, 0), /* MX6Q_PAD_SD1_DAT2__GPIO_1_19 */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT2, 0x0734, 0x034C, 6, 0x0000, 0), /* MX6Q_PAD_SD1_DAT2__WDOG1_WDOG_RST_B_DEB */
- IMX_PIN_REG(MX6Q_PAD_SD1_DAT2, 0x0734, 0x034C, 7, 0x0000, 0), /* MX6Q_PAD_SD1_DAT2__ANATOP_TESTO_4 */
- IMX_PIN_REG(MX6Q_PAD_SD1_CLK, 0x0738, 0x0350, 0, 0x0000, 0), /* MX6Q_PAD_SD1_CLK__USDHC1_CLK */
- IMX_PIN_REG(MX6Q_PAD_SD1_CLK, 0x0738, 0x0350, 1, 0x0828, 0), /* MX6Q_PAD_SD1_CLK__ECSPI5_SCLK */
- IMX_PIN_REG(MX6Q_PAD_SD1_CLK, 0x0738, 0x0350, 2, 0x0000, 0), /* MX6Q_PAD_SD1_CLK__OSC32K_32K_OUT */
- IMX_PIN_REG(MX6Q_PAD_SD1_CLK, 0x0738, 0x0350, 3, 0x0000, 0), /* MX6Q_PAD_SD1_CLK__GPT_CLKIN */
- IMX_PIN_REG(MX6Q_PAD_SD1_CLK, 0x0738, 0x0350, 5, 0x0000, 0), /* MX6Q_PAD_SD1_CLK__GPIO_1_20 */
- IMX_PIN_REG(MX6Q_PAD_SD1_CLK, 0x0738, 0x0350, 6, 0x0000, 0), /* MX6Q_PAD_SD1_CLK__PHY_DTB_0 */
- IMX_PIN_REG(MX6Q_PAD_SD1_CLK, 0x0738, 0x0350, 7, 0x0000, 0), /* MX6Q_PAD_SD1_CLK__SATA_PHY_DTB_0 */
- IMX_PIN_REG(MX6Q_PAD_SD2_CLK, 0x073C, 0x0354, 0, 0x0000, 0), /* MX6Q_PAD_SD2_CLK__USDHC2_CLK */
- IMX_PIN_REG(MX6Q_PAD_SD2_CLK, 0x073C, 0x0354, 1, 0x0828, 1), /* MX6Q_PAD_SD2_CLK__ECSPI5_SCLK */
- IMX_PIN_REG(MX6Q_PAD_SD2_CLK, 0x073C, 0x0354, 2, 0x08E8, 3), /* MX6Q_PAD_SD2_CLK__KPP_COL_5 */
- IMX_PIN_REG(MX6Q_PAD_SD2_CLK, 0x073C, 0x0354, 3, 0x07C0, 1), /* MX6Q_PAD_SD2_CLK__AUDMUX_AUD4_RXFS */
- IMX_PIN_REG(MX6Q_PAD_SD2_CLK, 0x073C, 0x0354, 4, 0x0000, 0), /* MX6Q_PAD_SD2_CLK__PCIE_CTRL_MUX_9 */
- IMX_PIN_REG(MX6Q_PAD_SD2_CLK, 0x073C, 0x0354, 5, 0x0000, 0), /* MX6Q_PAD_SD2_CLK__GPIO_1_10 */
- IMX_PIN_REG(MX6Q_PAD_SD2_CLK, 0x073C, 0x0354, 6, 0x0000, 0), /* MX6Q_PAD_SD2_CLK__PHY_DTB_1 */
- IMX_PIN_REG(MX6Q_PAD_SD2_CLK, 0x073C, 0x0354, 7, 0x0000, 0), /* MX6Q_PAD_SD2_CLK__SATA_PHY_DTB_1 */
- IMX_PIN_REG(MX6Q_PAD_SD2_CMD, 0x0740, 0x0358, 0, 0x0000, 0), /* MX6Q_PAD_SD2_CMD__USDHC2_CMD */
- IMX_PIN_REG(MX6Q_PAD_SD2_CMD, 0x0740, 0x0358, 1, 0x0830, 1), /* MX6Q_PAD_SD2_CMD__ECSPI5_MOSI */
- IMX_PIN_REG(MX6Q_PAD_SD2_CMD, 0x0740, 0x0358, 2, 0x08F4, 2), /* MX6Q_PAD_SD2_CMD__KPP_ROW_5 */
- IMX_PIN_REG(MX6Q_PAD_SD2_CMD, 0x0740, 0x0358, 3, 0x07BC, 1), /* MX6Q_PAD_SD2_CMD__AUDMUX_AUD4_RXC */
- IMX_PIN_REG(MX6Q_PAD_SD2_CMD, 0x0740, 0x0358, 4, 0x0000, 0), /* MX6Q_PAD_SD2_CMD__PCIE_CTRL_MUX_10 */
- IMX_PIN_REG(MX6Q_PAD_SD2_CMD, 0x0740, 0x0358, 5, 0x0000, 0), /* MX6Q_PAD_SD2_CMD__GPIO_1_11 */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 0, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__USDHC2_DAT3 */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 1, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__ECSPI5_SS3 */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 2, 0x08EC, 2), /* MX6Q_PAD_SD2_DAT3__KPP_COL_6 */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 3, 0x07C4, 1), /* MX6Q_PAD_SD2_DAT3__AUDMUX_AUD4_TXC */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 4, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__PCIE_CTRL_MUX_11 */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 5, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__GPIO_1_12 */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 6, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__SJC_DONE */
- IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 7, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__ANATOP_TESTO_3 */
- IMX_PIN_REG(MX6Q_PAD_ENET_RX_ER, 0x04EC, 0x01D8, 0, 0x0000, 0), /* MX6Q_PAD_ENET_RX_ER__ANATOP_USBOTG_ID */
- IMX_PIN_REG(MX6Q_PAD_GPIO_1, 0x05F4, 0x0224, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_1__ANATOP_USBOTG_ID */
+ MX6Q_PAD_RESERVE0 = 0,
+ MX6Q_PAD_RESERVE1 = 1,
+ MX6Q_PAD_RESERVE2 = 2,
+ MX6Q_PAD_RESERVE3 = 3,
+ MX6Q_PAD_RESERVE4 = 4,
+ MX6Q_PAD_RESERVE5 = 5,
+ MX6Q_PAD_RESERVE6 = 6,
+ MX6Q_PAD_RESERVE7 = 7,
+ MX6Q_PAD_RESERVE8 = 8,
+ MX6Q_PAD_RESERVE9 = 9,
+ MX6Q_PAD_RESERVE10 = 10,
+ MX6Q_PAD_RESERVE11 = 11,
+ MX6Q_PAD_RESERVE12 = 12,
+ MX6Q_PAD_RESERVE13 = 13,
+ MX6Q_PAD_RESERVE14 = 14,
+ MX6Q_PAD_RESERVE15 = 15,
+ MX6Q_PAD_RESERVE16 = 16,
+ MX6Q_PAD_RESERVE17 = 17,
+ MX6Q_PAD_RESERVE18 = 18,
+ MX6Q_PAD_SD2_DAT1 = 19,
+ MX6Q_PAD_SD2_DAT2 = 20,
+ MX6Q_PAD_SD2_DAT0 = 21,
+ MX6Q_PAD_RGMII_TXC = 22,
+ MX6Q_PAD_RGMII_TD0 = 23,
+ MX6Q_PAD_RGMII_TD1 = 24,
+ MX6Q_PAD_RGMII_TD2 = 25,
+ MX6Q_PAD_RGMII_TD3 = 26,
+ MX6Q_PAD_RGMII_RX_CTL = 27,
+ MX6Q_PAD_RGMII_RD0 = 28,
+ MX6Q_PAD_RGMII_TX_CTL = 29,
+ MX6Q_PAD_RGMII_RD1 = 30,
+ MX6Q_PAD_RGMII_RD2 = 31,
+ MX6Q_PAD_RGMII_RD3 = 32,
+ MX6Q_PAD_RGMII_RXC = 33,
+ MX6Q_PAD_EIM_A25 = 34,
+ MX6Q_PAD_EIM_EB2 = 35,
+ MX6Q_PAD_EIM_D16 = 36,
+ MX6Q_PAD_EIM_D17 = 37,
+ MX6Q_PAD_EIM_D18 = 38,
+ MX6Q_PAD_EIM_D19 = 39,
+ MX6Q_PAD_EIM_D20 = 40,
+ MX6Q_PAD_EIM_D21 = 41,
+ MX6Q_PAD_EIM_D22 = 42,
+ MX6Q_PAD_EIM_D23 = 43,
+ MX6Q_PAD_EIM_EB3 = 44,
+ MX6Q_PAD_EIM_D24 = 45,
+ MX6Q_PAD_EIM_D25 = 46,
+ MX6Q_PAD_EIM_D26 = 47,
+ MX6Q_PAD_EIM_D27 = 48,
+ MX6Q_PAD_EIM_D28 = 49,
+ MX6Q_PAD_EIM_D29 = 50,
+ MX6Q_PAD_EIM_D30 = 51,
+ MX6Q_PAD_EIM_D31 = 52,
+ MX6Q_PAD_EIM_A24 = 53,
+ MX6Q_PAD_EIM_A23 = 54,
+ MX6Q_PAD_EIM_A22 = 55,
+ MX6Q_PAD_EIM_A21 = 56,
+ MX6Q_PAD_EIM_A20 = 57,
+ MX6Q_PAD_EIM_A19 = 58,
+ MX6Q_PAD_EIM_A18 = 59,
+ MX6Q_PAD_EIM_A17 = 60,
+ MX6Q_PAD_EIM_A16 = 61,
+ MX6Q_PAD_EIM_CS0 = 62,
+ MX6Q_PAD_EIM_CS1 = 63,
+ MX6Q_PAD_EIM_OE = 64,
+ MX6Q_PAD_EIM_RW = 65,
+ MX6Q_PAD_EIM_LBA = 66,
+ MX6Q_PAD_EIM_EB0 = 67,
+ MX6Q_PAD_EIM_EB1 = 68,
+ MX6Q_PAD_EIM_DA0 = 69,
+ MX6Q_PAD_EIM_DA1 = 70,
+ MX6Q_PAD_EIM_DA2 = 71,
+ MX6Q_PAD_EIM_DA3 = 72,
+ MX6Q_PAD_EIM_DA4 = 73,
+ MX6Q_PAD_EIM_DA5 = 74,
+ MX6Q_PAD_EIM_DA6 = 75,
+ MX6Q_PAD_EIM_DA7 = 76,
+ MX6Q_PAD_EIM_DA8 = 77,
+ MX6Q_PAD_EIM_DA9 = 78,
+ MX6Q_PAD_EIM_DA10 = 79,
+ MX6Q_PAD_EIM_DA11 = 80,
+ MX6Q_PAD_EIM_DA12 = 81,
+ MX6Q_PAD_EIM_DA13 = 82,
+ MX6Q_PAD_EIM_DA14 = 83,
+ MX6Q_PAD_EIM_DA15 = 84,
+ MX6Q_PAD_EIM_WAIT = 85,
+ MX6Q_PAD_EIM_BCLK = 86,
+ MX6Q_PAD_DI0_DISP_CLK = 87,
+ MX6Q_PAD_DI0_PIN15 = 88,
+ MX6Q_PAD_DI0_PIN2 = 89,
+ MX6Q_PAD_DI0_PIN3 = 90,
+ MX6Q_PAD_DI0_PIN4 = 91,
+ MX6Q_PAD_DISP0_DAT0 = 92,
+ MX6Q_PAD_DISP0_DAT1 = 93,
+ MX6Q_PAD_DISP0_DAT2 = 94,
+ MX6Q_PAD_DISP0_DAT3 = 95,
+ MX6Q_PAD_DISP0_DAT4 = 96,
+ MX6Q_PAD_DISP0_DAT5 = 97,
+ MX6Q_PAD_DISP0_DAT6 = 98,
+ MX6Q_PAD_DISP0_DAT7 = 99,
+ MX6Q_PAD_DISP0_DAT8 = 100,
+ MX6Q_PAD_DISP0_DAT9 = 101,
+ MX6Q_PAD_DISP0_DAT10 = 102,
+ MX6Q_PAD_DISP0_DAT11 = 103,
+ MX6Q_PAD_DISP0_DAT12 = 104,
+ MX6Q_PAD_DISP0_DAT13 = 105,
+ MX6Q_PAD_DISP0_DAT14 = 106,
+ MX6Q_PAD_DISP0_DAT15 = 107,
+ MX6Q_PAD_DISP0_DAT16 = 108,
+ MX6Q_PAD_DISP0_DAT17 = 109,
+ MX6Q_PAD_DISP0_DAT18 = 110,
+ MX6Q_PAD_DISP0_DAT19 = 111,
+ MX6Q_PAD_DISP0_DAT20 = 112,
+ MX6Q_PAD_DISP0_DAT21 = 113,
+ MX6Q_PAD_DISP0_DAT22 = 114,
+ MX6Q_PAD_DISP0_DAT23 = 115,
+ MX6Q_PAD_ENET_MDIO = 116,
+ MX6Q_PAD_ENET_REF_CLK = 117,
+ MX6Q_PAD_ENET_RX_ER = 118,
+ MX6Q_PAD_ENET_CRS_DV = 119,
+ MX6Q_PAD_ENET_RXD1 = 120,
+ MX6Q_PAD_ENET_RXD0 = 121,
+ MX6Q_PAD_ENET_TX_EN = 122,
+ MX6Q_PAD_ENET_TXD1 = 123,
+ MX6Q_PAD_ENET_TXD0 = 124,
+ MX6Q_PAD_ENET_MDC = 125,
+ MX6Q_PAD_KEY_COL0 = 126,
+ MX6Q_PAD_KEY_ROW0 = 127,
+ MX6Q_PAD_KEY_COL1 = 128,
+ MX6Q_PAD_KEY_ROW1 = 129,
+ MX6Q_PAD_KEY_COL2 = 130,
+ MX6Q_PAD_KEY_ROW2 = 131,
+ MX6Q_PAD_KEY_COL3 = 132,
+ MX6Q_PAD_KEY_ROW3 = 133,
+ MX6Q_PAD_KEY_COL4 = 134,
+ MX6Q_PAD_KEY_ROW4 = 135,
+ MX6Q_PAD_GPIO_0 = 136,
+ MX6Q_PAD_GPIO_1 = 137,
+ MX6Q_PAD_GPIO_9 = 138,
+ MX6Q_PAD_GPIO_3 = 139,
+ MX6Q_PAD_GPIO_6 = 140,
+ MX6Q_PAD_GPIO_2 = 141,
+ MX6Q_PAD_GPIO_4 = 142,
+ MX6Q_PAD_GPIO_5 = 143,
+ MX6Q_PAD_GPIO_7 = 144,
+ MX6Q_PAD_GPIO_8 = 145,
+ MX6Q_PAD_GPIO_16 = 146,
+ MX6Q_PAD_GPIO_17 = 147,
+ MX6Q_PAD_GPIO_18 = 148,
+ MX6Q_PAD_GPIO_19 = 149,
+ MX6Q_PAD_CSI0_PIXCLK = 150,
+ MX6Q_PAD_CSI0_MCLK = 151,
+ MX6Q_PAD_CSI0_DATA_EN = 152,
+ MX6Q_PAD_CSI0_VSYNC = 153,
+ MX6Q_PAD_CSI0_DAT4 = 154,
+ MX6Q_PAD_CSI0_DAT5 = 155,
+ MX6Q_PAD_CSI0_DAT6 = 156,
+ MX6Q_PAD_CSI0_DAT7 = 157,
+ MX6Q_PAD_CSI0_DAT8 = 158,
+ MX6Q_PAD_CSI0_DAT9 = 159,
+ MX6Q_PAD_CSI0_DAT10 = 160,
+ MX6Q_PAD_CSI0_DAT11 = 161,
+ MX6Q_PAD_CSI0_DAT12 = 162,
+ MX6Q_PAD_CSI0_DAT13 = 163,
+ MX6Q_PAD_CSI0_DAT14 = 164,
+ MX6Q_PAD_CSI0_DAT15 = 165,
+ MX6Q_PAD_CSI0_DAT16 = 166,
+ MX6Q_PAD_CSI0_DAT17 = 167,
+ MX6Q_PAD_CSI0_DAT18 = 168,
+ MX6Q_PAD_CSI0_DAT19 = 169,
+ MX6Q_PAD_SD3_DAT7 = 170,
+ MX6Q_PAD_SD3_DAT6 = 171,
+ MX6Q_PAD_SD3_DAT5 = 172,
+ MX6Q_PAD_SD3_DAT4 = 173,
+ MX6Q_PAD_SD3_CMD = 174,
+ MX6Q_PAD_SD3_CLK = 175,
+ MX6Q_PAD_SD3_DAT0 = 176,
+ MX6Q_PAD_SD3_DAT1 = 177,
+ MX6Q_PAD_SD3_DAT2 = 178,
+ MX6Q_PAD_SD3_DAT3 = 179,
+ MX6Q_PAD_SD3_RST = 180,
+ MX6Q_PAD_NANDF_CLE = 181,
+ MX6Q_PAD_NANDF_ALE = 182,
+ MX6Q_PAD_NANDF_WP_B = 183,
+ MX6Q_PAD_NANDF_RB0 = 184,
+ MX6Q_PAD_NANDF_CS0 = 185,
+ MX6Q_PAD_NANDF_CS1 = 186,
+ MX6Q_PAD_NANDF_CS2 = 187,
+ MX6Q_PAD_NANDF_CS3 = 188,
+ MX6Q_PAD_SD4_CMD = 189,
+ MX6Q_PAD_SD4_CLK = 190,
+ MX6Q_PAD_NANDF_D0 = 191,
+ MX6Q_PAD_NANDF_D1 = 192,
+ MX6Q_PAD_NANDF_D2 = 193,
+ MX6Q_PAD_NANDF_D3 = 194,
+ MX6Q_PAD_NANDF_D4 = 195,
+ MX6Q_PAD_NANDF_D5 = 196,
+ MX6Q_PAD_NANDF_D6 = 197,
+ MX6Q_PAD_NANDF_D7 = 198,
+ MX6Q_PAD_SD4_DAT0 = 199,
+ MX6Q_PAD_SD4_DAT1 = 200,
+ MX6Q_PAD_SD4_DAT2 = 201,
+ MX6Q_PAD_SD4_DAT3 = 202,
+ MX6Q_PAD_SD4_DAT4 = 203,
+ MX6Q_PAD_SD4_DAT5 = 204,
+ MX6Q_PAD_SD4_DAT6 = 205,
+ MX6Q_PAD_SD4_DAT7 = 206,
+ MX6Q_PAD_SD1_DAT1 = 207,
+ MX6Q_PAD_SD1_DAT0 = 208,
+ MX6Q_PAD_SD1_DAT3 = 209,
+ MX6Q_PAD_SD1_CMD = 210,
+ MX6Q_PAD_SD1_DAT2 = 211,
+ MX6Q_PAD_SD1_CLK = 212,
+ MX6Q_PAD_SD2_CLK = 213,
+ MX6Q_PAD_SD2_CMD = 214,
+ MX6Q_PAD_SD2_DAT3 = 215,
};

/* Pad names for the pinmux subsystem */
static const struct pinctrl_pin_desc imx6q_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(MX6Q_PAD_RESERVE0),
+ IMX_PINCTRL_PIN(MX6Q_PAD_RESERVE1),
+ IMX_PINCTRL_PIN(MX6Q_PAD_RESERVE2),
+ IMX_PINCTRL_PIN(MX6Q_PAD_RESERVE3),
+ IMX_PINCTRL_PIN(MX6Q_PAD_RESERVE4),
+ IMX_PINCTRL_PIN(MX6Q_PAD_RESERVE5),
+ IMX_PINCTRL_PIN(MX6Q_PAD_RESERVE6),
+ IMX_PINCTRL_PIN(MX6Q_PAD_RESERVE7),
+ IMX_PINCTRL_PIN(MX6Q_PAD_RESERVE8),
+ IMX_PINCTRL_PIN(MX6Q_PAD_RESERVE9),
+ IMX_PINCTRL_PIN(MX6Q_PAD_RESERVE10),
+ IMX_PINCTRL_PIN(MX6Q_PAD_RESERVE11),
+ IMX_PINCTRL_PIN(MX6Q_PAD_RESERVE12),
+ IMX_PINCTRL_PIN(MX6Q_PAD_RESERVE13),
+ IMX_PINCTRL_PIN(MX6Q_PAD_RESERVE14),
+ IMX_PINCTRL_PIN(MX6Q_PAD_RESERVE15),
+ IMX_PINCTRL_PIN(MX6Q_PAD_RESERVE16),
+ IMX_PINCTRL_PIN(MX6Q_PAD_RESERVE17),
+ IMX_PINCTRL_PIN(MX6Q_PAD_RESERVE18),
IMX_PINCTRL_PIN(MX6Q_PAD_SD2_DAT1),
IMX_PINCTRL_PIN(MX6Q_PAD_SD2_DAT2),
IMX_PINCTRL_PIN(MX6Q_PAD_SD2_DAT0),
@@ -2063,117 +369,6 @@ static const struct pinctrl_pin_desc imx6q_pinctrl_pads[] = {
IMX_PINCTRL_PIN(MX6Q_PAD_ENET_TXD1),
IMX_PINCTRL_PIN(MX6Q_PAD_ENET_TXD0),
IMX_PINCTRL_PIN(MX6Q_PAD_ENET_MDC),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D40),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D41),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D42),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D43),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D44),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D45),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D46),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D47),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDQS5),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_DQM5),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D32),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D33),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D34),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D35),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D36),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D37),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D38),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D39),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_DQM4),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDQS4),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D24),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D25),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D26),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D27),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D28),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D29),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDQS3),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D30),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D31),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_DQM3),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D16),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D17),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D18),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D19),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D20),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D21),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D22),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDQS2),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D23),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_DQM2),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A0),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A1),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A2),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A3),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A4),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A5),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A6),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A7),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A8),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A9),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A10),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A11),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A12),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A13),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A14),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A15),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_CAS),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_CS0),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_CS1),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_RAS),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_RESET),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDBA0),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDBA1),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDCLK_0),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDBA2),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDCKE0),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDCLK_1),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDCKE1),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDODT0),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDODT1),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDWE),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D0),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D1),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D2),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D3),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D4),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D5),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDQS0),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D6),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D7),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_DQM0),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D8),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D9),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D10),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D11),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D12),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D13),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D14),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDQS1),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D15),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_DQM1),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D48),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D49),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D50),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D51),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D52),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D53),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D54),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D55),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDQS6),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_DQM6),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D56),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDQS7),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D57),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D58),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D59),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D60),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_DQM7),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D61),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D62),
- IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D63),
IMX_PINCTRL_PIN(MX6Q_PAD_KEY_COL0),
IMX_PINCTRL_PIN(MX6Q_PAD_KEY_ROW0),
IMX_PINCTRL_PIN(MX6Q_PAD_KEY_COL1),
@@ -2218,30 +413,6 @@ static const struct pinctrl_pin_desc imx6q_pinctrl_pads[] = {
IMX_PINCTRL_PIN(MX6Q_PAD_CSI0_DAT17),
IMX_PINCTRL_PIN(MX6Q_PAD_CSI0_DAT18),
IMX_PINCTRL_PIN(MX6Q_PAD_CSI0_DAT19),
- IMX_PINCTRL_PIN(MX6Q_PAD_JTAG_TMS),
- IMX_PINCTRL_PIN(MX6Q_PAD_JTAG_MOD),
- IMX_PINCTRL_PIN(MX6Q_PAD_JTAG_TRSTB),
- IMX_PINCTRL_PIN(MX6Q_PAD_JTAG_TDI),
- IMX_PINCTRL_PIN(MX6Q_PAD_JTAG_TCK),
- IMX_PINCTRL_PIN(MX6Q_PAD_JTAG_TDO),
- IMX_PINCTRL_PIN(MX6Q_PAD_LVDS1_TX3_P),
- IMX_PINCTRL_PIN(MX6Q_PAD_LVDS1_TX2_P),
- IMX_PINCTRL_PIN(MX6Q_PAD_LVDS1_CLK_P),
- IMX_PINCTRL_PIN(MX6Q_PAD_LVDS1_TX1_P),
- IMX_PINCTRL_PIN(MX6Q_PAD_LVDS1_TX0_P),
- IMX_PINCTRL_PIN(MX6Q_PAD_LVDS0_TX3_P),
- IMX_PINCTRL_PIN(MX6Q_PAD_LVDS0_CLK_P),
- IMX_PINCTRL_PIN(MX6Q_PAD_LVDS0_TX2_P),
- IMX_PINCTRL_PIN(MX6Q_PAD_LVDS0_TX1_P),
- IMX_PINCTRL_PIN(MX6Q_PAD_LVDS0_TX0_P),
- IMX_PINCTRL_PIN(MX6Q_PAD_TAMPER),
- IMX_PINCTRL_PIN(MX6Q_PAD_PMIC_ON_REQ),
- IMX_PINCTRL_PIN(MX6Q_PAD_PMIC_STBY_REQ),
- IMX_PINCTRL_PIN(MX6Q_PAD_POR_B),
- IMX_PINCTRL_PIN(MX6Q_PAD_BOOT_MODE1),
- IMX_PINCTRL_PIN(MX6Q_PAD_RESET_IN_B),
- IMX_PINCTRL_PIN(MX6Q_PAD_BOOT_MODE0),
- IMX_PINCTRL_PIN(MX6Q_PAD_TEST_MODE),
IMX_PINCTRL_PIN(MX6Q_PAD_SD3_DAT7),
IMX_PINCTRL_PIN(MX6Q_PAD_SD3_DAT6),
IMX_PINCTRL_PIN(MX6Q_PAD_SD3_DAT5),
@@ -2293,8 +464,6 @@ static const struct pinctrl_pin_desc imx6q_pinctrl_pads[] = {
static struct imx_pinctrl_soc_info imx6q_pinctrl_info = {
.pins = imx6q_pinctrl_pads,
.npins = ARRAY_SIZE(imx6q_pinctrl_pads),
- .pin_regs = imx6q_pin_regs,
- .npin_regs = ARRAY_SIZE(imx6q_pin_regs),
};
static struct of_device_id imx6q_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/pinctrl-imx6sl.c b/drivers/pinctrl/pinctrl-imx6sl.c
new file mode 100644
index 0000000..4eb7cca
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-imx6sl.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-imx.h"
+
+enum imx6sl_pads {
+ MX6SL_PAD_RESERVE0 = 0,
+ MX6SL_PAD_RESERVE1 = 1,
+ MX6SL_PAD_RESERVE2 = 2,
+ MX6SL_PAD_RESERVE3 = 3,
+ MX6SL_PAD_RESERVE4 = 4,
+ MX6SL_PAD_RESERVE5 = 5,
+ MX6SL_PAD_RESERVE6 = 6,
+ MX6SL_PAD_RESERVE7 = 7,
+ MX6SL_PAD_RESERVE8 = 8,
+ MX6SL_PAD_RESERVE9 = 9,
+ MX6SL_PAD_RESERVE10 = 10,
+ MX6SL_PAD_RESERVE11 = 11,
+ MX6SL_PAD_RESERVE12 = 12,
+ MX6SL_PAD_RESERVE13 = 13,
+ MX6SL_PAD_RESERVE14 = 14,
+ MX6SL_PAD_RESERVE15 = 15,
+ MX6SL_PAD_RESERVE16 = 16,
+ MX6SL_PAD_RESERVE17 = 17,
+ MX6SL_PAD_RESERVE18 = 18,
+ MX6SL_PAD_AUD_MCLK = 19,
+ MX6SL_PAD_AUD_RXC = 20,
+ MX6SL_PAD_AUD_RXD = 21,
+ MX6SL_PAD_AUD_RXFS = 22,
+ MX6SL_PAD_AUD_TXC = 23,
+ MX6SL_PAD_AUD_TXD = 24,
+ MX6SL_PAD_AUD_TXFS = 25,
+ MX6SL_PAD_ECSPI1_MISO = 26,
+ MX6SL_PAD_ECSPI1_MOSI = 27,
+ MX6SL_PAD_ECSPI1_SCLK = 28,
+ MX6SL_PAD_ECSPI1_SS0 = 29,
+ MX6SL_PAD_ECSPI2_MISO = 30,
+ MX6SL_PAD_ECSPI2_MOSI = 31,
+ MX6SL_PAD_ECSPI2_SCLK = 32,
+ MX6SL_PAD_ECSPI2_SS0 = 33,
+ MX6SL_PAD_EPDC_BDR0 = 34,
+ MX6SL_PAD_EPDC_BDR1 = 35,
+ MX6SL_PAD_EPDC_D0 = 36,
+ MX6SL_PAD_EPDC_D1 = 37,
+ MX6SL_PAD_EPDC_D10 = 38,
+ MX6SL_PAD_EPDC_D11 = 39,
+ MX6SL_PAD_EPDC_D12 = 40,
+ MX6SL_PAD_EPDC_D13 = 41,
+ MX6SL_PAD_EPDC_D14 = 42,
+ MX6SL_PAD_EPDC_D15 = 43,
+ MX6SL_PAD_EPDC_D2 = 44,
+ MX6SL_PAD_EPDC_D3 = 45,
+ MX6SL_PAD_EPDC_D4 = 46,
+ MX6SL_PAD_EPDC_D5 = 47,
+ MX6SL_PAD_EPDC_D6 = 48,
+ MX6SL_PAD_EPDC_D7 = 49,
+ MX6SL_PAD_EPDC_D8 = 50,
+ MX6SL_PAD_EPDC_D9 = 51,
+ MX6SL_PAD_EPDC_GDCLK = 52,
+ MX6SL_PAD_EPDC_GDOE = 53,
+ MX6SL_PAD_EPDC_GDRL = 54,
+ MX6SL_PAD_EPDC_GDSP = 55,
+ MX6SL_PAD_EPDC_PWRCOM = 56,
+ MX6SL_PAD_EPDC_PWRCTRL0 = 57,
+ MX6SL_PAD_EPDC_PWRCTRL1 = 58,
+ MX6SL_PAD_EPDC_PWRCTRL2 = 59,
+ MX6SL_PAD_EPDC_PWRCTRL3 = 60,
+ MX6SL_PAD_EPDC_PWRINT = 61,
+ MX6SL_PAD_EPDC_PWRSTAT = 62,
+ MX6SL_PAD_EPDC_PWRWAKEUP = 63,
+ MX6SL_PAD_EPDC_SDCE0 = 64,
+ MX6SL_PAD_EPDC_SDCE1 = 65,
+ MX6SL_PAD_EPDC_SDCE2 = 66,
+ MX6SL_PAD_EPDC_SDCE3 = 67,
+ MX6SL_PAD_EPDC_SDCLK = 68,
+ MX6SL_PAD_EPDC_SDLE = 69,
+ MX6SL_PAD_EPDC_SDOE = 70,
+ MX6SL_PAD_EPDC_SDSHR = 71,
+ MX6SL_PAD_EPDC_VCOM0 = 72,
+ MX6SL_PAD_EPDC_VCOM1 = 73,
+ MX6SL_PAD_FEC_CRS_DV = 74,
+ MX6SL_PAD_FEC_MDC = 75,
+ MX6SL_PAD_FEC_MDIO = 76,
+ MX6SL_PAD_FEC_REF_CLK = 77,
+ MX6SL_PAD_FEC_RX_ER = 78,
+ MX6SL_PAD_FEC_RXD0 = 79,
+ MX6SL_PAD_FEC_RXD1 = 80,
+ MX6SL_PAD_FEC_TX_CLK = 81,
+ MX6SL_PAD_FEC_TX_EN = 82,
+ MX6SL_PAD_FEC_TXD0 = 83,
+ MX6SL_PAD_FEC_TXD1 = 84,
+ MX6SL_PAD_HSIC_DAT = 85,
+ MX6SL_PAD_HSIC_STROBE = 86,
+ MX6SL_PAD_I2C1_SCL = 87,
+ MX6SL_PAD_I2C1_SDA = 88,
+ MX6SL_PAD_I2C2_SCL = 89,
+ MX6SL_PAD_I2C2_SDA = 90,
+ MX6SL_PAD_KEY_COL0 = 91,
+ MX6SL_PAD_KEY_COL1 = 92,
+ MX6SL_PAD_KEY_COL2 = 93,
+ MX6SL_PAD_KEY_COL3 = 94,
+ MX6SL_PAD_KEY_COL4 = 95,
+ MX6SL_PAD_KEY_COL5 = 96,
+ MX6SL_PAD_KEY_COL6 = 97,
+ MX6SL_PAD_KEY_COL7 = 98,
+ MX6SL_PAD_KEY_ROW0 = 99,
+ MX6SL_PAD_KEY_ROW1 = 100,
+ MX6SL_PAD_KEY_ROW2 = 101,
+ MX6SL_PAD_KEY_ROW3 = 102,
+ MX6SL_PAD_KEY_ROW4 = 103,
+ MX6SL_PAD_KEY_ROW5 = 104,
+ MX6SL_PAD_KEY_ROW6 = 105,
+ MX6SL_PAD_KEY_ROW7 = 106,
+ MX6SL_PAD_LCD_CLK = 107,
+ MX6SL_PAD_LCD_DAT0 = 108,
+ MX6SL_PAD_LCD_DAT1 = 109,
+ MX6SL_PAD_LCD_DAT10 = 110,
+ MX6SL_PAD_LCD_DAT11 = 111,
+ MX6SL_PAD_LCD_DAT12 = 112,
+ MX6SL_PAD_LCD_DAT13 = 113,
+ MX6SL_PAD_LCD_DAT14 = 114,
+ MX6SL_PAD_LCD_DAT15 = 115,
+ MX6SL_PAD_LCD_DAT16 = 116,
+ MX6SL_PAD_LCD_DAT17 = 117,
+ MX6SL_PAD_LCD_DAT18 = 118,
+ MX6SL_PAD_LCD_DAT19 = 119,
+ MX6SL_PAD_LCD_DAT2 = 120,
+ MX6SL_PAD_LCD_DAT20 = 121,
+ MX6SL_PAD_LCD_DAT21 = 122,
+ MX6SL_PAD_LCD_DAT22 = 123,
+ MX6SL_PAD_LCD_DAT23 = 124,
+ MX6SL_PAD_LCD_DAT3 = 125,
+ MX6SL_PAD_LCD_DAT4 = 126,
+ MX6SL_PAD_LCD_DAT5 = 127,
+ MX6SL_PAD_LCD_DAT6 = 128,
+ MX6SL_PAD_LCD_DAT7 = 129,
+ MX6SL_PAD_LCD_DAT8 = 130,
+ MX6SL_PAD_LCD_DAT9 = 131,
+ MX6SL_PAD_LCD_ENABLE = 132,
+ MX6SL_PAD_LCD_HSYNC = 133,
+ MX6SL_PAD_LCD_RESET = 134,
+ MX6SL_PAD_LCD_VSYNC = 135,
+ MX6SL_PAD_PWM1 = 136,
+ MX6SL_PAD_REF_CLK_24M = 137,
+ MX6SL_PAD_REF_CLK_32K = 138,
+ MX6SL_PAD_SD1_CLK = 139,
+ MX6SL_PAD_SD1_CMD = 140,
+ MX6SL_PAD_SD1_DAT0 = 141,
+ MX6SL_PAD_SD1_DAT1 = 142,
+ MX6SL_PAD_SD1_DAT2 = 143,
+ MX6SL_PAD_SD1_DAT3 = 144,
+ MX6SL_PAD_SD1_DAT4 = 145,
+ MX6SL_PAD_SD1_DAT5 = 146,
+ MX6SL_PAD_SD1_DAT6 = 147,
+ MX6SL_PAD_SD1_DAT7 = 148,
+ MX6SL_PAD_SD2_CLK = 149,
+ MX6SL_PAD_SD2_CMD = 150,
+ MX6SL_PAD_SD2_DAT0 = 151,
+ MX6SL_PAD_SD2_DAT1 = 152,
+ MX6SL_PAD_SD2_DAT2 = 153,
+ MX6SL_PAD_SD2_DAT3 = 154,
+ MX6SL_PAD_SD2_DAT4 = 155,
+ MX6SL_PAD_SD2_DAT5 = 156,
+ MX6SL_PAD_SD2_DAT6 = 157,
+ MX6SL_PAD_SD2_DAT7 = 158,
+ MX6SL_PAD_SD2_RST = 159,
+ MX6SL_PAD_SD3_CLK = 160,
+ MX6SL_PAD_SD3_CMD = 161,
+ MX6SL_PAD_SD3_DAT0 = 162,
+ MX6SL_PAD_SD3_DAT1 = 163,
+ MX6SL_PAD_SD3_DAT2 = 164,
+ MX6SL_PAD_SD3_DAT3 = 165,
+ MX6SL_PAD_UART1_RXD = 166,
+ MX6SL_PAD_UART1_TXD = 167,
+ MX6SL_PAD_WDOG_B = 168,
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imx6sl_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(MX6SL_PAD_RESERVE0),
+ IMX_PINCTRL_PIN(MX6SL_PAD_RESERVE1),
+ IMX_PINCTRL_PIN(MX6SL_PAD_RESERVE2),
+ IMX_PINCTRL_PIN(MX6SL_PAD_RESERVE3),
+ IMX_PINCTRL_PIN(MX6SL_PAD_RESERVE4),
+ IMX_PINCTRL_PIN(MX6SL_PAD_RESERVE5),
+ IMX_PINCTRL_PIN(MX6SL_PAD_RESERVE6),
+ IMX_PINCTRL_PIN(MX6SL_PAD_RESERVE7),
+ IMX_PINCTRL_PIN(MX6SL_PAD_RESERVE8),
+ IMX_PINCTRL_PIN(MX6SL_PAD_RESERVE9),
+ IMX_PINCTRL_PIN(MX6SL_PAD_RESERVE10),
+ IMX_PINCTRL_PIN(MX6SL_PAD_RESERVE11),
+ IMX_PINCTRL_PIN(MX6SL_PAD_RESERVE12),
+ IMX_PINCTRL_PIN(MX6SL_PAD_RESERVE13),
+ IMX_PINCTRL_PIN(MX6SL_PAD_RESERVE14),
+ IMX_PINCTRL_PIN(MX6SL_PAD_RESERVE15),
+ IMX_PINCTRL_PIN(MX6SL_PAD_RESERVE16),
+ IMX_PINCTRL_PIN(MX6SL_PAD_RESERVE17),
+ IMX_PINCTRL_PIN(MX6SL_PAD_RESERVE18),
+ IMX_PINCTRL_PIN(MX6SL_PAD_AUD_MCLK),
+ IMX_PINCTRL_PIN(MX6SL_PAD_AUD_RXC),
+ IMX_PINCTRL_PIN(MX6SL_PAD_AUD_RXD),
+ IMX_PINCTRL_PIN(MX6SL_PAD_AUD_RXFS),
+ IMX_PINCTRL_PIN(MX6SL_PAD_AUD_TXC),
+ IMX_PINCTRL_PIN(MX6SL_PAD_AUD_TXD),
+ IMX_PINCTRL_PIN(MX6SL_PAD_AUD_TXFS),
+ IMX_PINCTRL_PIN(MX6SL_PAD_ECSPI1_MISO),
+ IMX_PINCTRL_PIN(MX6SL_PAD_ECSPI1_MOSI),
+ IMX_PINCTRL_PIN(MX6SL_PAD_ECSPI1_SCLK),
+ IMX_PINCTRL_PIN(MX6SL_PAD_ECSPI1_SS0),
+ IMX_PINCTRL_PIN(MX6SL_PAD_ECSPI2_MISO),
+ IMX_PINCTRL_PIN(MX6SL_PAD_ECSPI2_MOSI),
+ IMX_PINCTRL_PIN(MX6SL_PAD_ECSPI2_SCLK),
+ IMX_PINCTRL_PIN(MX6SL_PAD_ECSPI2_SS0),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_BDR0),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_BDR1),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_D0),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_D1),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_D10),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_D11),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_D12),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_D13),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_D14),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_D15),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_D2),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_D3),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_D4),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_D5),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_D6),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_D7),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_D8),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_D9),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_GDCLK),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_GDOE),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_GDRL),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_GDSP),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_PWRCOM),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_PWRCTRL0),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_PWRCTRL1),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_PWRCTRL2),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_PWRCTRL3),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_PWRINT),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_PWRSTAT),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_PWRWAKEUP),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_SDCE0),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_SDCE1),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_SDCE2),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_SDCE3),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_SDCLK),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_SDLE),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_SDOE),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_SDSHR),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_VCOM0),
+ IMX_PINCTRL_PIN(MX6SL_PAD_EPDC_VCOM1),
+ IMX_PINCTRL_PIN(MX6SL_PAD_FEC_CRS_DV),
+ IMX_PINCTRL_PIN(MX6SL_PAD_FEC_MDC),
+ IMX_PINCTRL_PIN(MX6SL_PAD_FEC_MDIO),
+ IMX_PINCTRL_PIN(MX6SL_PAD_FEC_REF_CLK),
+ IMX_PINCTRL_PIN(MX6SL_PAD_FEC_RX_ER),
+ IMX_PINCTRL_PIN(MX6SL_PAD_FEC_RXD0),
+ IMX_PINCTRL_PIN(MX6SL_PAD_FEC_RXD1),
+ IMX_PINCTRL_PIN(MX6SL_PAD_FEC_TX_CLK),
+ IMX_PINCTRL_PIN(MX6SL_PAD_FEC_TX_EN),
+ IMX_PINCTRL_PIN(MX6SL_PAD_FEC_TXD0),
+ IMX_PINCTRL_PIN(MX6SL_PAD_FEC_TXD1),
+ IMX_PINCTRL_PIN(MX6SL_PAD_HSIC_DAT),
+ IMX_PINCTRL_PIN(MX6SL_PAD_HSIC_STROBE),
+ IMX_PINCTRL_PIN(MX6SL_PAD_I2C1_SCL),
+ IMX_PINCTRL_PIN(MX6SL_PAD_I2C1_SDA),
+ IMX_PINCTRL_PIN(MX6SL_PAD_I2C2_SCL),
+ IMX_PINCTRL_PIN(MX6SL_PAD_I2C2_SDA),
+ IMX_PINCTRL_PIN(MX6SL_PAD_KEY_COL0),
+ IMX_PINCTRL_PIN(MX6SL_PAD_KEY_COL1),
+ IMX_PINCTRL_PIN(MX6SL_PAD_KEY_COL2),
+ IMX_PINCTRL_PIN(MX6SL_PAD_KEY_COL3),
+ IMX_PINCTRL_PIN(MX6SL_PAD_KEY_COL4),
+ IMX_PINCTRL_PIN(MX6SL_PAD_KEY_COL5),
+ IMX_PINCTRL_PIN(MX6SL_PAD_KEY_COL6),
+ IMX_PINCTRL_PIN(MX6SL_PAD_KEY_COL7),
+ IMX_PINCTRL_PIN(MX6SL_PAD_KEY_ROW0),
+ IMX_PINCTRL_PIN(MX6SL_PAD_KEY_ROW1),
+ IMX_PINCTRL_PIN(MX6SL_PAD_KEY_ROW2),
+ IMX_PINCTRL_PIN(MX6SL_PAD_KEY_ROW3),
+ IMX_PINCTRL_PIN(MX6SL_PAD_KEY_ROW4),
+ IMX_PINCTRL_PIN(MX6SL_PAD_KEY_ROW5),
+ IMX_PINCTRL_PIN(MX6SL_PAD_KEY_ROW6),
+ IMX_PINCTRL_PIN(MX6SL_PAD_KEY_ROW7),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_CLK),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_DAT0),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_DAT1),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_DAT10),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_DAT11),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_DAT12),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_DAT13),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_DAT14),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_DAT15),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_DAT16),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_DAT17),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_DAT18),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_DAT19),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_DAT2),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_DAT20),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_DAT21),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_DAT22),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_DAT23),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_DAT3),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_DAT4),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_DAT5),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_DAT6),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_DAT7),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_DAT8),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_DAT9),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_ENABLE),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_HSYNC),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_RESET),
+ IMX_PINCTRL_PIN(MX6SL_PAD_LCD_VSYNC),
+ IMX_PINCTRL_PIN(MX6SL_PAD_PWM1),
+ IMX_PINCTRL_PIN(MX6SL_PAD_REF_CLK_24M),
+ IMX_PINCTRL_PIN(MX6SL_PAD_REF_CLK_32K),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD1_CLK),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD1_CMD),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD1_DAT0),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD1_DAT1),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD1_DAT2),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD1_DAT3),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD1_DAT4),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD1_DAT5),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD1_DAT6),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD1_DAT7),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD2_CLK),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD2_CMD),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD2_DAT0),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD2_DAT1),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD2_DAT2),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD2_DAT3),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD2_DAT4),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD2_DAT5),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD2_DAT6),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD2_DAT7),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD2_RST),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD3_CLK),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD3_CMD),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD3_DAT0),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD3_DAT1),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD3_DAT2),
+ IMX_PINCTRL_PIN(MX6SL_PAD_SD3_DAT3),
+ IMX_PINCTRL_PIN(MX6SL_PAD_UART1_RXD),
+ IMX_PINCTRL_PIN(MX6SL_PAD_UART1_TXD),
+ IMX_PINCTRL_PIN(MX6SL_PAD_WDOG_B),
+};
+
+static struct imx_pinctrl_soc_info imx6sl_pinctrl_info = {
+ .pins = imx6sl_pinctrl_pads,
+ .npins = ARRAY_SIZE(imx6sl_pinctrl_pads),
+};
+
+static struct of_device_id imx6sl_pinctrl_of_match[] = {
+ { .compatible = "fsl,imx6sl-iomuxc", },
+ { /* sentinel */ }
+};
+
+static int imx6sl_pinctrl_probe(struct platform_device *pdev)
+{
+ return imx_pinctrl_probe(pdev, &imx6sl_pinctrl_info);
+}
+
+static struct platform_driver imx6sl_pinctrl_driver = {
+ .driver = {
+ .name = "imx6sl-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(imx6sl_pinctrl_of_match),
+ },
+ .probe = imx6sl_pinctrl_probe,
+ .remove = imx_pinctrl_remove,
+};
+
+static int __init imx6sl_pinctrl_init(void)
+{
+ return platform_driver_register(&imx6sl_pinctrl_driver);
+}
+arch_initcall(imx6sl_pinctrl_init);
+
+static void __exit imx6sl_pinctrl_exit(void)
+{
+ platform_driver_unregister(&imx6sl_pinctrl_driver);
+}
+module_exit(imx6sl_pinctrl_exit);
+
+MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+MODULE_DESCRIPTION("Freescale imx6sl pinctrl driver");
+MODULE_LICENSE("GPL v2");
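The new imx6sl file above follows the same pattern as the imx6q driver it sits next to: the pad enum doubles as the pin-number space, and the pad table is generated from it. A minimal sketch of what each table entry boils down to, assuming IMX_PINCTRL_PIN() expands to PINCTRL_PIN(pad, #pad) as defined in pinctrl-imx.h (not shown in this diff); this is illustration only, not part of the patch:

#include <linux/pinctrl/pinctrl.h>

/* Hypothetical expansion for illustration; relies on the MX6SL_PAD_* enum above. */
#define EXAMPLE_PIN(pad)	PINCTRL_PIN(pad, #pad)

static const struct pinctrl_pin_desc example_pads[] = {
	/* MX6SL_PAD_AUD_MCLK is 19 in the enum above, so this entry becomes
	 * { .number = 19, .name = "MX6SL_PAD_AUD_MCLK" }. */
	EXAMPLE_PIN(MX6SL_PAD_AUD_MCLK),
};

/* .npins then falls out of ARRAY_SIZE(example_pads), exactly as in
 * imx6sl_pinctrl_info above. */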
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index 435bf30..3428175 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
@@ -33,7 +34,6 @@
/* Since we request GPIOs from ourself */
#include <linux/pinctrl/consumer.h>
#include <linux/platform_data/pinctrl-nomadik.h>
-#include <asm/mach/irq.h>
#include "pinctrl-nomadik.h"
#include "core.h"
diff --git a/drivers/pinctrl/pinctrl-samsung.c b/drivers/pinctrl/pinctrl-samsung.c
index 4f54faf..9763668 100644
--- a/drivers/pinctrl/pinctrl-samsung.c
+++ b/drivers/pinctrl/pinctrl-samsung.c
@@ -970,6 +970,8 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
.data = (void *)exynos4210_pin_ctrl },
{ .compatible = "samsung,exynos4x12-pinctrl",
.data = (void *)exynos4x12_pin_ctrl },
+ { .compatible = "samsung,exynos5250-pinctrl",
+ .data = (void *)exynos5250_pin_ctrl },
#endif
#ifdef CONFIG_PINCTRL_S3C64XX
{ .compatible = "samsung,s3c64xx-pinctrl",
diff --git a/drivers/pinctrl/pinctrl-samsung.h b/drivers/pinctrl/pinctrl-samsung.h
index 45f27b4..7c7f9eb 100644
--- a/drivers/pinctrl/pinctrl-samsung.h
+++ b/drivers/pinctrl/pinctrl-samsung.h
@@ -244,6 +244,7 @@ struct samsung_pmx_func {
/* list of all exported SoC specific data */
extern struct samsung_pin_ctrl exynos4210_pin_ctrl[];
extern struct samsung_pin_ctrl exynos4x12_pin_ctrl[];
+extern struct samsung_pin_ctrl exynos5250_pin_ctrl[];
extern struct samsung_pin_ctrl s3c64xx_pin_ctrl[];
#endif /* __PINCTRL_SAMSUNG_H */
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c
index fb90625..bc9d1be 100644
--- a/drivers/pinctrl/pinctrl-sirf.c
+++ b/drivers/pinctrl/pinctrl-sirf.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/consumer.h>
@@ -25,7 +26,6 @@
#include <linux/bitops.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
-#include <asm/mach/irq.h>
#define DRIVER_NAME "pinmux-sirf"
@@ -1347,7 +1347,7 @@ static inline int sirfsoc_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
struct sirfsoc_gpio_bank *bank = container_of(to_of_mm_gpio_chip(chip),
struct sirfsoc_gpio_bank, chip);
- return irq_find_mapping(bank->domain, offset);
+ return irq_create_mapping(bank->domain, offset);
}
static inline int sirfsoc_gpio_to_offset(unsigned int gpio)
@@ -1485,7 +1485,6 @@ static void sirfsoc_gpio_handle_irq(unsigned int irq, struct irq_desc *desc)
struct sirfsoc_gpio_bank *bank = irq_get_handler_data(irq);
u32 status, ctrl;
int idx = 0;
- unsigned int first_irq;
struct irq_chip *chip = irq_get_chip(irq);
chained_irq_enter(chip, desc);
@@ -1499,8 +1498,6 @@ static void sirfsoc_gpio_handle_irq(unsigned int irq, struct irq_desc *desc)
return;
}
- first_irq = bank->domain->revmap_data.legacy.first_irq;
-
while (status) {
ctrl = readl(bank->chip.regs + SIRFSOC_GPIO_CTRL(bank->id, idx));
@@ -1511,7 +1508,7 @@ static void sirfsoc_gpio_handle_irq(unsigned int irq, struct irq_desc *desc)
if ((status & 0x1) && (ctrl & SIRFSOC_GPIO_CTL_INTR_EN_MASK)) {
pr_debug("%s: gpio id %d idx %d happens\n",
__func__, bank->id, idx);
- generic_handle_irq(first_irq + idx);
+ generic_handle_irq(irq_find_mapping(bank->domain, idx));
}
idx++;
@@ -1764,9 +1761,8 @@ static int sirfsoc_gpio_probe(struct device_node *np)
goto out;
}
- bank->domain = irq_domain_add_legacy(np, SIRFSOC_GPIO_BANK_SIZE,
- SIRFSOC_GPIO_IRQ_START + i * SIRFSOC_GPIO_BANK_SIZE, 0,
- &sirfsoc_gpio_irq_simple_ops, bank);
+ bank->domain = irq_domain_add_linear(np, SIRFSOC_GPIO_BANK_SIZE,
+ &sirfsoc_gpio_irq_simple_ops, bank);
if (!bank->domain) {
pr_err("%s: Failed to create irqdomain\n", np->full_name);
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index c3340f5..0e1f99c 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -10,6 +10,7 @@ config PINCTRL_SH_PFC
select GPIO_SH_PFC if ARCH_REQUIRE_GPIOLIB
select PINMUX
select PINCONF
+ select GENERIC_PINCONF
def_bool y
help
This enables pin control drivers for SH and SH Mobile platforms
@@ -21,6 +22,11 @@ config GPIO_SH_PFC
This enables support for GPIOs within the SoC's pin function
controller.
+config PINCTRL_PFC_R8A73A4
+ def_bool y
+ depends on ARCH_R8A73A4
+ select PINCTRL_SH_PFC
+
config PINCTRL_PFC_R8A7740
def_bool y
depends on ARCH_R8A7740
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
index e8b9562..211cd8e 100644
--- a/drivers/pinctrl/sh-pfc/Makefile
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -3,6 +3,7 @@ ifeq ($(CONFIG_GPIO_SH_PFC),y)
sh-pfc-objs += gpio.o
endif
obj-$(CONFIG_PINCTRL_SH_PFC) += sh-pfc.o
+obj-$(CONFIG_PINCTRL_PFC_R8A73A4) += pfc-r8a73a4.o
obj-$(CONFIG_PINCTRL_PFC_R8A7740) += pfc-r8a7740.o
obj-$(CONFIG_PINCTRL_PFC_R8A7779) += pfc-r8a7779.o
obj-$(CONFIG_PINCTRL_PFC_SH7203) += pfc-sh7203.o
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 970ddff..b551336 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -10,7 +10,6 @@
*/
#define DRV_NAME "sh-pfc"
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitops.h>
#include <linux/err.h>
@@ -30,10 +29,8 @@ static int sh_pfc_ioremap(struct sh_pfc *pfc, struct platform_device *pdev)
struct resource *res;
int k;
- if (pdev->num_resources == 0) {
- pfc->num_windows = 0;
- return 0;
- }
+ if (pdev->num_resources == 0)
+ return -EINVAL;
pfc->window = devm_kzalloc(pfc->dev, pdev->num_resources *
sizeof(*pfc->window), GFP_NOWAIT);
@@ -59,11 +56,11 @@ static void __iomem *sh_pfc_phys_to_virt(struct sh_pfc *pfc,
unsigned long address)
{
struct sh_pfc_window *window;
- int k;
+ unsigned int i;
/* scan through physical windows and convert address */
- for (k = 0; k < pfc->num_windows; k++) {
- window = pfc->window + k;
+ for (i = 0; i < pfc->num_windows; i++) {
+ window = pfc->window + i;
if (address < window->phys)
continue;
@@ -74,11 +71,33 @@ static void __iomem *sh_pfc_phys_to_virt(struct sh_pfc *pfc,
return window->virt + (address - window->phys);
}
- /* no windows defined, register must be 1:1 mapped virt:phys */
- return (void __iomem *)address;
+ BUG();
+ return NULL;
+}
+
+int sh_pfc_get_pin_index(struct sh_pfc *pfc, unsigned int pin)
+{
+ unsigned int offset;
+ unsigned int i;
+
+ if (pfc->info->ranges == NULL)
+ return pin;
+
+ for (i = 0, offset = 0; i < pfc->info->nr_ranges; ++i) {
+ const struct pinmux_range *range = &pfc->info->ranges[i];
+
+ if (pin <= range->end)
+ return pin >= range->begin
+ ? offset + pin - range->begin : -1;
+
+ offset += range->end - range->begin + 1;
+ }
+
+ return -EINVAL;
}
-static int sh_pfc_enum_in_range(pinmux_enum_t enum_id, struct pinmux_range *r)
+static int sh_pfc_enum_in_range(pinmux_enum_t enum_id,
+ const struct pinmux_range *r)
{
if (enum_id < r->begin)
return 0;
@@ -89,8 +108,8 @@ static int sh_pfc_enum_in_range(pinmux_enum_t enum_id, struct pinmux_range *r)
return 1;
}
-static unsigned long sh_pfc_read_raw_reg(void __iomem *mapped_reg,
- unsigned long reg_width)
+unsigned long sh_pfc_read_raw_reg(void __iomem *mapped_reg,
+ unsigned long reg_width)
{
switch (reg_width) {
case 8:
@@ -105,8 +124,8 @@ static unsigned long sh_pfc_read_raw_reg(void __iomem *mapped_reg,
return 0;
}
-static void sh_pfc_write_raw_reg(void __iomem *mapped_reg,
- unsigned long reg_width, unsigned long data)
+void sh_pfc_write_raw_reg(void __iomem *mapped_reg, unsigned long reg_width,
+ unsigned long data)
{
switch (reg_width) {
case 8:
@@ -123,39 +142,8 @@ static void sh_pfc_write_raw_reg(void __iomem *mapped_reg,
BUG();
}
-int sh_pfc_read_bit(struct pinmux_data_reg *dr, unsigned long in_pos)
-{
- unsigned long pos;
-
- pos = dr->reg_width - (in_pos + 1);
-
- pr_debug("read_bit: addr = %lx, pos = %ld, "
- "r_width = %ld\n", dr->reg, pos, dr->reg_width);
-
- return (sh_pfc_read_raw_reg(dr->mapped_reg, dr->reg_width) >> pos) & 1;
-}
-
-void sh_pfc_write_bit(struct pinmux_data_reg *dr, unsigned long in_pos,
- unsigned long value)
-{
- unsigned long pos;
-
- pos = dr->reg_width - (in_pos + 1);
-
- pr_debug("write_bit addr = %lx, value = %d, pos = %ld, "
- "r_width = %ld\n",
- dr->reg, !!value, pos, dr->reg_width);
-
- if (value)
- set_bit(pos, &dr->reg_shadow);
- else
- clear_bit(pos, &dr->reg_shadow);
-
- sh_pfc_write_raw_reg(dr->mapped_reg, dr->reg_width, dr->reg_shadow);
-}
-
static void sh_pfc_config_reg_helper(struct sh_pfc *pfc,
- struct pinmux_cfg_reg *crp,
+ const struct pinmux_cfg_reg *crp,
unsigned long in_pos,
void __iomem **mapped_regp,
unsigned long *maskp,
@@ -176,24 +164,8 @@ static void sh_pfc_config_reg_helper(struct sh_pfc *pfc,
}
}
-static int sh_pfc_read_config_reg(struct sh_pfc *pfc,
- struct pinmux_cfg_reg *crp,
- unsigned long field)
-{
- void __iomem *mapped_reg;
- unsigned long mask, pos;
-
- sh_pfc_config_reg_helper(pfc, crp, field, &mapped_reg, &mask, &pos);
-
- pr_debug("read_reg: addr = %lx, field = %ld, "
- "r_width = %ld, f_width = %ld\n",
- crp->reg, field, crp->reg_width, crp->field_width);
-
- return (sh_pfc_read_raw_reg(mapped_reg, crp->reg_width) >> pos) & mask;
-}
-
static void sh_pfc_write_config_reg(struct sh_pfc *pfc,
- struct pinmux_cfg_reg *crp,
+ const struct pinmux_cfg_reg *crp,
unsigned long field, unsigned long value)
{
void __iomem *mapped_reg;
@@ -201,9 +173,9 @@ static void sh_pfc_write_config_reg(struct sh_pfc *pfc,
sh_pfc_config_reg_helper(pfc, crp, field, &mapped_reg, &mask, &pos);
- pr_debug("write_reg addr = %lx, value = %ld, field = %ld, "
- "r_width = %ld, f_width = %ld\n",
- crp->reg, value, field, crp->reg_width, crp->field_width);
+ dev_dbg(pfc->dev, "write_reg addr = %lx, value = %ld, field = %ld, "
+ "r_width = %ld, f_width = %ld\n",
+ crp->reg, value, field, crp->reg_width, crp->field_width);
mask = ~(mask << pos);
value = value << pos;
@@ -220,83 +192,11 @@ static void sh_pfc_write_config_reg(struct sh_pfc *pfc,
sh_pfc_write_raw_reg(mapped_reg, crp->reg_width, data);
}
-static int sh_pfc_setup_data_reg(struct sh_pfc *pfc, unsigned gpio)
-{
- struct pinmux_gpio *gpiop = &pfc->info->gpios[gpio];
- struct pinmux_data_reg *data_reg;
- int k, n;
-
- if (!sh_pfc_enum_in_range(gpiop->enum_id, &pfc->info->data))
- return -1;
-
- k = 0;
- while (1) {
- data_reg = pfc->info->data_regs + k;
-
- if (!data_reg->reg_width)
- break;
-
- data_reg->mapped_reg = sh_pfc_phys_to_virt(pfc, data_reg->reg);
-
- for (n = 0; n < data_reg->reg_width; n++) {
- if (data_reg->enum_ids[n] == gpiop->enum_id) {
- gpiop->flags &= ~PINMUX_FLAG_DREG;
- gpiop->flags |= (k << PINMUX_FLAG_DREG_SHIFT);
- gpiop->flags &= ~PINMUX_FLAG_DBIT;
- gpiop->flags |= (n << PINMUX_FLAG_DBIT_SHIFT);
- return 0;
- }
- }
- k++;
- }
-
- BUG();
-
- return -1;
-}
-
-static void sh_pfc_setup_data_regs(struct sh_pfc *pfc)
-{
- struct pinmux_data_reg *drp;
- int k;
-
- for (k = pfc->info->first_gpio; k <= pfc->info->last_gpio; k++)
- sh_pfc_setup_data_reg(pfc, k);
-
- k = 0;
- while (1) {
- drp = pfc->info->data_regs + k;
-
- if (!drp->reg_width)
- break;
-
- drp->reg_shadow = sh_pfc_read_raw_reg(drp->mapped_reg,
- drp->reg_width);
- k++;
- }
-}
-
-int sh_pfc_get_data_reg(struct sh_pfc *pfc, unsigned gpio,
- struct pinmux_data_reg **drp, int *bitp)
-{
- struct pinmux_gpio *gpiop = &pfc->info->gpios[gpio];
- int k, n;
-
- if (!sh_pfc_enum_in_range(gpiop->enum_id, &pfc->info->data))
- return -1;
-
- k = (gpiop->flags & PINMUX_FLAG_DREG) >> PINMUX_FLAG_DREG_SHIFT;
- n = (gpiop->flags & PINMUX_FLAG_DBIT) >> PINMUX_FLAG_DBIT_SHIFT;
- *drp = pfc->info->data_regs + k;
- *bitp = n;
- return 0;
-}
-
static int sh_pfc_get_config_reg(struct sh_pfc *pfc, pinmux_enum_t enum_id,
- struct pinmux_cfg_reg **crp, int *fieldp,
- int *valuep, unsigned long **cntp)
+ const struct pinmux_cfg_reg **crp, int *fieldp,
+ int *valuep)
{
- struct pinmux_cfg_reg *config_reg;
+ const struct pinmux_cfg_reg *config_reg;
unsigned long r_width, f_width, curr_width, ncomb;
int k, m, n, pos, bit_pos;
@@ -324,7 +224,6 @@ static int sh_pfc_get_config_reg(struct sh_pfc *pfc, pinmux_enum_t enum_id,
*crp = config_reg;
*fieldp = m;
*valuep = n;
- *cntp = &config_reg->cnt[m];
return 0;
}
}
@@ -334,50 +233,42 @@ static int sh_pfc_get_config_reg(struct sh_pfc *pfc, pinmux_enum_t enum_id,
k++;
}
- return -1;
+ return -EINVAL;
}
-int sh_pfc_gpio_to_enum(struct sh_pfc *pfc, unsigned gpio, int pos,
- pinmux_enum_t *enum_idp)
+static int sh_pfc_mark_to_enum(struct sh_pfc *pfc, pinmux_enum_t mark, int pos,
+ pinmux_enum_t *enum_idp)
{
- pinmux_enum_t enum_id = pfc->info->gpios[gpio].enum_id;
- pinmux_enum_t *data = pfc->info->gpio_data;
+ const pinmux_enum_t *data = pfc->info->gpio_data;
int k;
- if (!sh_pfc_enum_in_range(enum_id, &pfc->info->data)) {
- if (!sh_pfc_enum_in_range(enum_id, &pfc->info->mark)) {
- pr_err("non data/mark enum_id for gpio %d\n", gpio);
- return -1;
- }
- }
-
if (pos) {
*enum_idp = data[pos + 1];
return pos + 1;
}
for (k = 0; k < pfc->info->gpio_data_size; k++) {
- if (data[k] == enum_id) {
+ if (data[k] == mark) {
*enum_idp = data[k + 1];
return k + 1;
}
}
- pr_err("cannot locate data/mark enum_id for gpio %d\n", gpio);
- return -1;
+ dev_err(pfc->dev, "cannot locate data/mark enum_id for mark %d\n",
+ mark);
+ return -EINVAL;
}
-int sh_pfc_config_gpio(struct sh_pfc *pfc, unsigned gpio, int pinmux_type,
- int cfg_mode)
+int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type)
{
- struct pinmux_cfg_reg *cr = NULL;
+ const struct pinmux_cfg_reg *cr = NULL;
pinmux_enum_t enum_id;
- struct pinmux_range *range;
+ const struct pinmux_range *range;
int in_range, pos, field, value;
- unsigned long *cntp;
+ int ret;
switch (pinmux_type) {
-
+ case PINMUX_TYPE_GPIO:
case PINMUX_TYPE_FUNCTION:
range = NULL;
break;
@@ -399,33 +290,37 @@ int sh_pfc_config_gpio(struct sh_pfc *pfc, unsigned gpio, int pinmux_type,
break;
default:
- goto out_err;
+ return -EINVAL;
}
pos = 0;
enum_id = 0;
field = 0;
value = 0;
+
+ /* Iterate over all the configuration fields we need to update. */
while (1) {
- pos = sh_pfc_gpio_to_enum(pfc, gpio, pos, &enum_id);
- if (pos <= 0)
- goto out_err;
+ pos = sh_pfc_mark_to_enum(pfc, mark, pos, &enum_id);
+ if (pos < 0)
+ return pos;
if (!enum_id)
break;
- /* first check if this is a function enum */
+ /* Check if the configuration field selects a function. If it
+ * doesn't, skip the field if it's not applicable to the
+ * requested pinmux type.
+ */
in_range = sh_pfc_enum_in_range(enum_id, &pfc->info->function);
if (!in_range) {
- /* not a function enum */
- if (range) {
- /*
- * other range exists, so this pin is
- * a regular GPIO pin that now is being
- * bound to a specific direction.
- *
- * for this case we only allow function enums
- * and the enums that match the other range.
+ if (pinmux_type == PINMUX_TYPE_FUNCTION) {
+ /* Functions are allowed to modify all
+ * fields.
+ */
+ in_range = 1;
+ } else if (pinmux_type != PINMUX_TYPE_GPIO) {
+ /* Input/output types can only modify fields
+ * that correspond to their respective ranges.
*/
in_range = sh_pfc_enum_in_range(enum_id, range);
@@ -436,60 +331,29 @@ int sh_pfc_config_gpio(struct sh_pfc *pfc, unsigned gpio, int pinmux_type,
*/
if (in_range && enum_id == range->force)
continue;
- } else {
- /*
- * no other range exists, so this pin
- * must then be of the function type.
- *
- * allow function type pins to select
- * any combination of function/in/out
- * in their MARK lists.
- */
- in_range = 1;
}
+ /* GPIOs are only allowed to modify function fields. */
}
if (!in_range)
continue;
- if (sh_pfc_get_config_reg(pfc, enum_id, &cr,
- &field, &value, &cntp) != 0)
- goto out_err;
-
- switch (cfg_mode) {
- case GPIO_CFG_DRYRUN:
- if (!*cntp ||
- (sh_pfc_read_config_reg(pfc, cr, field) != value))
- continue;
- break;
-
- case GPIO_CFG_REQ:
- sh_pfc_write_config_reg(pfc, cr, field, value);
- *cntp = *cntp + 1;
- break;
+ ret = sh_pfc_get_config_reg(pfc, enum_id, &cr, &field, &value);
+ if (ret < 0)
+ return ret;
- case GPIO_CFG_FREE:
- *cntp = *cntp - 1;
- break;
- }
+ sh_pfc_write_config_reg(pfc, cr, field, value);
}
return 0;
- out_err:
- return -1;
}
static int sh_pfc_probe(struct platform_device *pdev)
{
- struct sh_pfc_soc_info *info;
+ const struct sh_pfc_soc_info *info;
struct sh_pfc *pfc;
int ret;
- /*
- * Ensure that the type encoding fits
- */
- BUILD_BUG_ON(PINMUX_FLAG_TYPE > ((1 << PINMUX_FLAG_DBIT_SHIFT) - 1));
-
info = pdev->id_entry->driver_data
? (void *)pdev->id_entry->driver_data : pdev->dev.platform_data;
if (info == NULL)
@@ -509,7 +373,6 @@ static int sh_pfc_probe(struct platform_device *pdev)
spin_lock_init(&pfc->lock);
pinctrl_provide_dummies();
- sh_pfc_setup_data_regs(pfc);
/*
* Initialize pinctrl bindings first
@@ -529,13 +392,13 @@ static int sh_pfc_probe(struct platform_device *pdev)
* PFC state as it is, given that there are already
* extant users of it that have succeeded by this point.
*/
- pr_notice("failed to init GPIO chip, ignoring...\n");
+ dev_notice(pfc->dev, "failed to init GPIO chip, ignoring...\n");
}
#endif
platform_set_drvdata(pdev, pfc);
- pr_info("%s support registered\n", info->name);
+ dev_info(pfc->dev, "%s support registered\n", info->name);
return 0;
}
@@ -555,6 +418,9 @@ static int sh_pfc_remove(struct platform_device *pdev)
}
static const struct platform_device_id sh_pfc_id_table[] = {
+#ifdef CONFIG_PINCTRL_PFC_R8A73A4
+ { "pfc-r8a73a4", (kernel_ulong_t)&r8a73a4_pinmux_info },
+#endif
#ifdef CONFIG_PINCTRL_PFC_R8A7740
{ "pfc-r8a7740", (kernel_ulong_t)&r8a7740_pinmux_info },
#endif
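The core.c changes above replace the per-GPIO flag bookkeeping with sh_pfc_get_pin_index(), which maps a sparse pin number to a dense index by accumulating the sizes of the preceding ranges. A self-contained sketch of that mapping with two hypothetical ranges (pins 0-30 and 32-40, so pin 31 is a hole); illustration only, not part of the patch:

struct example_range { unsigned int begin, end; };

static int example_pin_index(unsigned int pin)
{
	static const struct example_range ranges[] = { { 0, 30 }, { 32, 40 } };
	unsigned int offset = 0;
	unsigned int i;

	for (i = 0; i < 2; i++) {
		if (pin <= ranges[i].end)
			return pin >= ranges[i].begin
				? offset + pin - ranges[i].begin : -1;
		/* Range lies entirely below the pin: account for its size. */
		offset += ranges[i].end - ranges[i].begin + 1;
	}
	return -1;
}

/* example_pin_index(35) == 31 + (35 - 32) == 34, example_pin_index(31) == -1,
 * mirroring how sh_pfc_get_pin_index() skips holes in the pin numbering. */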
diff --git a/drivers/pinctrl/sh-pfc/core.h b/drivers/pinctrl/sh-pfc/core.h
index ba7c33c..89cb428 100644
--- a/drivers/pinctrl/sh-pfc/core.h
+++ b/drivers/pinctrl/sh-pfc/core.h
@@ -26,13 +26,17 @@ struct sh_pfc_pinctrl;
struct sh_pfc {
struct device *dev;
- struct sh_pfc_soc_info *info;
+ const struct sh_pfc_soc_info *info;
spinlock_t lock;
unsigned int num_windows;
struct sh_pfc_window *window;
+ unsigned int nr_pins;
+
struct sh_pfc_chip *gpio;
+ struct sh_pfc_chip *func;
+
struct sh_pfc_pinctrl *pinctrl;
};
@@ -42,31 +46,30 @@ int sh_pfc_unregister_gpiochip(struct sh_pfc *pfc);
int sh_pfc_register_pinctrl(struct sh_pfc *pfc);
int sh_pfc_unregister_pinctrl(struct sh_pfc *pfc);
-int sh_pfc_read_bit(struct pinmux_data_reg *dr, unsigned long in_pos);
-void sh_pfc_write_bit(struct pinmux_data_reg *dr, unsigned long in_pos,
- unsigned long value);
-int sh_pfc_get_data_reg(struct sh_pfc *pfc, unsigned gpio,
- struct pinmux_data_reg **drp, int *bitp);
-int sh_pfc_gpio_to_enum(struct sh_pfc *pfc, unsigned gpio, int pos,
- pinmux_enum_t *enum_idp);
-int sh_pfc_config_gpio(struct sh_pfc *pfc, unsigned gpio, int pinmux_type,
- int cfg_mode);
+unsigned long sh_pfc_read_raw_reg(void __iomem *mapped_reg,
+ unsigned long reg_width);
+void sh_pfc_write_raw_reg(void __iomem *mapped_reg, unsigned long reg_width,
+ unsigned long data);
+
+int sh_pfc_get_pin_index(struct sh_pfc *pfc, unsigned int pin);
+int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type);
-extern struct sh_pfc_soc_info r8a7740_pinmux_info;
-extern struct sh_pfc_soc_info r8a7779_pinmux_info;
-extern struct sh_pfc_soc_info sh7203_pinmux_info;
-extern struct sh_pfc_soc_info sh7264_pinmux_info;
-extern struct sh_pfc_soc_info sh7269_pinmux_info;
-extern struct sh_pfc_soc_info sh7372_pinmux_info;
-extern struct sh_pfc_soc_info sh73a0_pinmux_info;
-extern struct sh_pfc_soc_info sh7720_pinmux_info;
-extern struct sh_pfc_soc_info sh7722_pinmux_info;
-extern struct sh_pfc_soc_info sh7723_pinmux_info;
-extern struct sh_pfc_soc_info sh7724_pinmux_info;
-extern struct sh_pfc_soc_info sh7734_pinmux_info;
-extern struct sh_pfc_soc_info sh7757_pinmux_info;
-extern struct sh_pfc_soc_info sh7785_pinmux_info;
-extern struct sh_pfc_soc_info sh7786_pinmux_info;
-extern struct sh_pfc_soc_info shx3_pinmux_info;
+extern const struct sh_pfc_soc_info r8a73a4_pinmux_info;
+extern const struct sh_pfc_soc_info r8a7740_pinmux_info;
+extern const struct sh_pfc_soc_info r8a7779_pinmux_info;
+extern const struct sh_pfc_soc_info sh7203_pinmux_info;
+extern const struct sh_pfc_soc_info sh7264_pinmux_info;
+extern const struct sh_pfc_soc_info sh7269_pinmux_info;
+extern const struct sh_pfc_soc_info sh7372_pinmux_info;
+extern const struct sh_pfc_soc_info sh73a0_pinmux_info;
+extern const struct sh_pfc_soc_info sh7720_pinmux_info;
+extern const struct sh_pfc_soc_info sh7722_pinmux_info;
+extern const struct sh_pfc_soc_info sh7723_pinmux_info;
+extern const struct sh_pfc_soc_info sh7724_pinmux_info;
+extern const struct sh_pfc_soc_info sh7734_pinmux_info;
+extern const struct sh_pfc_soc_info sh7757_pinmux_info;
+extern const struct sh_pfc_soc_info sh7785_pinmux_info;
+extern const struct sh_pfc_soc_info sh7786_pinmux_info;
+extern const struct sh_pfc_soc_info shx3_pinmux_info;
#endif /* __SH_PFC_CORE_H__ */
diff --git a/drivers/pinctrl/sh-pfc/gpio.c b/drivers/pinctrl/sh-pfc/gpio.c
index a535075..d37efa7 100644
--- a/drivers/pinctrl/sh-pfc/gpio.c
+++ b/drivers/pinctrl/sh-pfc/gpio.c
@@ -9,8 +9,6 @@
* for more details.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME " gpio: " fmt
-
#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/init.h>
@@ -21,9 +19,23 @@
#include "core.h"
+struct sh_pfc_gpio_data_reg {
+ const struct pinmux_data_reg *info;
+ unsigned long shadow;
+};
+
+struct sh_pfc_gpio_pin {
+ u8 dbit;
+ u8 dreg;
+};
+
struct sh_pfc_chip {
- struct sh_pfc *pfc;
- struct gpio_chip gpio_chip;
+ struct sh_pfc *pfc;
+ struct gpio_chip gpio_chip;
+
+ struct sh_pfc_window *mem;
+ struct sh_pfc_gpio_data_reg *regs;
+ struct sh_pfc_gpio_pin *pins;
};
static struct sh_pfc_chip *gpio_to_pfc_chip(struct gpio_chip *gc)
@@ -36,143 +48,367 @@ static struct sh_pfc *gpio_to_pfc(struct gpio_chip *gc)
return gpio_to_pfc_chip(gc)->pfc;
}
-static int sh_gpio_request(struct gpio_chip *gc, unsigned offset)
+static void gpio_get_data_reg(struct sh_pfc_chip *chip, unsigned int gpio,
+ struct sh_pfc_gpio_data_reg **reg,
+ unsigned int *bit)
{
- return pinctrl_request_gpio(offset);
+ int idx = sh_pfc_get_pin_index(chip->pfc, gpio);
+ struct sh_pfc_gpio_pin *gpio_pin = &chip->pins[idx];
+
+ *reg = &chip->regs[gpio_pin->dreg];
+ *bit = gpio_pin->dbit;
}
-static void sh_gpio_free(struct gpio_chip *gc, unsigned offset)
+static unsigned long gpio_read_data_reg(struct sh_pfc_chip *chip,
+ const struct pinmux_data_reg *dreg)
{
- pinctrl_free_gpio(offset);
+ void __iomem *mem = dreg->reg - chip->mem->phys + chip->mem->virt;
+
+ return sh_pfc_read_raw_reg(mem, dreg->reg_width);
}
-static void sh_gpio_set_value(struct sh_pfc *pfc, unsigned gpio, int value)
+static void gpio_write_data_reg(struct sh_pfc_chip *chip,
+ const struct pinmux_data_reg *dreg,
+ unsigned long value)
{
- struct pinmux_data_reg *dr = NULL;
- int bit = 0;
+ void __iomem *mem = dreg->reg - chip->mem->phys + chip->mem->virt;
- if (sh_pfc_get_data_reg(pfc, gpio, &dr, &bit) != 0)
- BUG();
- else
- sh_pfc_write_bit(dr, bit, value);
+ sh_pfc_write_raw_reg(mem, dreg->reg_width, value);
}
-static int sh_gpio_get_value(struct sh_pfc *pfc, unsigned gpio)
+static void gpio_setup_data_reg(struct sh_pfc_chip *chip, unsigned gpio)
{
- struct pinmux_data_reg *dr = NULL;
- int bit = 0;
+ struct sh_pfc *pfc = chip->pfc;
+ struct sh_pfc_gpio_pin *gpio_pin = &chip->pins[gpio];
+ const struct sh_pfc_pin *pin = &pfc->info->pins[gpio];
+ const struct pinmux_data_reg *dreg;
+ unsigned int bit;
+ unsigned int i;
+
+ for (i = 0, dreg = pfc->info->data_regs; dreg->reg; ++i, ++dreg) {
+ for (bit = 0; bit < dreg->reg_width; bit++) {
+ if (dreg->enum_ids[bit] == pin->enum_id) {
+ gpio_pin->dreg = i;
+ gpio_pin->dbit = bit;
+ return;
+ }
+ }
+ }
- if (sh_pfc_get_data_reg(pfc, gpio, &dr, &bit) != 0)
+ BUG();
+}
+
+static int gpio_setup_data_regs(struct sh_pfc_chip *chip)
+{
+ struct sh_pfc *pfc = chip->pfc;
+ const struct pinmux_data_reg *dreg;
+ unsigned int i;
+
+ /* Count the number of data registers, allocate memory and initialize
+ * them.
+ */
+ for (i = 0; pfc->info->data_regs[i].reg_width; ++i)
+ ;
+
+ chip->regs = devm_kzalloc(pfc->dev, i * sizeof(*chip->regs),
+ GFP_KERNEL);
+ if (chip->regs == NULL)
+ return -ENOMEM;
+
+ for (i = 0, dreg = pfc->info->data_regs; dreg->reg_width; ++i, ++dreg) {
+ chip->regs[i].info = dreg;
+ chip->regs[i].shadow = gpio_read_data_reg(chip, dreg);
+ }
+
+ for (i = 0; i < pfc->info->nr_pins; i++) {
+ if (pfc->info->pins[i].enum_id == 0)
+ continue;
+
+ gpio_setup_data_reg(chip, i);
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pin GPIOs
+ */
+
+static int gpio_pin_request(struct gpio_chip *gc, unsigned offset)
+{
+ struct sh_pfc *pfc = gpio_to_pfc(gc);
+ int idx = sh_pfc_get_pin_index(pfc, offset);
+
+ if (idx < 0 || pfc->info->pins[idx].enum_id == 0)
return -EINVAL;
- return sh_pfc_read_bit(dr, bit);
+ return pinctrl_request_gpio(offset);
+}
+
+static void gpio_pin_free(struct gpio_chip *gc, unsigned offset)
+{
+ return pinctrl_free_gpio(offset);
+}
+
+static void gpio_pin_set_value(struct sh_pfc_chip *chip, unsigned offset,
+ int value)
+{
+ struct sh_pfc_gpio_data_reg *reg;
+ unsigned long pos;
+ unsigned int bit;
+
+ gpio_get_data_reg(chip, offset, &reg, &bit);
+
+ pos = reg->info->reg_width - (bit + 1);
+
+ if (value)
+ set_bit(pos, &reg->shadow);
+ else
+ clear_bit(pos, &reg->shadow);
+
+ gpio_write_data_reg(chip, reg->info, reg->shadow);
}
-static int sh_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
+static int gpio_pin_direction_input(struct gpio_chip *gc, unsigned offset)
{
return pinctrl_gpio_direction_input(offset);
}
-static int sh_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
+static int gpio_pin_direction_output(struct gpio_chip *gc, unsigned offset,
int value)
{
- sh_gpio_set_value(gpio_to_pfc(gc), offset, value);
+ gpio_pin_set_value(gpio_to_pfc_chip(gc), offset, value);
return pinctrl_gpio_direction_output(offset);
}
-static int sh_gpio_get(struct gpio_chip *gc, unsigned offset)
+static int gpio_pin_get(struct gpio_chip *gc, unsigned offset)
{
- return sh_gpio_get_value(gpio_to_pfc(gc), offset);
+ struct sh_pfc_chip *chip = gpio_to_pfc_chip(gc);
+ struct sh_pfc_gpio_data_reg *reg;
+ unsigned long pos;
+ unsigned int bit;
+
+ gpio_get_data_reg(chip, offset, &reg, &bit);
+
+ pos = reg->info->reg_width - (bit + 1);
+
+ return (gpio_read_data_reg(chip, reg->info) >> pos) & 1;
}
-static void sh_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+static void gpio_pin_set(struct gpio_chip *gc, unsigned offset, int value)
{
- sh_gpio_set_value(gpio_to_pfc(gc), offset, value);
+ gpio_pin_set_value(gpio_to_pfc_chip(gc), offset, value);
}
-static int sh_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+static int gpio_pin_to_irq(struct gpio_chip *gc, unsigned offset)
{
struct sh_pfc *pfc = gpio_to_pfc(gc);
- pinmux_enum_t enum_id;
- pinmux_enum_t *enum_ids;
- int i, k, pos;
-
- pos = 0;
- enum_id = 0;
- while (1) {
- pos = sh_pfc_gpio_to_enum(pfc, offset, pos, &enum_id);
- if (pos <= 0 || !enum_id)
- break;
+ int i, k;
- for (i = 0; i < pfc->info->gpio_irq_size; i++) {
- enum_ids = pfc->info->gpio_irq[i].enum_ids;
- for (k = 0; enum_ids[k]; k++) {
- if (enum_ids[k] == enum_id)
- return pfc->info->gpio_irq[i].irq;
- }
+ for (i = 0; i < pfc->info->gpio_irq_size; i++) {
+ unsigned short *gpios = pfc->info->gpio_irq[i].gpios;
+
+ for (k = 0; gpios[k]; k++) {
+ if (gpios[k] == offset)
+ return pfc->info->gpio_irq[i].irq;
}
}
return -ENOSYS;
}
-static void sh_pfc_gpio_setup(struct sh_pfc_chip *chip)
+static int gpio_pin_setup(struct sh_pfc_chip *chip)
{
struct sh_pfc *pfc = chip->pfc;
struct gpio_chip *gc = &chip->gpio_chip;
+ int ret;
+
+ chip->pins = devm_kzalloc(pfc->dev, pfc->nr_pins * sizeof(*chip->pins),
+ GFP_KERNEL);
+ if (chip->pins == NULL)
+ return -ENOMEM;
+
+ ret = gpio_setup_data_regs(chip);
+ if (ret < 0)
+ return ret;
+
+ gc->request = gpio_pin_request;
+ gc->free = gpio_pin_free;
+ gc->direction_input = gpio_pin_direction_input;
+ gc->get = gpio_pin_get;
+ gc->direction_output = gpio_pin_direction_output;
+ gc->set = gpio_pin_set;
+ gc->to_irq = gpio_pin_to_irq;
- gc->request = sh_gpio_request;
- gc->free = sh_gpio_free;
- gc->direction_input = sh_gpio_direction_input;
- gc->get = sh_gpio_get;
- gc->direction_output = sh_gpio_direction_output;
- gc->set = sh_gpio_set;
- gc->to_irq = sh_gpio_to_irq;
+ gc->label = pfc->info->name;
+ gc->dev = pfc->dev;
+ gc->owner = THIS_MODULE;
+ gc->base = 0;
+ gc->ngpio = pfc->nr_pins;
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Function GPIOs
+ */
+
+static int gpio_function_request(struct gpio_chip *gc, unsigned offset)
+{
+ static bool __print_once;
+ struct sh_pfc *pfc = gpio_to_pfc(gc);
+ unsigned int mark = pfc->info->func_gpios[offset].enum_id;
+ unsigned long flags;
+ int ret;
+
+ if (!__print_once) {
+ dev_notice(pfc->dev,
+ "Use of GPIO API for function requests is deprecated."
+ " Convert to pinctrl\n");
+ __print_once = true;
+ }
+
+ if (mark == 0)
+ return -EINVAL;
- WARN_ON(pfc->info->first_gpio != 0); /* needs testing */
+ spin_lock_irqsave(&pfc->lock, flags);
+ ret = sh_pfc_config_mux(pfc, mark, PINMUX_TYPE_FUNCTION);
+ spin_unlock_irqrestore(&pfc->lock, flags);
+
+ return ret;
+}
+
+static void gpio_function_free(struct gpio_chip *gc, unsigned offset)
+{
+}
+
+static int gpio_function_setup(struct sh_pfc_chip *chip)
+{
+ struct sh_pfc *pfc = chip->pfc;
+ struct gpio_chip *gc = &chip->gpio_chip;
+
+ gc->request = gpio_function_request;
+ gc->free = gpio_function_free;
gc->label = pfc->info->name;
gc->owner = THIS_MODULE;
- gc->base = pfc->info->first_gpio;
- gc->ngpio = (pfc->info->last_gpio - pfc->info->first_gpio) + 1;
+ gc->base = pfc->nr_pins;
+ gc->ngpio = pfc->info->nr_func_gpios;
+
+ return 0;
}
-int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
+/* -----------------------------------------------------------------------------
+ * Register/unregister
+ */
+
+static struct sh_pfc_chip *
+sh_pfc_add_gpiochip(struct sh_pfc *pfc, int(*setup)(struct sh_pfc_chip *),
+ struct sh_pfc_window *mem)
{
struct sh_pfc_chip *chip;
int ret;
chip = devm_kzalloc(pfc->dev, sizeof(*chip), GFP_KERNEL);
if (unlikely(!chip))
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
+ chip->mem = mem;
chip->pfc = pfc;
- sh_pfc_gpio_setup(chip);
+ ret = setup(chip);
+ if (ret < 0)
+ return ERR_PTR(ret);
ret = gpiochip_add(&chip->gpio_chip);
if (unlikely(ret < 0))
- return ret;
+ return ERR_PTR(ret);
+
+ dev_info(pfc->dev, "%s handling gpio %u -> %u\n",
+ chip->gpio_chip.label, chip->gpio_chip.base,
+ chip->gpio_chip.base + chip->gpio_chip.ngpio - 1);
+
+ return chip;
+}
+
+int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
+{
+ const struct pinmux_range *ranges;
+ struct pinmux_range def_range;
+ struct sh_pfc_chip *chip;
+ unsigned int nr_ranges;
+ unsigned int i;
+ int ret;
+
+ if (pfc->info->data_regs == NULL)
+ return 0;
+
+ /* Find the memory window that contains the GPIO registers. Boards that

+ * register a separate GPIO device will not supply a memory resource
+ * that covers the data registers. In that case don't try to handle
+ * GPIOs.
+ */
+ for (i = 0; i < pfc->num_windows; ++i) {
+ struct sh_pfc_window *window = &pfc->window[i];
+
+ if (pfc->info->data_regs[0].reg >= window->phys &&
+ pfc->info->data_regs[0].reg < window->phys + window->size)
+ break;
+ }
+
+ if (i == pfc->num_windows)
+ return 0;
+
+ /* Register the real GPIOs chip. */
+ chip = sh_pfc_add_gpiochip(pfc, gpio_pin_setup, &pfc->window[i]);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
pfc->gpio = chip;
- pr_info("%s handling gpio %d -> %d\n",
- pfc->info->name, pfc->info->first_gpio,
- pfc->info->last_gpio);
+ /* Register the GPIO to pin mappings. */
+ if (pfc->info->ranges == NULL) {
+ def_range.begin = 0;
+ def_range.end = pfc->info->nr_pins - 1;
+ ranges = &def_range;
+ nr_ranges = 1;
+ } else {
+ ranges = pfc->info->ranges;
+ nr_ranges = pfc->info->nr_ranges;
+ }
+
+ for (i = 0; i < nr_ranges; ++i) {
+ const struct pinmux_range *range = &ranges[i];
+
+ ret = gpiochip_add_pin_range(&chip->gpio_chip,
+ dev_name(pfc->dev),
+ range->begin, range->begin,
+ range->end - range->begin + 1);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Register the function GPIOs chip. */
+ if (pfc->info->nr_func_gpios == 0)
+ return 0;
+
+ chip = sh_pfc_add_gpiochip(pfc, gpio_function_setup, NULL);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ pfc->func = chip;
return 0;
}
int sh_pfc_unregister_gpiochip(struct sh_pfc *pfc)
{
- struct sh_pfc_chip *chip = pfc->gpio;
+ int err;
int ret;
- ret = gpiochip_remove(&chip->gpio_chip);
- if (unlikely(ret < 0))
- return ret;
+ ret = gpiochip_remove(&pfc->gpio->gpio_chip);
+ err = gpiochip_remove(&pfc->func->gpio_chip);
- pfc->gpio = NULL;
- return 0;
+ return ret < 0 ? ret : err;
}
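In the reworked gpio.c above, GPIO output no longer goes through sh_pfc_write_bit(); each data register keeps a shadow copy that is updated with set_bit()/clear_bit() and then written back whole. A sketch of that read-modify-write path, assuming a hypothetical 32-bit data register (the driver itself dispatches on reg_width via sh_pfc_write_raw_reg()); not part of the patch:

#include <linux/bitops.h>
#include <linux/io.h>

struct example_data_reg {
	void __iomem *mapped_reg;
	unsigned long reg_width;	/* 32 in this sketch */
	unsigned long shadow;		/* last value written to the register */
};

static void example_set_value(struct example_data_reg *dr, unsigned int bit,
			      int value)
{
	/* Bit 0 in the pin table corresponds to the register MSB, hence the
	 * reg_width - (bit + 1) position, as in gpio_pin_set_value() above. */
	unsigned long pos = dr->reg_width - (bit + 1);

	if (value)
		set_bit(pos, &dr->shadow);
	else
		clear_bit(pos, &dr->shadow);

	/* Write the whole shadow back rather than re-reading the hardware. */
	iowrite32(dr->shadow, dr->mapped_reg);
}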
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c b/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
new file mode 100644
index 0000000..bbff559
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
@@ -0,0 +1,2587 @@
+/*
+ * Copyright (C) 2012-2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Magnus Damm
+ * Copyright (C) 2012 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <mach/irqs.h>
+#include <mach/r8a73a4.h>
+
+#include "core.h"
+#include "sh_pfc.h"
+
+#define CPU_ALL_PORT(fn, pfx, sfx) \
+ /* Port0 - Port30 */ \
+ PORT_10(fn, pfx, sfx), \
+ PORT_10(fn, pfx##1, sfx), \
+ PORT_10(fn, pfx##2, sfx), \
+ PORT_1(fn, pfx##30, sfx), \
+ /* Port32 - Port40 */ \
+ PORT_1(fn, pfx##32, sfx), PORT_1(fn, pfx##33, sfx), \
+ PORT_1(fn, pfx##34, sfx), PORT_1(fn, pfx##35, sfx), \
+ PORT_1(fn, pfx##36, sfx), PORT_1(fn, pfx##37, sfx), \
+ PORT_1(fn, pfx##38, sfx), PORT_1(fn, pfx##39, sfx), \
+ PORT_1(fn, pfx##40, sfx), \
+ /* Port64 - Port85 */ \
+ PORT_1(fn, pfx##64, sfx), PORT_1(fn, pfx##65, sfx), \
+ PORT_1(fn, pfx##66, sfx), PORT_1(fn, pfx##67, sfx), \
+ PORT_1(fn, pfx##68, sfx), PORT_1(fn, pfx##69, sfx), \
+ PORT_10(fn, pfx##7, sfx), \
+ PORT_1(fn, pfx##80, sfx), PORT_1(fn, pfx##81, sfx), \
+ PORT_1(fn, pfx##82, sfx), PORT_1(fn, pfx##83, sfx), \
+ PORT_1(fn, pfx##84, sfx), PORT_1(fn, pfx##85, sfx), \
+ /* Port96 - Port126 */ \
+ PORT_1(fn, pfx##96, sfx), PORT_1(fn, pfx##97, sfx), \
+ PORT_1(fn, pfx##98, sfx), PORT_1(fn, pfx##99, sfx), \
+ PORT_10(fn, pfx##10, sfx), \
+ PORT_10(fn, pfx##11, sfx), \
+ PORT_1(fn, pfx##120, sfx), PORT_1(fn, pfx##121, sfx), \
+ PORT_1(fn, pfx##122, sfx), PORT_1(fn, pfx##123, sfx), \
+ PORT_1(fn, pfx##124, sfx), PORT_1(fn, pfx##125, sfx), \
+ PORT_1(fn, pfx##126, sfx), \
+ /* Port128 - Port134 */ \
+ PORT_1(fn, pfx##128, sfx), PORT_1(fn, pfx##129, sfx), \
+ PORT_1(fn, pfx##130, sfx), PORT_1(fn, pfx##131, sfx), \
+ PORT_1(fn, pfx##132, sfx), PORT_1(fn, pfx##133, sfx), \
+ PORT_1(fn, pfx##134, sfx), \
+ /* Port160 - Port178 */ \
+ PORT_10(fn, pfx##16, sfx), \
+ PORT_1(fn, pfx##170, sfx), PORT_1(fn, pfx##171, sfx), \
+ PORT_1(fn, pfx##172, sfx), PORT_1(fn, pfx##173, sfx), \
+ PORT_1(fn, pfx##174, sfx), PORT_1(fn, pfx##175, sfx), \
+ PORT_1(fn, pfx##176, sfx), PORT_1(fn, pfx##177, sfx), \
+ PORT_1(fn, pfx##178, sfx), \
+ /* Port192 - Port222 */ \
+ PORT_1(fn, pfx##192, sfx), PORT_1(fn, pfx##193, sfx), \
+ PORT_1(fn, pfx##194, sfx), PORT_1(fn, pfx##195, sfx), \
+ PORT_1(fn, pfx##196, sfx), PORT_1(fn, pfx##197, sfx), \
+ PORT_1(fn, pfx##198, sfx), PORT_1(fn, pfx##199, sfx), \
+ PORT_10(fn, pfx##20, sfx), \
+ PORT_10(fn, pfx##21, sfx), \
+ PORT_1(fn, pfx##220, sfx), PORT_1(fn, pfx##221, sfx), \
+ PORT_1(fn, pfx##222, sfx), \
+ /* Port224 - Port250 */ \
+ PORT_1(fn, pfx##224, sfx), PORT_1(fn, pfx##225, sfx), \
+ PORT_1(fn, pfx##226, sfx), PORT_1(fn, pfx##227, sfx), \
+ PORT_1(fn, pfx##228, sfx), PORT_1(fn, pfx##229, sfx), \
+ PORT_10(fn, pfx##23, sfx), \
+ PORT_10(fn, pfx##24, sfx), \
+ PORT_1(fn, pfx##250, sfx), \
+ /* Port256 - Port283 */ \
+ PORT_1(fn, pfx##256, sfx), PORT_1(fn, pfx##257, sfx), \
+ PORT_1(fn, pfx##258, sfx), PORT_1(fn, pfx##259, sfx), \
+ PORT_10(fn, pfx##26, sfx), \
+ PORT_10(fn, pfx##27, sfx), \
+ PORT_1(fn, pfx##280, sfx), PORT_1(fn, pfx##281, sfx), \
+ PORT_1(fn, pfx##282, sfx), PORT_1(fn, pfx##283, sfx), \
+ /* Port288 - Port308 */ \
+ PORT_1(fn, pfx##288, sfx), PORT_1(fn, pfx##289, sfx), \
+ PORT_10(fn, pfx##29, sfx), \
+ PORT_1(fn, pfx##300, sfx), PORT_1(fn, pfx##301, sfx), \
+ PORT_1(fn, pfx##302, sfx), PORT_1(fn, pfx##303, sfx), \
+ PORT_1(fn, pfx##304, sfx), PORT_1(fn, pfx##305, sfx), \
+ PORT_1(fn, pfx##306, sfx), PORT_1(fn, pfx##307, sfx), \
+ PORT_1(fn, pfx##308, sfx), \
+ /* Port320 - Port329 */ \
+ PORT_10(fn, pfx##32, sfx)
+
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ /* PORT0_DATA -> PORT329_DATA */
+ PINMUX_DATA_BEGIN,
+ PORT_ALL(DATA),
+ PINMUX_DATA_END,
+
+ /* PORT0_IN -> PORT329_IN */
+ PINMUX_INPUT_BEGIN,
+ PORT_ALL(IN),
+ PINMUX_INPUT_END,
+
+ /* PORT0_OUT -> PORT329_OUT */
+ PINMUX_OUTPUT_BEGIN,
+ PORT_ALL(OUT),
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PORT_ALL(FN_IN), /* PORT0_FN_IN -> PORT329_FN_IN */
+ PORT_ALL(FN_OUT), /* PORT0_FN_OUT -> PORT329_FN_OUT */
+ PORT_ALL(FN0), /* PORT0_FN0 -> PORT329_FN0 */
+ PORT_ALL(FN1), /* PORT0_FN1 -> PORT329_FN1 */
+ PORT_ALL(FN2), /* PORT0_FN2 -> PORT329_FN2 */
+ PORT_ALL(FN3), /* PORT0_FN3 -> PORT329_FN3 */
+ PORT_ALL(FN4), /* PORT0_FN4 -> PORT329_FN4 */
+ PORT_ALL(FN5), /* PORT0_FN5 -> PORT329_FN5 */
+ PORT_ALL(FN6), /* PORT0_FN6 -> PORT329_FN6 */
+ PORT_ALL(FN7), /* PORT0_FN7 -> PORT329_FN7 */
+
+ MSEL1CR_31_0, MSEL1CR_31_1,
+ MSEL1CR_27_0, MSEL1CR_27_1,
+ MSEL1CR_25_0, MSEL1CR_25_1,
+ MSEL1CR_24_0, MSEL1CR_24_1,
+ MSEL1CR_22_0, MSEL1CR_22_1,
+ MSEL1CR_21_0, MSEL1CR_21_1,
+ MSEL1CR_20_0, MSEL1CR_20_1,
+ MSEL1CR_19_0, MSEL1CR_19_1,
+ MSEL1CR_18_0, MSEL1CR_18_1,
+ MSEL1CR_17_0, MSEL1CR_17_1,
+ MSEL1CR_16_0, MSEL1CR_16_1,
+ MSEL1CR_15_0, MSEL1CR_15_1,
+ MSEL1CR_14_0, MSEL1CR_14_1,
+ MSEL1CR_13_0, MSEL1CR_13_1,
+ MSEL1CR_12_0, MSEL1CR_12_1,
+ MSEL1CR_11_0, MSEL1CR_11_1,
+ MSEL1CR_10_0, MSEL1CR_10_1,
+ MSEL1CR_09_0, MSEL1CR_09_1,
+ MSEL1CR_08_0, MSEL1CR_08_1,
+ MSEL1CR_07_0, MSEL1CR_07_1,
+ MSEL1CR_06_0, MSEL1CR_06_1,
+ MSEL1CR_05_0, MSEL1CR_05_1,
+ MSEL1CR_04_0, MSEL1CR_04_1,
+ MSEL1CR_03_0, MSEL1CR_03_1,
+ MSEL1CR_02_0, MSEL1CR_02_1,
+ MSEL1CR_01_0, MSEL1CR_01_1,
+ MSEL1CR_00_0, MSEL1CR_00_1,
+
+ MSEL3CR_31_0, MSEL3CR_31_1,
+ MSEL3CR_28_0, MSEL3CR_28_1,
+ MSEL3CR_27_0, MSEL3CR_27_1,
+ MSEL3CR_26_0, MSEL3CR_26_1,
+ MSEL3CR_23_0, MSEL3CR_23_1,
+ MSEL3CR_22_0, MSEL3CR_22_1,
+ MSEL3CR_21_0, MSEL3CR_21_1,
+ MSEL3CR_20_0, MSEL3CR_20_1,
+ MSEL3CR_19_0, MSEL3CR_19_1,
+ MSEL3CR_18_0, MSEL3CR_18_1,
+ MSEL3CR_17_0, MSEL3CR_17_1,
+ MSEL3CR_16_0, MSEL3CR_16_1,
+ MSEL3CR_15_0, MSEL3CR_15_1,
+ MSEL3CR_12_0, MSEL3CR_12_1,
+ MSEL3CR_11_0, MSEL3CR_11_1,
+ MSEL3CR_10_0, MSEL3CR_10_1,
+ MSEL3CR_09_0, MSEL3CR_09_1,
+ MSEL3CR_06_0, MSEL3CR_06_1,
+ MSEL3CR_03_0, MSEL3CR_03_1,
+ MSEL3CR_01_0, MSEL3CR_01_1,
+ MSEL3CR_00_0, MSEL3CR_00_1,
+
+ MSEL4CR_30_0, MSEL4CR_30_1,
+ MSEL4CR_29_0, MSEL4CR_29_1,
+ MSEL4CR_28_0, MSEL4CR_28_1,
+ MSEL4CR_27_0, MSEL4CR_27_1,
+ MSEL4CR_26_0, MSEL4CR_26_1,
+ MSEL4CR_25_0, MSEL4CR_25_1,
+ MSEL4CR_24_0, MSEL4CR_24_1,
+ MSEL4CR_23_0, MSEL4CR_23_1,
+ MSEL4CR_22_0, MSEL4CR_22_1,
+ MSEL4CR_21_0, MSEL4CR_21_1,
+ MSEL4CR_20_0, MSEL4CR_20_1,
+ MSEL4CR_19_0, MSEL4CR_19_1,
+ MSEL4CR_18_0, MSEL4CR_18_1,
+ MSEL4CR_17_0, MSEL4CR_17_1,
+ MSEL4CR_16_0, MSEL4CR_16_1,
+ MSEL4CR_15_0, MSEL4CR_15_1,
+ MSEL4CR_14_0, MSEL4CR_14_1,
+ MSEL4CR_13_0, MSEL4CR_13_1,
+ MSEL4CR_12_0, MSEL4CR_12_1,
+ MSEL4CR_11_0, MSEL4CR_11_1,
+ MSEL4CR_10_0, MSEL4CR_10_1,
+ MSEL4CR_09_0, MSEL4CR_09_1,
+ MSEL4CR_07_0, MSEL4CR_07_1,
+ MSEL4CR_04_0, MSEL4CR_04_1,
+ MSEL4CR_01_0, MSEL4CR_01_1,
+
+ MSEL5CR_31_0, MSEL5CR_31_1,
+ MSEL5CR_30_0, MSEL5CR_30_1,
+ MSEL5CR_29_0, MSEL5CR_29_1,
+ MSEL5CR_28_0, MSEL5CR_28_1,
+ MSEL5CR_27_0, MSEL5CR_27_1,
+ MSEL5CR_26_0, MSEL5CR_26_1,
+ MSEL5CR_25_0, MSEL5CR_25_1,
+ MSEL5CR_24_0, MSEL5CR_24_1,
+ MSEL5CR_23_0, MSEL5CR_23_1,
+ MSEL5CR_22_0, MSEL5CR_22_1,
+ MSEL5CR_21_0, MSEL5CR_21_1,
+ MSEL5CR_20_0, MSEL5CR_20_1,
+ MSEL5CR_19_0, MSEL5CR_19_1,
+ MSEL5CR_18_0, MSEL5CR_18_1,
+ MSEL5CR_17_0, MSEL5CR_17_1,
+ MSEL5CR_16_0, MSEL5CR_16_1,
+ MSEL5CR_15_0, MSEL5CR_15_1,
+ MSEL5CR_14_0, MSEL5CR_14_1,
+ MSEL5CR_13_0, MSEL5CR_13_1,
+ MSEL5CR_12_0, MSEL5CR_12_1,
+ MSEL5CR_11_0, MSEL5CR_11_1,
+ MSEL5CR_10_0, MSEL5CR_10_1,
+ MSEL5CR_09_0, MSEL5CR_09_1,
+ MSEL5CR_08_0, MSEL5CR_08_1,
+ MSEL5CR_07_0, MSEL5CR_07_1,
+ MSEL5CR_06_0, MSEL5CR_06_1,
+
+ MSEL8CR_16_0, MSEL8CR_16_1,
+ MSEL8CR_01_0, MSEL8CR_01_1,
+ MSEL8CR_00_0, MSEL8CR_00_1,
+
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+
+
+#define F1(a) a##_MARK
+#define F2(a) a##_MARK
+#define F3(a) a##_MARK
+#define F4(a) a##_MARK
+#define F5(a) a##_MARK
+#define F6(a) a##_MARK
+#define F7(a) a##_MARK
+#define IRQ(a) IRQ##a##_MARK
+
+ F1(LCDD0), F3(PDM2_CLK_0), F7(DU0_DR0), IRQ(0), /* Port0 */
+ F1(LCDD1), F3(PDM2_DATA_1), F7(DU0_DR19), IRQ(1),
+ F1(LCDD2), F3(PDM3_CLK_2), F7(DU0_DR2), IRQ(2),
+ F1(LCDD3), F3(PDM3_DATA_3), F7(DU0_DR3), IRQ(3),
+ F1(LCDD4), F3(PDM4_CLK_4), F7(DU0_DR4), IRQ(4),
+ F1(LCDD5), F3(PDM4_DATA_5), F7(DU0_DR5), IRQ(5),
+ F1(LCDD6), F3(PDM0_OUTCLK_6), F7(DU0_DR6), IRQ(6),
+ F1(LCDD7), F3(PDM0_OUTDATA_7), F7(DU0_DR7), IRQ(7),
+ F1(LCDD8), F3(PDM1_OUTCLK_8), F7(DU0_DG0), IRQ(8),
+ F1(LCDD9), F3(PDM1_OUTDATA_9), F7(DU0_DG1), IRQ(9),
+ F1(LCDD10), F3(FSICCK), F7(DU0_DG2), IRQ(10), /* Port10 */
+ F1(LCDD11), F3(FSICISLD), F7(DU0_DG3), IRQ(11),
+ F1(LCDD12), F3(FSICOMC), F7(DU0_DG4), IRQ(12),
+ F1(LCDD13), F3(FSICOLR), F4(FSICILR), F7(DU0_DG5), IRQ(13),
+ F1(LCDD14), F3(FSICOBT), F4(FSICIBT), F7(DU0_DG6), IRQ(14),
+ F1(LCDD15), F3(FSICOSLD), F7(DU0_DG7), IRQ(15),
+ F1(LCDD16), F4(TPU1TO1), F7(DU0_DB0),
+ F1(LCDD17), F4(SF_IRQ_00), F7(DU0_DB1),
+ F1(LCDD18), F4(SF_IRQ_01), F7(DU0_DB2),
+ F1(LCDD19), F3(SCIFB3_RTS_19), F7(DU0_DB3),
+ F1(LCDD20), F3(SCIFB3_CTS_20), F7(DU0_DB4), /* Port20 */
+ F1(LCDD21), F3(SCIFB3_TXD_21), F7(DU0_DB5),
+ F1(LCDD22), F3(SCIFB3_RXD_22), F7(DU0_DB6),
+ F1(LCDD23), F3(SCIFB3_SCK_23), F7(DU0_DB7),
+ F1(LCDHSYN), F2(LCDCS), F3(SCIFB1_RTS_24),
+ F7(DU0_EXHSYNC_N_CSYNC_N_HSYNC_N),
+ F1(LCDVSYN), F3(SCIFB1_CTS_25), F7(DU0_EXVSYNC_N_VSYNC_N_CSYNC_N),
+ F1(LCDDCK), F2(LCDWR), F3(SCIFB1_TXD_26), F7(DU0_DOTCLKIN),
+ F1(LCDDISP), F2(LCDRS), F3(SCIFB1_RXD_27), F7(DU0_DOTCLKOUT),
+ F1(LCDRD_N), F3(SCIFB1_SCK_28), F7(DU0_DOTCLKOUTB),
+ F1(LCDLCLK), F4(SF_IRQ_02), F7(DU0_DISP_CSYNC_N_DE),
+ F1(LCDDON), F4(SF_IRQ_03), F7(DU0_ODDF_N_CLAMP), /* Port30 */
+
+ F1(SCIFA0_RTS), F5(SIM0_DET), F7(CSCIF0_RTS), /* Port32 */
+ F1(SCIFA0_CTS), F5(SIM1_DET), F7(CSCIF0_CTS),
+ F1(SCIFA0_SCK), F5(SIM0_PWRON), F7(CSCIF0_SCK),
+ F1(SCIFA1_RTS), F7(CSCIF1_RTS),
+ F1(SCIFA1_CTS), F7(CSCIF1_CTS),
+ F1(SCIFA1_SCK), F7(CSCIF1_SCK),
+ F1(SCIFB0_RTS), F3(TPU0TO1), F4(SCIFB3_RTS_38), F7(CHSCIF0_HRTS),
+ F1(SCIFB0_CTS), F3(TPU0TO2), F4(SCIFB3_CTS_39), F7(CHSCIF0_HCTS),
+ F1(SCIFB0_SCK), F3(TPU0TO3), F4(SCIFB3_SCK_40),
+ F7(CHSCIF0_HSCK), /* Port40 */
+
+ F1(PDM0_DATA), /* Port64 */
+ F1(PDM1_DATA),
+ F1(HSI_RX_WAKE), F2(SCIFB2_CTS_66), F3(MSIOF3_SYNC), F5(GenIO4),
+ IRQ(40),
+ F1(HSI_RX_READY), F2(SCIFB1_TXD_67), F5(GIO_OUT3_67), F7(CHSCIF1_HTX),
+ F1(HSI_RX_FLAG), F2(SCIFB2_TXD_68), F3(MSIOF3_TXD), F5(GIO_OUT4_68),
+ F1(HSI_RX_DATA), F2(SCIFB2_RXD_69), F3(MSIOF3_RXD), F5(GIO_OUT5_69),
+ F1(HSI_TX_FLAG), F2(SCIFB1_RTS_70), F5(GIO_OUT1_70), F6(HSIC_TSTCLK0),
+ F7(CHSCIF1_HRTS), /* Port70 */
+ F1(HSI_TX_DATA), F2(SCIFB1_CTS_71), F5(GIO_OUT2_71), F6(HSIC_TSTCLK1),
+ F7(CHSCIF1_HCTS),
+ F1(HSI_TX_WAKE), F2(SCIFB1_RXD_72), F5(GenIO8), F7(CHSCIF1_HRX),
+ F1(HSI_TX_READY), F2(SCIFB2_RTS_73), F3(MSIOF3_SCK), F5(GIO_OUT0_73),
+ F1(IRDA_OUT), F1(IRDA_IN), F1(IRDA_FIRSEL), F1(TPU0TO0),
+ F1(DIGRFEN), F1(GPS_TIMESTAMP), F1(TXP), /* Port80 */
+ F1(TXP2), F1(COEX_0), F1(COEX_1), IRQ(19), IRQ(18), /* Port85 */
+
+ F1(KEYIN0), /* Port96 */
+ F1(KEYIN1), F1(KEYIN2), F1(KEYIN3), F1(KEYIN4), /* Port100 */
+ F1(KEYIN5), F1(KEYIN6), IRQ(41), F1(KEYIN7), IRQ(42),
+ F2(KEYOUT0), F2(KEYOUT1), F2(KEYOUT2), F2(KEYOUT3),
+ F2(KEYOUT4), F2(KEYOUT5), IRQ(43), F2(KEYOUT6), IRQ(44), /* Port110 */
+ F2(KEYOUT7), F5(RFANAEN), IRQ(45),
+ F1(KEYIN8), F2(KEYOUT8), F4(SF_IRQ_04), IRQ(46),
+ F1(KEYIN9), F2(KEYOUT9), F4(SF_IRQ_05), IRQ(47),
+ F1(KEYIN10), F2(KEYOUT10), F4(SF_IRQ_06), IRQ(48),
+ F1(KEYIN11), F2(KEYOUT11), F4(SF_IRQ_07), IRQ(49),
+ F1(SCIFA0_TXD), F7(CSCIF0_TX), F1(SCIFA0_RXD), F7(CSCIF0_RX),
+ F1(SCIFA1_TXD), F7(CSCIF1_TX), F1(SCIFA1_RXD), F7(CSCIF1_RX),
+ F3(SF_PORT_1_120), F4(SCIFB3_RXD_120), F7(DU0_CDE), /* Port120 */
+ F3(SF_PORT_0_121), F4(SCIFB3_TXD_121),
+ F1(SCIFB0_TXD), F7(CHSCIF0_HTX),
+ F1(SCIFB0_RXD), F7(CHSCIF0_HRX), F3(ISP_STROBE_124),
+ F1(STP_ISD_0), F2(PDM4_CLK_125), F3(MSIOF2_TXD), F5(SIM0_VOLTSEL0),
+ F1(TS_SDEN), F2(MSIOF7_SYNC), F3(STP_ISEN_1),
+ F1(STP_ISEN_0), F2(PDM1_OUTDATA_128), F3(MSIOF2_SYNC),
+ F5(SIM1_VOLTSEL1), F1(TS_SPSYNC), F2(MSIOF7_RXD), F3(STP_ISSYNC_1),
+ F1(STP_ISSYNC_0), F2(PDM4_DATA_130), F3(MSIOF2_RXD),
+ F5(SIM0_VOLTSEL1), /* Port130 */
+ F1(STP_OPWM_0), F5(SIM1_PWRON), F1(TS_SCK), F2(MSIOF7_SCK),
+ F3(STP_ISCLK_1), F1(STP_ISCLK_0), F2(PDM1_OUTCLK_133), F3(MSIOF2_SCK),
+ F5(SIM1_VOLTSEL0), F1(TS_SDAT), F2(MSIOF7_TXD), F3(STP_ISD_1),
+ IRQ(20), /* Port160 */
+ IRQ(21), IRQ(22), IRQ(23),
+ F1(MMCD0_0), F1(MMCD0_1), F1(MMCD0_2), F1(MMCD0_3),
+ F1(MMCD0_4), F1(MMCD0_5), F1(MMCD0_6), /* Port170 */
+ F1(MMCD0_7), F1(MMCCMD0), F1(MMCCLK0), F1(MMCRST),
+ IRQ(24), IRQ(25), IRQ(26), IRQ(27),
+ F1(A10), F2(MMCD1_7), IRQ(31), /* Port192 */
+ F1(A9), F2(MMCD1_6), IRQ(32),
+ F1(A8), F2(MMCD1_5), IRQ(33),
+ F1(A7), F2(MMCD1_4), IRQ(34),
+ F1(A6), F2(MMCD1_3), IRQ(35),
+ F1(A5), F2(MMCD1_2), IRQ(36),
+ F1(A4), F2(MMCD1_1), IRQ(37),
+ F1(A3), F2(MMCD1_0), IRQ(38),
+ F1(A2), F2(MMCCMD1), IRQ(39), /* Port200 */
+ F1(A1),
+ F1(A0), F2(BS),
+ F1(CKO), F2(MMCCLK1),
+ F1(CS0_N), F5(SIM0_GPO1),
+ F1(CS2_N), F5(SIM0_GPO2),
+ F1(CS4_N), F2(VIO_VD), F5(SIM1_GPO0),
+ F1(D15), F5(GIO_OUT15),
+ F1(D14), F5(GIO_OUT14),
+ F1(D13), F5(GIO_OUT13),
+ F1(D12), F5(GIO_OUT12), /* Port210 */
+ F1(D11), F5(WGM_TXP2),
+ F1(D10), F5(WGM_GPS_TIMEM_ASK_RFCLK),
+ F1(D9), F2(VIO_D9), F5(GIO_OUT9),
+ F1(D8), F2(VIO_D8), F5(GIO_OUT8),
+ F1(D7), F2(VIO_D7), F5(GIO_OUT7),
+ F1(D6), F2(VIO_D6), F5(GIO_OUT6),
+ F1(D5), F2(VIO_D5), F5(GIO_OUT5_217),
+ F1(D4), F2(VIO_D4), F5(GIO_OUT4_218),
+ F1(D3), F2(VIO_D3), F5(GIO_OUT3_219),
+ F1(D2), F2(VIO_D2), F5(GIO_OUT2_220), /* Port220 */
+ F1(D1), F2(VIO_D1), F5(GIO_OUT1_221),
+ F1(D0), F2(VIO_D0), F5(GIO_OUT0_222),
+ F1(RDWR_224), F2(VIO_HD), F5(SIM1_GPO2),
+ F1(RD_N), F1(WAIT_N), F2(VIO_CLK), F5(SIM1_GPO1),
+ F1(WE0_N), F2(RDWR_227),
+ F1(WE1_N), F5(SIM0_GPO0),
+ F1(PWMO), F2(VIO_CKO1_229),
+ F1(SLIM_CLK), F2(VIO_CKO4_230), /* Port230 */
+ F1(SLIM_DATA), F2(VIO_CKO5_231), F2(VIO_CKO2_232), F4(SF_PORT_0_232),
+ F2(VIO_CKO3_233), F4(SF_PORT_1_233),
+ F1(FSIACK), F2(PDM3_CLK_234), F3(ISP_IRIS1_234),
+ F1(FSIAISLD), F2(PDM3_DATA_235),
+ F1(FSIAOMC), F2(PDM0_OUTCLK_236), F3(ISP_IRIS0_236),
+ F1(FSIAOLR), F2(FSIAILR), F1(FSIAOBT), F2(FSIAIBT),
+ F1(FSIAOSLD), F2(PDM0_OUTDATA_239),
+ F1(FSIBISLD), /* Port240 */
+ F1(FSIBOLR), F2(FSIBILR), F1(FSIBOMC), F3(ISP_SHUTTER1_242),
+ F1(FSIBOBT), F2(FSIBIBT), F1(FSIBOSLD), F2(FSIASPDIF),
+ F1(FSIBCK), F3(ISP_SHUTTER0_245),
+ F1(ISP_IRIS1_246), F1(ISP_IRIS0_247), F1(ISP_SHUTTER1_248),
+ F1(ISP_SHUTTER0_249), F1(ISP_STROBE_250), /* Port250 */
+ F1(MSIOF0_SYNC), F1(MSIOF0_RXD), F1(MSIOF0_SCK), F1(MSIOF0_SS2),
+ F3(VIO_CKO3_259), F1(MSIOF0_TXD), /* Port260 */
+ F2(SCIFB1_SCK_261), F7(CHSCIF1_HSCK), F2(SCIFB2_SCK_262),
+ F1(MSIOF1_SS2), F4(MSIOF5_SS2), F1(MSIOF1_TXD), F4(MSIOF5_TXD),
+ F1(MSIOF1_RXD), F4(MSIOF5_RXD), F1(MSIOF1_SS1), F4(MSIOF5_SS1),
+ F1(MSIOF0_SS1), F1(MSIOF1_SCK), F4(MSIOF5_SCK),
+ F1(MSIOF1_SYNC), F4(MSIOF5_SYNC),
+ F1(MSIOF2_SS1), F3(VIO_CKO5_270), /* Port270 */
+ F1(MSIOF2_SS2), F3(VIO_CKO2_271), F1(MSIOF3_SS2), F3(VIO_CKO1_272),
+ F1(MSIOF3_SS1), F3(VIO_CKO4_273), F1(MSIOF4_SS2), F4(TPU1TO0),
+ F1(IC_DP), F1(SIM0_RST), F1(IC_DM), F1(SIM0_BSICOMP),
+ F1(SIM0_CLK), F1(SIM0_IO), /* Port280 */
+ F1(SIM1_IO), F2(PDM2_DATA_281), F1(SIM1_CLK), F2(PDM2_CLK_282),
+ F1(SIM1_RST), F1(SDHID1_0), F3(STMDATA0_2),
+ F1(SDHID1_1), F3(STMDATA1_2), IRQ(51), /* Port290 */
+ F1(SDHID1_2), F3(STMDATA2_2), F1(SDHID1_3), F3(STMDATA3_2),
+ F1(SDHICLK1), F3(STMCLK_2), F1(SDHICMD1), F3(STMSIDI_2),
+ F1(SDHID2_0), F2(MSIOF4_TXD), F3(SCIFB2_TXD_295), F4(MSIOF6_TXD),
+ F1(SDHID2_1), F4(MSIOF6_SS2), IRQ(52),
+ F1(SDHID2_2), F2(MSIOF4_RXD), F3(SCIFB2_RXD_297), F4(MSIOF6_RXD),
+ F1(SDHID2_3), F2(MSIOF4_SYNC), F3(SCIFB2_CTS_298), F4(MSIOF6_SYNC),
+ F1(SDHICLK2), F2(MSIOF4_SCK), F3(SCIFB2_SCK_299), F4(MSIOF6_SCK),
+ F1(SDHICMD2), F2(MSIOF4_SS1), F3(SCIFB2_RTS_300),
+ F4(MSIOF6_SS1), /* Port300 */
+ F1(SDHICD0), IRQ(50), F1(SDHID0_0), F3(STMDATA0_1),
+ F1(SDHID0_1), F3(STMDATA1_1), F1(SDHID0_2), F3(STMDATA2_1),
+ F1(SDHID0_3), F3(STMDATA3_1), F1(SDHICMD0), F3(STMSIDI_1),
+ F1(SDHIWP0), F1(SDHICLK0), F3(STMCLK_1), IRQ(16), /* Port320 */
+ IRQ(17), IRQ(28), IRQ(29), IRQ(30), IRQ(53), IRQ(54),
+ IRQ(55), IRQ(56), IRQ(57),
+ PINMUX_MARK_END,
+};
+
+#define _PORT_DATA(pfx, sfx) PORT_DATA_IO(pfx)
+#define PINMUX_DATA_ALL() CPU_ALL_PORT(_PORT_DATA, , unused)
+
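+/*
+ * Each PINMUX_DATA() entry below ties a function mark to the function
+ * select value written to that port's PORTnCR register; entries that
+ * carry an extra MSELnCR argument also require that module-select bit.
+ */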
+static const pinmux_enum_t pinmux_data[] = {
+ /* specify valid pin states for each pin in GPIO mode */
+ PINMUX_DATA_ALL(),
+
+ /* Port0 */
+ PINMUX_DATA(LCDD0_MARK, PORT0_FN1),
+ PINMUX_DATA(PDM2_CLK_0_MARK, PORT0_FN3),
+ PINMUX_DATA(DU0_DR0_MARK, PORT0_FN7),
+ PINMUX_DATA(IRQ0_MARK, PORT0_FN0),
+
+ /* Port1 */
+ PINMUX_DATA(LCDD1_MARK, PORT1_FN1),
+ PINMUX_DATA(PDM2_DATA_1_MARK, PORT1_FN3, MSEL3CR_12_0),
+ PINMUX_DATA(DU0_DR19_MARK, PORT1_FN7),
+ PINMUX_DATA(IRQ1_MARK, PORT1_FN0),
+
+ /* Port2 */
+ PINMUX_DATA(LCDD2_MARK, PORT2_FN1),
+ PINMUX_DATA(PDM3_CLK_2_MARK, PORT2_FN3),
+ PINMUX_DATA(DU0_DR2_MARK, PORT2_FN7),
+ PINMUX_DATA(IRQ2_MARK, PORT2_FN0),
+
+ /* Port3 */
+ PINMUX_DATA(LCDD3_MARK, PORT3_FN1),
+ PINMUX_DATA(PDM3_DATA_3_MARK, PORT3_FN3, MSEL3CR_12_0),
+ PINMUX_DATA(DU0_DR3_MARK, PORT3_FN7),
+ PINMUX_DATA(IRQ3_MARK, PORT3_FN0),
+
+ /* Port4 */
+ PINMUX_DATA(LCDD4_MARK, PORT4_FN1),
+ PINMUX_DATA(PDM4_CLK_4_MARK, PORT4_FN3),
+ PINMUX_DATA(DU0_DR4_MARK, PORT4_FN7),
+ PINMUX_DATA(IRQ4_MARK, PORT4_FN0),
+
+ /* Port5 */
+ PINMUX_DATA(LCDD5_MARK, PORT5_FN1),
+ PINMUX_DATA(PDM4_DATA_5_MARK, PORT5_FN3, MSEL3CR_12_0),
+ PINMUX_DATA(DU0_DR5_MARK, PORT5_FN7),
+ PINMUX_DATA(IRQ5_MARK, PORT5_FN0),
+
+ /* Port6 */
+ PINMUX_DATA(LCDD6_MARK, PORT6_FN1),
+ PINMUX_DATA(PDM0_OUTCLK_6_MARK, PORT6_FN3),
+ PINMUX_DATA(DU0_DR6_MARK, PORT6_FN7),
+ PINMUX_DATA(IRQ6_MARK, PORT6_FN0),
+
+ /* Port7 */
+ PINMUX_DATA(LCDD7_MARK, PORT7_FN1),
+ PINMUX_DATA(PDM0_OUTDATA_7_MARK, PORT7_FN3),
+ PINMUX_DATA(DU0_DR7_MARK, PORT7_FN7),
+ PINMUX_DATA(IRQ7_MARK, PORT7_FN0),
+
+ /* Port8 */
+ PINMUX_DATA(LCDD8_MARK, PORT8_FN1),
+ PINMUX_DATA(PDM1_OUTCLK_8_MARK, PORT8_FN3),
+ PINMUX_DATA(DU0_DG0_MARK, PORT8_FN7),
+ PINMUX_DATA(IRQ8_MARK, PORT8_FN0),
+
+ /* Port9 */
+ PINMUX_DATA(LCDD9_MARK, PORT9_FN1),
+ PINMUX_DATA(PDM1_OUTDATA_9_MARK, PORT9_FN3),
+ PINMUX_DATA(DU0_DG1_MARK, PORT9_FN7),
+ PINMUX_DATA(IRQ9_MARK, PORT9_FN0),
+
+ /* Port10 */
+ PINMUX_DATA(LCDD10_MARK, PORT10_FN1),
+ PINMUX_DATA(FSICCK_MARK, PORT10_FN3),
+ PINMUX_DATA(DU0_DG2_MARK, PORT10_FN7),
+ PINMUX_DATA(IRQ10_MARK, PORT10_FN0),
+
+ /* Port11 */
+ PINMUX_DATA(LCDD11_MARK, PORT11_FN1),
+ PINMUX_DATA(FSICISLD_MARK, PORT11_FN3),
+ PINMUX_DATA(DU0_DG3_MARK, PORT11_FN7),
+ PINMUX_DATA(IRQ11_MARK, PORT11_FN0),
+
+ /* Port12 */
+ PINMUX_DATA(LCDD12_MARK, PORT12_FN1),
+ PINMUX_DATA(FSICOMC_MARK, PORT12_FN3),
+ PINMUX_DATA(DU0_DG4_MARK, PORT12_FN7),
+ PINMUX_DATA(IRQ12_MARK, PORT12_FN0),
+
+ /* Port13 */
+ PINMUX_DATA(LCDD13_MARK, PORT13_FN1),
+ PINMUX_DATA(FSICOLR_MARK, PORT13_FN3),
+ PINMUX_DATA(FSICILR_MARK, PORT13_FN4),
+ PINMUX_DATA(DU0_DG5_MARK, PORT13_FN7),
+ PINMUX_DATA(IRQ13_MARK, PORT13_FN0),
+
+ /* Port14 */
+ PINMUX_DATA(LCDD14_MARK, PORT14_FN1),
+ PINMUX_DATA(FSICOBT_MARK, PORT14_FN3),
+ PINMUX_DATA(FSICIBT_MARK, PORT14_FN4),
+ PINMUX_DATA(DU0_DG6_MARK, PORT14_FN7),
+ PINMUX_DATA(IRQ14_MARK, PORT14_FN0),
+
+ /* Port15 */
+ PINMUX_DATA(LCDD15_MARK, PORT15_FN1),
+ PINMUX_DATA(FSICOSLD_MARK, PORT15_FN3),
+ PINMUX_DATA(DU0_DG7_MARK, PORT15_FN7),
+ PINMUX_DATA(IRQ15_MARK, PORT15_FN0),
+
+ /* Port16 */
+ PINMUX_DATA(LCDD16_MARK, PORT16_FN1),
+ PINMUX_DATA(TPU1TO1_MARK, PORT16_FN4),
+ PINMUX_DATA(DU0_DB0_MARK, PORT16_FN7),
+
+ /* Port17 */
+ PINMUX_DATA(LCDD17_MARK, PORT17_FN1),
+ PINMUX_DATA(SF_IRQ_00_MARK, PORT17_FN4),
+ PINMUX_DATA(DU0_DB1_MARK, PORT17_FN7),
+
+ /* Port18 */
+ PINMUX_DATA(LCDD18_MARK, PORT18_FN1),
+ PINMUX_DATA(SF_IRQ_01_MARK, PORT18_FN4),
+ PINMUX_DATA(DU0_DB2_MARK, PORT18_FN7),
+
+ /* Port19 */
+ PINMUX_DATA(LCDD19_MARK, PORT19_FN1),
+ PINMUX_DATA(SCIFB3_RTS_19_MARK, PORT19_FN3),
+ PINMUX_DATA(DU0_DB3_MARK, PORT19_FN7),
+
+ /* Port20 */
+ PINMUX_DATA(LCDD20_MARK, PORT20_FN1),
+ PINMUX_DATA(SCIFB3_CTS_20_MARK, PORT20_FN3, MSEL3CR_09_0),
+ PINMUX_DATA(DU0_DB4_MARK, PORT20_FN7),
+
+ /* Port21 */
+ PINMUX_DATA(LCDD21_MARK, PORT21_FN1),
+ PINMUX_DATA(SCIFB3_TXD_21_MARK, PORT21_FN3, MSEL3CR_09_0),
+ PINMUX_DATA(DU0_DB5_MARK, PORT21_FN7),
+
+ /* Port22 */
+ PINMUX_DATA(LCDD22_MARK, PORT22_FN1),
+ PINMUX_DATA(SCIFB3_RXD_22_MARK, PORT22_FN3, MSEL3CR_09_0),
+ PINMUX_DATA(DU0_DB6_MARK, PORT22_FN7),
+
+ /* Port23 */
+ PINMUX_DATA(LCDD23_MARK, PORT23_FN1),
+ PINMUX_DATA(SCIFB3_SCK_23_MARK, PORT23_FN3),
+ PINMUX_DATA(DU0_DB7_MARK, PORT23_FN7),
+
+ /* Port24 */
+ PINMUX_DATA(LCDHSYN_MARK, PORT24_FN1),
+ PINMUX_DATA(LCDCS_MARK, PORT24_FN2),
+ PINMUX_DATA(SCIFB1_RTS_24_MARK, PORT24_FN3),
+ PINMUX_DATA(DU0_EXHSYNC_N_CSYNC_N_HSYNC_N_MARK, PORT24_FN7),
+
+ /* Port25 */
+ PINMUX_DATA(LCDVSYN_MARK, PORT25_FN1),
+ PINMUX_DATA(SCIFB1_CTS_25_MARK, PORT25_FN3, MSEL3CR_11_0),
+ PINMUX_DATA(DU0_EXVSYNC_N_VSYNC_N_CSYNC_N_MARK, PORT25_FN7),
+
+ /* Port26 */
+ PINMUX_DATA(LCDDCK_MARK, PORT26_FN1),
+ PINMUX_DATA(LCDWR_MARK, PORT26_FN2),
+ PINMUX_DATA(SCIFB1_TXD_26_MARK, PORT26_FN3, MSEL3CR_11_0),
+ PINMUX_DATA(DU0_DOTCLKIN_MARK, PORT26_FN7),
+
+ /* Port27 */
+ PINMUX_DATA(LCDDISP_MARK, PORT27_FN1),
+ PINMUX_DATA(LCDRS_MARK, PORT27_FN2),
+ PINMUX_DATA(SCIFB1_RXD_27_MARK, PORT27_FN3, MSEL3CR_11_0),
+ PINMUX_DATA(DU0_DOTCLKOUT_MARK, PORT27_FN7),
+
+ /* Port28 */
+ PINMUX_DATA(LCDRD_N_MARK, PORT28_FN1),
+ PINMUX_DATA(SCIFB1_SCK_28_MARK, PORT28_FN3),
+ PINMUX_DATA(DU0_DOTCLKOUTB_MARK, PORT28_FN7),
+
+ /* Port29 */
+ PINMUX_DATA(LCDLCLK_MARK, PORT29_FN1),
+ PINMUX_DATA(SF_IRQ_02_MARK, PORT29_FN4),
+ PINMUX_DATA(DU0_DISP_CSYNC_N_DE_MARK, PORT29_FN7),
+
+ /* Port30 */
+ PINMUX_DATA(LCDDON_MARK, PORT30_FN1),
+ PINMUX_DATA(SF_IRQ_03_MARK, PORT30_FN4),
+ PINMUX_DATA(DU0_ODDF_N_CLAMP_MARK, PORT30_FN7),
+
+ /* Port32 */
+ PINMUX_DATA(SCIFA0_RTS_MARK, PORT32_FN1),
+ PINMUX_DATA(SIM0_DET_MARK, PORT32_FN5),
+ PINMUX_DATA(CSCIF0_RTS_MARK, PORT32_FN7),
+
+ /* Port33 */
+ PINMUX_DATA(SCIFA0_CTS_MARK, PORT33_FN1),
+ PINMUX_DATA(SIM1_DET_MARK, PORT33_FN5),
+ PINMUX_DATA(CSCIF0_CTS_MARK, PORT33_FN7),
+
+ /* Port34 */
+ PINMUX_DATA(SCIFA0_SCK_MARK, PORT34_FN1),
+ PINMUX_DATA(SIM0_PWRON_MARK, PORT34_FN5),
+ PINMUX_DATA(CSCIF0_SCK_MARK, PORT34_FN7),
+
+ /* Port35 */
+ PINMUX_DATA(SCIFA1_RTS_MARK, PORT35_FN1),
+ PINMUX_DATA(CSCIF1_RTS_MARK, PORT35_FN7),
+
+ /* Port36 */
+ PINMUX_DATA(SCIFA1_CTS_MARK, PORT36_FN1),
+ PINMUX_DATA(CSCIF1_CTS_MARK, PORT36_FN7),
+
+ /* Port37 */
+ PINMUX_DATA(SCIFA1_SCK_MARK, PORT37_FN1),
+ PINMUX_DATA(CSCIF1_SCK_MARK, PORT37_FN7),
+
+ /* Port38 */
+ PINMUX_DATA(SCIFB0_RTS_MARK, PORT38_FN1),
+ PINMUX_DATA(TPU0TO1_MARK, PORT38_FN3),
+ PINMUX_DATA(SCIFB3_RTS_38_MARK, PORT38_FN4),
+ PINMUX_DATA(CHSCIF0_HRTS_MARK, PORT38_FN7),
+
+ /* Port39 */
+ PINMUX_DATA(SCIFB0_CTS_MARK, PORT39_FN1),
+ PINMUX_DATA(TPU0TO2_MARK, PORT39_FN3),
+ PINMUX_DATA(SCIFB3_CTS_39_MARK, PORT39_FN4, MSEL3CR_09_1),
+ PINMUX_DATA(CHSCIF0_HCTS_MARK, PORT39_FN7),
+
+ /* Port40 */
+ PINMUX_DATA(SCIFB0_SCK_MARK, PORT40_FN1),
+ PINMUX_DATA(TPU0TO3_MARK, PORT40_FN3),
+ PINMUX_DATA(SCIFB3_SCK_40_MARK, PORT40_FN4),
+ PINMUX_DATA(CHSCIF0_HSCK_MARK, PORT40_FN7),
+
+ /* Port64 */
+ PINMUX_DATA(PDM0_DATA_MARK, PORT64_FN1),
+
+ /* Port65 */
+ PINMUX_DATA(PDM1_DATA_MARK, PORT65_FN1),
+
+ /* Port66 */
+ PINMUX_DATA(HSI_RX_WAKE_MARK, PORT66_FN1),
+ PINMUX_DATA(SCIFB2_CTS_66_MARK, PORT66_FN2, MSEL3CR_10_0),
+ PINMUX_DATA(MSIOF3_SYNC_MARK, PORT66_FN3),
+ PINMUX_DATA(GenIO4_MARK, PORT66_FN5),
+ PINMUX_DATA(IRQ40_MARK, PORT66_FN0),
+
+ /* Port67 */
+ PINMUX_DATA(HSI_RX_READY_MARK, PORT67_FN1),
+ PINMUX_DATA(SCIFB1_TXD_67_MARK, PORT67_FN2, MSEL3CR_11_1),
+ PINMUX_DATA(GIO_OUT3_67_MARK, PORT67_FN5),
+ PINMUX_DATA(CHSCIF1_HTX_MARK, PORT67_FN7),
+
+ /* Port68 */
+ PINMUX_DATA(HSI_RX_FLAG_MARK, PORT68_FN1),
+ PINMUX_DATA(SCIFB2_TXD_68_MARK, PORT68_FN2, MSEL3CR_10_0),
+ PINMUX_DATA(MSIOF3_TXD_MARK, PORT68_FN3),
+ PINMUX_DATA(GIO_OUT4_68_MARK, PORT68_FN5),
+
+ /* Port69 */
+ PINMUX_DATA(HSI_RX_DATA_MARK, PORT69_FN1),
+ PINMUX_DATA(SCIFB2_RXD_69_MARK, PORT69_FN2, MSEL3CR_10_0),
+ PINMUX_DATA(MSIOF3_RXD_MARK, PORT69_FN3),
+ PINMUX_DATA(GIO_OUT5_69_MARK, PORT69_FN5),
+
+ /* Port70 */
+ PINMUX_DATA(HSI_TX_FLAG_MARK, PORT70_FN1),
+ PINMUX_DATA(SCIFB1_RTS_70_MARK, PORT70_FN2),
+ PINMUX_DATA(GIO_OUT1_70_MARK, PORT70_FN5),
+ PINMUX_DATA(HSIC_TSTCLK0_MARK, PORT70_FN6),
+ PINMUX_DATA(CHSCIF1_HRTS_MARK, PORT70_FN7),
+
+ /* Port71 */
+ PINMUX_DATA(HSI_TX_DATA_MARK, PORT71_FN1),
+ PINMUX_DATA(SCIFB1_CTS_71_MARK, PORT71_FN2, MSEL3CR_11_1),
+ PINMUX_DATA(GIO_OUT2_71_MARK, PORT71_FN5),
+ PINMUX_DATA(HSIC_TSTCLK1_MARK, PORT71_FN6),
+ PINMUX_DATA(CHSCIF1_HCTS_MARK, PORT71_FN7),
+
+ /* Port72 */
+ PINMUX_DATA(HSI_TX_WAKE_MARK, PORT72_FN1),
+ PINMUX_DATA(SCIFB1_RXD_72_MARK, PORT72_FN2, MSEL3CR_11_1),
+ PINMUX_DATA(GenIO8_MARK, PORT72_FN5),
+ PINMUX_DATA(CHSCIF1_HRX_MARK, PORT72_FN7),
+
+ /* Port73 */
+ PINMUX_DATA(HSI_TX_READY_MARK, PORT73_FN1),
+ PINMUX_DATA(SCIFB2_RTS_73_MARK, PORT73_FN2),
+ PINMUX_DATA(MSIOF3_SCK_MARK, PORT73_FN3),
+ PINMUX_DATA(GIO_OUT0_73_MARK, PORT73_FN5),
+
+ /* Port74 - Port85 */
+ PINMUX_DATA(IRDA_OUT_MARK, PORT74_FN1),
+ PINMUX_DATA(IRDA_IN_MARK, PORT75_FN1),
+ PINMUX_DATA(IRDA_FIRSEL_MARK, PORT76_FN1),
+ PINMUX_DATA(TPU0TO0_MARK, PORT77_FN1),
+ PINMUX_DATA(DIGRFEN_MARK, PORT78_FN1),
+ PINMUX_DATA(GPS_TIMESTAMP_MARK, PORT79_FN1),
+ PINMUX_DATA(TXP_MARK, PORT80_FN1),
+ PINMUX_DATA(TXP2_MARK, PORT81_FN1),
+ PINMUX_DATA(COEX_0_MARK, PORT82_FN1),
+ PINMUX_DATA(COEX_1_MARK, PORT83_FN1),
+ PINMUX_DATA(IRQ19_MARK, PORT84_FN0),
+ PINMUX_DATA(IRQ18_MARK, PORT85_FN0),
+
+ /* Port96 - Port101 */
+ PINMUX_DATA(KEYIN0_MARK, PORT96_FN1),
+ PINMUX_DATA(KEYIN1_MARK, PORT97_FN1),
+ PINMUX_DATA(KEYIN2_MARK, PORT98_FN1),
+ PINMUX_DATA(KEYIN3_MARK, PORT99_FN1),
+ PINMUX_DATA(KEYIN4_MARK, PORT100_FN1),
+ PINMUX_DATA(KEYIN5_MARK, PORT101_FN1),
+
+ /* Port102 */
+ PINMUX_DATA(KEYIN6_MARK, PORT102_FN1),
+ PINMUX_DATA(IRQ41_MARK, PORT102_FN0),
+
+ /* Port103 */
+ PINMUX_DATA(KEYIN7_MARK, PORT103_FN1),
+ PINMUX_DATA(IRQ42_MARK, PORT103_FN0),
+
+ /* Port104 - Port108 */
+ PINMUX_DATA(KEYOUT0_MARK, PORT104_FN2),
+ PINMUX_DATA(KEYOUT1_MARK, PORT105_FN2),
+ PINMUX_DATA(KEYOUT2_MARK, PORT106_FN2),
+ PINMUX_DATA(KEYOUT3_MARK, PORT107_FN2),
+ PINMUX_DATA(KEYOUT4_MARK, PORT108_FN2),
+
+ /* Port109 */
+ PINMUX_DATA(KEYOUT5_MARK, PORT109_FN2),
+ PINMUX_DATA(IRQ43_MARK, PORT109_FN0),
+
+ /* Port110 */
+ PINMUX_DATA(KEYOUT6_MARK, PORT110_FN2),
+ PINMUX_DATA(IRQ44_MARK, PORT110_FN0),
+
+ /* Port111 */
+ PINMUX_DATA(KEYOUT7_MARK, PORT111_FN2),
+ PINMUX_DATA(RFANAEN_MARK, PORT111_FN5),
+ PINMUX_DATA(IRQ45_MARK, PORT111_FN0),
+
+ /* Port112 */
+ PINMUX_DATA(KEYIN8_MARK, PORT112_FN1),
+ PINMUX_DATA(KEYOUT8_MARK, PORT112_FN2),
+ PINMUX_DATA(SF_IRQ_04_MARK, PORT112_FN4),
+ PINMUX_DATA(IRQ46_MARK, PORT112_FN0),
+
+ /* Port113 */
+ PINMUX_DATA(KEYIN9_MARK, PORT113_FN1),
+ PINMUX_DATA(KEYOUT9_MARK, PORT113_FN2),
+ PINMUX_DATA(SF_IRQ_05_MARK, PORT113_FN4),
+ PINMUX_DATA(IRQ47_MARK, PORT113_FN0),
+
+ /* Port114 */
+ PINMUX_DATA(KEYIN10_MARK, PORT114_FN1),
+ PINMUX_DATA(KEYOUT10_MARK, PORT114_FN2),
+ PINMUX_DATA(SF_IRQ_06_MARK, PORT114_FN4),
+ PINMUX_DATA(IRQ48_MARK, PORT114_FN0),
+
+ /* Port115 */
+ PINMUX_DATA(KEYIN11_MARK, PORT115_FN1),
+ PINMUX_DATA(KEYOUT11_MARK, PORT115_FN2),
+ PINMUX_DATA(SF_IRQ_07_MARK, PORT115_FN4),
+ PINMUX_DATA(IRQ49_MARK, PORT115_FN0),
+
+ /* Port116 */
+ PINMUX_DATA(SCIFA0_TXD_MARK, PORT116_FN1),
+ PINMUX_DATA(CSCIF0_TX_MARK, PORT116_FN7),
+
+ /* Port117 */
+ PINMUX_DATA(SCIFA0_RXD_MARK, PORT117_FN1),
+ PINMUX_DATA(CSCIF0_RX_MARK, PORT117_FN7),
+
+ /* Port118 */
+ PINMUX_DATA(SCIFA1_TXD_MARK, PORT118_FN1),
+ PINMUX_DATA(CSCIF1_TX_MARK, PORT118_FN7),
+
+ /* Port119 */
+ PINMUX_DATA(SCIFA1_RXD_MARK, PORT119_FN1),
+ PINMUX_DATA(CSCIF1_RX_MARK, PORT119_FN7),
+
+ /* Port120 */
+ PINMUX_DATA(SF_PORT_1_120_MARK, PORT120_FN3),
+ PINMUX_DATA(SCIFB3_RXD_120_MARK, PORT120_FN4, MSEL3CR_09_1),
+ PINMUX_DATA(DU0_CDE_MARK, PORT120_FN7),
+
+ /* Port121 */
+ PINMUX_DATA(SF_PORT_0_121_MARK, PORT121_FN3),
+ PINMUX_DATA(SCIFB3_TXD_121_MARK, PORT121_FN4, MSEL3CR_09_1),
+
+ /* Port122 */
+ PINMUX_DATA(SCIFB0_TXD_MARK, PORT122_FN1),
+ PINMUX_DATA(CHSCIF0_HTX_MARK, PORT122_FN7),
+
+ /* Port123 */
+ PINMUX_DATA(SCIFB0_RXD_MARK, PORT123_FN1),
+ PINMUX_DATA(CHSCIF0_HRX_MARK, PORT123_FN7),
+
+ /* Port124 */
+ PINMUX_DATA(ISP_STROBE_124_MARK, PORT124_FN3),
+
+ /* Port125 */
+ PINMUX_DATA(STP_ISD_0_MARK, PORT125_FN1),
+ PINMUX_DATA(PDM4_CLK_125_MARK, PORT125_FN2),
+ PINMUX_DATA(MSIOF2_TXD_MARK, PORT125_FN3),
+ PINMUX_DATA(SIM0_VOLTSEL0_MARK, PORT125_FN5),
+
+ /* Port126 */
+ PINMUX_DATA(TS_SDEN_MARK, PORT126_FN1),
+ PINMUX_DATA(MSIOF7_SYNC_MARK, PORT126_FN2),
+ PINMUX_DATA(STP_ISEN_1_MARK, PORT126_FN3),
+
+ /* Port128 */
+ PINMUX_DATA(STP_ISEN_0_MARK, PORT128_FN1),
+ PINMUX_DATA(PDM1_OUTDATA_128_MARK, PORT128_FN2),
+ PINMUX_DATA(MSIOF2_SYNC_MARK, PORT128_FN3),
+ PINMUX_DATA(SIM1_VOLTSEL1_MARK, PORT128_FN5),
+
+ /* Port129 */
+ PINMUX_DATA(TS_SPSYNC_MARK, PORT129_FN1),
+ PINMUX_DATA(MSIOF7_RXD_MARK, PORT129_FN2),
+ PINMUX_DATA(STP_ISSYNC_1_MARK, PORT129_FN3),
+
+ /* Port130 */
+ PINMUX_DATA(STP_ISSYNC_0_MARK, PORT130_FN1),
+ PINMUX_DATA(PDM4_DATA_130_MARK, PORT130_FN2, MSEL3CR_12_1),
+ PINMUX_DATA(MSIOF2_RXD_MARK, PORT130_FN3),
+ PINMUX_DATA(SIM0_VOLTSEL1_MARK, PORT130_FN5),
+
+ /* Port131 */
+ PINMUX_DATA(STP_OPWM_0_MARK, PORT131_FN1),
+ PINMUX_DATA(SIM1_PWRON_MARK, PORT131_FN5),
+
+ /* Port132 */
+ PINMUX_DATA(TS_SCK_MARK, PORT132_FN1),
+ PINMUX_DATA(MSIOF7_SCK_MARK, PORT132_FN2),
+ PINMUX_DATA(STP_ISCLK_1_MARK, PORT132_FN3),
+
+ /* Port133 */
+ PINMUX_DATA(STP_ISCLK_0_MARK, PORT133_FN1),
+ PINMUX_DATA(PDM1_OUTCLK_133_MARK, PORT133_FN2),
+ PINMUX_DATA(MSIOF2_SCK_MARK, PORT133_FN3),
+ PINMUX_DATA(SIM1_VOLTSEL0_MARK, PORT133_FN5),
+
+ /* Port134 */
+ PINMUX_DATA(TS_SDAT_MARK, PORT134_FN1),
+ PINMUX_DATA(MSIOF7_TXD_MARK, PORT134_FN2),
+ PINMUX_DATA(STP_ISD_1_MARK, PORT134_FN3),
+
+ /* Port160 - Port178 */
+ PINMUX_DATA(IRQ20_MARK, PORT160_FN0),
+ PINMUX_DATA(IRQ21_MARK, PORT161_FN0),
+ PINMUX_DATA(IRQ22_MARK, PORT162_FN0),
+ PINMUX_DATA(IRQ23_MARK, PORT163_FN0),
+ PINMUX_DATA(MMCD0_0_MARK, PORT164_FN1),
+ PINMUX_DATA(MMCD0_1_MARK, PORT165_FN1),
+ PINMUX_DATA(MMCD0_2_MARK, PORT166_FN1),
+ PINMUX_DATA(MMCD0_3_MARK, PORT167_FN1),
+ PINMUX_DATA(MMCD0_4_MARK, PORT168_FN1),
+ PINMUX_DATA(MMCD0_5_MARK, PORT169_FN1),
+ PINMUX_DATA(MMCD0_6_MARK, PORT170_FN1),
+ PINMUX_DATA(MMCD0_7_MARK, PORT171_FN1),
+ PINMUX_DATA(MMCCMD0_MARK, PORT172_FN1),
+ PINMUX_DATA(MMCCLK0_MARK, PORT173_FN1),
+ PINMUX_DATA(MMCRST_MARK, PORT174_FN1),
+ PINMUX_DATA(IRQ24_MARK, PORT175_FN0),
+ PINMUX_DATA(IRQ25_MARK, PORT176_FN0),
+ PINMUX_DATA(IRQ26_MARK, PORT177_FN0),
+ PINMUX_DATA(IRQ27_MARK, PORT178_FN0),
+
+ /* Port192 - Port200 FN1 */
+ PINMUX_DATA(A10_MARK, PORT192_FN1),
+ PINMUX_DATA(A9_MARK, PORT193_FN1),
+ PINMUX_DATA(A8_MARK, PORT194_FN1),
+ PINMUX_DATA(A7_MARK, PORT195_FN1),
+ PINMUX_DATA(A6_MARK, PORT196_FN1),
+ PINMUX_DATA(A5_MARK, PORT197_FN1),
+ PINMUX_DATA(A4_MARK, PORT198_FN1),
+ PINMUX_DATA(A3_MARK, PORT199_FN1),
+ PINMUX_DATA(A2_MARK, PORT200_FN1),
+
+ /* Port192 - Port200 FN2 */
+ PINMUX_DATA(MMCD1_7_MARK, PORT192_FN2),
+ PINMUX_DATA(MMCD1_6_MARK, PORT193_FN2),
+ PINMUX_DATA(MMCD1_5_MARK, PORT194_FN2),
+ PINMUX_DATA(MMCD1_4_MARK, PORT195_FN2),
+ PINMUX_DATA(MMCD1_3_MARK, PORT196_FN2),
+ PINMUX_DATA(MMCD1_2_MARK, PORT197_FN2),
+ PINMUX_DATA(MMCD1_1_MARK, PORT198_FN2),
+ PINMUX_DATA(MMCD1_0_MARK, PORT199_FN2),
+ PINMUX_DATA(MMCCMD1_MARK, PORT200_FN2),
+
+ /* Port192 - Port200 IRQ */
+ PINMUX_DATA(IRQ31_MARK, PORT192_FN0),
+ PINMUX_DATA(IRQ32_MARK, PORT193_FN0),
+ PINMUX_DATA(IRQ33_MARK, PORT194_FN0),
+ PINMUX_DATA(IRQ34_MARK, PORT195_FN0),
+ PINMUX_DATA(IRQ35_MARK, PORT196_FN0),
+ PINMUX_DATA(IRQ36_MARK, PORT197_FN0),
+ PINMUX_DATA(IRQ37_MARK, PORT198_FN0),
+ PINMUX_DATA(IRQ38_MARK, PORT199_FN0),
+ PINMUX_DATA(IRQ39_MARK, PORT200_FN0),
+
+ /* Port201 */
+ PINMUX_DATA(A1_MARK, PORT201_FN1),
+
+ /* Port202 */
+ PINMUX_DATA(A0_MARK, PORT202_FN1),
+ PINMUX_DATA(BS_MARK, PORT202_FN2),
+
+ /* Port203 */
+ PINMUX_DATA(CKO_MARK, PORT203_FN1),
+ PINMUX_DATA(MMCCLK1_MARK, PORT203_FN2),
+
+ /* Port204 */
+ PINMUX_DATA(CS0_N_MARK, PORT204_FN1),
+ PINMUX_DATA(SIM0_GPO1_MARK, PORT204_FN5),
+
+ /* Port205 */
+ PINMUX_DATA(CS2_N_MARK, PORT205_FN1),
+ PINMUX_DATA(SIM0_GPO2_MARK, PORT205_FN5),
+
+ /* Port206 */
+ PINMUX_DATA(CS4_N_MARK, PORT206_FN1),
+ PINMUX_DATA(VIO_VD_MARK, PORT206_FN2),
+ PINMUX_DATA(SIM1_GPO0_MARK, PORT206_FN5),
+
+ /* Port207 - Port212 FN1 */
+ PINMUX_DATA(D15_MARK, PORT207_FN1),
+ PINMUX_DATA(D14_MARK, PORT208_FN1),
+ PINMUX_DATA(D13_MARK, PORT209_FN1),
+ PINMUX_DATA(D12_MARK, PORT210_FN1),
+ PINMUX_DATA(D11_MARK, PORT211_FN1),
+ PINMUX_DATA(D10_MARK, PORT212_FN1),
+
+ /* Port207 - Port212 FN5 */
+ PINMUX_DATA(GIO_OUT15_MARK, PORT207_FN5),
+ PINMUX_DATA(GIO_OUT14_MARK, PORT208_FN5),
+ PINMUX_DATA(GIO_OUT13_MARK, PORT209_FN5),
+ PINMUX_DATA(GIO_OUT12_MARK, PORT210_FN5),
+ PINMUX_DATA(WGM_TXP2_MARK, PORT211_FN5),
+ PINMUX_DATA(WGM_GPS_TIMEM_ASK_RFCLK_MARK, PORT212_FN5),
+
+ /* Port213 - Port222 FN1 */
+ PINMUX_DATA(D9_MARK, PORT213_FN1),
+ PINMUX_DATA(D8_MARK, PORT214_FN1),
+ PINMUX_DATA(D7_MARK, PORT215_FN1),
+ PINMUX_DATA(D6_MARK, PORT216_FN1),
+ PINMUX_DATA(D5_MARK, PORT217_FN1),
+ PINMUX_DATA(D4_MARK, PORT218_FN1),
+ PINMUX_DATA(D3_MARK, PORT219_FN1),
+ PINMUX_DATA(D2_MARK, PORT220_FN1),
+ PINMUX_DATA(D1_MARK, PORT221_FN1),
+ PINMUX_DATA(D0_MARK, PORT222_FN1),
+
+ /* Port213 - Port222 FN2 */
+ PINMUX_DATA(VIO_D9_MARK, PORT213_FN2),
+ PINMUX_DATA(VIO_D8_MARK, PORT214_FN2),
+ PINMUX_DATA(VIO_D7_MARK, PORT215_FN2),
+ PINMUX_DATA(VIO_D6_MARK, PORT216_FN2),
+ PINMUX_DATA(VIO_D5_MARK, PORT217_FN2),
+ PINMUX_DATA(VIO_D4_MARK, PORT218_FN2),
+ PINMUX_DATA(VIO_D3_MARK, PORT219_FN2),
+ PINMUX_DATA(VIO_D2_MARK, PORT220_FN2),
+ PINMUX_DATA(VIO_D1_MARK, PORT221_FN2),
+ PINMUX_DATA(VIO_D0_MARK, PORT222_FN2),
+
+ /* Port213 - Port222 FN5 */
+ PINMUX_DATA(GIO_OUT9_MARK, PORT213_FN5),
+ PINMUX_DATA(GIO_OUT8_MARK, PORT214_FN5),
+ PINMUX_DATA(GIO_OUT7_MARK, PORT215_FN5),
+ PINMUX_DATA(GIO_OUT6_MARK, PORT216_FN5),
+ PINMUX_DATA(GIO_OUT5_217_MARK, PORT217_FN5),
+ PINMUX_DATA(GIO_OUT4_218_MARK, PORT218_FN5),
+ PINMUX_DATA(GIO_OUT3_219_MARK, PORT219_FN5),
+ PINMUX_DATA(GIO_OUT2_220_MARK, PORT220_FN5),
+ PINMUX_DATA(GIO_OUT1_221_MARK, PORT221_FN5),
+ PINMUX_DATA(GIO_OUT0_222_MARK, PORT222_FN5),
+
+ /* Port224 */
+ PINMUX_DATA(RDWR_224_MARK, PORT224_FN1),
+ PINMUX_DATA(VIO_HD_MARK, PORT224_FN2),
+ PINMUX_DATA(SIM1_GPO2_MARK, PORT224_FN5),
+
+ /* Port225 */
+ PINMUX_DATA(RD_N_MARK, PORT225_FN1),
+
+ /* Port226 */
+ PINMUX_DATA(WAIT_N_MARK, PORT226_FN1),
+ PINMUX_DATA(VIO_CLK_MARK, PORT226_FN2),
+ PINMUX_DATA(SIM1_GPO1_MARK, PORT226_FN5),
+
+ /* Port227 */
+ PINMUX_DATA(WE0_N_MARK, PORT227_FN1),
+ PINMUX_DATA(RDWR_227_MARK, PORT227_FN2),
+
+ /* Port228 */
+ PINMUX_DATA(WE1_N_MARK, PORT228_FN1),
+ PINMUX_DATA(SIM0_GPO0_MARK, PORT228_FN5),
+
+ /* Port229 */
+ PINMUX_DATA(PWMO_MARK, PORT229_FN1),
+ PINMUX_DATA(VIO_CKO1_229_MARK, PORT229_FN2),
+
+ /* Port230 */
+ PINMUX_DATA(SLIM_CLK_MARK, PORT230_FN1),
+ PINMUX_DATA(VIO_CKO4_230_MARK, PORT230_FN2),
+
+ /* Port231 */
+ PINMUX_DATA(SLIM_DATA_MARK, PORT231_FN1),
+ PINMUX_DATA(VIO_CKO5_231_MARK, PORT231_FN2),
+
+ /* Port232 */
+ PINMUX_DATA(VIO_CKO2_232_MARK, PORT232_FN2),
+ PINMUX_DATA(SF_PORT_0_232_MARK, PORT232_FN4),
+
+ /* Port233 */
+ PINMUX_DATA(VIO_CKO3_233_MARK, PORT233_FN2),
+ PINMUX_DATA(SF_PORT_1_233_MARK, PORT233_FN4),
+
+ /* Port234 */
+ PINMUX_DATA(FSIACK_MARK, PORT234_FN1),
+ PINMUX_DATA(PDM3_CLK_234_MARK, PORT234_FN2),
+ PINMUX_DATA(ISP_IRIS1_234_MARK, PORT234_FN3),
+
+ /* Port235 */
+ PINMUX_DATA(FSIAISLD_MARK, PORT235_FN1),
+ PINMUX_DATA(PDM3_DATA_235_MARK, PORT235_FN2, MSEL3CR_12_1),
+
+ /* Port236 */
+ PINMUX_DATA(FSIAOMC_MARK, PORT236_FN1),
+ PINMUX_DATA(PDM0_OUTCLK_236_MARK, PORT236_FN2),
+ PINMUX_DATA(ISP_IRIS0_236_MARK, PORT236_FN3),
+
+ /* Port237 */
+ PINMUX_DATA(FSIAOLR_MARK, PORT237_FN1),
+ PINMUX_DATA(FSIAILR_MARK, PORT237_FN2),
+
+ /* Port238 */
+ PINMUX_DATA(FSIAOBT_MARK, PORT238_FN1),
+ PINMUX_DATA(FSIAIBT_MARK, PORT238_FN2),
+
+ /* Port239 */
+ PINMUX_DATA(FSIAOSLD_MARK, PORT239_FN1),
+ PINMUX_DATA(PDM0_OUTDATA_239_MARK, PORT239_FN2),
+
+ /* Port240 */
+ PINMUX_DATA(FSIBISLD_MARK, PORT240_FN1),
+
+ /* Port241 */
+ PINMUX_DATA(FSIBOLR_MARK, PORT241_FN1),
+ PINMUX_DATA(FSIBILR_MARK, PORT241_FN2),
+
+ /* Port242 */
+ PINMUX_DATA(FSIBOMC_MARK, PORT242_FN1),
+ PINMUX_DATA(ISP_SHUTTER1_242_MARK, PORT242_FN3),
+
+ /* Port243 */
+ PINMUX_DATA(FSIBOBT_MARK, PORT243_FN1),
+ PINMUX_DATA(FSIBIBT_MARK, PORT243_FN2),
+
+ /* Port244 */
+ PINMUX_DATA(FSIBOSLD_MARK, PORT244_FN1),
+ PINMUX_DATA(FSIASPDIF_MARK, PORT244_FN2),
+
+ /* Port245 */
+ PINMUX_DATA(FSIBCK_MARK, PORT245_FN1),
+ PINMUX_DATA(ISP_SHUTTER0_245_MARK, PORT245_FN3),
+
+ /* Port246 - Port250 FN1 */
+ PINMUX_DATA(ISP_IRIS1_246_MARK, PORT246_FN1),
+ PINMUX_DATA(ISP_IRIS0_247_MARK, PORT247_FN1),
+ PINMUX_DATA(ISP_SHUTTER1_248_MARK, PORT248_FN1),
+ PINMUX_DATA(ISP_SHUTTER0_249_MARK, PORT249_FN1),
+ PINMUX_DATA(ISP_STROBE_250_MARK, PORT250_FN1),
+
+ /* Port256 - Port258 */
+ PINMUX_DATA(MSIOF0_SYNC_MARK, PORT256_FN1),
+ PINMUX_DATA(MSIOF0_RXD_MARK, PORT257_FN1),
+ PINMUX_DATA(MSIOF0_SCK_MARK, PORT258_FN1),
+
+ /* Port259 */
+ PINMUX_DATA(MSIOF0_SS2_MARK, PORT259_FN1),
+ PINMUX_DATA(VIO_CKO3_259_MARK, PORT259_FN3),
+
+ /* Port260 */
+ PINMUX_DATA(MSIOF0_TXD_MARK, PORT260_FN1),
+
+ /* Port261 */
+ PINMUX_DATA(SCIFB1_SCK_261_MARK, PORT261_FN2),
+ PINMUX_DATA(CHSCIF1_HSCK_MARK, PORT261_FN7),
+
+ /* Port262 */
+ PINMUX_DATA(SCIFB2_SCK_262_MARK, PORT262_FN2),
+
+ /* Port263 - Port266 FN1 */
+ PINMUX_DATA(MSIOF1_SS2_MARK, PORT263_FN1),
+ PINMUX_DATA(MSIOF1_TXD_MARK, PORT264_FN1),
+ PINMUX_DATA(MSIOF1_RXD_MARK, PORT265_FN1),
+ PINMUX_DATA(MSIOF1_SS1_MARK, PORT266_FN1),
+
+ /* Port263 - Port266 FN4 */
+ PINMUX_DATA(MSIOF5_SS2_MARK, PORT263_FN4),
+ PINMUX_DATA(MSIOF5_TXD_MARK, PORT264_FN4),
+ PINMUX_DATA(MSIOF5_RXD_MARK, PORT265_FN4),
+ PINMUX_DATA(MSIOF5_SS1_MARK, PORT266_FN4),
+
+ /* Port267 */
+ PINMUX_DATA(MSIOF0_SS1_MARK, PORT267_FN1),
+
+ /* Port268 */
+ PINMUX_DATA(MSIOF1_SCK_MARK, PORT268_FN1),
+ PINMUX_DATA(MSIOF5_SCK_MARK, PORT268_FN4),
+
+ /* Port269 */
+ PINMUX_DATA(MSIOF1_SYNC_MARK, PORT269_FN1),
+ PINMUX_DATA(MSIOF5_SYNC_MARK, PORT269_FN4),
+
+ /* Port270 - Port273 FN1 */
+ PINMUX_DATA(MSIOF2_SS1_MARK, PORT270_FN1),
+ PINMUX_DATA(MSIOF2_SS2_MARK, PORT271_FN1),
+ PINMUX_DATA(MSIOF3_SS2_MARK, PORT272_FN1),
+ PINMUX_DATA(MSIOF3_SS1_MARK, PORT273_FN1),
+
+ /* Port270 - Port273 FN3 */
+ PINMUX_DATA(VIO_CKO5_270_MARK, PORT270_FN3),
+ PINMUX_DATA(VIO_CKO2_271_MARK, PORT271_FN3),
+ PINMUX_DATA(VIO_CKO1_272_MARK, PORT272_FN3),
+ PINMUX_DATA(VIO_CKO4_273_MARK, PORT273_FN3),
+
+ /* Port274 */
+ PINMUX_DATA(MSIOF4_SS2_MARK, PORT274_FN1),
+ PINMUX_DATA(TPU1TO0_MARK, PORT274_FN4),
+
+ /* Port275 - Port280 */
+ PINMUX_DATA(IC_DP_MARK, PORT275_FN1),
+ PINMUX_DATA(SIM0_RST_MARK, PORT276_FN1),
+ PINMUX_DATA(IC_DM_MARK, PORT277_FN1),
+ PINMUX_DATA(SIM0_BSICOMP_MARK, PORT278_FN1),
+ PINMUX_DATA(SIM0_CLK_MARK, PORT279_FN1),
+ PINMUX_DATA(SIM0_IO_MARK, PORT280_FN1),
+
+ /* Port281 */
+ PINMUX_DATA(SIM1_IO_MARK, PORT281_FN1),
+ PINMUX_DATA(PDM2_DATA_281_MARK, PORT281_FN2, MSEL3CR_12_1),
+
+ /* Port282 */
+ PINMUX_DATA(SIM1_CLK_MARK, PORT282_FN1),
+ PINMUX_DATA(PDM2_CLK_282_MARK, PORT282_FN2),
+
+ /* Port283 */
+ PINMUX_DATA(SIM1_RST_MARK, PORT283_FN1),
+
+ /* Port289 */
+ PINMUX_DATA(SDHID1_0_MARK, PORT289_FN1),
+ PINMUX_DATA(STMDATA0_2_MARK, PORT289_FN3),
+
+ /* Port290 */
+ PINMUX_DATA(SDHID1_1_MARK, PORT290_FN1),
+ PINMUX_DATA(STMDATA1_2_MARK, PORT290_FN3),
+ PINMUX_DATA(IRQ51_MARK, PORT290_FN0),
+
+ /* Port291 - Port294 FN1 */
+ PINMUX_DATA(SDHID1_2_MARK, PORT291_FN1),
+ PINMUX_DATA(SDHID1_3_MARK, PORT292_FN1),
+ PINMUX_DATA(SDHICLK1_MARK, PORT293_FN1),
+ PINMUX_DATA(SDHICMD1_MARK, PORT294_FN1),
+
+ /* Port291 - Port294 FN3 */
+ PINMUX_DATA(STMDATA2_2_MARK, PORT291_FN3),
+ PINMUX_DATA(STMDATA3_2_MARK, PORT292_FN3),
+ PINMUX_DATA(STMCLK_2_MARK, PORT293_FN3),
+ PINMUX_DATA(STMSIDI_2_MARK, PORT294_FN3),
+
+ /* Port295 */
+ PINMUX_DATA(SDHID2_0_MARK, PORT295_FN1),
+ PINMUX_DATA(MSIOF4_TXD_MARK, PORT295_FN2),
+ PINMUX_DATA(SCIFB2_TXD_295_MARK, PORT295_FN3, MSEL3CR_10_1),
+ PINMUX_DATA(MSIOF6_TXD_MARK, PORT295_FN4),
+
+ /* Port296 */
+ PINMUX_DATA(SDHID2_1_MARK, PORT296_FN1),
+ PINMUX_DATA(MSIOF6_SS2_MARK, PORT296_FN4),
+ PINMUX_DATA(IRQ52_MARK, PORT296_FN0),
+
+ /* Port297 - Port300 FN1 */
+ PINMUX_DATA(SDHID2_2_MARK, PORT297_FN1),
+ PINMUX_DATA(SDHID2_3_MARK, PORT298_FN1),
+ PINMUX_DATA(SDHICLK2_MARK, PORT299_FN1),
+ PINMUX_DATA(SDHICMD2_MARK, PORT300_FN1),
+
+ /* Port297 - Port300 FN2 */
+ PINMUX_DATA(MSIOF4_RXD_MARK, PORT297_FN2),
+ PINMUX_DATA(MSIOF4_SYNC_MARK, PORT298_FN2),
+ PINMUX_DATA(MSIOF4_SCK_MARK, PORT299_FN2),
+ PINMUX_DATA(MSIOF4_SS1_MARK, PORT300_FN2),
+
+ /* Port297 - Port300 FN3 */
+ PINMUX_DATA(SCIFB2_RXD_297_MARK, PORT297_FN3, MSEL3CR_10_1),
+ PINMUX_DATA(SCIFB2_CTS_298_MARK, PORT298_FN3, MSEL3CR_10_1),
+ PINMUX_DATA(SCIFB2_SCK_299_MARK, PORT299_FN3),
+ PINMUX_DATA(SCIFB2_RTS_300_MARK, PORT300_FN3),
+
+ /* Port297 - Port300 FN4 */
+ PINMUX_DATA(MSIOF6_RXD_MARK, PORT297_FN4),
+ PINMUX_DATA(MSIOF6_SYNC_MARK, PORT298_FN4),
+ PINMUX_DATA(MSIOF6_SCK_MARK, PORT299_FN4),
+ PINMUX_DATA(MSIOF6_SS1_MARK, PORT300_FN4),
+
+ /* Port301 */
+ PINMUX_DATA(SDHICD0_MARK, PORT301_FN1),
+ PINMUX_DATA(IRQ50_MARK, PORT301_FN0),
+
+ /* Port302 - Port306 FN1 */
+ PINMUX_DATA(SDHID0_0_MARK, PORT302_FN1),
+ PINMUX_DATA(SDHID0_1_MARK, PORT303_FN1),
+ PINMUX_DATA(SDHID0_2_MARK, PORT304_FN1),
+ PINMUX_DATA(SDHID0_3_MARK, PORT305_FN1),
+ PINMUX_DATA(SDHICMD0_MARK, PORT306_FN1),
+
+ /* Port302 - Port306 FN3 */
+ PINMUX_DATA(STMDATA0_1_MARK, PORT302_FN3),
+ PINMUX_DATA(STMDATA1_1_MARK, PORT303_FN3),
+ PINMUX_DATA(STMDATA2_1_MARK, PORT304_FN3),
+ PINMUX_DATA(STMDATA3_1_MARK, PORT305_FN3),
+ PINMUX_DATA(STMSIDI_1_MARK, PORT306_FN3),
+
+ /* Port307 */
+ PINMUX_DATA(SDHIWP0_MARK, PORT307_FN1),
+
+ /* Port308 */
+ PINMUX_DATA(SDHICLK0_MARK, PORT308_FN1),
+ PINMUX_DATA(STMCLK_1_MARK, PORT308_FN3),
+
+ /* Port320 - Port329 */
+ PINMUX_DATA(IRQ16_MARK, PORT320_FN0),
+ PINMUX_DATA(IRQ17_MARK, PORT321_FN0),
+ PINMUX_DATA(IRQ28_MARK, PORT322_FN0),
+ PINMUX_DATA(IRQ29_MARK, PORT323_FN0),
+ PINMUX_DATA(IRQ30_MARK, PORT324_FN0),
+ PINMUX_DATA(IRQ53_MARK, PORT325_FN0),
+ PINMUX_DATA(IRQ54_MARK, PORT326_FN0),
+ PINMUX_DATA(IRQ55_MARK, PORT327_FN0),
+ PINMUX_DATA(IRQ56_MARK, PORT328_FN0),
+ PINMUX_DATA(IRQ57_MARK, PORT329_FN0),
+};
+
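+/*
+ * Pin descriptor helpers: R8A73A4_PIN() emits one sh_pfc_pin entry named
+ * PORTn, and the _IO_PU_PD/_O wrappers select its capability flags
+ * (bidirectional with pull-up/pull-down, or output-only).
+ */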
+#define R8A73A4_PIN(pin, cfgs) \
+ { \
+ .name = __stringify(PORT##pin), \
+ .enum_id = PORT##pin##_DATA, \
+ .configs = cfgs, \
+ }
+
+#define __O (SH_PFC_PIN_CFG_OUTPUT)
+#define __IO (SH_PFC_PIN_CFG_INPUT | SH_PFC_PIN_CFG_OUTPUT)
+#define __PUD (SH_PFC_PIN_CFG_PULL_DOWN | SH_PFC_PIN_CFG_PULL_UP)
+
+#define R8A73A4_PIN_IO_PU_PD(pin) R8A73A4_PIN(pin, __IO | __PUD)
+#define R8A73A4_PIN_O(pin) R8A73A4_PIN(pin, __O)
+
+static struct sh_pfc_pin pinmux_pins[] = {
+ R8A73A4_PIN_IO_PU_PD(0), R8A73A4_PIN_IO_PU_PD(1),
+ R8A73A4_PIN_IO_PU_PD(2), R8A73A4_PIN_IO_PU_PD(3),
+ R8A73A4_PIN_IO_PU_PD(4), R8A73A4_PIN_IO_PU_PD(5),
+ R8A73A4_PIN_IO_PU_PD(6), R8A73A4_PIN_IO_PU_PD(7),
+ R8A73A4_PIN_IO_PU_PD(8), R8A73A4_PIN_IO_PU_PD(9),
+ R8A73A4_PIN_IO_PU_PD(10), R8A73A4_PIN_IO_PU_PD(11),
+ R8A73A4_PIN_IO_PU_PD(12), R8A73A4_PIN_IO_PU_PD(13),
+ R8A73A4_PIN_IO_PU_PD(14), R8A73A4_PIN_IO_PU_PD(15),
+ R8A73A4_PIN_IO_PU_PD(16), R8A73A4_PIN_IO_PU_PD(17),
+ R8A73A4_PIN_IO_PU_PD(18), R8A73A4_PIN_IO_PU_PD(19),
+ R8A73A4_PIN_IO_PU_PD(20), R8A73A4_PIN_IO_PU_PD(21),
+ R8A73A4_PIN_IO_PU_PD(22), R8A73A4_PIN_IO_PU_PD(23),
+ R8A73A4_PIN_IO_PU_PD(24), R8A73A4_PIN_IO_PU_PD(25),
+ R8A73A4_PIN_IO_PU_PD(26), R8A73A4_PIN_IO_PU_PD(27),
+ R8A73A4_PIN_IO_PU_PD(28), R8A73A4_PIN_IO_PU_PD(29),
+ R8A73A4_PIN_IO_PU_PD(30),
+ R8A73A4_PIN_IO_PU_PD(32), R8A73A4_PIN_IO_PU_PD(33),
+ R8A73A4_PIN_IO_PU_PD(34), R8A73A4_PIN_IO_PU_PD(35),
+ R8A73A4_PIN_IO_PU_PD(36), R8A73A4_PIN_IO_PU_PD(37),
+ R8A73A4_PIN_IO_PU_PD(38), R8A73A4_PIN_IO_PU_PD(39),
+ R8A73A4_PIN_IO_PU_PD(40),
+ R8A73A4_PIN_IO_PU_PD(64), R8A73A4_PIN_IO_PU_PD(65),
+ R8A73A4_PIN_IO_PU_PD(66), R8A73A4_PIN_IO_PU_PD(67),
+ R8A73A4_PIN_IO_PU_PD(68), R8A73A4_PIN_IO_PU_PD(69),
+ R8A73A4_PIN_IO_PU_PD(70), R8A73A4_PIN_IO_PU_PD(71),
+ R8A73A4_PIN_IO_PU_PD(72), R8A73A4_PIN_IO_PU_PD(73),
+ R8A73A4_PIN_O(74), R8A73A4_PIN_IO_PU_PD(75),
+ R8A73A4_PIN_IO_PU_PD(76), R8A73A4_PIN_IO_PU_PD(77),
+ R8A73A4_PIN_IO_PU_PD(78), R8A73A4_PIN_IO_PU_PD(79),
+ R8A73A4_PIN_IO_PU_PD(80), R8A73A4_PIN_IO_PU_PD(81),
+ R8A73A4_PIN_IO_PU_PD(82), R8A73A4_PIN_IO_PU_PD(83),
+ R8A73A4_PIN_IO_PU_PD(84), R8A73A4_PIN_IO_PU_PD(85),
+ R8A73A4_PIN_IO_PU_PD(96), R8A73A4_PIN_IO_PU_PD(97),
+ R8A73A4_PIN_IO_PU_PD(98), R8A73A4_PIN_IO_PU_PD(99),
+ R8A73A4_PIN_IO_PU_PD(100), R8A73A4_PIN_IO_PU_PD(101),
+ R8A73A4_PIN_IO_PU_PD(102), R8A73A4_PIN_IO_PU_PD(103),
+ R8A73A4_PIN_IO_PU_PD(104), R8A73A4_PIN_IO_PU_PD(105),
+ R8A73A4_PIN_IO_PU_PD(106), R8A73A4_PIN_IO_PU_PD(107),
+ R8A73A4_PIN_IO_PU_PD(108), R8A73A4_PIN_IO_PU_PD(109),
+ R8A73A4_PIN_IO_PU_PD(110), R8A73A4_PIN_IO_PU_PD(111),
+ R8A73A4_PIN_IO_PU_PD(112), R8A73A4_PIN_IO_PU_PD(113),
+ R8A73A4_PIN_IO_PU_PD(114), R8A73A4_PIN_IO_PU_PD(115),
+ R8A73A4_PIN_IO_PU_PD(116), R8A73A4_PIN_IO_PU_PD(117),
+ R8A73A4_PIN_IO_PU_PD(118), R8A73A4_PIN_IO_PU_PD(119),
+ R8A73A4_PIN_IO_PU_PD(120), R8A73A4_PIN_IO_PU_PD(121),
+ R8A73A4_PIN_IO_PU_PD(122), R8A73A4_PIN_IO_PU_PD(123),
+ R8A73A4_PIN_IO_PU_PD(124), R8A73A4_PIN_IO_PU_PD(125),
+ R8A73A4_PIN_IO_PU_PD(126),
+ R8A73A4_PIN_IO_PU_PD(128), R8A73A4_PIN_IO_PU_PD(129),
+ R8A73A4_PIN_IO_PU_PD(130), R8A73A4_PIN_IO_PU_PD(131),
+ R8A73A4_PIN_IO_PU_PD(132), R8A73A4_PIN_IO_PU_PD(133),
+ R8A73A4_PIN_IO_PU_PD(134),
+ R8A73A4_PIN_IO_PU_PD(160), R8A73A4_PIN_IO_PU_PD(161),
+ R8A73A4_PIN_IO_PU_PD(162), R8A73A4_PIN_IO_PU_PD(163),
+ R8A73A4_PIN_IO_PU_PD(164), R8A73A4_PIN_IO_PU_PD(165),
+ R8A73A4_PIN_IO_PU_PD(166), R8A73A4_PIN_IO_PU_PD(167),
+ R8A73A4_PIN_IO_PU_PD(168), R8A73A4_PIN_IO_PU_PD(169),
+ R8A73A4_PIN_IO_PU_PD(170), R8A73A4_PIN_IO_PU_PD(171),
+ R8A73A4_PIN_IO_PU_PD(172), R8A73A4_PIN_IO_PU_PD(173),
+ R8A73A4_PIN_IO_PU_PD(174), R8A73A4_PIN_IO_PU_PD(175),
+ R8A73A4_PIN_IO_PU_PD(176), R8A73A4_PIN_IO_PU_PD(177),
+ R8A73A4_PIN_IO_PU_PD(178),
+ R8A73A4_PIN_IO_PU_PD(192), R8A73A4_PIN_IO_PU_PD(193),
+ R8A73A4_PIN_IO_PU_PD(194), R8A73A4_PIN_IO_PU_PD(195),
+ R8A73A4_PIN_IO_PU_PD(196), R8A73A4_PIN_IO_PU_PD(197),
+ R8A73A4_PIN_IO_PU_PD(198), R8A73A4_PIN_IO_PU_PD(199),
+ R8A73A4_PIN_IO_PU_PD(200), R8A73A4_PIN_IO_PU_PD(201),
+ R8A73A4_PIN_IO_PU_PD(202), R8A73A4_PIN_IO_PU_PD(203),
+ R8A73A4_PIN_IO_PU_PD(204), R8A73A4_PIN_IO_PU_PD(205),
+ R8A73A4_PIN_IO_PU_PD(206), R8A73A4_PIN_IO_PU_PD(207),
+ R8A73A4_PIN_IO_PU_PD(208), R8A73A4_PIN_IO_PU_PD(209),
+ R8A73A4_PIN_IO_PU_PD(210), R8A73A4_PIN_IO_PU_PD(211),
+ R8A73A4_PIN_IO_PU_PD(212), R8A73A4_PIN_IO_PU_PD(213),
+ R8A73A4_PIN_IO_PU_PD(214), R8A73A4_PIN_IO_PU_PD(215),
+ R8A73A4_PIN_IO_PU_PD(216), R8A73A4_PIN_IO_PU_PD(217),
+ R8A73A4_PIN_IO_PU_PD(218), R8A73A4_PIN_IO_PU_PD(219),
+ R8A73A4_PIN_IO_PU_PD(220), R8A73A4_PIN_IO_PU_PD(221),
+ R8A73A4_PIN_IO_PU_PD(222),
+ R8A73A4_PIN_IO_PU_PD(224), R8A73A4_PIN_IO_PU_PD(225),
+ R8A73A4_PIN_IO_PU_PD(226), R8A73A4_PIN_IO_PU_PD(227),
+ R8A73A4_PIN_IO_PU_PD(228), R8A73A4_PIN_IO_PU_PD(229),
+ R8A73A4_PIN_IO_PU_PD(230), R8A73A4_PIN_IO_PU_PD(231),
+ R8A73A4_PIN_IO_PU_PD(232), R8A73A4_PIN_IO_PU_PD(233),
+ R8A73A4_PIN_IO_PU_PD(234), R8A73A4_PIN_IO_PU_PD(235),
+ R8A73A4_PIN_IO_PU_PD(236), R8A73A4_PIN_IO_PU_PD(237),
+ R8A73A4_PIN_IO_PU_PD(238), R8A73A4_PIN_IO_PU_PD(239),
+ R8A73A4_PIN_IO_PU_PD(240), R8A73A4_PIN_IO_PU_PD(241),
+ R8A73A4_PIN_IO_PU_PD(242), R8A73A4_PIN_IO_PU_PD(243),
+ R8A73A4_PIN_IO_PU_PD(244), R8A73A4_PIN_IO_PU_PD(245),
+ R8A73A4_PIN_IO_PU_PD(246), R8A73A4_PIN_IO_PU_PD(247),
+ R8A73A4_PIN_IO_PU_PD(248), R8A73A4_PIN_IO_PU_PD(249),
+ R8A73A4_PIN_IO_PU_PD(250),
+ R8A73A4_PIN_IO_PU_PD(256), R8A73A4_PIN_IO_PU_PD(257),
+ R8A73A4_PIN_IO_PU_PD(258), R8A73A4_PIN_IO_PU_PD(259),
+ R8A73A4_PIN_IO_PU_PD(260), R8A73A4_PIN_IO_PU_PD(261),
+ R8A73A4_PIN_IO_PU_PD(262), R8A73A4_PIN_IO_PU_PD(263),
+ R8A73A4_PIN_IO_PU_PD(264), R8A73A4_PIN_IO_PU_PD(265),
+ R8A73A4_PIN_IO_PU_PD(266), R8A73A4_PIN_IO_PU_PD(267),
+ R8A73A4_PIN_IO_PU_PD(268), R8A73A4_PIN_IO_PU_PD(269),
+ R8A73A4_PIN_IO_PU_PD(270), R8A73A4_PIN_IO_PU_PD(271),
+ R8A73A4_PIN_IO_PU_PD(272), R8A73A4_PIN_IO_PU_PD(273),
+ R8A73A4_PIN_IO_PU_PD(274), R8A73A4_PIN_IO_PU_PD(275),
+ R8A73A4_PIN_IO_PU_PD(276), R8A73A4_PIN_IO_PU_PD(277),
+ R8A73A4_PIN_IO_PU_PD(278), R8A73A4_PIN_IO_PU_PD(279),
+ R8A73A4_PIN_IO_PU_PD(280), R8A73A4_PIN_IO_PU_PD(281),
+ R8A73A4_PIN_IO_PU_PD(282), R8A73A4_PIN_IO_PU_PD(283),
+ R8A73A4_PIN_O(288), R8A73A4_PIN_IO_PU_PD(289),
+ R8A73A4_PIN_IO_PU_PD(290), R8A73A4_PIN_IO_PU_PD(291),
+ R8A73A4_PIN_IO_PU_PD(292), R8A73A4_PIN_IO_PU_PD(293),
+ R8A73A4_PIN_IO_PU_PD(294), R8A73A4_PIN_IO_PU_PD(295),
+ R8A73A4_PIN_IO_PU_PD(296), R8A73A4_PIN_IO_PU_PD(297),
+ R8A73A4_PIN_IO_PU_PD(298), R8A73A4_PIN_IO_PU_PD(299),
+ R8A73A4_PIN_IO_PU_PD(300), R8A73A4_PIN_IO_PU_PD(301),
+ R8A73A4_PIN_IO_PU_PD(302), R8A73A4_PIN_IO_PU_PD(303),
+ R8A73A4_PIN_IO_PU_PD(304), R8A73A4_PIN_IO_PU_PD(305),
+ R8A73A4_PIN_IO_PU_PD(306), R8A73A4_PIN_IO_PU_PD(307),
+ R8A73A4_PIN_IO_PU_PD(308),
+ R8A73A4_PIN_IO_PU_PD(320), R8A73A4_PIN_IO_PU_PD(321),
+ R8A73A4_PIN_IO_PU_PD(322), R8A73A4_PIN_IO_PU_PD(323),
+ R8A73A4_PIN_IO_PU_PD(324), R8A73A4_PIN_IO_PU_PD(325),
+ R8A73A4_PIN_IO_PU_PD(326), R8A73A4_PIN_IO_PU_PD(327),
+ R8A73A4_PIN_IO_PU_PD(328), R8A73A4_PIN_IO_PU_PD(329),
+};
+
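+/* The usable port numbers are non-contiguous; list the valid ranges. */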
+static const struct pinmux_range pinmux_ranges[] = {
+ {.begin = 0, .end = 30,},
+ {.begin = 32, .end = 40,},
+ {.begin = 64, .end = 85,},
+ {.begin = 96, .end = 126,},
+ {.begin = 128, .end = 134,},
+ {.begin = 160, .end = 178,},
+ {.begin = 192, .end = 222,},
+ {.begin = 224, .end = 250,},
+ {.begin = 256, .end = 283,},
+ {.begin = 288, .end = 308,},
+ {.begin = 320, .end = 329,},
+};
+
+/* - IRQC ------------------------------------------------------------------- */
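+/* Build the single-pin group and mux tables for one external IRQ line. */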
+#define IRQC_PINS_MUX(pin, irq_mark) \
+static const unsigned int irqc_irq##irq_mark##_pins[] = { \
+ pin, \
+}; \
+static const unsigned int irqc_irq##irq_mark##_mux[] = { \
+ IRQ##irq_mark##_MARK, \
+}
+IRQC_PINS_MUX(0, 0);
+IRQC_PINS_MUX(1, 1);
+IRQC_PINS_MUX(2, 2);
+IRQC_PINS_MUX(3, 3);
+IRQC_PINS_MUX(4, 4);
+IRQC_PINS_MUX(5, 5);
+IRQC_PINS_MUX(6, 6);
+IRQC_PINS_MUX(7, 7);
+IRQC_PINS_MUX(8, 8);
+IRQC_PINS_MUX(9, 9);
+IRQC_PINS_MUX(10, 10);
+IRQC_PINS_MUX(11, 11);
+IRQC_PINS_MUX(12, 12);
+IRQC_PINS_MUX(13, 13);
+IRQC_PINS_MUX(14, 14);
+IRQC_PINS_MUX(15, 15);
+IRQC_PINS_MUX(66, 40);
+IRQC_PINS_MUX(84, 19);
+IRQC_PINS_MUX(85, 18);
+IRQC_PINS_MUX(102, 41);
+IRQC_PINS_MUX(103, 42);
+IRQC_PINS_MUX(109, 43);
+IRQC_PINS_MUX(110, 44);
+IRQC_PINS_MUX(111, 45);
+IRQC_PINS_MUX(112, 46);
+IRQC_PINS_MUX(113, 47);
+IRQC_PINS_MUX(114, 48);
+IRQC_PINS_MUX(115, 49);
+IRQC_PINS_MUX(160, 20);
+IRQC_PINS_MUX(161, 21);
+IRQC_PINS_MUX(162, 22);
+IRQC_PINS_MUX(163, 23);
+IRQC_PINS_MUX(175, 24);
+IRQC_PINS_MUX(176, 25);
+IRQC_PINS_MUX(177, 26);
+IRQC_PINS_MUX(178, 27);
+IRQC_PINS_MUX(192, 31);
+IRQC_PINS_MUX(193, 32);
+IRQC_PINS_MUX(194, 33);
+IRQC_PINS_MUX(195, 34);
+IRQC_PINS_MUX(196, 35);
+IRQC_PINS_MUX(197, 36);
+IRQC_PINS_MUX(198, 37);
+IRQC_PINS_MUX(199, 38);
+IRQC_PINS_MUX(200, 39);
+IRQC_PINS_MUX(290, 51);
+IRQC_PINS_MUX(296, 52);
+IRQC_PINS_MUX(301, 50);
+IRQC_PINS_MUX(320, 16);
+IRQC_PINS_MUX(321, 17);
+IRQC_PINS_MUX(322, 28);
+IRQC_PINS_MUX(323, 29);
+IRQC_PINS_MUX(324, 30);
+IRQC_PINS_MUX(325, 53);
+IRQC_PINS_MUX(326, 54);
+IRQC_PINS_MUX(327, 55);
+IRQC_PINS_MUX(328, 56);
+IRQC_PINS_MUX(329, 57);
+/* - SCIFA0 ----------------------------------------------------------------- */
+static const unsigned int scifa0_data_pins[] = {
+ /* SCIFA0_RXD, SCIFA0_TXD */
+ 117, 116,
+};
+static const unsigned int scifa0_data_mux[] = {
+ SCIFA0_RXD_MARK, SCIFA0_TXD_MARK,
+};
+static const unsigned int scifa0_clk_pins[] = {
+ /* SCIFA0_SCK */
+ 34,
+};
+static const unsigned int scifa0_clk_mux[] = {
+ SCIFA0_SCK_MARK,
+};
+static const unsigned int scifa0_ctrl_pins[] = {
+ /* SCIFA0_RTS, SCIFA0_CTS */
+ 32, 33,
+};
+static const unsigned int scifa0_ctrl_mux[] = {
+ SCIFA0_RTS_MARK, SCIFA0_CTS_MARK,
+};
+/* - SCIFA1 ----------------------------------------------------------------- */
+static const unsigned int scifa1_data_pins[] = {
+ /* SCIFA1_RXD, SCIFA1_TXD */
+ 119, 118,
+};
+static const unsigned int scifa1_data_mux[] = {
+ SCIFA1_RXD_MARK, SCIFA1_TXD_MARK,
+};
+static const unsigned int scifa1_clk_pins[] = {
+ /* SCIFA1_SCK */
+ 37,
+};
+static const unsigned int scifa1_clk_mux[] = {
+ SCIFA1_SCK_MARK,
+};
+static const unsigned int scifa1_ctrl_pins[] = {
+ /* SCIFA1_RTS, SCIFA1_CTS */
+ 35, 36,
+};
+static const unsigned int scifa1_ctrl_mux[] = {
+ SCIFA1_RTS_MARK, SCIFA1_CTS_MARK,
+};
+/* - SCIFB0 ----------------------------------------------------------------- */
+static const unsigned int scifb0_data_pins[] = {
+ /* SCIFB0_RXD, SCIFB0_TXD */
+ 123, 122,
+};
+static const unsigned int scifb0_data_mux[] = {
+ SCIFB0_RXD_MARK, SCIFB0_TXD_MARK,
+};
+static const unsigned int scifb0_clk_pins[] = {
+ /* SCIFB0_SCK */
+ 40,
+};
+static const unsigned int scifb0_clk_mux[] = {
+ SCIFB0_SCK_MARK,
+};
+static const unsigned int scifb0_ctrl_pins[] = {
+ /* SCIFB0_RTS, SCIFB0_CTS */
+ 38, 39,
+};
+static const unsigned int scifb0_ctrl_mux[] = {
+ SCIFB0_RTS_MARK, SCIFB0_CTS_MARK,
+};
+/* - SCIFB1 ----------------------------------------------------------------- */
+static const unsigned int scifb1_data_pins[] = {
+ /* SCIFB1_RXD, SCIFB1_TXD */
+ 27, 26,
+};
+static const unsigned int scifb1_data_mux[] = {
+ SCIFB1_RXD_27_MARK, SCIFB1_TXD_26_MARK,
+};
+static const unsigned int scifb1_clk_pins[] = {
+ /* SCIFB1_SCK */
+ 28,
+};
+static const unsigned int scifb1_clk_mux[] = {
+ SCIFB1_SCK_28_MARK,
+};
+static const unsigned int scifb1_ctrl_pins[] = {
+ /* SCIFB1_RTS, SCIFB1_CTS */
+ 24, 25,
+};
+static const unsigned int scifb1_ctrl_mux[] = {
+ SCIFB1_RTS_24_MARK, SCIFB1_CTS_25_MARK,
+};
+static const unsigned int scifb1_data_b_pins[] = {
+ /* SCIFB1_RXD, SCIFB1_TXD */
+ 72, 67,
+};
+static const unsigned int scifb1_data_b_mux[] = {
+ SCIFB1_RXD_72_MARK, SCIFB1_TXD_67_MARK,
+};
+static const unsigned int scifb1_clk_b_pins[] = {
+ /* SCIFB1_SCK */
+ 261,
+};
+static const unsigned int scifb1_clk_b_mux[] = {
+ SCIFB1_SCK_261_MARK,
+};
+static const unsigned int scifb1_ctrl_b_pins[] = {
+ /* SCIFB1_RTS, SCIFB1_CTS */
+ 70, 71,
+};
+static const unsigned int scifb1_ctrl_b_mux[] = {
+ SCIFB1_RTS_70_MARK, SCIFB1_CTS_71_MARK,
+};
+/* - SCIFB2 ----------------------------------------------------------------- */
+static const unsigned int scifb2_data_pins[] = {
+ /* SCIFB2_RXD, SCIFB2_TXD */
+ 69, 68,
+};
+static const unsigned int scifb2_data_mux[] = {
+ SCIFB2_RXD_69_MARK, SCIFB2_TXD_68_MARK,
+};
+static const unsigned int scifb2_clk_pins[] = {
+ /* SCIFB2_SCK */
+ 262,
+};
+static const unsigned int scifb2_clk_mux[] = {
+ SCIFB2_SCK_262_MARK,
+};
+static const unsigned int scifb2_ctrl_pins[] = {
+ /* SCIFB2_RTS, SCIFB2_CTS */
+ 73, 66,
+};
+static const unsigned int scifb2_ctrl_mux[] = {
+ SCIFB2_RTS_73_MARK, SCIFB2_CTS_66_MARK,
+};
+static const unsigned int scifb2_data_b_pins[] = {
+ /* SCIFB2_RXD, SCIFB2_TXD */
+ 297, 295,
+};
+static const unsigned int scifb2_data_b_mux[] = {
+ SCIFB2_RXD_297_MARK, SCIFB2_TXD_295_MARK,
+};
+static const unsigned int scifb2_clk_b_pins[] = {
+ /* SCIFB2_SCK */
+ 299,
+};
+static const unsigned int scifb2_clk_b_mux[] = {
+ SCIFB2_SCK_299_MARK,
+};
+static const unsigned int scifb2_ctrl_b_pins[] = {
+ /* SCIFB2_RTS, SCIFB2_CTS */
+ 300, 298,
+};
+static const unsigned int scifb2_ctrl_b_mux[] = {
+ SCIFB2_RTS_300_MARK, SCIFB2_CTS_298_MARK,
+};
+/* - SCIFB3 ----------------------------------------------------------------- */
+static const unsigned int scifb3_data_pins[] = {
+ /* SCIFB3_RXD, SCIFB3_TXD */
+ 22, 21,
+};
+static const unsigned int scifb3_data_mux[] = {
+ SCIFB3_RXD_22_MARK, SCIFB3_TXD_21_MARK,
+};
+static const unsigned int scifb3_clk_pins[] = {
+ /* SCIFB3_SCK */
+ 23,
+};
+static const unsigned int scifb3_clk_mux[] = {
+ SCIFB3_SCK_23_MARK,
+};
+static const unsigned int scifb3_ctrl_pins[] = {
+ /* SCIFB3_RTS, SCIFB3_CTS */
+ 19, 20,
+};
+static const unsigned int scifb3_ctrl_mux[] = {
+ SCIFB3_RTS_19_MARK, SCIFB3_CTS_20_MARK,
+};
+static const unsigned int scifb3_data_b_pins[] = {
+ /* SCIFB3_RXD, SCIFB3_TXD */
+ 120, 121,
+};
+static const unsigned int scifb3_data_b_mux[] = {
+ SCIFB3_RXD_120_MARK, SCIFB3_TXD_121_MARK,
+};
+static const unsigned int scifb3_clk_b_pins[] = {
+ /* SCIFB3_SCK */
+ 40,
+};
+static const unsigned int scifb3_clk_b_mux[] = {
+ SCIFB3_SCK_40_MARK,
+};
+static const unsigned int scifb3_ctrl_b_pins[] = {
+ /* SCIFB3_RTS, SCIFB3_CTS */
+ 38, 39,
+};
+static const unsigned int scifb3_ctrl_b_mux[] = {
+ SCIFB3_RTS_38_MARK, SCIFB3_CTS_39_MARK,
+};
+
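+/*
+ * Pin groups: one group per external IRQ line, plus data/clk/ctrl groups
+ * for each SCIF channel ("_b" variants are the alternate pin locations).
+ */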
+static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(irqc_irq0),
+ SH_PFC_PIN_GROUP(irqc_irq1),
+ SH_PFC_PIN_GROUP(irqc_irq2),
+ SH_PFC_PIN_GROUP(irqc_irq3),
+ SH_PFC_PIN_GROUP(irqc_irq4),
+ SH_PFC_PIN_GROUP(irqc_irq5),
+ SH_PFC_PIN_GROUP(irqc_irq6),
+ SH_PFC_PIN_GROUP(irqc_irq7),
+ SH_PFC_PIN_GROUP(irqc_irq8),
+ SH_PFC_PIN_GROUP(irqc_irq9),
+ SH_PFC_PIN_GROUP(irqc_irq10),
+ SH_PFC_PIN_GROUP(irqc_irq11),
+ SH_PFC_PIN_GROUP(irqc_irq12),
+ SH_PFC_PIN_GROUP(irqc_irq13),
+ SH_PFC_PIN_GROUP(irqc_irq14),
+ SH_PFC_PIN_GROUP(irqc_irq15),
+ SH_PFC_PIN_GROUP(irqc_irq16),
+ SH_PFC_PIN_GROUP(irqc_irq17),
+ SH_PFC_PIN_GROUP(irqc_irq18),
+ SH_PFC_PIN_GROUP(irqc_irq19),
+ SH_PFC_PIN_GROUP(irqc_irq20),
+ SH_PFC_PIN_GROUP(irqc_irq21),
+ SH_PFC_PIN_GROUP(irqc_irq22),
+ SH_PFC_PIN_GROUP(irqc_irq23),
+ SH_PFC_PIN_GROUP(irqc_irq24),
+ SH_PFC_PIN_GROUP(irqc_irq25),
+ SH_PFC_PIN_GROUP(irqc_irq26),
+ SH_PFC_PIN_GROUP(irqc_irq27),
+ SH_PFC_PIN_GROUP(irqc_irq28),
+ SH_PFC_PIN_GROUP(irqc_irq29),
+ SH_PFC_PIN_GROUP(irqc_irq30),
+ SH_PFC_PIN_GROUP(irqc_irq31),
+ SH_PFC_PIN_GROUP(irqc_irq32),
+ SH_PFC_PIN_GROUP(irqc_irq33),
+ SH_PFC_PIN_GROUP(irqc_irq34),
+ SH_PFC_PIN_GROUP(irqc_irq35),
+ SH_PFC_PIN_GROUP(irqc_irq36),
+ SH_PFC_PIN_GROUP(irqc_irq37),
+ SH_PFC_PIN_GROUP(irqc_irq38),
+ SH_PFC_PIN_GROUP(irqc_irq39),
+ SH_PFC_PIN_GROUP(irqc_irq40),
+ SH_PFC_PIN_GROUP(irqc_irq41),
+ SH_PFC_PIN_GROUP(irqc_irq42),
+ SH_PFC_PIN_GROUP(irqc_irq43),
+ SH_PFC_PIN_GROUP(irqc_irq44),
+ SH_PFC_PIN_GROUP(irqc_irq45),
+ SH_PFC_PIN_GROUP(irqc_irq46),
+ SH_PFC_PIN_GROUP(irqc_irq47),
+ SH_PFC_PIN_GROUP(irqc_irq48),
+ SH_PFC_PIN_GROUP(irqc_irq49),
+ SH_PFC_PIN_GROUP(irqc_irq50),
+ SH_PFC_PIN_GROUP(irqc_irq51),
+ SH_PFC_PIN_GROUP(irqc_irq52),
+ SH_PFC_PIN_GROUP(irqc_irq53),
+ SH_PFC_PIN_GROUP(irqc_irq54),
+ SH_PFC_PIN_GROUP(irqc_irq55),
+ SH_PFC_PIN_GROUP(irqc_irq56),
+ SH_PFC_PIN_GROUP(irqc_irq57),
+ SH_PFC_PIN_GROUP(scifa0_data),
+ SH_PFC_PIN_GROUP(scifa0_clk),
+ SH_PFC_PIN_GROUP(scifa0_ctrl),
+ SH_PFC_PIN_GROUP(scifa1_data),
+ SH_PFC_PIN_GROUP(scifa1_clk),
+ SH_PFC_PIN_GROUP(scifa1_ctrl),
+ SH_PFC_PIN_GROUP(scifb0_data),
+ SH_PFC_PIN_GROUP(scifb0_clk),
+ SH_PFC_PIN_GROUP(scifb0_ctrl),
+ SH_PFC_PIN_GROUP(scifb1_data),
+ SH_PFC_PIN_GROUP(scifb1_clk),
+ SH_PFC_PIN_GROUP(scifb1_ctrl),
+ SH_PFC_PIN_GROUP(scifb1_data_b),
+ SH_PFC_PIN_GROUP(scifb1_clk_b),
+ SH_PFC_PIN_GROUP(scifb1_ctrl_b),
+ SH_PFC_PIN_GROUP(scifb2_data),
+ SH_PFC_PIN_GROUP(scifb2_clk),
+ SH_PFC_PIN_GROUP(scifb2_ctrl),
+ SH_PFC_PIN_GROUP(scifb2_data_b),
+ SH_PFC_PIN_GROUP(scifb2_clk_b),
+ SH_PFC_PIN_GROUP(scifb2_ctrl_b),
+ SH_PFC_PIN_GROUP(scifb3_data),
+ SH_PFC_PIN_GROUP(scifb3_clk),
+ SH_PFC_PIN_GROUP(scifb3_ctrl),
+ SH_PFC_PIN_GROUP(scifb3_data_b),
+ SH_PFC_PIN_GROUP(scifb3_clk_b),
+ SH_PFC_PIN_GROUP(scifb3_ctrl_b),
+};
+
+static const char * const irqc_groups[] = {
+ "irqc_irq0",
+ "irqc_irq1",
+ "irqc_irq2",
+ "irqc_irq3",
+ "irqc_irq4",
+ "irqc_irq5",
+ "irqc_irq6",
+ "irqc_irq7",
+ "irqc_irq8",
+ "irqc_irq9",
+ "irqc_irq10",
+ "irqc_irq11",
+ "irqc_irq12",
+ "irqc_irq13",
+ "irqc_irq14",
+ "irqc_irq15",
+ "irqc_irq16",
+ "irqc_irq17",
+ "irqc_irq18",
+ "irqc_irq19",
+ "irqc_irq20",
+ "irqc_irq21",
+ "irqc_irq22",
+ "irqc_irq23",
+ "irqc_irq24",
+ "irqc_irq25",
+ "irqc_irq26",
+ "irqc_irq27",
+ "irqc_irq28",
+ "irqc_irq29",
+ "irqc_irq30",
+ "irqc_irq31",
+ "irqc_irq32",
+ "irqc_irq33",
+ "irqc_irq34",
+ "irqc_irq35",
+ "irqc_irq36",
+ "irqc_irq37",
+ "irqc_irq38",
+ "irqc_irq39",
+ "irqc_irq40",
+ "irqc_irq41",
+ "irqc_irq42",
+ "irqc_irq43",
+ "irqc_irq44",
+ "irqc_irq45",
+ "irqc_irq46",
+ "irqc_irq47",
+ "irqc_irq48",
+ "irqc_irq49",
+ "irqc_irq50",
+ "irqc_irq51",
+ "irqc_irq52",
+ "irqc_irq53",
+ "irqc_irq54",
+ "irqc_irq55",
+ "irqc_irq56",
+ "irqc_irq57",
+};
+
+static const char * const scifa0_groups[] = {
+ "scifa0_data",
+ "scifa0_clk",
+ "scifa0_ctrl",
+};
+
+static const char * const scifa1_groups[] = {
+ "scifa1_data",
+ "scifa1_clk",
+ "scifa1_ctrl",
+};
+
+static const char * const scifb0_groups[] = {
+ "scifb0_data",
+ "scifb0_clk",
+ "scifb0_ctrl",
+};
+
+static const char * const scifb1_groups[] = {
+ "scifb1_data",
+ "scifb1_clk",
+ "scifb1_ctrl",
+ "scifb1_data_b",
+ "scifb1_clk_b",
+ "scifb1_ctrl_b",
+};
+
+static const char * const scifb2_groups[] = {
+ "scifb2_data",
+ "scifb2_clk",
+ "scifb2_ctrl",
+ "scifb2_data_b",
+ "scifb2_clk_b",
+ "scifb2_ctrl_b",
+};
+
+static const char * const scifb3_groups[] = {
+ "scifb3_data",
+ "scifb3_clk",
+ "scifb3_ctrl",
+ "scifb3_data_b",
+ "scifb3_clk_b",
+ "scifb3_ctrl_b",
+};
+
+static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(irqc),
+ SH_PFC_FUNCTION(scifa0),
+ SH_PFC_FUNCTION(scifa1),
+ SH_PFC_FUNCTION(scifb0),
+ SH_PFC_FUNCTION(scifb1),
+ SH_PFC_FUNCTION(scifb2),
+ SH_PFC_FUNCTION(scifb3),
+};
+
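+/*
+ * Each pin has an 8-bit PORTnCR register: the upper bits carry the GPIO
+ * input/output states (filled in by _PCRH) and the low nibble selects
+ * the pin function (0-7).
+ */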
+#undef PORTCR
+#define PORTCR(nr, reg) \
+ { \
+ PINMUX_CFG_REG("PORT" nr "CR", reg, 8, 4) { \
+ _PCRH(PORT##nr##_IN, 0, 0, PORT##nr##_OUT), \
+ PORT##nr##_FN0, PORT##nr##_FN1, \
+ PORT##nr##_FN2, PORT##nr##_FN3, \
+ PORT##nr##_FN4, PORT##nr##_FN5, \
+ PORT##nr##_FN6, PORT##nr##_FN7 } \
+ }
+
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+ PORTCR(0, 0xe6050000),
+ PORTCR(1, 0xe6050001),
+ PORTCR(2, 0xe6050002),
+ PORTCR(3, 0xe6050003),
+ PORTCR(4, 0xe6050004),
+ PORTCR(5, 0xe6050005),
+ PORTCR(6, 0xe6050006),
+ PORTCR(7, 0xe6050007),
+ PORTCR(8, 0xe6050008),
+ PORTCR(9, 0xe6050009),
+ PORTCR(10, 0xe605000A),
+ PORTCR(11, 0xe605000B),
+ PORTCR(12, 0xe605000C),
+ PORTCR(13, 0xe605000D),
+ PORTCR(14, 0xe605000E),
+ PORTCR(15, 0xe605000F),
+ PORTCR(16, 0xe6050010),
+ PORTCR(17, 0xe6050011),
+ PORTCR(18, 0xe6050012),
+ PORTCR(19, 0xe6050013),
+ PORTCR(20, 0xe6050014),
+ PORTCR(21, 0xe6050015),
+ PORTCR(22, 0xe6050016),
+ PORTCR(23, 0xe6050017),
+ PORTCR(24, 0xe6050018),
+ PORTCR(25, 0xe6050019),
+ PORTCR(26, 0xe605001A),
+ PORTCR(27, 0xe605001B),
+ PORTCR(28, 0xe605001C),
+ PORTCR(29, 0xe605001D),
+ PORTCR(30, 0xe605001E),
+ PORTCR(32, 0xe6051020),
+ PORTCR(33, 0xe6051021),
+ PORTCR(34, 0xe6051022),
+ PORTCR(35, 0xe6051023),
+ PORTCR(36, 0xe6051024),
+ PORTCR(37, 0xe6051025),
+ PORTCR(38, 0xe6051026),
+ PORTCR(39, 0xe6051027),
+ PORTCR(40, 0xe6051028),
+ PORTCR(64, 0xe6050040),
+ PORTCR(65, 0xe6050041),
+ PORTCR(66, 0xe6050042),
+ PORTCR(67, 0xe6050043),
+ PORTCR(68, 0xe6050044),
+ PORTCR(69, 0xe6050045),
+ PORTCR(70, 0xe6050046),
+ PORTCR(71, 0xe6050047),
+ PORTCR(72, 0xe6050048),
+ PORTCR(73, 0xe6050049),
+ PORTCR(74, 0xe605004A),
+ PORTCR(75, 0xe605004B),
+ PORTCR(76, 0xe605004C),
+ PORTCR(77, 0xe605004D),
+ PORTCR(78, 0xe605004E),
+ PORTCR(79, 0xe605004F),
+ PORTCR(80, 0xe6050050),
+ PORTCR(81, 0xe6050051),
+ PORTCR(82, 0xe6050052),
+ PORTCR(83, 0xe6050053),
+ PORTCR(84, 0xe6050054),
+ PORTCR(85, 0xe6050055),
+ PORTCR(96, 0xe6051060),
+ PORTCR(97, 0xe6051061),
+ PORTCR(98, 0xe6051062),
+ PORTCR(99, 0xe6051063),
+ PORTCR(100, 0xe6051064),
+ PORTCR(101, 0xe6051065),
+ PORTCR(102, 0xe6051066),
+ PORTCR(103, 0xe6051067),
+ PORTCR(104, 0xe6051068),
+ PORTCR(105, 0xe6051069),
+ PORTCR(106, 0xe605106A),
+ PORTCR(107, 0xe605106B),
+ PORTCR(108, 0xe605106C),
+ PORTCR(109, 0xe605106D),
+ PORTCR(110, 0xe605106E),
+ PORTCR(111, 0xe605106F),
+ PORTCR(112, 0xe6051070),
+ PORTCR(113, 0xe6051071),
+ PORTCR(114, 0xe6051072),
+ PORTCR(115, 0xe6051073),
+ PORTCR(116, 0xe6051074),
+ PORTCR(117, 0xe6051075),
+ PORTCR(118, 0xe6051076),
+ PORTCR(119, 0xe6051077),
+ PORTCR(120, 0xe6051078),
+ PORTCR(121, 0xe6051079),
+ PORTCR(122, 0xe605107A),
+ PORTCR(123, 0xe605107B),
+ PORTCR(124, 0xe605107C),
+ PORTCR(125, 0xe605107D),
+ PORTCR(126, 0xe605107E),
+ PORTCR(128, 0xe6051080),
+ PORTCR(129, 0xe6051081),
+ PORTCR(130, 0xe6051082),
+ PORTCR(131, 0xe6051083),
+ PORTCR(132, 0xe6051084),
+ PORTCR(133, 0xe6051085),
+ PORTCR(134, 0xe6051086),
+ PORTCR(160, 0xe60520A0),
+ PORTCR(161, 0xe60520A1),
+ PORTCR(162, 0xe60520A2),
+ PORTCR(163, 0xe60520A3),
+ PORTCR(164, 0xe60520A4),
+ PORTCR(165, 0xe60520A5),
+ PORTCR(166, 0xe60520A6),
+ PORTCR(167, 0xe60520A7),
+ PORTCR(168, 0xe60520A8),
+ PORTCR(169, 0xe60520A9),
+ PORTCR(170, 0xe60520AA),
+ PORTCR(171, 0xe60520AB),
+ PORTCR(172, 0xe60520AC),
+ PORTCR(173, 0xe60520AD),
+ PORTCR(174, 0xe60520AE),
+ PORTCR(175, 0xe60520AF),
+ PORTCR(176, 0xe60520B0),
+ PORTCR(177, 0xe60520B1),
+ PORTCR(178, 0xe60520B2),
+ PORTCR(192, 0xe60520C0),
+ PORTCR(193, 0xe60520C1),
+ PORTCR(194, 0xe60520C2),
+ PORTCR(195, 0xe60520C3),
+ PORTCR(196, 0xe60520C4),
+ PORTCR(197, 0xe60520C5),
+ PORTCR(198, 0xe60520C6),
+ PORTCR(199, 0xe60520C7),
+ PORTCR(200, 0xe60520C8),
+ PORTCR(201, 0xe60520C9),
+ PORTCR(202, 0xe60520CA),
+ PORTCR(203, 0xe60520CB),
+ PORTCR(204, 0xe60520CC),
+ PORTCR(205, 0xe60520CD),
+ PORTCR(206, 0xe60520CE),
+ PORTCR(207, 0xe60520CF),
+ PORTCR(208, 0xe60520D0),
+ PORTCR(209, 0xe60520D1),
+ PORTCR(210, 0xe60520D2),
+ PORTCR(211, 0xe60520D3),
+ PORTCR(212, 0xe60520D4),
+ PORTCR(213, 0xe60520D5),
+ PORTCR(214, 0xe60520D6),
+ PORTCR(215, 0xe60520D7),
+ PORTCR(216, 0xe60520D8),
+ PORTCR(217, 0xe60520D9),
+ PORTCR(218, 0xe60520DA),
+ PORTCR(219, 0xe60520DB),
+ PORTCR(220, 0xe60520DC),
+ PORTCR(221, 0xe60520DD),
+ PORTCR(222, 0xe60520DE),
+ PORTCR(224, 0xe60520E0),
+ PORTCR(225, 0xe60520E1),
+ PORTCR(226, 0xe60520E2),
+ PORTCR(227, 0xe60520E3),
+ PORTCR(228, 0xe60520E4),
+ PORTCR(229, 0xe60520E5),
+ PORTCR(230, 0xe60520E6),
+ PORTCR(231, 0xe60520E7),
+ PORTCR(232, 0xe60520E8),
+ PORTCR(233, 0xe60520E9),
+ PORTCR(234, 0xe60520EA),
+ PORTCR(235, 0xe60520EB),
+ PORTCR(236, 0xe60520EC),
+ PORTCR(237, 0xe60520ED),
+ PORTCR(238, 0xe60520EE),
+ PORTCR(239, 0xe60520EF),
+ PORTCR(240, 0xe60520F0),
+ PORTCR(241, 0xe60520F1),
+ PORTCR(242, 0xe60520F2),
+ PORTCR(243, 0xe60520F3),
+ PORTCR(244, 0xe60520F4),
+ PORTCR(245, 0xe60520F5),
+ PORTCR(246, 0xe60520F6),
+ PORTCR(247, 0xe60520F7),
+ PORTCR(248, 0xe60520F8),
+ PORTCR(249, 0xe60520F9),
+ PORTCR(250, 0xe60520FA),
+ PORTCR(256, 0xe6052100),
+ PORTCR(257, 0xe6052101),
+ PORTCR(258, 0xe6052102),
+ PORTCR(259, 0xe6052103),
+ PORTCR(260, 0xe6052104),
+ PORTCR(261, 0xe6052105),
+ PORTCR(262, 0xe6052106),
+ PORTCR(263, 0xe6052107),
+ PORTCR(264, 0xe6052108),
+ PORTCR(265, 0xe6052109),
+ PORTCR(266, 0xe605210A),
+ PORTCR(267, 0xe605210B),
+ PORTCR(268, 0xe605210C),
+ PORTCR(269, 0xe605210D),
+ PORTCR(270, 0xe605210E),
+ PORTCR(271, 0xe605210F),
+ PORTCR(272, 0xe6052110),
+ PORTCR(273, 0xe6052111),
+ PORTCR(274, 0xe6052112),
+ PORTCR(275, 0xe6052113),
+ PORTCR(276, 0xe6052114),
+ PORTCR(277, 0xe6052115),
+ PORTCR(278, 0xe6052116),
+ PORTCR(279, 0xe6052117),
+ PORTCR(280, 0xe6052118),
+ PORTCR(281, 0xe6052119),
+ PORTCR(282, 0xe605211A),
+ PORTCR(283, 0xe605211B),
+ PORTCR(288, 0xe6053120),
+ PORTCR(289, 0xe6053121),
+ PORTCR(290, 0xe6053122),
+ PORTCR(291, 0xe6053123),
+ PORTCR(292, 0xe6053124),
+ PORTCR(293, 0xe6053125),
+ PORTCR(294, 0xe6053126),
+ PORTCR(295, 0xe6053127),
+ PORTCR(296, 0xe6053128),
+ PORTCR(297, 0xe6053129),
+ PORTCR(298, 0xe605312A),
+ PORTCR(299, 0xe605312B),
+ PORTCR(300, 0xe605312C),
+ PORTCR(301, 0xe605312D),
+ PORTCR(302, 0xe605312E),
+ PORTCR(303, 0xe605312F),
+ PORTCR(304, 0xe6053130),
+ PORTCR(305, 0xe6053131),
+ PORTCR(306, 0xe6053132),
+ PORTCR(307, 0xe6053133),
+ PORTCR(308, 0xe6053134),
+ PORTCR(320, 0xe6053140),
+ PORTCR(321, 0xe6053141),
+ PORTCR(322, 0xe6053142),
+ PORTCR(323, 0xe6053143),
+ PORTCR(324, 0xe6053144),
+ PORTCR(325, 0xe6053145),
+ PORTCR(326, 0xe6053146),
+ PORTCR(327, 0xe6053147),
+ PORTCR(328, 0xe6053148),
+ PORTCR(329, 0xe6053149),
+
+ { PINMUX_CFG_REG("MSEL1CR", 0xe605800c, 32, 1) {
+ MSEL1CR_31_0, MSEL1CR_31_1,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSEL1CR_27_0, MSEL1CR_27_1,
+ 0, 0,
+ MSEL1CR_25_0, MSEL1CR_25_1,
+ MSEL1CR_24_0, MSEL1CR_24_1,
+ 0, 0,
+ MSEL1CR_22_0, MSEL1CR_22_1,
+ MSEL1CR_21_0, MSEL1CR_21_1,
+ MSEL1CR_20_0, MSEL1CR_20_1,
+ MSEL1CR_19_0, MSEL1CR_19_1,
+ MSEL1CR_18_0, MSEL1CR_18_1,
+ MSEL1CR_17_0, MSEL1CR_17_1,
+ MSEL1CR_16_0, MSEL1CR_16_1,
+ MSEL1CR_15_0, MSEL1CR_15_1,
+ MSEL1CR_14_0, MSEL1CR_14_1,
+ MSEL1CR_13_0, MSEL1CR_13_1,
+ MSEL1CR_12_0, MSEL1CR_12_1,
+ MSEL1CR_11_0, MSEL1CR_11_1,
+ MSEL1CR_10_0, MSEL1CR_10_1,
+ MSEL1CR_09_0, MSEL1CR_09_1,
+ MSEL1CR_08_0, MSEL1CR_08_1,
+ MSEL1CR_07_0, MSEL1CR_07_1,
+ MSEL1CR_06_0, MSEL1CR_06_1,
+ MSEL1CR_05_0, MSEL1CR_05_1,
+ MSEL1CR_04_0, MSEL1CR_04_1,
+ MSEL1CR_03_0, MSEL1CR_03_1,
+ MSEL1CR_02_0, MSEL1CR_02_1,
+ MSEL1CR_01_0, MSEL1CR_01_1,
+ MSEL1CR_00_0, MSEL1CR_00_1,
+ }
+ },
+ { PINMUX_CFG_REG("MSEL3CR", 0xe6058020, 32, 1) {
+ MSEL3CR_31_0, MSEL3CR_31_1,
+ 0, 0,
+ 0, 0,
+ MSEL3CR_28_0, MSEL3CR_28_1,
+ MSEL3CR_27_0, MSEL3CR_27_1,
+ MSEL3CR_26_0, MSEL3CR_26_1,
+ 0, 0,
+ 0, 0,
+ MSEL3CR_23_0, MSEL3CR_23_1,
+ MSEL3CR_22_0, MSEL3CR_22_1,
+ MSEL3CR_21_0, MSEL3CR_21_1,
+ MSEL3CR_20_0, MSEL3CR_20_1,
+ MSEL3CR_19_0, MSEL3CR_19_1,
+ MSEL3CR_18_0, MSEL3CR_18_1,
+ MSEL3CR_17_0, MSEL3CR_17_1,
+ MSEL3CR_16_0, MSEL3CR_16_1,
+ MSEL3CR_15_0, MSEL3CR_15_1,
+ 0, 0,
+ 0, 0,
+ MSEL3CR_12_0, MSEL3CR_12_1,
+ MSEL3CR_11_0, MSEL3CR_11_1,
+ MSEL3CR_10_0, MSEL3CR_10_1,
+ MSEL3CR_09_0, MSEL3CR_09_1,
+ 0, 0,
+ 0, 0,
+ MSEL3CR_06_0, MSEL3CR_06_1,
+ 0, 0,
+ 0, 0,
+ MSEL3CR_03_0, MSEL3CR_03_1,
+ 0, 0,
+ MSEL3CR_01_0, MSEL3CR_01_1,
+ MSEL3CR_00_0, MSEL3CR_00_1,
+ }
+ },
+ { PINMUX_CFG_REG("MSEL4CR", 0xe6058024, 32, 1) {
+ 0, 0,
+ MSEL4CR_30_0, MSEL4CR_30_1,
+ MSEL4CR_29_0, MSEL4CR_29_1,
+ MSEL4CR_28_0, MSEL4CR_28_1,
+ MSEL4CR_27_0, MSEL4CR_27_1,
+ MSEL4CR_26_0, MSEL4CR_26_1,
+ MSEL4CR_25_0, MSEL4CR_25_1,
+ MSEL4CR_24_0, MSEL4CR_24_1,
+ MSEL4CR_23_0, MSEL4CR_23_1,
+ MSEL4CR_22_0, MSEL4CR_22_1,
+ MSEL4CR_21_0, MSEL4CR_21_1,
+ MSEL4CR_20_0, MSEL4CR_20_1,
+ MSEL4CR_19_0, MSEL4CR_19_1,
+ MSEL4CR_18_0, MSEL4CR_18_1,
+ MSEL4CR_17_0, MSEL4CR_17_1,
+ MSEL4CR_16_0, MSEL4CR_16_1,
+ MSEL4CR_15_0, MSEL4CR_15_1,
+ MSEL4CR_14_0, MSEL4CR_14_1,
+ MSEL4CR_13_0, MSEL4CR_13_1,
+ MSEL4CR_12_0, MSEL4CR_12_1,
+ MSEL4CR_11_0, MSEL4CR_11_1,
+ MSEL4CR_10_0, MSEL4CR_10_1,
+ MSEL4CR_09_0, MSEL4CR_09_1,
+ 0, 0,
+ MSEL4CR_07_0, MSEL4CR_07_1,
+ 0, 0,
+ 0, 0,
+ MSEL4CR_04_0, MSEL4CR_04_1,
+ 0, 0,
+ 0, 0,
+ MSEL4CR_01_0, MSEL4CR_01_1,
+ 0, 0,
+ }
+ },
+ { PINMUX_CFG_REG("MSEL5CR", 0xe6058028, 32, 1) {
+ MSEL5CR_31_0, MSEL5CR_31_1,
+ MSEL5CR_30_0, MSEL5CR_30_1,
+ MSEL5CR_29_0, MSEL5CR_29_1,
+ MSEL5CR_28_0, MSEL5CR_28_1,
+ MSEL5CR_27_0, MSEL5CR_27_1,
+ MSEL5CR_26_0, MSEL5CR_26_1,
+ MSEL5CR_25_0, MSEL5CR_25_1,
+ MSEL5CR_24_0, MSEL5CR_24_1,
+ MSEL5CR_23_0, MSEL5CR_23_1,
+ MSEL5CR_22_0, MSEL5CR_22_1,
+ MSEL5CR_21_0, MSEL5CR_21_1,
+ MSEL5CR_20_0, MSEL5CR_20_1,
+ MSEL5CR_19_0, MSEL5CR_19_1,
+ MSEL5CR_18_0, MSEL5CR_18_1,
+ MSEL5CR_17_0, MSEL5CR_17_1,
+ MSEL5CR_16_0, MSEL5CR_16_1,
+ MSEL5CR_15_0, MSEL5CR_15_1,
+ MSEL5CR_14_0, MSEL5CR_14_1,
+ MSEL5CR_13_0, MSEL5CR_13_1,
+ MSEL5CR_12_0, MSEL5CR_12_1,
+ MSEL5CR_11_0, MSEL5CR_11_1,
+ MSEL5CR_10_0, MSEL5CR_10_1,
+ MSEL5CR_09_0, MSEL5CR_09_1,
+ MSEL5CR_08_0, MSEL5CR_08_1,
+ MSEL5CR_07_0, MSEL5CR_07_1,
+ MSEL5CR_06_0, MSEL5CR_06_1,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ }
+ },
+ { PINMUX_CFG_REG("MSEL8CR", 0xe6058034, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSEL8CR_16_0, MSEL8CR_16_1,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSEL8CR_01_0, MSEL8CR_01_1,
+ MSEL8CR_00_0, MSEL8CR_00_1,
+ }
+ },
+ { },
+};
+
+static const struct pinmux_data_reg pinmux_data_regs[] = {
+
+ { PINMUX_DATA_REG("PORTL031_000DR", 0xe6054000, 32) {
+ 0, PORT30_DATA, PORT29_DATA, PORT28_DATA,
+ PORT27_DATA, PORT26_DATA, PORT25_DATA, PORT24_DATA,
+ PORT23_DATA, PORT22_DATA, PORT21_DATA, PORT20_DATA,
+ PORT19_DATA, PORT18_DATA, PORT17_DATA, PORT16_DATA,
+ PORT15_DATA, PORT14_DATA, PORT13_DATA, PORT12_DATA,
+ PORT11_DATA, PORT10_DATA, PORT9_DATA, PORT8_DATA,
+ PORT7_DATA, PORT6_DATA, PORT5_DATA, PORT4_DATA,
+ PORT3_DATA, PORT2_DATA, PORT1_DATA, PORT0_DATA,
+ }
+ },
+ { PINMUX_DATA_REG("PORTD063_032DR", 0xe6055000, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, PORT40_DATA,
+ PORT39_DATA, PORT38_DATA, PORT37_DATA, PORT36_DATA,
+ PORT35_DATA, PORT34_DATA, PORT33_DATA, PORT32_DATA,
+ }
+ },
+ { PINMUX_DATA_REG("PORTL095_064DR", 0xe6054004, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, PORT85_DATA, PORT84_DATA,
+ PORT83_DATA, PORT82_DATA, PORT81_DATA, PORT80_DATA,
+ PORT79_DATA, PORT78_DATA, PORT77_DATA, PORT76_DATA,
+ PORT75_DATA, PORT74_DATA, PORT73_DATA, PORT72_DATA,
+ PORT71_DATA, PORT70_DATA, PORT69_DATA, PORT68_DATA,
+ PORT67_DATA, PORT66_DATA, PORT65_DATA, PORT64_DATA,
+ }
+ },
+ { PINMUX_DATA_REG("PORTD127_096DR", 0xe6055004, 32) {
+ 0, PORT126_DATA, PORT125_DATA, PORT124_DATA,
+ PORT123_DATA, PORT122_DATA, PORT121_DATA, PORT120_DATA,
+ PORT119_DATA, PORT118_DATA, PORT117_DATA, PORT116_DATA,
+ PORT115_DATA, PORT114_DATA, PORT113_DATA, PORT112_DATA,
+ PORT111_DATA, PORT110_DATA, PORT109_DATA, PORT108_DATA,
+ PORT107_DATA, PORT106_DATA, PORT105_DATA, PORT104_DATA,
+ PORT103_DATA, PORT102_DATA, PORT101_DATA, PORT100_DATA,
+ PORT99_DATA, PORT98_DATA, PORT97_DATA, PORT96_DATA,
+ }
+ },
+ { PINMUX_DATA_REG("PORTD159_128DR", 0xe6055008, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, PORT134_DATA, PORT133_DATA, PORT132_DATA,
+ PORT131_DATA, PORT130_DATA, PORT129_DATA, PORT128_DATA,
+ }
+ },
+ { PINMUX_DATA_REG("PORTR191_160DR", 0xe6056000, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, PORT178_DATA, PORT177_DATA, PORT176_DATA,
+ PORT175_DATA, PORT174_DATA, PORT173_DATA, PORT172_DATA,
+ PORT171_DATA, PORT170_DATA, PORT169_DATA, PORT168_DATA,
+ PORT167_DATA, PORT166_DATA, PORT165_DATA, PORT164_DATA,
+ PORT163_DATA, PORT162_DATA, PORT161_DATA, PORT160_DATA,
+ }
+ },
+ { PINMUX_DATA_REG("PORTR223_192DR", 0xe6056004, 32) {
+ 0, PORT222_DATA, PORT221_DATA, PORT220_DATA,
+ PORT219_DATA, PORT218_DATA, PORT217_DATA, PORT216_DATA,
+ PORT215_DATA, PORT214_DATA, PORT213_DATA, PORT212_DATA,
+ PORT211_DATA, PORT210_DATA, PORT209_DATA, PORT208_DATA,
+ PORT207_DATA, PORT206_DATA, PORT205_DATA, PORT204_DATA,
+ PORT203_DATA, PORT202_DATA, PORT201_DATA, PORT200_DATA,
+ PORT199_DATA, PORT198_DATA, PORT197_DATA, PORT196_DATA,
+ PORT195_DATA, PORT194_DATA, PORT193_DATA, PORT192_DATA,
+ }
+ },
+ { PINMUX_DATA_REG("PORTR255_224DR", 0xe6056008, 32) {
+ 0, 0, 0, 0,
+ 0, PORT250_DATA, PORT249_DATA, PORT248_DATA,
+ PORT247_DATA, PORT246_DATA, PORT245_DATA, PORT244_DATA,
+ PORT243_DATA, PORT242_DATA, PORT241_DATA, PORT240_DATA,
+ PORT239_DATA, PORT238_DATA, PORT237_DATA, PORT236_DATA,
+ PORT235_DATA, PORT234_DATA, PORT233_DATA, PORT232_DATA,
+ PORT231_DATA, PORT230_DATA, PORT229_DATA, PORT228_DATA,
+ PORT227_DATA, PORT226_DATA, PORT225_DATA, PORT224_DATA,
+ }
+ },
+ { PINMUX_DATA_REG("PORTR287_256DR", 0xe605600C, 32) {
+ 0, 0, 0, 0,
+ PORT283_DATA, PORT282_DATA, PORT281_DATA, PORT280_DATA,
+ PORT279_DATA, PORT278_DATA, PORT277_DATA, PORT276_DATA,
+ PORT275_DATA, PORT274_DATA, PORT273_DATA, PORT272_DATA,
+ PORT271_DATA, PORT270_DATA, PORT269_DATA, PORT268_DATA,
+ PORT267_DATA, PORT266_DATA, PORT265_DATA, PORT264_DATA,
+ PORT263_DATA, PORT262_DATA, PORT261_DATA, PORT260_DATA,
+ PORT259_DATA, PORT258_DATA, PORT257_DATA, PORT256_DATA,
+ }
+ },
+ { PINMUX_DATA_REG("PORTU319_288DR", 0xe6057000, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, PORT308_DATA,
+ PORT307_DATA, PORT306_DATA, PORT305_DATA, PORT304_DATA,
+ PORT303_DATA, PORT302_DATA, PORT301_DATA, PORT300_DATA,
+ PORT299_DATA, PORT298_DATA, PORT297_DATA, PORT296_DATA,
+ PORT295_DATA, PORT294_DATA, PORT293_DATA, PORT292_DATA,
+ PORT291_DATA, PORT290_DATA, PORT289_DATA, PORT288_DATA,
+ }
+ },
+ { PINMUX_DATA_REG("PORTU351_320DR", 0xe6057004, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, PORT329_DATA, PORT328_DATA,
+ PORT327_DATA, PORT326_DATA, PORT325_DATA, PORT324_DATA,
+ PORT323_DATA, PORT322_DATA, PORT321_DATA, PORT320_DATA,
+ }
+ },
+ { },
+};
+
+static const struct pinmux_irq pinmux_irqs[] = {
+ PINMUX_IRQ(irq_pin(0), 0),
+ PINMUX_IRQ(irq_pin(1), 1),
+ PINMUX_IRQ(irq_pin(2), 2),
+ PINMUX_IRQ(irq_pin(3), 3),
+ PINMUX_IRQ(irq_pin(4), 4),
+ PINMUX_IRQ(irq_pin(5), 5),
+ PINMUX_IRQ(irq_pin(6), 6),
+ PINMUX_IRQ(irq_pin(7), 7),
+ PINMUX_IRQ(irq_pin(8), 8),
+ PINMUX_IRQ(irq_pin(9), 9),
+ PINMUX_IRQ(irq_pin(10), 10),
+ PINMUX_IRQ(irq_pin(11), 11),
+ PINMUX_IRQ(irq_pin(12), 12),
+ PINMUX_IRQ(irq_pin(13), 13),
+ PINMUX_IRQ(irq_pin(14), 14),
+ PINMUX_IRQ(irq_pin(15), 15),
+ PINMUX_IRQ(irq_pin(16), 320),
+ PINMUX_IRQ(irq_pin(17), 321),
+ PINMUX_IRQ(irq_pin(18), 85),
+ PINMUX_IRQ(irq_pin(19), 84),
+ PINMUX_IRQ(irq_pin(20), 160),
+ PINMUX_IRQ(irq_pin(21), 161),
+ PINMUX_IRQ(irq_pin(22), 162),
+ PINMUX_IRQ(irq_pin(23), 163),
+ PINMUX_IRQ(irq_pin(24), 175),
+ PINMUX_IRQ(irq_pin(25), 176),
+ PINMUX_IRQ(irq_pin(26), 177),
+ PINMUX_IRQ(irq_pin(27), 178),
+ PINMUX_IRQ(irq_pin(28), 322),
+ PINMUX_IRQ(irq_pin(29), 323),
+ PINMUX_IRQ(irq_pin(30), 324),
+ PINMUX_IRQ(irq_pin(31), 192),
+ PINMUX_IRQ(irq_pin(32), 193),
+ PINMUX_IRQ(irq_pin(33), 194),
+ PINMUX_IRQ(irq_pin(34), 195),
+ PINMUX_IRQ(irq_pin(35), 196),
+ PINMUX_IRQ(irq_pin(36), 197),
+ PINMUX_IRQ(irq_pin(37), 198),
+ PINMUX_IRQ(irq_pin(38), 199),
+ PINMUX_IRQ(irq_pin(39), 200),
+ PINMUX_IRQ(irq_pin(40), 66),
+ PINMUX_IRQ(irq_pin(41), 102),
+ PINMUX_IRQ(irq_pin(42), 103),
+ PINMUX_IRQ(irq_pin(43), 109),
+ PINMUX_IRQ(irq_pin(44), 110),
+ PINMUX_IRQ(irq_pin(45), 111),
+ PINMUX_IRQ(irq_pin(46), 112),
+ PINMUX_IRQ(irq_pin(47), 113),
+ PINMUX_IRQ(irq_pin(48), 114),
+ PINMUX_IRQ(irq_pin(49), 115),
+ PINMUX_IRQ(irq_pin(50), 301),
+ PINMUX_IRQ(irq_pin(51), 290),
+ PINMUX_IRQ(irq_pin(52), 296),
+ PINMUX_IRQ(irq_pin(53), 325),
+ PINMUX_IRQ(irq_pin(54), 326),
+ PINMUX_IRQ(irq_pin(55), 327),
+ PINMUX_IRQ(irq_pin(56), 328),
+ PINMUX_IRQ(irq_pin(57), 329),
+};
+
+#define PORTCR_PULMD_OFF (0 << 6)
+#define PORTCR_PULMD_DOWN (2 << 6)
+#define PORTCR_PULMD_UP (3 << 6)
+#define PORTCR_PULMD_MASK (3 << 6)
+
+static const unsigned int r8a73a4_portcr_offsets[] = {
+ 0x00000000, 0x00001000, 0x00000000, 0x00001000,
+ 0x00001000, 0x00002000, 0x00002000, 0x00002000,
+ 0x00002000, 0x00003000, 0x00003000,
+};
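/*
 * Annotation (not from the original patch): the table above is indexed by
 * pin number / 32. Each group of 32 ports shares one PORTCR bank, so the
 * address computed in the helpers below works out to window base + bank
 * offset + port number; e.g. port 160 -> 0xe6052000 + 160 = 0xe60520a0,
 * matching the PORTCR() entries in pinmux_config_regs[] above.
 */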
+
+static unsigned int r8a73a4_pinmux_get_bias(struct sh_pfc *pfc,
+ unsigned int pin)
+{
+ void __iomem *addr;
+
+ addr = pfc->window->virt + r8a73a4_portcr_offsets[pin >> 5] + pin;
+
+ switch (ioread8(addr) & PORTCR_PULMD_MASK) {
+ case PORTCR_PULMD_UP:
+ return PIN_CONFIG_BIAS_PULL_UP;
+ case PORTCR_PULMD_DOWN:
+ return PIN_CONFIG_BIAS_PULL_DOWN;
+ case PORTCR_PULMD_OFF:
+ default:
+ return PIN_CONFIG_BIAS_DISABLE;
+ }
+}
+
+static void r8a73a4_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
+ unsigned int bias)
+{
+ void __iomem *addr;
+ u32 value;
+
+ addr = pfc->window->virt + r8a73a4_portcr_offsets[pin >> 5] + pin;
+ value = ioread8(addr) & ~PORTCR_PULMD_MASK;
+
+ switch (bias) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ value |= PORTCR_PULMD_UP;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ value |= PORTCR_PULMD_DOWN;
+ break;
+ }
+
+ iowrite8(value, addr);
+}
+
+static const struct sh_pfc_soc_operations r8a73a4_pinmux_ops = {
+ .get_bias = r8a73a4_pinmux_get_bias,
+ .set_bias = r8a73a4_pinmux_set_bias,
+};
+
+const struct sh_pfc_soc_info r8a73a4_pinmux_info = {
+ .name = "r8a73a4_pfc",
+ .ops = &r8a73a4_pinmux_ops,
+
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+
+ .ranges = pinmux_ranges,
+ .nr_ranges = ARRAY_SIZE(pinmux_ranges),
+
+ .groups = pinmux_groups,
+ .nr_groups = ARRAY_SIZE(pinmux_groups),
+ .functions = pinmux_functions,
+ .nr_functions = ARRAY_SIZE(pinmux_functions),
+
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+
+ .gpio_irq = pinmux_irqs,
+ .gpio_irq_size = ARRAY_SIZE(pinmux_irqs),
+};
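The bias helpers above are the only chip-specific logic in this file; the rest
is table data. As a rough sketch of how the sh-pfc core would be expected to
route a generic pinconf bias request through the new callbacks (the helper
below is illustrative only; its name, error codes, and surrounding plumbing
are assumptions rather than part of this patch):

	static int example_apply_bias(struct sh_pfc *pfc, unsigned int pin,
				      unsigned int bias)
	{
		const struct sh_pfc_soc_operations *ops = pfc->info->ops;

		if (!ops || !ops->set_bias || !ops->get_bias)
			return -ENOTSUPP;	/* no pull-up/down control */

		/* bias is one of PIN_CONFIG_BIAS_{DISABLE,PULL_UP,PULL_DOWN} */
		ops->set_bias(pfc, pin, bias);

		/* read the PULMD field back to confirm the setting took */
		return ops->get_bias(pfc, pin) == bias ? 0 : -EINVAL;
	}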
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
index 214788c..bbd87d2 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
@@ -577,7 +577,7 @@ enum {
PINMUX_MARK_END,
};
-static pinmux_enum_t pinmux_data[] = {
+static const pinmux_enum_t pinmux_data[] = {
/* specify valid pin states for each pin in GPIO mode */
/* I/O and Pull U/D */
@@ -1654,11 +1654,532 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(TRACEAUD_FROM_MEMC_MARK, MSEL5CR_30_1, MSEL5CR_29_0),
};
-static struct pinmux_gpio pinmux_gpios[] = {
-
- /* PORT */
+static struct sh_pfc_pin pinmux_pins[] = {
GPIO_PORT_ALL(),
+};
+
+/* - LCD0 ------------------------------------------------------------------- */
+static const unsigned int lcd0_data8_pins[] = {
+ /* D[0:7] */
+ 58, 57, 56, 55, 54, 53, 52, 51,
+};
+static const unsigned int lcd0_data8_mux[] = {
+ LCD0_D0_MARK, LCD0_D1_MARK, LCD0_D2_MARK, LCD0_D3_MARK,
+ LCD0_D4_MARK, LCD0_D5_MARK, LCD0_D6_MARK, LCD0_D7_MARK,
+};
+static const unsigned int lcd0_data9_pins[] = {
+ /* D[0:8] */
+ 58, 57, 56, 55, 54, 53, 52, 51,
+ 50,
+};
+static const unsigned int lcd0_data9_mux[] = {
+ LCD0_D0_MARK, LCD0_D1_MARK, LCD0_D2_MARK, LCD0_D3_MARK,
+ LCD0_D4_MARK, LCD0_D5_MARK, LCD0_D6_MARK, LCD0_D7_MARK,
+ LCD0_D8_MARK,
+};
+static const unsigned int lcd0_data12_pins[] = {
+ /* D[0:11] */
+ 58, 57, 56, 55, 54, 53, 52, 51,
+ 50, 49, 48, 47,
+};
+static const unsigned int lcd0_data12_mux[] = {
+ LCD0_D0_MARK, LCD0_D1_MARK, LCD0_D2_MARK, LCD0_D3_MARK,
+ LCD0_D4_MARK, LCD0_D5_MARK, LCD0_D6_MARK, LCD0_D7_MARK,
+ LCD0_D8_MARK, LCD0_D9_MARK, LCD0_D10_MARK, LCD0_D11_MARK,
+};
+static const unsigned int lcd0_data16_pins[] = {
+ /* D[0:15] */
+ 58, 57, 56, 55, 54, 53, 52, 51,
+ 50, 49, 48, 47, 46, 45, 44, 43,
+};
+static const unsigned int lcd0_data16_mux[] = {
+ LCD0_D0_MARK, LCD0_D1_MARK, LCD0_D2_MARK, LCD0_D3_MARK,
+ LCD0_D4_MARK, LCD0_D5_MARK, LCD0_D6_MARK, LCD0_D7_MARK,
+ LCD0_D8_MARK, LCD0_D9_MARK, LCD0_D10_MARK, LCD0_D11_MARK,
+ LCD0_D12_MARK, LCD0_D13_MARK, LCD0_D14_MARK, LCD0_D15_MARK,
+};
+static const unsigned int lcd0_data18_pins[] = {
+ /* D[0:17] */
+ 58, 57, 56, 55, 54, 53, 52, 51,
+ 50, 49, 48, 47, 46, 45, 44, 43,
+ 42, 41,
+};
+static const unsigned int lcd0_data18_mux[] = {
+ LCD0_D0_MARK, LCD0_D1_MARK, LCD0_D2_MARK, LCD0_D3_MARK,
+ LCD0_D4_MARK, LCD0_D5_MARK, LCD0_D6_MARK, LCD0_D7_MARK,
+ LCD0_D8_MARK, LCD0_D9_MARK, LCD0_D10_MARK, LCD0_D11_MARK,
+ LCD0_D12_MARK, LCD0_D13_MARK, LCD0_D14_MARK, LCD0_D15_MARK,
+ LCD0_D16_MARK, LCD0_D17_MARK,
+};
+static const unsigned int lcd0_data24_0_pins[] = {
+ /* D[0:23] */
+ 58, 57, 56, 55, 54, 53, 52, 51,
+ 50, 49, 48, 47, 46, 45, 44, 43,
+ 42, 41, 40, 4, 3, 2, 0, 1,
+};
+static const unsigned int lcd0_data24_0_mux[] = {
+ LCD0_D0_MARK, LCD0_D1_MARK, LCD0_D2_MARK, LCD0_D3_MARK,
+ LCD0_D4_MARK, LCD0_D5_MARK, LCD0_D6_MARK, LCD0_D7_MARK,
+ LCD0_D8_MARK, LCD0_D9_MARK, LCD0_D10_MARK, LCD0_D11_MARK,
+ LCD0_D12_MARK, LCD0_D13_MARK, LCD0_D14_MARK, LCD0_D15_MARK,
+ LCD0_D16_MARK, LCD0_D17_MARK, LCD0_D18_PORT40_MARK, LCD0_D19_PORT4_MARK,
+ LCD0_D20_PORT3_MARK, LCD0_D21_PORT2_MARK, LCD0_D22_PORT0_MARK,
+ LCD0_D23_PORT1_MARK,
+};
+static const unsigned int lcd0_data24_1_pins[] = {
+ /* D[0:23] */
+ 58, 57, 56, 55, 54, 53, 52, 51,
+ 50, 49, 48, 47, 46, 45, 44, 43,
+ 42, 41, 163, 162, 161, 158, 160, 159,
+};
+static const unsigned int lcd0_data24_1_mux[] = {
+ LCD0_D0_MARK, LCD0_D1_MARK, LCD0_D2_MARK, LCD0_D3_MARK,
+ LCD0_D4_MARK, LCD0_D5_MARK, LCD0_D6_MARK, LCD0_D7_MARK,
+ LCD0_D8_MARK, LCD0_D9_MARK, LCD0_D10_MARK, LCD0_D11_MARK,
+ LCD0_D12_MARK, LCD0_D13_MARK, LCD0_D14_MARK, LCD0_D15_MARK,
+ LCD0_D16_MARK, LCD0_D17_MARK, LCD0_D18_PORT163_MARK,
+ LCD0_D19_PORT162_MARK, LCD0_D20_PORT161_MARK, LCD0_D21_PORT158_MARK,
+ LCD0_D22_PORT160_MARK, LCD0_D23_PORT159_MARK,
+};
+static const unsigned int lcd0_display_pins[] = {
+ /* DON, VCPWC, VEPWC */
+ 61, 59, 60,
+};
+static const unsigned int lcd0_display_mux[] = {
+ LCD0_DON_MARK, LCD0_VCPWC_MARK, LCD0_VEPWC_MARK,
+};
+static const unsigned int lcd0_lclk_0_pins[] = {
+ /* LCLK */
+ 102,
+};
+static const unsigned int lcd0_lclk_0_mux[] = {
+ LCD0_LCLK_PORT102_MARK,
+};
+static const unsigned int lcd0_lclk_1_pins[] = {
+ /* LCLK */
+ 165,
+};
+static const unsigned int lcd0_lclk_1_mux[] = {
+ LCD0_LCLK_PORT165_MARK,
+};
+static const unsigned int lcd0_sync_pins[] = {
+ /* VSYN, HSYN, DCK, DISP */
+ 63, 64, 62, 65,
+};
+static const unsigned int lcd0_sync_mux[] = {
+ LCD0_VSYN_MARK, LCD0_HSYN_MARK, LCD0_DCK_MARK, LCD0_DISP_MARK,
+};
+static const unsigned int lcd0_sys_pins[] = {
+ /* CS, WR, RD, RS */
+ 64, 62, 164, 65,
+};
+static const unsigned int lcd0_sys_mux[] = {
+ LCD0_CS_MARK, LCD0_WR_MARK, LCD0_RD_MARK, LCD0_RS_MARK,
+};
+/* - LCD1 ------------------------------------------------------------------- */
+static const unsigned int lcd1_data8_pins[] = {
+ /* D[0:7] */
+ 4, 3, 2, 1, 0, 91, 92, 23,
+};
+static const unsigned int lcd1_data8_mux[] = {
+ LCD1_D0_MARK, LCD1_D1_MARK, LCD1_D2_MARK, LCD1_D3_MARK,
+ LCD1_D4_MARK, LCD1_D5_MARK, LCD1_D6_MARK, LCD1_D7_MARK,
+};
+static const unsigned int lcd1_data9_pins[] = {
+ /* D[0:8] */
+ 4, 3, 2, 1, 0, 91, 92, 23,
+ 93,
+};
+static const unsigned int lcd1_data9_mux[] = {
+ LCD1_D0_MARK, LCD1_D1_MARK, LCD1_D2_MARK, LCD1_D3_MARK,
+ LCD1_D4_MARK, LCD1_D5_MARK, LCD1_D6_MARK, LCD1_D7_MARK,
+ LCD1_D8_MARK,
+};
+static const unsigned int lcd1_data12_pins[] = {
+ /* D[0:11] */
+ 4, 3, 2, 1, 0, 91, 92, 23,
+ 93, 94, 21, 201,
+};
+static const unsigned int lcd1_data12_mux[] = {
+ LCD1_D0_MARK, LCD1_D1_MARK, LCD1_D2_MARK, LCD1_D3_MARK,
+ LCD1_D4_MARK, LCD1_D5_MARK, LCD1_D6_MARK, LCD1_D7_MARK,
+ LCD1_D8_MARK, LCD1_D9_MARK, LCD1_D10_MARK, LCD1_D11_MARK,
+};
+static const unsigned int lcd1_data16_pins[] = {
+ /* D[0:15] */
+ 4, 3, 2, 1, 0, 91, 92, 23,
+ 93, 94, 21, 201, 200, 199, 196, 195,
+};
+static const unsigned int lcd1_data16_mux[] = {
+ LCD1_D0_MARK, LCD1_D1_MARK, LCD1_D2_MARK, LCD1_D3_MARK,
+ LCD1_D4_MARK, LCD1_D5_MARK, LCD1_D6_MARK, LCD1_D7_MARK,
+ LCD1_D8_MARK, LCD1_D9_MARK, LCD1_D10_MARK, LCD1_D11_MARK,
+ LCD1_D12_MARK, LCD1_D13_MARK, LCD1_D14_MARK, LCD1_D15_MARK,
+};
+static const unsigned int lcd1_data18_pins[] = {
+ /* D[0:17] */
+ 4, 3, 2, 1, 0, 91, 92, 23,
+ 93, 94, 21, 201, 200, 199, 196, 195,
+ 194, 193,
+};
+static const unsigned int lcd1_data18_mux[] = {
+ LCD1_D0_MARK, LCD1_D1_MARK, LCD1_D2_MARK, LCD1_D3_MARK,
+ LCD1_D4_MARK, LCD1_D5_MARK, LCD1_D6_MARK, LCD1_D7_MARK,
+ LCD1_D8_MARK, LCD1_D9_MARK, LCD1_D10_MARK, LCD1_D11_MARK,
+ LCD1_D12_MARK, LCD1_D13_MARK, LCD1_D14_MARK, LCD1_D15_MARK,
+ LCD1_D16_MARK, LCD1_D17_MARK,
+};
+static const unsigned int lcd1_data24_pins[] = {
+ /* D[0:23] */
+ 4, 3, 2, 1, 0, 91, 92, 23,
+ 93, 94, 21, 201, 200, 199, 196, 195,
+ 194, 193, 198, 197, 75, 74, 15, 14,
+};
+static const unsigned int lcd1_data24_mux[] = {
+ LCD1_D0_MARK, LCD1_D1_MARK, LCD1_D2_MARK, LCD1_D3_MARK,
+ LCD1_D4_MARK, LCD1_D5_MARK, LCD1_D6_MARK, LCD1_D7_MARK,
+ LCD1_D8_MARK, LCD1_D9_MARK, LCD1_D10_MARK, LCD1_D11_MARK,
+ LCD1_D12_MARK, LCD1_D13_MARK, LCD1_D14_MARK, LCD1_D15_MARK,
+ LCD1_D16_MARK, LCD1_D17_MARK, LCD1_D18_MARK, LCD1_D19_MARK,
+ LCD1_D20_MARK, LCD1_D21_MARK, LCD1_D22_MARK, LCD1_D23_MARK,
+};
+static const unsigned int lcd1_display_pins[] = {
+ /* DON, VCPWC, VEPWC */
+ 100, 5, 6,
+};
+static const unsigned int lcd1_display_mux[] = {
+ LCD1_DON_MARK, LCD1_VCPWC_MARK, LCD1_VEPWC_MARK,
+};
+static const unsigned int lcd1_lclk_pins[] = {
+ /* LCLK */
+ 40,
+};
+static const unsigned int lcd1_lclk_mux[] = {
+ LCD1_LCLK_MARK,
+};
+static const unsigned int lcd1_sync_pins[] = {
+ /* VSYN, HSYN, DCK, DISP */
+ 98, 97, 99, 12,
+};
+static const unsigned int lcd1_sync_mux[] = {
+ LCD1_VSYN_MARK, LCD1_HSYN_MARK, LCD1_DCK_MARK, LCD1_DISP_MARK,
+};
+static const unsigned int lcd1_sys_pins[] = {
+ /* CS, WR, RD, RS */
+ 97, 99, 13, 12,
+};
+static const unsigned int lcd1_sys_mux[] = {
+ LCD1_CS_MARK, LCD1_WR_MARK, LCD1_RD_MARK, LCD1_RS_MARK,
+};
+/* - MMCIF ------------------------------------------------------------------ */
+static const unsigned int mmc0_data1_0_pins[] = {
+ /* D[0] */
+ 68,
+};
+static const unsigned int mmc0_data1_0_mux[] = {
+ MMC0_D0_PORT68_MARK,
+};
+static const unsigned int mmc0_data4_0_pins[] = {
+ /* D[0:3] */
+ 68, 69, 70, 71,
+};
+static const unsigned int mmc0_data4_0_mux[] = {
+ MMC0_D0_PORT68_MARK, MMC0_D1_PORT69_MARK, MMC0_D2_PORT70_MARK, MMC0_D3_PORT71_MARK,
+};
+static const unsigned int mmc0_data8_0_pins[] = {
+ /* D[0:7] */
+ 68, 69, 70, 71, 72, 73, 74, 75,
+};
+static const unsigned int mmc0_data8_0_mux[] = {
+ MMC0_D0_PORT68_MARK, MMC0_D1_PORT69_MARK, MMC0_D2_PORT70_MARK, MMC0_D3_PORT71_MARK,
+ MMC0_D4_PORT72_MARK, MMC0_D5_PORT73_MARK, MMC0_D6_PORT74_MARK, MMC0_D7_PORT75_MARK,
+};
+static const unsigned int mmc0_ctrl_0_pins[] = {
+ /* CMD, CLK */
+ 67, 66,
+};
+static const unsigned int mmc0_ctrl_0_mux[] = {
+ MMC0_CMD_PORT67_MARK, MMC0_CLK_PORT66_MARK,
+};
+
+static const unsigned int mmc0_data1_1_pins[] = {
+ /* D[0] */
+ 149,
+};
+static const unsigned int mmc0_data1_1_mux[] = {
+ MMC1_D0_PORT149_MARK,
+};
+static const unsigned int mmc0_data4_1_pins[] = {
+ /* D[0:3] */
+ 149, 148, 147, 146,
+};
+static const unsigned int mmc0_data4_1_mux[] = {
+ MMC1_D0_PORT149_MARK, MMC1_D1_PORT148_MARK, MMC1_D2_PORT147_MARK, MMC1_D3_PORT146_MARK,
+};
+static const unsigned int mmc0_data8_1_pins[] = {
+ /* D[0:7] */
+ 149, 148, 147, 146, 145, 144, 143, 142,
+};
+static const unsigned int mmc0_data8_1_mux[] = {
+ MMC1_D0_PORT149_MARK, MMC1_D1_PORT148_MARK, MMC1_D2_PORT147_MARK, MMC1_D3_PORT146_MARK,
+ MMC1_D4_PORT145_MARK, MMC1_D5_PORT144_MARK, MMC1_D6_PORT143_MARK, MMC1_D7_PORT142_MARK,
+};
+static const unsigned int mmc0_ctrl_1_pins[] = {
+ /* CMD, CLK */
+ 104, 103,
+};
+static const unsigned int mmc0_ctrl_1_mux[] = {
+ MMC1_CMD_PORT104_MARK, MMC1_CLK_PORT103_MARK,
+};
+/* - SDHI0 ------------------------------------------------------------------ */
+static const unsigned int sdhi0_data1_pins[] = {
+ /* D0 */
+ 77,
+};
+static const unsigned int sdhi0_data1_mux[] = {
+ SDHI0_D0_MARK,
+};
+static const unsigned int sdhi0_data4_pins[] = {
+ /* D[0:3] */
+ 77, 78, 79, 80,
+};
+static const unsigned int sdhi0_data4_mux[] = {
+ SDHI0_D0_MARK, SDHI0_D1_MARK, SDHI0_D2_MARK, SDHI0_D3_MARK,
+};
+static const unsigned int sdhi0_ctrl_pins[] = {
+ /* CMD, CLK */
+ 76, 82,
+};
+static const unsigned int sdhi0_ctrl_mux[] = {
+ SDHI0_CMD_MARK, SDHI0_CLK_MARK,
+};
+static const unsigned int sdhi0_cd_pins[] = {
+ /* CD */
+ 81,
+};
+static const unsigned int sdhi0_cd_mux[] = {
+ SDHI0_CD_MARK,
+};
+static const unsigned int sdhi0_wp_pins[] = {
+ /* WP */
+ 83,
+};
+static const unsigned int sdhi0_wp_mux[] = {
+ SDHI0_WP_MARK,
+};
+/* - SDHI1 ------------------------------------------------------------------ */
+static const unsigned int sdhi1_data1_pins[] = {
+ /* D0 */
+ 68,
+};
+static const unsigned int sdhi1_data1_mux[] = {
+ SDHI1_D0_MARK,
+};
+static const unsigned int sdhi1_data4_pins[] = {
+ /* D[0:3] */
+ 68, 69, 70, 71,
+};
+static const unsigned int sdhi1_data4_mux[] = {
+ SDHI1_D0_MARK, SDHI1_D1_MARK, SDHI1_D2_MARK, SDHI1_D3_MARK,
+};
+static const unsigned int sdhi1_ctrl_pins[] = {
+ /* CMD, CLK */
+ 67, 66,
+};
+static const unsigned int sdhi1_ctrl_mux[] = {
+ SDHI1_CMD_MARK, SDHI1_CLK_MARK,
+};
+static const unsigned int sdhi1_cd_pins[] = {
+ /* CD */
+ 72,
+};
+static const unsigned int sdhi1_cd_mux[] = {
+ SDHI1_CD_MARK,
+};
+static const unsigned int sdhi1_wp_pins[] = {
+ /* WP */
+ 73,
+};
+static const unsigned int sdhi1_wp_mux[] = {
+ SDHI1_WP_MARK,
+};
+/* - SDHI2 ------------------------------------------------------------------ */
+static const unsigned int sdhi2_data1_pins[] = {
+ /* D0 */
+ 205,
+};
+static const unsigned int sdhi2_data1_mux[] = {
+ SDHI2_D0_MARK,
+};
+static const unsigned int sdhi2_data4_pins[] = {
+ /* D[0:3] */
+ 205, 206, 207, 208,
+};
+static const unsigned int sdhi2_data4_mux[] = {
+ SDHI2_D0_MARK, SDHI2_D1_MARK, SDHI2_D2_MARK, SDHI2_D3_MARK,
+};
+static const unsigned int sdhi2_ctrl_pins[] = {
+ /* CMD, CLK */
+ 204, 203,
+};
+static const unsigned int sdhi2_ctrl_mux[] = {
+ SDHI2_CMD_MARK, SDHI2_CLK_MARK,
+};
+static const unsigned int sdhi2_cd_0_pins[] = {
+ /* CD */
+ 202,
+};
+static const unsigned int sdhi2_cd_0_mux[] = {
+ SDHI2_CD_PORT202_MARK,
+};
+static const unsigned int sdhi2_wp_0_pins[] = {
+ /* WP */
+ 177,
+};
+static const unsigned int sdhi2_wp_0_mux[] = {
+ SDHI2_WP_PORT177_MARK,
+};
+static const unsigned int sdhi2_cd_1_pins[] = {
+ /* CD */
+ 24,
+};
+static const unsigned int sdhi2_cd_1_mux[] = {
+ SDHI2_CD_PORT24_MARK,
+};
+static const unsigned int sdhi2_wp_1_pins[] = {
+ /* WP */
+ 25,
+};
+static const unsigned int sdhi2_wp_1_mux[] = {
+ SDHI2_WP_PORT25_MARK,
+};
+static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(lcd0_data8),
+ SH_PFC_PIN_GROUP(lcd0_data9),
+ SH_PFC_PIN_GROUP(lcd0_data12),
+ SH_PFC_PIN_GROUP(lcd0_data16),
+ SH_PFC_PIN_GROUP(lcd0_data18),
+ SH_PFC_PIN_GROUP(lcd0_data24_0),
+ SH_PFC_PIN_GROUP(lcd0_data24_1),
+ SH_PFC_PIN_GROUP(lcd0_display),
+ SH_PFC_PIN_GROUP(lcd0_lclk_0),
+ SH_PFC_PIN_GROUP(lcd0_lclk_1),
+ SH_PFC_PIN_GROUP(lcd0_sync),
+ SH_PFC_PIN_GROUP(lcd0_sys),
+ SH_PFC_PIN_GROUP(lcd1_data8),
+ SH_PFC_PIN_GROUP(lcd1_data9),
+ SH_PFC_PIN_GROUP(lcd1_data12),
+ SH_PFC_PIN_GROUP(lcd1_data16),
+ SH_PFC_PIN_GROUP(lcd1_data18),
+ SH_PFC_PIN_GROUP(lcd1_data24),
+ SH_PFC_PIN_GROUP(lcd1_display),
+ SH_PFC_PIN_GROUP(lcd1_lclk),
+ SH_PFC_PIN_GROUP(lcd1_sync),
+ SH_PFC_PIN_GROUP(lcd1_sys),
+ SH_PFC_PIN_GROUP(mmc0_data1_0),
+ SH_PFC_PIN_GROUP(mmc0_data4_0),
+ SH_PFC_PIN_GROUP(mmc0_data8_0),
+ SH_PFC_PIN_GROUP(mmc0_ctrl_0),
+ SH_PFC_PIN_GROUP(mmc0_data1_1),
+ SH_PFC_PIN_GROUP(mmc0_data4_1),
+ SH_PFC_PIN_GROUP(mmc0_data8_1),
+ SH_PFC_PIN_GROUP(mmc0_ctrl_1),
+ SH_PFC_PIN_GROUP(sdhi0_data1),
+ SH_PFC_PIN_GROUP(sdhi0_data4),
+ SH_PFC_PIN_GROUP(sdhi0_ctrl),
+ SH_PFC_PIN_GROUP(sdhi0_cd),
+ SH_PFC_PIN_GROUP(sdhi0_wp),
+ SH_PFC_PIN_GROUP(sdhi1_data1),
+ SH_PFC_PIN_GROUP(sdhi1_data4),
+ SH_PFC_PIN_GROUP(sdhi1_ctrl),
+ SH_PFC_PIN_GROUP(sdhi1_cd),
+ SH_PFC_PIN_GROUP(sdhi1_wp),
+ SH_PFC_PIN_GROUP(sdhi2_data1),
+ SH_PFC_PIN_GROUP(sdhi2_data4),
+ SH_PFC_PIN_GROUP(sdhi2_ctrl),
+ SH_PFC_PIN_GROUP(sdhi2_cd_0),
+ SH_PFC_PIN_GROUP(sdhi2_wp_0),
+ SH_PFC_PIN_GROUP(sdhi2_cd_1),
+ SH_PFC_PIN_GROUP(sdhi2_wp_1),
+};
+
+static const char * const lcd0_groups[] = {
+ "lcd0_data8",
+ "lcd0_data9",
+ "lcd0_data12",
+ "lcd0_data16",
+ "lcd0_data18",
+ "lcd0_data24_0",
+ "lcd0_data24_1",
+ "lcd0_display",
+ "lcd0_lclk_0",
+ "lcd0_lclk_1",
+ "lcd0_sync",
+ "lcd0_sys",
+};
+
+static const char * const lcd1_groups[] = {
+ "lcd1_data8",
+ "lcd1_data9",
+ "lcd1_data12",
+ "lcd1_data16",
+ "lcd1_data18",
+ "lcd1_data24",
+ "lcd1_display",
+ "lcd1_lclk",
+ "lcd1_sync",
+ "lcd1_sys",
+};
+
+static const char * const mmc0_groups[] = {
+ "mmc0_data1_0",
+ "mmc0_data4_0",
+ "mmc0_data8_0",
+ "mmc0_ctrl_0",
+ "mmc0_data1_1",
+ "mmc0_data4_1",
+ "mmc0_data8_1",
+ "mmc0_ctrl_1",
+};
+
+static const char * const sdhi0_groups[] = {
+ "sdhi0_data1",
+ "sdhi0_data4",
+ "sdhi0_ctrl",
+ "sdhi0_cd",
+ "sdhi0_wp",
+};
+
+static const char * const sdhi1_groups[] = {
+ "sdhi1_data1",
+ "sdhi1_data4",
+ "sdhi1_ctrl",
+ "sdhi1_cd",
+ "sdhi1_wp",
+};
+
+static const char * const sdhi2_groups[] = {
+ "sdhi2_data1",
+ "sdhi2_data4",
+ "sdhi2_ctrl",
+ "sdhi2_cd_0",
+ "sdhi2_wp_0",
+ "sdhi2_cd_1",
+ "sdhi2_wp_1",
+};
+
+static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(lcd0),
+ SH_PFC_FUNCTION(lcd1),
+ SH_PFC_FUNCTION(mmc0),
+ SH_PFC_FUNCTION(sdhi0),
+ SH_PFC_FUNCTION(sdhi1),
+ SH_PFC_FUNCTION(sdhi2),
+};
+
+#define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins)
+
+static const struct pinmux_func pinmux_func_gpios[] = {
/* IRQ */
GPIO_FN(IRQ0_PORT2), GPIO_FN(IRQ0_PORT13),
GPIO_FN(IRQ1),
@@ -1792,43 +2313,6 @@ static struct pinmux_gpio pinmux_gpios[] = {
GPIO_FN(SCIFB_RTS_PORT172),
GPIO_FN(SCIFB_CTS_PORT173),
- /* LCD0 */
- GPIO_FN(LCD0_D0), GPIO_FN(LCD0_D1), GPIO_FN(LCD0_D2),
- GPIO_FN(LCD0_D3), GPIO_FN(LCD0_D4), GPIO_FN(LCD0_D5),
- GPIO_FN(LCD0_D6), GPIO_FN(LCD0_D7), GPIO_FN(LCD0_D8),
- GPIO_FN(LCD0_D9), GPIO_FN(LCD0_D10), GPIO_FN(LCD0_D11),
- GPIO_FN(LCD0_D12), GPIO_FN(LCD0_D13), GPIO_FN(LCD0_D14),
- GPIO_FN(LCD0_D15), GPIO_FN(LCD0_D16), GPIO_FN(LCD0_D17),
- GPIO_FN(LCD0_DON), GPIO_FN(LCD0_VCPWC), GPIO_FN(LCD0_VEPWC),
- GPIO_FN(LCD0_DCK), GPIO_FN(LCD0_VSYN),
- GPIO_FN(LCD0_HSYN), GPIO_FN(LCD0_DISP),
- GPIO_FN(LCD0_WR), GPIO_FN(LCD0_RD),
- GPIO_FN(LCD0_CS), GPIO_FN(LCD0_RS),
-
- GPIO_FN(LCD0_D18_PORT163), GPIO_FN(LCD0_D19_PORT162),
- GPIO_FN(LCD0_D20_PORT161), GPIO_FN(LCD0_D21_PORT158),
- GPIO_FN(LCD0_D22_PORT160), GPIO_FN(LCD0_D23_PORT159),
- GPIO_FN(LCD0_LCLK_PORT165), /* MSEL5CR_6_1 */
-
- GPIO_FN(LCD0_D18_PORT40), GPIO_FN(LCD0_D19_PORT4),
- GPIO_FN(LCD0_D20_PORT3), GPIO_FN(LCD0_D21_PORT2),
- GPIO_FN(LCD0_D22_PORT0), GPIO_FN(LCD0_D23_PORT1),
- GPIO_FN(LCD0_LCLK_PORT102), /* MSEL5CR_6_0 */
-
- /* LCD1 */
- GPIO_FN(LCD1_D0), GPIO_FN(LCD1_D1), GPIO_FN(LCD1_D2),
- GPIO_FN(LCD1_D3), GPIO_FN(LCD1_D4), GPIO_FN(LCD1_D5),
- GPIO_FN(LCD1_D6), GPIO_FN(LCD1_D7), GPIO_FN(LCD1_D8),
- GPIO_FN(LCD1_D9), GPIO_FN(LCD1_D10), GPIO_FN(LCD1_D11),
- GPIO_FN(LCD1_D12), GPIO_FN(LCD1_D13), GPIO_FN(LCD1_D14),
- GPIO_FN(LCD1_D15), GPIO_FN(LCD1_D16), GPIO_FN(LCD1_D17),
- GPIO_FN(LCD1_D18), GPIO_FN(LCD1_D19), GPIO_FN(LCD1_D20),
- GPIO_FN(LCD1_D21), GPIO_FN(LCD1_D22), GPIO_FN(LCD1_D23),
- GPIO_FN(LCD1_RS), GPIO_FN(LCD1_RD), GPIO_FN(LCD1_CS),
- GPIO_FN(LCD1_WR), GPIO_FN(LCD1_DCK), GPIO_FN(LCD1_DON),
- GPIO_FN(LCD1_VCPWC), GPIO_FN(LCD1_LCLK), GPIO_FN(LCD1_HSYN),
- GPIO_FN(LCD1_VSYN), GPIO_FN(LCD1_VEPWC), GPIO_FN(LCD1_DISP),
-
/* RSPI */
GPIO_FN(RSPI_SSL0_A), GPIO_FN(RSPI_SSL1_A), GPIO_FN(RSPI_SSL2_A),
GPIO_FN(RSPI_SSL3_A), GPIO_FN(RSPI_CK_A), GPIO_FN(RSPI_MOSI_A),
@@ -1889,26 +2373,6 @@ static struct pinmux_gpio pinmux_gpios[] = {
GPIO_FN(SIM_D_PORT22), /* SIM_D Port 22/199 */
GPIO_FN(SIM_D_PORT199),
- /* SDHI0 */
- GPIO_FN(SDHI0_D0), GPIO_FN(SDHI0_D1), GPIO_FN(SDHI0_D2),
- GPIO_FN(SDHI0_D3), GPIO_FN(SDHI0_CD), GPIO_FN(SDHI0_WP),
- GPIO_FN(SDHI0_CMD), GPIO_FN(SDHI0_CLK),
-
- /* SDHI1 */
- GPIO_FN(SDHI1_D0), GPIO_FN(SDHI1_D1), GPIO_FN(SDHI1_D2),
- GPIO_FN(SDHI1_D3), GPIO_FN(SDHI1_CD), GPIO_FN(SDHI1_WP),
- GPIO_FN(SDHI1_CMD), GPIO_FN(SDHI1_CLK),
-
- /* SDHI2 */
- GPIO_FN(SDHI2_D0), GPIO_FN(SDHI2_D1), GPIO_FN(SDHI2_D2),
- GPIO_FN(SDHI2_D3), GPIO_FN(SDHI2_CLK), GPIO_FN(SDHI2_CMD),
-
- GPIO_FN(SDHI2_CD_PORT24), /* MSEL5CR_19_0 */
- GPIO_FN(SDHI2_WP_PORT25),
-
- GPIO_FN(SDHI2_WP_PORT177), /* MSEL5CR_19_1 */
- GPIO_FN(SDHI2_CD_PORT202),
-
/* MSIOF2 */
GPIO_FN(MSIOF2_TXD), GPIO_FN(MSIOF2_RXD), GPIO_FN(MSIOF2_TSCK),
GPIO_FN(MSIOF2_SS2), GPIO_FN(MSIOF2_TSYNC), GPIO_FN(MSIOF2_SS1),
@@ -1953,21 +2417,6 @@ static struct pinmux_gpio pinmux_gpios[] = {
GPIO_FN(MEMC_WAIT), GPIO_FN(MEMC_DREQ1), GPIO_FN(MEMC_BUSCLK),
GPIO_FN(MEMC_A0),
- /* MMC */
- GPIO_FN(MMC0_D0_PORT68), GPIO_FN(MMC0_D1_PORT69),
- GPIO_FN(MMC0_D2_PORT70), GPIO_FN(MMC0_D3_PORT71),
- GPIO_FN(MMC0_D4_PORT72), GPIO_FN(MMC0_D5_PORT73),
- GPIO_FN(MMC0_D6_PORT74), GPIO_FN(MMC0_D7_PORT75),
- GPIO_FN(MMC0_CLK_PORT66),
- GPIO_FN(MMC0_CMD_PORT67), /* MSEL4CR_15_0 */
-
- GPIO_FN(MMC1_D0_PORT149), GPIO_FN(MMC1_D1_PORT148),
- GPIO_FN(MMC1_D2_PORT147), GPIO_FN(MMC1_D3_PORT146),
- GPIO_FN(MMC1_D4_PORT145), GPIO_FN(MMC1_D5_PORT144),
- GPIO_FN(MMC1_D6_PORT143), GPIO_FN(MMC1_D7_PORT142),
- GPIO_FN(MMC1_CLK_PORT103),
- GPIO_FN(MMC1_CMD_PORT104), /* MSEL4CR_15_1 */
-
/* MSIOF0 */
GPIO_FN(MSIOF0_SS1), GPIO_FN(MSIOF0_SS2), GPIO_FN(MSIOF0_RXD),
GPIO_FN(MSIOF0_TXD), GPIO_FN(MSIOF0_MCK0), GPIO_FN(MSIOF0_MCK1),
@@ -2126,7 +2575,7 @@ static struct pinmux_gpio pinmux_gpios[] = {
GPIO_FN(TRACEAUD_FROM_MEMC),
};
-static struct pinmux_cfg_reg pinmux_config_regs[] = {
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PORTCR(0, 0xe6050000), /* PORT0CR */
PORTCR(1, 0xe6050001), /* PORT1CR */
PORTCR(2, 0xe6050002), /* PORT2CR */
@@ -2440,7 +2889,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
{ },
};
-static struct pinmux_data_reg pinmux_data_regs[] = {
+static const struct pinmux_data_reg pinmux_data_regs[] = {
{ PINMUX_DATA_REG("PORTL031_000DR", 0xe6054800, 32) {
PORT31_DATA, PORT30_DATA, PORT29_DATA, PORT28_DATA,
PORT27_DATA, PORT26_DATA, PORT25_DATA, PORT24_DATA,
@@ -2544,46 +2993,43 @@ static struct pinmux_data_reg pinmux_data_regs[] = {
{ },
};
-static struct pinmux_irq pinmux_irqs[] = {
- PINMUX_IRQ(evt2irq(0x0200), PORT2_FN0, PORT13_FN0), /* IRQ0A */
- PINMUX_IRQ(evt2irq(0x0220), PORT20_FN0), /* IRQ1A */
- PINMUX_IRQ(evt2irq(0x0240), PORT11_FN0, PORT12_FN0), /* IRQ2A */
- PINMUX_IRQ(evt2irq(0x0260), PORT10_FN0, PORT14_FN0), /* IRQ3A */
- PINMUX_IRQ(evt2irq(0x0280), PORT15_FN0, PORT172_FN0), /* IRQ4A */
- PINMUX_IRQ(evt2irq(0x02A0), PORT0_FN0, PORT1_FN0), /* IRQ5A */
- PINMUX_IRQ(evt2irq(0x02C0), PORT121_FN0, PORT173_FN0), /* IRQ6A */
- PINMUX_IRQ(evt2irq(0x02E0), PORT120_FN0, PORT209_FN0), /* IRQ7A */
- PINMUX_IRQ(evt2irq(0x0300), PORT119_FN0), /* IRQ8A */
- PINMUX_IRQ(evt2irq(0x0320), PORT118_FN0, PORT210_FN0), /* IRQ9A */
- PINMUX_IRQ(evt2irq(0x0340), PORT19_FN0), /* IRQ10A */
- PINMUX_IRQ(evt2irq(0x0360), PORT104_FN0), /* IRQ11A */
- PINMUX_IRQ(evt2irq(0x0380), PORT42_FN0, PORT97_FN0), /* IRQ12A */
- PINMUX_IRQ(evt2irq(0x03A0), PORT64_FN0, PORT98_FN0), /* IRQ13A */
- PINMUX_IRQ(evt2irq(0x03C0), PORT63_FN0, PORT99_FN0), /* IRQ14A */
- PINMUX_IRQ(evt2irq(0x03E0), PORT62_FN0, PORT100_FN0), /* IRQ15A */
- PINMUX_IRQ(evt2irq(0x3200), PORT68_FN0, PORT211_FN0), /* IRQ16A */
- PINMUX_IRQ(evt2irq(0x3220), PORT69_FN0), /* IRQ17A */
- PINMUX_IRQ(evt2irq(0x3240), PORT70_FN0), /* IRQ18A */
- PINMUX_IRQ(evt2irq(0x3260), PORT71_FN0), /* IRQ19A */
- PINMUX_IRQ(evt2irq(0x3280), PORT67_FN0), /* IRQ20A */
- PINMUX_IRQ(evt2irq(0x32A0), PORT202_FN0), /* IRQ21A */
- PINMUX_IRQ(evt2irq(0x32C0), PORT95_FN0), /* IRQ22A */
- PINMUX_IRQ(evt2irq(0x32E0), PORT96_FN0), /* IRQ23A */
- PINMUX_IRQ(evt2irq(0x3300), PORT180_FN0), /* IRQ24A */
- PINMUX_IRQ(evt2irq(0x3320), PORT38_FN0), /* IRQ25A */
- PINMUX_IRQ(evt2irq(0x3340), PORT58_FN0, PORT81_FN0), /* IRQ26A */
- PINMUX_IRQ(evt2irq(0x3360), PORT57_FN0, PORT168_FN0), /* IRQ27A */
- PINMUX_IRQ(evt2irq(0x3380), PORT56_FN0, PORT169_FN0), /* IRQ28A */
- PINMUX_IRQ(evt2irq(0x33A0), PORT50_FN0, PORT170_FN0), /* IRQ29A */
- PINMUX_IRQ(evt2irq(0x33C0), PORT49_FN0, PORT171_FN0), /* IRQ30A */
- PINMUX_IRQ(evt2irq(0x33E0), PORT41_FN0, PORT167_FN0), /* IRQ31A */
-};
-
-struct sh_pfc_soc_info r8a7740_pinmux_info = {
+static const struct pinmux_irq pinmux_irqs[] = {
+ PINMUX_IRQ(irq_pin(0), GPIO_PORT2, GPIO_PORT13), /* IRQ0A */
+ PINMUX_IRQ(irq_pin(1), GPIO_PORT20), /* IRQ1A */
+ PINMUX_IRQ(irq_pin(2), GPIO_PORT11, GPIO_PORT12), /* IRQ2A */
+ PINMUX_IRQ(irq_pin(3), GPIO_PORT10, GPIO_PORT14), /* IRQ3A */
+ PINMUX_IRQ(irq_pin(4), GPIO_PORT15, GPIO_PORT172),/* IRQ4A */
+ PINMUX_IRQ(irq_pin(5), GPIO_PORT0, GPIO_PORT1), /* IRQ5A */
+ PINMUX_IRQ(irq_pin(6), GPIO_PORT121, GPIO_PORT173),/* IRQ6A */
+ PINMUX_IRQ(irq_pin(7), GPIO_PORT120, GPIO_PORT209),/* IRQ7A */
+ PINMUX_IRQ(irq_pin(8), GPIO_PORT119), /* IRQ8A */
+ PINMUX_IRQ(irq_pin(9), GPIO_PORT118, GPIO_PORT210),/* IRQ9A */
+ PINMUX_IRQ(irq_pin(10), GPIO_PORT19), /* IRQ10A */
+ PINMUX_IRQ(irq_pin(11), GPIO_PORT104), /* IRQ11A */
+ PINMUX_IRQ(irq_pin(12), GPIO_PORT42, GPIO_PORT97), /* IRQ12A */
+ PINMUX_IRQ(irq_pin(13), GPIO_PORT64, GPIO_PORT98), /* IRQ13A */
+ PINMUX_IRQ(irq_pin(14), GPIO_PORT63, GPIO_PORT99), /* IRQ14A */
+ PINMUX_IRQ(irq_pin(15), GPIO_PORT62, GPIO_PORT100),/* IRQ15A */
+ PINMUX_IRQ(irq_pin(16), GPIO_PORT68, GPIO_PORT211),/* IRQ16A */
+ PINMUX_IRQ(irq_pin(17), GPIO_PORT69), /* IRQ17A */
+ PINMUX_IRQ(irq_pin(18), GPIO_PORT70), /* IRQ18A */
+ PINMUX_IRQ(irq_pin(19), GPIO_PORT71), /* IRQ19A */
+ PINMUX_IRQ(irq_pin(20), GPIO_PORT67), /* IRQ20A */
+ PINMUX_IRQ(irq_pin(21), GPIO_PORT202), /* IRQ21A */
+ PINMUX_IRQ(irq_pin(22), GPIO_PORT95), /* IRQ22A */
+ PINMUX_IRQ(irq_pin(23), GPIO_PORT96), /* IRQ23A */
+ PINMUX_IRQ(irq_pin(24), GPIO_PORT180), /* IRQ24A */
+ PINMUX_IRQ(irq_pin(25), GPIO_PORT38), /* IRQ25A */
+ PINMUX_IRQ(irq_pin(26), GPIO_PORT58, GPIO_PORT81), /* IRQ26A */
+ PINMUX_IRQ(irq_pin(27), GPIO_PORT57, GPIO_PORT168),/* IRQ27A */
+ PINMUX_IRQ(irq_pin(28), GPIO_PORT56, GPIO_PORT169),/* IRQ28A */
+ PINMUX_IRQ(irq_pin(29), GPIO_PORT50, GPIO_PORT170),/* IRQ29A */
+ PINMUX_IRQ(irq_pin(30), GPIO_PORT49, GPIO_PORT171),/* IRQ30A */
+ PINMUX_IRQ(irq_pin(31), GPIO_PORT41, GPIO_PORT167),/* IRQ31A */
+};
+
+const struct sh_pfc_soc_info r8a7740_pinmux_info = {
.name = "r8a7740_pfc",
- .reserved_id = PINMUX_RESERVED,
- .data = { PINMUX_DATA_BEGIN,
- PINMUX_DATA_END },
.input = { PINMUX_INPUT_BEGIN,
PINMUX_INPUT_END },
.input_pu = { PINMUX_INPUT_PULLUP_BEGIN,
@@ -2592,15 +3038,19 @@ struct sh_pfc_soc_info r8a7740_pinmux_info = {
PINMUX_INPUT_PULLDOWN_END },
.output = { PINMUX_OUTPUT_BEGIN,
PINMUX_OUTPUT_END },
- .mark = { PINMUX_MARK_BEGIN,
- PINMUX_MARK_END },
.function = { PINMUX_FUNCTION_BEGIN,
PINMUX_FUNCTION_END },
- .first_gpio = GPIO_PORT0,
- .last_gpio = GPIO_FN_TRACEAUD_FROM_MEMC,
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups,
+ .nr_groups = ARRAY_SIZE(pinmux_groups),
+ .functions = pinmux_functions,
+ .nr_functions = ARRAY_SIZE(pinmux_functions),
+
+ .func_gpios = pinmux_func_gpios,
+ .nr_func_gpios = ARRAY_SIZE(pinmux_func_gpios),
- .gpios = pinmux_gpios,
.cfg_regs = pinmux_config_regs,
.data_regs = pinmux_data_regs,
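Every SH_PFC_PIN_GROUP(foo) and SH_PFC_FUNCTION(bar) entry in the converted
drivers works purely by naming convention: the macros pick up the matching
foo_pins[]/foo_mux[] and bar_groups[] arrays, which is why each _pins array
must stay the same length as its _mux array. A sketch of the expected
expansion, assuming the usual definitions in sh_pfc.h (the field names shown
are an approximation, not quoted from that header):

	#define SH_PFC_PIN_GROUP(n)				\
		{						\
			.name = #n,				\
			.pins = n##_pins,			\
			.mux = n##_mux,				\
			.nr_pins = ARRAY_SIZE(n##_pins),	\
		}

	#define SH_PFC_FUNCTION(n)				\
		{						\
			.name = #n,				\
			.groups = n##_groups,			\
			.nr_groups = ARRAY_SIZE(n##_groups),	\
		}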
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
index 13feaa0..791a671 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
@@ -19,57 +19,77 @@
*/
#include <linux/kernel.h>
-#include <mach/r8a7779.h>
#include "sh_pfc.h"
-#define CPU_32_PORT(fn, pfx, sfx) \
- PORT_10(fn, pfx, sfx), PORT_10(fn, pfx##1, sfx), \
- PORT_10(fn, pfx##2, sfx), PORT_1(fn, pfx##30, sfx), \
- PORT_1(fn, pfx##31, sfx)
-
-#define CPU_32_PORT6(fn, pfx, sfx) \
- PORT_1(fn, pfx##0, sfx), PORT_1(fn, pfx##1, sfx), \
- PORT_1(fn, pfx##2, sfx), PORT_1(fn, pfx##3, sfx), \
- PORT_1(fn, pfx##4, sfx), PORT_1(fn, pfx##5, sfx), \
- PORT_1(fn, pfx##6, sfx), PORT_1(fn, pfx##7, sfx), \
- PORT_1(fn, pfx##8, sfx)
-
-#define CPU_ALL_PORT(fn, pfx, sfx) \
- CPU_32_PORT(fn, pfx##_0_, sfx), \
- CPU_32_PORT(fn, pfx##_1_, sfx), \
- CPU_32_PORT(fn, pfx##_2_, sfx), \
- CPU_32_PORT(fn, pfx##_3_, sfx), \
- CPU_32_PORT(fn, pfx##_4_, sfx), \
- CPU_32_PORT(fn, pfx##_5_, sfx), \
- CPU_32_PORT6(fn, pfx##_6_, sfx)
-
-#define _GP_GPIO(pfx, sfx) PINMUX_GPIO(GPIO_GP##pfx, GP##pfx##_DATA)
-#define _GP_DATA(pfx, sfx) PINMUX_DATA(GP##pfx##_DATA, GP##pfx##_FN, \
- GP##pfx##_IN, GP##pfx##_OUT)
-
-#define _GP_INOUTSEL(pfx, sfx) GP##pfx##_IN, GP##pfx##_OUT
-#define _GP_INDT(pfx, sfx) GP##pfx##_DATA
-
-#define GP_ALL(str) CPU_ALL_PORT(_PORT_ALL, GP, str)
-#define PINMUX_GPIO_GP_ALL() CPU_ALL_PORT(_GP_GPIO, , unused)
-#define PINMUX_DATA_GP_ALL() CPU_ALL_PORT(_GP_DATA, , unused)
-
-
-#define PORT_10_REV(fn, pfx, sfx) \
- PORT_1(fn, pfx##9, sfx), PORT_1(fn, pfx##8, sfx), \
- PORT_1(fn, pfx##7, sfx), PORT_1(fn, pfx##6, sfx), \
- PORT_1(fn, pfx##5, sfx), PORT_1(fn, pfx##4, sfx), \
- PORT_1(fn, pfx##3, sfx), PORT_1(fn, pfx##2, sfx), \
- PORT_1(fn, pfx##1, sfx), PORT_1(fn, pfx##0, sfx)
-
-#define CPU_32_PORT_REV(fn, pfx, sfx) \
- PORT_1(fn, pfx##31, sfx), PORT_1(fn, pfx##30, sfx), \
- PORT_10_REV(fn, pfx##2, sfx), PORT_10_REV(fn, pfx##1, sfx), \
- PORT_10_REV(fn, pfx, sfx)
-
-#define GP_INOUTSEL(bank) CPU_32_PORT_REV(_GP_INOUTSEL, _##bank##_, unused)
-#define GP_INDT(bank) CPU_32_PORT_REV(_GP_INDT, _##bank##_, unused)
+#define PORT_GP_1(bank, pin, fn, sfx) fn(bank, pin, GP_##bank##_##pin, sfx)
+
+#define PORT_GP_32(bank, fn, sfx) \
+ PORT_GP_1(bank, 0, fn, sfx), PORT_GP_1(bank, 1, fn, sfx), \
+ PORT_GP_1(bank, 2, fn, sfx), PORT_GP_1(bank, 3, fn, sfx), \
+ PORT_GP_1(bank, 4, fn, sfx), PORT_GP_1(bank, 5, fn, sfx), \
+ PORT_GP_1(bank, 6, fn, sfx), PORT_GP_1(bank, 7, fn, sfx), \
+ PORT_GP_1(bank, 8, fn, sfx), PORT_GP_1(bank, 9, fn, sfx), \
+ PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, sfx), \
+ PORT_GP_1(bank, 12, fn, sfx), PORT_GP_1(bank, 13, fn, sfx), \
+ PORT_GP_1(bank, 14, fn, sfx), PORT_GP_1(bank, 15, fn, sfx), \
+ PORT_GP_1(bank, 16, fn, sfx), PORT_GP_1(bank, 17, fn, sfx), \
+ PORT_GP_1(bank, 18, fn, sfx), PORT_GP_1(bank, 19, fn, sfx), \
+ PORT_GP_1(bank, 20, fn, sfx), PORT_GP_1(bank, 21, fn, sfx), \
+ PORT_GP_1(bank, 22, fn, sfx), PORT_GP_1(bank, 23, fn, sfx), \
+ PORT_GP_1(bank, 24, fn, sfx), PORT_GP_1(bank, 25, fn, sfx), \
+ PORT_GP_1(bank, 26, fn, sfx), PORT_GP_1(bank, 27, fn, sfx), \
+ PORT_GP_1(bank, 28, fn, sfx), PORT_GP_1(bank, 29, fn, sfx), \
+ PORT_GP_1(bank, 30, fn, sfx), PORT_GP_1(bank, 31, fn, sfx)
+
+#define PORT_GP_32_9(bank, fn, sfx) \
+ PORT_GP_1(bank, 0, fn, sfx), PORT_GP_1(bank, 1, fn, sfx), \
+ PORT_GP_1(bank, 2, fn, sfx), PORT_GP_1(bank, 3, fn, sfx), \
+ PORT_GP_1(bank, 4, fn, sfx), PORT_GP_1(bank, 5, fn, sfx), \
+ PORT_GP_1(bank, 6, fn, sfx), PORT_GP_1(bank, 7, fn, sfx), \
+ PORT_GP_1(bank, 8, fn, sfx)
+
+#define PORT_GP_32_REV(bank, fn, sfx) \
+ PORT_GP_1(bank, 31, fn, sfx), PORT_GP_1(bank, 30, fn, sfx), \
+ PORT_GP_1(bank, 29, fn, sfx), PORT_GP_1(bank, 28, fn, sfx), \
+ PORT_GP_1(bank, 27, fn, sfx), PORT_GP_1(bank, 26, fn, sfx), \
+ PORT_GP_1(bank, 25, fn, sfx), PORT_GP_1(bank, 24, fn, sfx), \
+ PORT_GP_1(bank, 23, fn, sfx), PORT_GP_1(bank, 22, fn, sfx), \
+ PORT_GP_1(bank, 21, fn, sfx), PORT_GP_1(bank, 20, fn, sfx), \
+ PORT_GP_1(bank, 19, fn, sfx), PORT_GP_1(bank, 18, fn, sfx), \
+ PORT_GP_1(bank, 17, fn, sfx), PORT_GP_1(bank, 16, fn, sfx), \
+ PORT_GP_1(bank, 15, fn, sfx), PORT_GP_1(bank, 14, fn, sfx), \
+ PORT_GP_1(bank, 13, fn, sfx), PORT_GP_1(bank, 12, fn, sfx), \
+ PORT_GP_1(bank, 11, fn, sfx), PORT_GP_1(bank, 10, fn, sfx), \
+ PORT_GP_1(bank, 9, fn, sfx), PORT_GP_1(bank, 8, fn, sfx), \
+ PORT_GP_1(bank, 7, fn, sfx), PORT_GP_1(bank, 6, fn, sfx), \
+ PORT_GP_1(bank, 5, fn, sfx), PORT_GP_1(bank, 4, fn, sfx), \
+ PORT_GP_1(bank, 3, fn, sfx), PORT_GP_1(bank, 2, fn, sfx), \
+ PORT_GP_1(bank, 1, fn, sfx), PORT_GP_1(bank, 0, fn, sfx)
+
+#define CPU_ALL_PORT(fn, sfx) \
+ PORT_GP_32(0, fn, sfx), \
+ PORT_GP_32(1, fn, sfx), \
+ PORT_GP_32(2, fn, sfx), \
+ PORT_GP_32(3, fn, sfx), \
+ PORT_GP_32(4, fn, sfx), \
+ PORT_GP_32(5, fn, sfx), \
+ PORT_GP_32_9(6, fn, sfx)
+
+#define _GP_PORT_ALL(bank, pin, name, sfx) name##_##sfx
+
+#define _GP_GPIO(bank, pin, _name, sfx) \
+ [(bank * 32) + pin] = { \
+ .name = __stringify(_name), \
+ .enum_id = _name##_DATA, \
+ }
+
+#define _GP_DATA(bank, pin, name, sfx) \
+ PINMUX_DATA(name##_DATA, name##_FN)
+
+#define GP_ALL(str) CPU_ALL_PORT(_GP_PORT_ALL, str)
+#define PINMUX_GPIO_GP_ALL() CPU_ALL_PORT(_GP_GPIO, unused)
+#define PINMUX_DATA_GP_ALL() CPU_ALL_PORT(_GP_DATA, unused)
#define PINMUX_IPSR_DATA(ipsr, fn) PINMUX_DATA(fn##_MARK, FN_##ipsr, FN_##fn)
#define PINMUX_IPSR_MODSEL_DATA(ipsr, fn, ms) PINMUX_DATA(fn##_MARK, FN_##ms, \
@@ -82,14 +102,6 @@ enum {
GP_ALL(DATA), /* GP_0_0_DATA -> GP_6_8_DATA */
PINMUX_DATA_END,
- PINMUX_INPUT_BEGIN,
- GP_ALL(IN), /* GP_0_0_IN -> GP_6_8_IN */
- PINMUX_INPUT_END,
-
- PINMUX_OUTPUT_BEGIN,
- GP_ALL(OUT), /* GP_0_0_OUT -> GP_6_8_OUT */
- PINMUX_OUTPUT_END,
-
PINMUX_FUNCTION_BEGIN,
GP_ALL(FN), /* GP_0_0_FN -> GP_6_8_FN */
@@ -371,7 +383,7 @@ enum {
FN_VI1_DATA6_VI1_B6, FN_SD2_CD, FN_MT0_VCXO, FN_SPA_TMS,
FN_HSPI_TX1_D, FN_VI1_DATA7_VI1_B7, FN_SD2_WP, FN_MT0_PWM,
FN_SPA_TDI, FN_HSPI_RX1_D, FN_VI1_G0, FN_VI3_DATA0,
- FN_DU1_DOTCLKOUT1, FN_TS_SCK1, FN_DREQ2_B, FN_TX2,
+ FN_TS_SCK1, FN_DREQ2_B, FN_TX2,
FN_SPA_TDO, FN_HCTS0_B, FN_VI1_G1, FN_VI3_DATA1,
FN_SSI_SCK1, FN_TS_SDEN1, FN_DACK2_B, FN_RX2, FN_HRTS0_B,
@@ -447,7 +459,8 @@ enum {
A0_MARK, SD1_DAT3_MARK, MMC0_D3_MARK, FD3_MARK,
BS_MARK, SD1_DAT2_MARK, MMC0_D2_MARK, FD2_MARK,
ATADIR0_MARK, SDSELF_MARK, HCTS1_MARK, TX4_C_MARK,
- USB_PENC2_MARK, SCK0_MARK, PWM1_MARK, PWMFSW0_MARK,
+ USB_PENC0_MARK, USB_PENC1_MARK, USB_PENC2_MARK,
+ SCK0_MARK, PWM1_MARK, PWMFSW0_MARK,
SCIF_CLK_MARK, TCLK0_C_MARK,
EX_CS0_MARK, RX3_C_IRDA_RX_C_MARK, MMC0_D6_MARK,
@@ -632,7 +645,7 @@ enum {
HSPI_CS1_D_MARK, ADICHS2_B_MARK, VI1_DATA6_VI1_B6_MARK, SD2_CD_MARK,
MT0_VCXO_MARK, SPA_TMS_MARK, HSPI_TX1_D_MARK, VI1_DATA7_VI1_B7_MARK,
SD2_WP_MARK, MT0_PWM_MARK, SPA_TDI_MARK, HSPI_RX1_D_MARK,
- VI1_G0_MARK, VI3_DATA0_MARK, DU1_DOTCLKOUT1_MARK, TS_SCK1_MARK,
+ VI1_G0_MARK, VI3_DATA0_MARK, TS_SCK1_MARK,
DREQ2_B_MARK, TX2_MARK, SPA_TDO_MARK, HCTS0_B_MARK,
VI1_G1_MARK, VI3_DATA1_MARK, SSI_SCK1_MARK, TS_SDEN1_MARK,
DACK2_B_MARK, RX2_MARK, HRTS0_B_MARK,
@@ -649,7 +662,7 @@ enum {
PINMUX_MARK_END,
};
-static pinmux_enum_t pinmux_data[] = {
+static const pinmux_enum_t pinmux_data[] = {
PINMUX_DATA_GP_ALL(), /* PINMUX_DATA(GP_M_N_DATA, GP_M_N_FN...), */
PINMUX_DATA(AVS1_MARK, FN_AVS1),
@@ -658,6 +671,9 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(A18_MARK, FN_A18),
PINMUX_DATA(A19_MARK, FN_A19),
+ PINMUX_DATA(USB_PENC0_MARK, FN_USB_PENC0),
+ PINMUX_DATA(USB_PENC1_MARK, FN_USB_PENC1),
+
PINMUX_IPSR_DATA(IP0_2_0, USB_PENC2),
PINMUX_IPSR_MODSEL_DATA(IP0_2_0, SCK0, SEL_SCIF0_0),
PINMUX_IPSR_DATA(IP0_2_0, PWM1),
@@ -1399,7 +1415,6 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_MODSEL_DATA(IP11_23_21, HSPI_RX1_D, SEL_HSPI1_3),
PINMUX_IPSR_DATA(IP11_26_24, VI1_G0),
PINMUX_IPSR_DATA(IP11_26_24, VI3_DATA0),
- PINMUX_IPSR_DATA(IP11_26_24, DU1_DOTCLKOUT1),
PINMUX_IPSR_DATA(IP11_26_24, TS_SCK1),
PINMUX_IPSR_MODSEL_DATA(IP11_26_24, DREQ2_B, SEL_EXBUS2_1),
PINMUX_IPSR_DATA(IP11_26_24, TX2),
@@ -1450,280 +1465,1260 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_MODSEL_DATA(IP12_17_15, SCK4_B, SEL_SCIF4_1),
};
-static struct pinmux_gpio pinmux_gpios[] = {
+static struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
- GPIO_FN(AVS1), GPIO_FN(AVS2), GPIO_FN(A17), GPIO_FN(A18),
- GPIO_FN(A19),
+};
- /* IPSR0 */
- GPIO_FN(USB_PENC2), GPIO_FN(SCK0), GPIO_FN(PWM1), GPIO_FN(PWMFSW0),
- GPIO_FN(SCIF_CLK), GPIO_FN(TCLK0_C), GPIO_FN(BS), GPIO_FN(SD1_DAT2),
- GPIO_FN(MMC0_D2), GPIO_FN(FD2), GPIO_FN(ATADIR0), GPIO_FN(SDSELF),
- GPIO_FN(HCTS1), GPIO_FN(TX4_C), GPIO_FN(A0), GPIO_FN(SD1_DAT3),
- GPIO_FN(MMC0_D3), GPIO_FN(FD3), GPIO_FN(A20), GPIO_FN(TX5_D),
- GPIO_FN(HSPI_TX2_B), GPIO_FN(A21), GPIO_FN(SCK5_D),
- GPIO_FN(HSPI_CLK2_B), GPIO_FN(A22), GPIO_FN(RX5_D),
- GPIO_FN(HSPI_RX2_B), GPIO_FN(VI1_R0), GPIO_FN(A23), GPIO_FN(FCLE),
- GPIO_FN(HSPI_CLK2), GPIO_FN(VI1_R1), GPIO_FN(A24), GPIO_FN(SD1_CD),
- GPIO_FN(MMC0_D4), GPIO_FN(FD4), GPIO_FN(HSPI_CS2), GPIO_FN(VI1_R2),
- GPIO_FN(SSI_WS78_B), GPIO_FN(A25), GPIO_FN(SD1_WP), GPIO_FN(MMC0_D5),
- GPIO_FN(FD5), GPIO_FN(HSPI_RX2), GPIO_FN(VI1_R3), GPIO_FN(TX5_B),
- GPIO_FN(SSI_SDATA7_B), GPIO_FN(CTS0_B), GPIO_FN(CLKOUT),
- GPIO_FN(TX3C_IRDA_TX_C), GPIO_FN(PWM0_B), GPIO_FN(CS0),
- GPIO_FN(HSPI_CS2_B), GPIO_FN(CS1_A26), GPIO_FN(HSPI_TX2),
- GPIO_FN(SDSELF_B), GPIO_FN(RD_WR), GPIO_FN(FWE), GPIO_FN(ATAG0),
- GPIO_FN(VI1_R7), GPIO_FN(HRTS1), GPIO_FN(RX4_C),
+/* - DU0 -------------------------------------------------------------------- */
+static const unsigned int du0_rgb666_pins[] = {
+ /* R[7:2], G[7:2], B[7:2] */
+ 188, 187, 186, 185, 184, 183,
+ 194, 193, 192, 191, 190, 189,
+ 200, 199, 198, 197, 196, 195,
+};
+static const unsigned int du0_rgb666_mux[] = {
+ DU0_DR7_MARK, DU0_DR6_MARK, DU0_DR5_MARK, DU0_DR4_MARK,
+ DU0_DR3_MARK, DU0_DR2_MARK,
+ DU0_DG7_MARK, DU0_DG6_MARK, DU0_DG5_MARK, DU0_DG4_MARK,
+ DU0_DG3_MARK, DU0_DG2_MARK,
+ DU0_DB7_MARK, DU0_DB6_MARK, DU0_DB5_MARK, DU0_DB4_MARK,
+ DU0_DB3_MARK, DU0_DB2_MARK,
+};
+static const unsigned int du0_rgb888_pins[] = {
+ /* R[7:0], G[7:0], B[7:0] */
+ 188, 187, 186, 185, 184, 183, 24, 23,
+ 194, 193, 192, 191, 190, 189, 26, 25,
+ 200, 199, 198, 197, 196, 195, 28, 27,
+};
+static const unsigned int du0_rgb888_mux[] = {
+ DU0_DR7_MARK, DU0_DR6_MARK, DU0_DR5_MARK, DU0_DR4_MARK,
+ DU0_DR3_MARK, DU0_DR2_MARK, DU0_DR1_MARK, DU0_DR0_MARK,
+ DU0_DG7_MARK, DU0_DG6_MARK, DU0_DG5_MARK, DU0_DG4_MARK,
+ DU0_DG3_MARK, DU0_DG2_MARK, DU0_DG1_MARK, DU0_DG0_MARK,
+ DU0_DB7_MARK, DU0_DB6_MARK, DU0_DB5_MARK, DU0_DB4_MARK,
+ DU0_DB3_MARK, DU0_DB2_MARK, DU0_DB1_MARK, DU0_DB0_MARK,
+};
+static const unsigned int du0_clk_in_pins[] = {
+ /* CLKIN */
+ 29,
+};
+static const unsigned int du0_clk_in_mux[] = {
+ DU0_DOTCLKIN_MARK,
+};
+static const unsigned int du0_clk_out_0_pins[] = {
+ /* CLKOUT */
+ 180,
+};
+static const unsigned int du0_clk_out_0_mux[] = {
+ DU0_DOTCLKOUT0_MARK,
+};
+static const unsigned int du0_clk_out_1_pins[] = {
+ /* CLKOUT */
+ 30,
+};
+static const unsigned int du0_clk_out_1_mux[] = {
+ DU0_DOTCLKOUT1_MARK,
+};
+static const unsigned int du0_sync_0_pins[] = {
+ /* VSYNC, HSYNC, DISP */
+ 182, 181, 31,
+};
+static const unsigned int du0_sync_0_mux[] = {
+ DU0_EXHSYNC_DU0_HSYNC_MARK, DU0_EXVSYNC_DU0_VSYNC_MARK,
+ DU0_EXODDF_DU0_ODDF_DISP_CDE_MARK
+};
+static const unsigned int du0_sync_1_pins[] = {
+ /* VSYNC, HSYNC, DISP */
+ 182, 181, 32,
+};
+static const unsigned int du0_sync_1_mux[] = {
+ DU0_EXHSYNC_DU0_HSYNC_MARK, DU0_EXVSYNC_DU0_VSYNC_MARK,
+ DU0_DISP_MARK
+};
+static const unsigned int du0_oddf_pins[] = {
+ /* ODDF */
+ 31,
+};
+static const unsigned int du0_oddf_mux[] = {
+ DU0_EXODDF_DU0_ODDF_DISP_CDE_MARK
+};
+static const unsigned int du0_cde_pins[] = {
+ /* CDE */
+ 33,
+};
+static const unsigned int du0_cde_mux[] = {
+ DU0_CDE_MARK
+};
+/* - DU1 -------------------------------------------------------------------- */
+static const unsigned int du1_rgb666_pins[] = {
+ /* R[7:2], G[7:2], B[7:2] */
+ 41, 40, 39, 38, 37, 36,
+ 49, 48, 47, 46, 45, 44,
+ 57, 56, 55, 54, 53, 52,
+};
+static const unsigned int du1_rgb666_mux[] = {
+ DU1_DR7_MARK, DU1_DR6_MARK, DU1_DR5_MARK, DU1_DR4_MARK,
+ DU1_DR3_MARK, DU1_DR2_MARK,
+ DU1_DG7_MARK, DU1_DG6_MARK, DU1_DG5_MARK, DU1_DG4_MARK,
+ DU1_DG3_MARK, DU1_DG2_MARK,
+ DU1_DB7_MARK, DU1_DB6_MARK, DU1_DB5_MARK, DU1_DB4_MARK,
+ DU1_DB3_MARK, DU1_DB2_MARK,
+};
+static const unsigned int du1_rgb888_pins[] = {
+ /* R[7:0], G[7:0], B[7:0] */
+ 41, 40, 39, 38, 37, 36, 35, 34,
+ 49, 48, 47, 46, 45, 44, 43, 32,
+ 57, 56, 55, 54, 53, 52, 51, 50,
+};
+static const unsigned int du1_rgb888_mux[] = {
+ DU1_DR7_MARK, DU1_DR6_MARK, DU1_DR5_MARK, DU1_DR4_MARK,
+ DU1_DR3_MARK, DU1_DR2_MARK, DU1_DR1_MARK, DU1_DR0_MARK,
+ DU1_DG7_MARK, DU1_DG6_MARK, DU1_DG5_MARK, DU1_DG4_MARK,
+ DU1_DG3_MARK, DU1_DG2_MARK, DU1_DG1_MARK, DU1_DG0_MARK,
+ DU1_DB7_MARK, DU1_DB6_MARK, DU1_DB5_MARK, DU1_DB4_MARK,
+ DU1_DB3_MARK, DU1_DB2_MARK, DU1_DB1_MARK, DU1_DB0_MARK,
+};
+static const unsigned int du1_clk_in_pins[] = {
+ /* CLKIN */
+ 58,
+};
+static const unsigned int du1_clk_in_mux[] = {
+ DU1_DOTCLKIN_MARK,
+};
+static const unsigned int du1_clk_out_pins[] = {
+ /* CLKOUT */
+ 59,
+};
+static const unsigned int du1_clk_out_mux[] = {
+ DU1_DOTCLKOUT_MARK,
+};
+static const unsigned int du1_sync_0_pins[] = {
+ /* VSYNC, HSYNC, DISP */
+ 61, 60, 62,
+};
+static const unsigned int du1_sync_0_mux[] = {
+ DU1_EXVSYNC_DU1_VSYNC_MARK, DU1_EXHSYNC_DU1_HSYNC_MARK,
+ DU1_EXODDF_DU1_ODDF_DISP_CDE_MARK
+};
+static const unsigned int du1_sync_1_pins[] = {
+ /* VSYNC, HSYNC, DISP */
+ 61, 60, 63,
+};
+static const unsigned int du1_sync_1_mux[] = {
+ DU1_EXVSYNC_DU1_VSYNC_MARK, DU1_EXHSYNC_DU1_HSYNC_MARK,
+ DU1_DISP_MARK
+};
+static const unsigned int du1_oddf_pins[] = {
+ /* ODDF */
+ 62,
+};
+static const unsigned int du1_oddf_mux[] = {
+ DU1_EXODDF_DU1_ODDF_DISP_CDE_MARK
+};
+static const unsigned int du1_cde_pins[] = {
+ /* CDE */
+ 64,
+};
+static const unsigned int du1_cde_mux[] = {
+ DU1_CDE_MARK
+};
+/* - HSPI0 ------------------------------------------------------------------ */
+static const unsigned int hspi0_pins[] = {
+ /* CLK, CS, RX, TX */
+ 150, 151, 153, 152,
+};
+static const unsigned int hspi0_mux[] = {
+ HSPI_CLK0_MARK, HSPI_CS0_MARK, HSPI_RX0_MARK, HSPI_TX0_MARK,
+};
+/* - HSPI1 ------------------------------------------------------------------ */
+static const unsigned int hspi1_pins[] = {
+ /* CLK, CS, RX, TX */
+ 63, 58, 64, 62,
+};
+static const unsigned int hspi1_mux[] = {
+ HSPI_CLK1_MARK, HSPI_CS1_MARK, HSPI_RX1_MARK, HSPI_TX1_MARK,
+};
+static const unsigned int hspi1_b_pins[] = {
+ /* CLK, CS, RX, TX */
+ 90, 91, 93, 92,
+};
+static const unsigned int hspi1_b_mux[] = {
+ HSPI_CLK1_B_MARK, HSPI_CS1_B_MARK, HSPI_RX1_B_MARK, HSPI_TX1_B_MARK,
+};
+static const unsigned int hspi1_c_pins[] = {
+ /* CLK, CS, RX, TX */
+ 141, 142, 144, 143,
+};
+static const unsigned int hspi1_c_mux[] = {
+ HSPI_CLK1_C_MARK, HSPI_CS1_C_MARK, HSPI_RX1_C_MARK, HSPI_TX1_C_MARK,
+};
+static const unsigned int hspi1_d_pins[] = {
+ /* CLK, CS, RX, TX */
+ 101, 102, 104, 103,
+};
+static const unsigned int hspi1_d_mux[] = {
+ HSPI_CLK1_D_MARK, HSPI_CS1_D_MARK, HSPI_RX1_D_MARK, HSPI_TX1_D_MARK,
+};
+/* - HSPI2 ------------------------------------------------------------------ */
+static const unsigned int hspi2_pins[] = {
+ /* CLK, CS, RX, TX */
+ 9, 10, 11, 14,
+};
+static const unsigned int hspi2_mux[] = {
+ HSPI_CLK2_MARK, HSPI_CS2_MARK, HSPI_RX2_MARK, HSPI_TX2_MARK,
+};
+static const unsigned int hspi2_b_pins[] = {
+ /* CLK, CS, RX, TX */
+ 7, 13, 8, 6,
+};
+static const unsigned int hspi2_b_mux[] = {
+ HSPI_CLK2_B_MARK, HSPI_CS2_B_MARK, HSPI_RX2_B_MARK, HSPI_TX2_B_MARK,
+};
+/* - INTC ------------------------------------------------------------------- */
+static const unsigned int intc_irq0_pins[] = {
+ /* IRQ */
+ 78,
+};
+static const unsigned int intc_irq0_mux[] = {
+ IRQ0_MARK,
+};
+static const unsigned int intc_irq0_b_pins[] = {
+ /* IRQ */
+ 141,
+};
+static const unsigned int intc_irq0_b_mux[] = {
+ IRQ0_B_MARK,
+};
+static const unsigned int intc_irq1_pins[] = {
+ /* IRQ */
+ 79,
+};
+static const unsigned int intc_irq1_mux[] = {
+ IRQ1_MARK,
+};
+static const unsigned int intc_irq1_b_pins[] = {
+ /* IRQ */
+ 142,
+};
+static const unsigned int intc_irq1_b_mux[] = {
+ IRQ1_B_MARK,
+};
+static const unsigned int intc_irq2_pins[] = {
+ /* IRQ */
+ 88,
+};
+static const unsigned int intc_irq2_mux[] = {
+ IRQ2_MARK,
+};
+static const unsigned int intc_irq2_b_pins[] = {
+ /* IRQ */
+ 143,
+};
+static const unsigned int intc_irq2_b_mux[] = {
+ IRQ2_B_MARK,
+};
+static const unsigned int intc_irq3_pins[] = {
+ /* IRQ */
+ 89,
+};
+static const unsigned int intc_irq3_mux[] = {
+ IRQ3_MARK,
+};
+static const unsigned int intc_irq3_b_pins[] = {
+ /* IRQ */
+ 144,
+};
+static const unsigned int intc_irq3_b_mux[] = {
+ IRQ3_B_MARK,
+};
+/* - LBSC ------------------------------------------------------------------- */
+static const unsigned int lbsc_cs0_pins[] = {
+ /* CS */
+ 13,
+};
+static const unsigned int lbsc_cs0_mux[] = {
+ CS0_MARK,
+};
+static const unsigned int lbsc_cs1_pins[] = {
+ /* CS */
+ 14,
+};
+static const unsigned int lbsc_cs1_mux[] = {
+ CS1_A26_MARK,
+};
+static const unsigned int lbsc_ex_cs0_pins[] = {
+ /* CS */
+ 15,
+};
+static const unsigned int lbsc_ex_cs0_mux[] = {
+ EX_CS0_MARK,
+};
+static const unsigned int lbsc_ex_cs1_pins[] = {
+ /* CS */
+ 16,
+};
+static const unsigned int lbsc_ex_cs1_mux[] = {
+ EX_CS1_MARK,
+};
+static const unsigned int lbsc_ex_cs2_pins[] = {
+ /* CS */
+ 17,
+};
+static const unsigned int lbsc_ex_cs2_mux[] = {
+ EX_CS2_MARK,
+};
+static const unsigned int lbsc_ex_cs3_pins[] = {
+ /* CS */
+ 18,
+};
+static const unsigned int lbsc_ex_cs3_mux[] = {
+ EX_CS3_MARK,
+};
+static const unsigned int lbsc_ex_cs4_pins[] = {
+ /* CS */
+ 19,
+};
+static const unsigned int lbsc_ex_cs4_mux[] = {
+ EX_CS4_MARK,
+};
+static const unsigned int lbsc_ex_cs5_pins[] = {
+ /* CS */
+ 20,
+};
+static const unsigned int lbsc_ex_cs5_mux[] = {
+ EX_CS5_MARK,
+};
+/* - MMCIF ------------------------------------------------------------------ */
+static const unsigned int mmc0_data1_pins[] = {
+ /* D[0] */
+ 19,
+};
+static const unsigned int mmc0_data1_mux[] = {
+ MMC0_D0_MARK,
+};
+static const unsigned int mmc0_data4_pins[] = {
+ /* D[0:3] */
+ 19, 20, 21, 2,
+};
+static const unsigned int mmc0_data4_mux[] = {
+ MMC0_D0_MARK, MMC0_D1_MARK, MMC0_D2_MARK, MMC0_D3_MARK,
+};
+static const unsigned int mmc0_data8_pins[] = {
+ /* D[0:7] */
+ 19, 20, 21, 2, 10, 11, 15, 16,
+};
+static const unsigned int mmc0_data8_mux[] = {
+ MMC0_D0_MARK, MMC0_D1_MARK, MMC0_D2_MARK, MMC0_D3_MARK,
+ MMC0_D4_MARK, MMC0_D5_MARK, MMC0_D6_MARK, MMC0_D7_MARK,
+};
+static const unsigned int mmc0_ctrl_pins[] = {
+ /* CMD, CLK */
+ 18, 17,
+};
+static const unsigned int mmc0_ctrl_mux[] = {
+ MMC0_CMD_MARK, MMC0_CLK_MARK,
+};
+static const unsigned int mmc1_data1_pins[] = {
+ /* D[0] */
+ 72,
+};
+static const unsigned int mmc1_data1_mux[] = {
+ MMC1_D0_MARK,
+};
+static const unsigned int mmc1_data4_pins[] = {
+ /* D[0:3] */
+ 72, 73, 74, 75,
+};
+static const unsigned int mmc1_data4_mux[] = {
+ MMC1_D0_MARK, MMC1_D1_MARK, MMC1_D2_MARK, MMC1_D3_MARK,
+};
+static const unsigned int mmc1_data8_pins[] = {
+ /* D[0:7] */
+ 72, 73, 74, 75, 76, 77, 80, 81,
+};
+static const unsigned int mmc1_data8_mux[] = {
+ MMC1_D0_MARK, MMC1_D1_MARK, MMC1_D2_MARK, MMC1_D3_MARK,
+ MMC1_D4_MARK, MMC1_D5_MARK, MMC1_D6_MARK, MMC1_D7_MARK,
+};
+static const unsigned int mmc1_ctrl_pins[] = {
+ /* CMD, CLK */
+ 68, 65,
+};
+static const unsigned int mmc1_ctrl_mux[] = {
+ MMC1_CMD_MARK, MMC1_CLK_MARK,
+};
+/* - SCIF0 ------------------------------------------------------------------ */
+static const unsigned int scif0_data_pins[] = {
+ /* RXD, TXD */
+ 153, 152,
+};
+static const unsigned int scif0_data_mux[] = {
+ RX0_MARK, TX0_MARK,
+};
+static const unsigned int scif0_clk_pins[] = {
+ /* SCK */
+ 156,
+};
+static const unsigned int scif0_clk_mux[] = {
+ SCK0_MARK,
+};
+static const unsigned int scif0_ctrl_pins[] = {
+ /* RTS, CTS */
+ 151, 150,
+};
+static const unsigned int scif0_ctrl_mux[] = {
+ RTS0_TANS_MARK, CTS0_MARK,
+};
+static const unsigned int scif0_data_b_pins[] = {
+ /* RXD, TXD */
+ 20, 19,
+};
+static const unsigned int scif0_data_b_mux[] = {
+ RX0_B_MARK, TX0_B_MARK,
+};
+static const unsigned int scif0_clk_b_pins[] = {
+ /* SCK */
+ 33,
+};
+static const unsigned int scif0_clk_b_mux[] = {
+ SCK0_B_MARK,
+};
+static const unsigned int scif0_ctrl_b_pins[] = {
+ /* RTS, CTS */
+ 18, 11,
+};
+static const unsigned int scif0_ctrl_b_mux[] = {
+ RTS0_B_TANS_B_MARK, CTS0_B_MARK,
+};
+static const unsigned int scif0_data_c_pins[] = {
+ /* RXD, TXD */
+ 146, 147,
+};
+static const unsigned int scif0_data_c_mux[] = {
+ RX0_C_MARK, TX0_C_MARK,
+};
+static const unsigned int scif0_clk_c_pins[] = {
+ /* SCK */
+ 145,
+};
+static const unsigned int scif0_clk_c_mux[] = {
+ SCK0_C_MARK,
+};
+static const unsigned int scif0_ctrl_c_pins[] = {
+ /* RTS, CTS */
+ 149, 148,
+};
+static const unsigned int scif0_ctrl_c_mux[] = {
+ RTS0_C_TANS_C_MARK, CTS0_C_MARK,
+};
+static const unsigned int scif0_data_d_pins[] = {
+ /* RXD, TXD */
+ 43, 42,
+};
+static const unsigned int scif0_data_d_mux[] = {
+ RX0_D_MARK, TX0_D_MARK,
+};
+static const unsigned int scif0_clk_d_pins[] = {
+ /* SCK */
+ 50,
+};
+static const unsigned int scif0_clk_d_mux[] = {
+ SCK0_D_MARK,
+};
+static const unsigned int scif0_ctrl_d_pins[] = {
+ /* RTS, CTS */
+ 51, 35,
+};
+static const unsigned int scif0_ctrl_d_mux[] = {
+ RTS0_D_TANS_D_MARK, CTS0_D_MARK,
+};
+/* - SCIF1 ------------------------------------------------------------------ */
+static const unsigned int scif1_data_pins[] = {
+ /* RXD, TXD */
+ 149, 148,
+};
+static const unsigned int scif1_data_mux[] = {
+ RX1_MARK, TX1_MARK,
+};
+static const unsigned int scif1_clk_pins[] = {
+ /* SCK */
+ 145,
+};
+static const unsigned int scif1_clk_mux[] = {
+ SCK1_MARK,
+};
+static const unsigned int scif1_ctrl_pins[] = {
+ /* RTS, CTS */
+ 147, 146,
+};
+static const unsigned int scif1_ctrl_mux[] = {
+ RTS1_TANS_MARK, CTS1_MARK,
+};
+static const unsigned int scif1_data_b_pins[] = {
+ /* RXD, TXD */
+ 117, 114,
+};
+static const unsigned int scif1_data_b_mux[] = {
+ RX1_B_MARK, TX1_B_MARK,
+};
+static const unsigned int scif1_clk_b_pins[] = {
+ /* SCK */
+ 113,
+};
+static const unsigned int scif1_clk_b_mux[] = {
+ SCK1_B_MARK,
+};
+static const unsigned int scif1_ctrl_b_pins[] = {
+ /* RTS, CTS */
+ 115, 116,
+};
+static const unsigned int scif1_ctrl_b_mux[] = {
+ RTS1_B_TANS_B_MARK, CTS1_B_MARK,
+};
+static const unsigned int scif1_data_c_pins[] = {
+ /* RXD, TXD */
+ 67, 66,
+};
+static const unsigned int scif1_data_c_mux[] = {
+ RX1_C_MARK, TX1_C_MARK,
+};
+static const unsigned int scif1_clk_c_pins[] = {
+ /* SCK */
+ 86,
+};
+static const unsigned int scif1_clk_c_mux[] = {
+ SCK1_C_MARK,
+};
+static const unsigned int scif1_ctrl_c_pins[] = {
+ /* RTS, CTS */
+ 69, 68,
+};
+static const unsigned int scif1_ctrl_c_mux[] = {
+ RTS1_C_TANS_C_MARK, CTS1_C_MARK,
+};
+/* - SCIF2 ------------------------------------------------------------------ */
+static const unsigned int scif2_data_pins[] = {
+ /* RXD, TXD */
+ 106, 105,
+};
+static const unsigned int scif2_data_mux[] = {
+ RX2_MARK, TX2_MARK,
+};
+static const unsigned int scif2_clk_pins[] = {
+ /* SCK */
+ 107,
+};
+static const unsigned int scif2_clk_mux[] = {
+ SCK2_MARK,
+};
+static const unsigned int scif2_data_b_pins[] = {
+ /* RXD, TXD */
+ 120, 119,
+};
+static const unsigned int scif2_data_b_mux[] = {
+ RX2_B_MARK, TX2_B_MARK,
+};
+static const unsigned int scif2_clk_b_pins[] = {
+ /* SCK */
+ 118,
+};
+static const unsigned int scif2_clk_b_mux[] = {
+ SCK2_B_MARK,
+};
+static const unsigned int scif2_data_c_pins[] = {
+ /* RXD, TXD */
+ 33, 31,
+};
+static const unsigned int scif2_data_c_mux[] = {
+ RX2_C_MARK, TX2_C_MARK,
+};
+static const unsigned int scif2_clk_c_pins[] = {
+ /* SCK */
+ 32,
+};
+static const unsigned int scif2_clk_c_mux[] = {
+ SCK2_C_MARK,
+};
+static const unsigned int scif2_data_d_pins[] = {
+ /* RXD, TXD */
+ 64, 62,
+};
+static const unsigned int scif2_data_d_mux[] = {
+ RX2_D_MARK, TX2_D_MARK,
+};
+static const unsigned int scif2_clk_d_pins[] = {
+ /* SCK */
+ 63,
+};
+static const unsigned int scif2_clk_d_mux[] = {
+ SCK2_D_MARK,
+};
+static const unsigned int scif2_data_e_pins[] = {
+ /* RXD, TXD */
+ 20, 19,
+};
+static const unsigned int scif2_data_e_mux[] = {
+ RX2_E_MARK, TX2_E_MARK,
+};
+/* - SCIF3 ------------------------------------------------------------------ */
+static const unsigned int scif3_data_pins[] = {
+ /* RXD, TXD */
+ 137, 136,
+};
+static const unsigned int scif3_data_mux[] = {
+ RX3_IRDA_RX_MARK, TX3_IRDA_TX_MARK,
+};
+static const unsigned int scif3_clk_pins[] = {
+ /* SCK */
+ 135,
+};
+static const unsigned int scif3_clk_mux[] = {
+ SCK3_MARK,
+};
- /* IPSR1 */
- GPIO_FN(EX_CS0), GPIO_FN(RX3_C_IRDA_RX_C), GPIO_FN(MMC0_D6),
- GPIO_FN(FD6), GPIO_FN(EX_CS1), GPIO_FN(MMC0_D7), GPIO_FN(FD7),
- GPIO_FN(EX_CS2), GPIO_FN(SD1_CLK), GPIO_FN(MMC0_CLK), GPIO_FN(FALE),
- GPIO_FN(ATACS00), GPIO_FN(EX_CS3), GPIO_FN(SD1_CMD), GPIO_FN(MMC0_CMD),
- GPIO_FN(FRE), GPIO_FN(ATACS10), GPIO_FN(VI1_R4), GPIO_FN(RX5_B),
- GPIO_FN(HSCK1), GPIO_FN(SSI_SDATA8_B), GPIO_FN(RTS0_B_TANS_B),
- GPIO_FN(SSI_SDATA9), GPIO_FN(EX_CS4), GPIO_FN(SD1_DAT0),
- GPIO_FN(MMC0_D0), GPIO_FN(FD0), GPIO_FN(ATARD0), GPIO_FN(VI1_R5),
- GPIO_FN(SCK5_B), GPIO_FN(HTX1), GPIO_FN(TX2_E), GPIO_FN(TX0_B),
- GPIO_FN(SSI_SCK9), GPIO_FN(EX_CS5), GPIO_FN(SD1_DAT1),
- GPIO_FN(MMC0_D1), GPIO_FN(FD1), GPIO_FN(ATAWR0), GPIO_FN(VI1_R6),
- GPIO_FN(HRX1), GPIO_FN(RX2_E), GPIO_FN(RX0_B), GPIO_FN(SSI_WS9),
- GPIO_FN(MLB_CLK), GPIO_FN(PWM2), GPIO_FN(SCK4), GPIO_FN(MLB_SIG),
- GPIO_FN(PWM3), GPIO_FN(TX4), GPIO_FN(MLB_DAT), GPIO_FN(PWM4),
- GPIO_FN(RX4), GPIO_FN(HTX0), GPIO_FN(TX1), GPIO_FN(SDATA),
- GPIO_FN(CTS0_C), GPIO_FN(SUB_TCK), GPIO_FN(CC5_STATE2),
- GPIO_FN(CC5_STATE10), GPIO_FN(CC5_STATE18), GPIO_FN(CC5_STATE26),
- GPIO_FN(CC5_STATE34),
+static const unsigned int scif3_data_b_pins[] = {
+ /* RXD, TXD */
+ 64, 62,
+};
+static const unsigned int scif3_data_b_mux[] = {
+ RX3_B_IRDA_RX_B_MARK, TX3_B_IRDA_TX_B_MARK,
+};
+static const unsigned int scif3_data_c_pins[] = {
+ /* RXD, TXD */
+ 15, 12,
+};
+static const unsigned int scif3_data_c_mux[] = {
+ RX3_C_IRDA_RX_C_MARK, TX3C_IRDA_TX_C_MARK,
+};
+static const unsigned int scif3_data_d_pins[] = {
+ /* RXD, TXD */
+ 30, 29,
+};
+static const unsigned int scif3_data_d_mux[] = {
+ RX3_D_IRDA_RX_D_MARK, TX3_D_IRDA_TX_D_MARK,
+};
+static const unsigned int scif3_data_e_pins[] = {
+ /* RXD, TXD */
+ 35, 34,
+};
+static const unsigned int scif3_data_e_mux[] = {
+ RX3_E_IRDA_RX_E_MARK, TX3_E_IRDA_TX_E_MARK,
+};
+static const unsigned int scif3_clk_e_pins[] = {
+ /* SCK */
+ 42,
+};
+static const unsigned int scif3_clk_e_mux[] = {
+ SCK3_E_MARK,
+};
+/* - SCIF4 ------------------------------------------------------------------ */
+static const unsigned int scif4_data_pins[] = {
+ /* RXD, TXD */
+ 123, 122,
+};
+static const unsigned int scif4_data_mux[] = {
+ RX4_MARK, TX4_MARK,
+};
+static const unsigned int scif4_clk_pins[] = {
+ /* SCK */
+ 121,
+};
+static const unsigned int scif4_clk_mux[] = {
+ SCK4_MARK,
+};
+static const unsigned int scif4_data_b_pins[] = {
+ /* RXD, TXD */
+ 111, 110,
+};
+static const unsigned int scif4_data_b_mux[] = {
+ RX4_B_MARK, TX4_B_MARK,
+};
+static const unsigned int scif4_clk_b_pins[] = {
+ /* SCK */
+ 112,
+};
+static const unsigned int scif4_clk_b_mux[] = {
+ SCK4_B_MARK,
+};
+static const unsigned int scif4_data_c_pins[] = {
+ /* RXD, TXD */
+ 22, 21,
+};
+static const unsigned int scif4_data_c_mux[] = {
+ RX4_C_MARK, TX4_C_MARK,
+};
+static const unsigned int scif4_data_d_pins[] = {
+ /* RXD, TXD */
+ 69, 68,
+};
+static const unsigned int scif4_data_d_mux[] = {
+ RX4_D_MARK, TX4_D_MARK,
+};
+/* - SCIF5 ------------------------------------------------------------------ */
+static const unsigned int scif5_data_pins[] = {
+ /* RXD, TXD */
+ 51, 50,
+};
+static const unsigned int scif5_data_mux[] = {
+ RX5_MARK, TX5_MARK,
+};
+static const unsigned int scif5_clk_pins[] = {
+ /* SCK */
+ 43,
+};
+static const unsigned int scif5_clk_mux[] = {
+ SCK5_MARK,
+};
+static const unsigned int scif5_data_b_pins[] = {
+ /* RXD, TXD */
+ 18, 11,
+};
+static const unsigned int scif5_data_b_mux[] = {
+ RX5_B_MARK, TX5_B_MARK,
+};
+static const unsigned int scif5_clk_b_pins[] = {
+ /* SCK */
+ 19,
+};
+static const unsigned int scif5_clk_b_mux[] = {
+ SCK5_B_MARK,
+};
+static const unsigned int scif5_data_c_pins[] = {
+ /* RXD, TXD */
+ 24, 23,
+};
+static const unsigned int scif5_data_c_mux[] = {
+ RX5_C_MARK, TX5_C_MARK,
+};
+static const unsigned int scif5_clk_c_pins[] = {
+ /* SCK */
+ 28,
+};
+static const unsigned int scif5_clk_c_mux[] = {
+ SCK5_C_MARK,
+};
+static const unsigned int scif5_data_d_pins[] = {
+ /* RXD, TXD */
+ 8, 6,
+};
+static const unsigned int scif5_data_d_mux[] = {
+ RX5_D_MARK, TX5_D_MARK,
+};
+static const unsigned int scif5_clk_d_pins[] = {
+ /* SCK */
+ 7,
+};
+static const unsigned int scif5_clk_d_mux[] = {
+ SCK5_D_MARK,
+};
+/* - SDHI0 ------------------------------------------------------------------ */
+static const unsigned int sdhi0_data1_pins[] = {
+ /* D0 */
+ 117,
+};
+static const unsigned int sdhi0_data1_mux[] = {
+ SD0_DAT0_MARK,
+};
+static const unsigned int sdhi0_data4_pins[] = {
+ /* D[0:3] */
+ 117, 118, 119, 120,
+};
+static const unsigned int sdhi0_data4_mux[] = {
+ SD0_DAT0_MARK, SD0_DAT1_MARK, SD0_DAT2_MARK, SD0_DAT3_MARK,
+};
+static const unsigned int sdhi0_ctrl_pins[] = {
+ /* CMD, CLK */
+ 114, 113,
+};
+static const unsigned int sdhi0_ctrl_mux[] = {
+ SD0_CMD_MARK, SD0_CLK_MARK,
+};
+static const unsigned int sdhi0_cd_pins[] = {
+ /* CD */
+ 115,
+};
+static const unsigned int sdhi0_cd_mux[] = {
+ SD0_CD_MARK,
+};
+static const unsigned int sdhi0_wp_pins[] = {
+ /* WP */
+ 116,
+};
+static const unsigned int sdhi0_wp_mux[] = {
+ SD0_WP_MARK,
+};
+/* - SDHI1 ------------------------------------------------------------------ */
+static const unsigned int sdhi1_data1_pins[] = {
+ /* D0 */
+ 19,
+};
+static const unsigned int sdhi1_data1_mux[] = {
+ SD1_DAT0_MARK,
+};
+static const unsigned int sdhi1_data4_pins[] = {
+ /* D[0:3] */
+ 19, 20, 21, 2,
+};
+static const unsigned int sdhi1_data4_mux[] = {
+ SD1_DAT0_MARK, SD1_DAT1_MARK, SD1_DAT2_MARK, SD1_DAT3_MARK,
+};
+static const unsigned int sdhi1_ctrl_pins[] = {
+ /* CMD, CLK */
+ 18, 17,
+};
+static const unsigned int sdhi1_ctrl_mux[] = {
+ SD1_CMD_MARK, SD1_CLK_MARK,
+};
+static const unsigned int sdhi1_cd_pins[] = {
+ /* CD */
+ 10,
+};
+static const unsigned int sdhi1_cd_mux[] = {
+ SD1_CD_MARK,
+};
+static const unsigned int sdhi1_wp_pins[] = {
+ /* WP */
+ 11,
+};
+static const unsigned int sdhi1_wp_mux[] = {
+ SD1_WP_MARK,
+};
+/* - SDHI2 ------------------------------------------------------------------ */
+static const unsigned int sdhi2_data1_pins[] = {
+ /* D0 */
+ 97,
+};
+static const unsigned int sdhi2_data1_mux[] = {
+ SD2_DAT0_MARK,
+};
+static const unsigned int sdhi2_data4_pins[] = {
+ /* D[0:3] */
+ 97, 98, 99, 100,
+};
+static const unsigned int sdhi2_data4_mux[] = {
+ SD2_DAT0_MARK, SD2_DAT1_MARK, SD2_DAT2_MARK, SD2_DAT3_MARK,
+};
+static const unsigned int sdhi2_ctrl_pins[] = {
+ /* CMD, CLK */
+ 102, 101,
+};
+static const unsigned int sdhi2_ctrl_mux[] = {
+ SD2_CMD_MARK, SD2_CLK_MARK,
+};
+static const unsigned int sdhi2_cd_pins[] = {
+ /* CD */
+ 103,
+};
+static const unsigned int sdhi2_cd_mux[] = {
+ SD2_CD_MARK,
+};
+static const unsigned int sdhi2_wp_pins[] = {
+ /* WP */
+ 104,
+};
+static const unsigned int sdhi2_wp_mux[] = {
+ SD2_WP_MARK,
+};
+/* - SDHI3 ------------------------------------------------------------------ */
+static const unsigned int sdhi3_data1_pins[] = {
+ /* D0 */
+ 50,
+};
+static const unsigned int sdhi3_data1_mux[] = {
+ SD3_DAT0_MARK,
+};
+static const unsigned int sdhi3_data4_pins[] = {
+ /* D[0:3] */
+ 50, 51, 52, 53,
+};
+static const unsigned int sdhi3_data4_mux[] = {
+ SD3_DAT0_MARK, SD3_DAT1_MARK, SD3_DAT2_MARK, SD3_DAT3_MARK,
+};
+static const unsigned int sdhi3_ctrl_pins[] = {
+ /* CMD, CLK */
+ 35, 34,
+};
+static const unsigned int sdhi3_ctrl_mux[] = {
+ SD3_CMD_MARK, SD3_CLK_MARK,
+};
+static const unsigned int sdhi3_cd_pins[] = {
+ /* CD */
+ 62,
+};
+static const unsigned int sdhi3_cd_mux[] = {
+ SD3_CD_MARK,
+};
+static const unsigned int sdhi3_wp_pins[] = {
+ /* WP */
+ 64,
+};
+static const unsigned int sdhi3_wp_mux[] = {
+ SD3_WP_MARK,
+};
+/* - USB0 ------------------------------------------------------------------- */
+static const unsigned int usb0_pins[] = {
+ /* OVC, PENC */
+ 150, 154,
+};
+static const unsigned int usb0_mux[] = {
+ USB_OVC0_MARK, USB_PENC0_MARK,
+};
+/* - USB1 ------------------------------------------------------------------- */
+static const unsigned int usb1_pins[] = {
+ /* OVC, PENC */
+ 152, 155,
+};
+static const unsigned int usb1_mux[] = {
+ USB_OVC1_MARK, USB_PENC1_MARK,
+};
+/* - USB2 ------------------------------------------------------------------- */
+static const unsigned int usb2_pins[] = {
+ /* OVC, PENC */
+ 125, 156,
+};
+static const unsigned int usb2_mux[] = {
+ USB_OVC2_MARK, USB_PENC2_MARK,
+};
- /* IPSR2 */
- GPIO_FN(HRX0), GPIO_FN(RX1), GPIO_FN(SCKZ), GPIO_FN(RTS0_C_TANS_C),
- GPIO_FN(SUB_TDI), GPIO_FN(CC5_STATE3), GPIO_FN(CC5_STATE11),
- GPIO_FN(CC5_STATE19), GPIO_FN(CC5_STATE27), GPIO_FN(CC5_STATE35),
- GPIO_FN(HSCK0), GPIO_FN(SCK1), GPIO_FN(MTS), GPIO_FN(PWM5),
- GPIO_FN(SCK0_C), GPIO_FN(SSI_SDATA9_B), GPIO_FN(SUB_TDO),
- GPIO_FN(CC5_STATE0), GPIO_FN(CC5_STATE8), GPIO_FN(CC5_STATE16),
- GPIO_FN(CC5_STATE24), GPIO_FN(CC5_STATE32), GPIO_FN(HCTS0),
- GPIO_FN(CTS1), GPIO_FN(STM), GPIO_FN(PWM0_D), GPIO_FN(RX0_C),
- GPIO_FN(SCIF_CLK_C), GPIO_FN(SUB_TRST), GPIO_FN(TCLK1_B),
- GPIO_FN(CC5_OSCOUT), GPIO_FN(HRTS0), GPIO_FN(RTS1_TANS),
- GPIO_FN(MDATA), GPIO_FN(TX0_C), GPIO_FN(SUB_TMS), GPIO_FN(CC5_STATE1),
- GPIO_FN(CC5_STATE9), GPIO_FN(CC5_STATE17), GPIO_FN(CC5_STATE25),
- GPIO_FN(CC5_STATE33), GPIO_FN(DU0_DR0), GPIO_FN(LCDOUT0),
- GPIO_FN(DREQ0), GPIO_FN(GPS_CLK_B), GPIO_FN(AUDATA0),
- GPIO_FN(TX5_C), GPIO_FN(DU0_DR1), GPIO_FN(LCDOUT1), GPIO_FN(DACK0),
- GPIO_FN(DRACK0), GPIO_FN(GPS_SIGN_B), GPIO_FN(AUDATA1), GPIO_FN(RX5_C),
- GPIO_FN(DU0_DR2), GPIO_FN(LCDOUT2), GPIO_FN(DU0_DR3), GPIO_FN(LCDOUT3),
- GPIO_FN(DU0_DR4), GPIO_FN(LCDOUT4), GPIO_FN(DU0_DR5), GPIO_FN(LCDOUT5),
- GPIO_FN(DU0_DR6), GPIO_FN(LCDOUT6), GPIO_FN(DU0_DR7), GPIO_FN(LCDOUT7),
- GPIO_FN(DU0_DG0), GPIO_FN(LCDOUT8), GPIO_FN(DREQ1), GPIO_FN(SCL2),
- GPIO_FN(AUDATA2),
+static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(du0_rgb666),
+ SH_PFC_PIN_GROUP(du0_rgb888),
+ SH_PFC_PIN_GROUP(du0_clk_in),
+ SH_PFC_PIN_GROUP(du0_clk_out_0),
+ SH_PFC_PIN_GROUP(du0_clk_out_1),
+ SH_PFC_PIN_GROUP(du0_sync_0),
+ SH_PFC_PIN_GROUP(du0_sync_1),
+ SH_PFC_PIN_GROUP(du0_oddf),
+ SH_PFC_PIN_GROUP(du0_cde),
+ SH_PFC_PIN_GROUP(du1_rgb666),
+ SH_PFC_PIN_GROUP(du1_rgb888),
+ SH_PFC_PIN_GROUP(du1_clk_in),
+ SH_PFC_PIN_GROUP(du1_clk_out),
+ SH_PFC_PIN_GROUP(du1_sync_0),
+ SH_PFC_PIN_GROUP(du1_sync_1),
+ SH_PFC_PIN_GROUP(du1_oddf),
+ SH_PFC_PIN_GROUP(du1_cde),
+ SH_PFC_PIN_GROUP(hspi0),
+ SH_PFC_PIN_GROUP(hspi1),
+ SH_PFC_PIN_GROUP(hspi1_b),
+ SH_PFC_PIN_GROUP(hspi1_c),
+ SH_PFC_PIN_GROUP(hspi1_d),
+ SH_PFC_PIN_GROUP(hspi2),
+ SH_PFC_PIN_GROUP(hspi2_b),
+ SH_PFC_PIN_GROUP(intc_irq0),
+ SH_PFC_PIN_GROUP(intc_irq0_b),
+ SH_PFC_PIN_GROUP(intc_irq1),
+ SH_PFC_PIN_GROUP(intc_irq1_b),
+ SH_PFC_PIN_GROUP(intc_irq2),
+ SH_PFC_PIN_GROUP(intc_irq2_b),
+ SH_PFC_PIN_GROUP(intc_irq3),
+ SH_PFC_PIN_GROUP(intc_irq3_b),
+ SH_PFC_PIN_GROUP(lbsc_cs0),
+ SH_PFC_PIN_GROUP(lbsc_cs1),
+ SH_PFC_PIN_GROUP(lbsc_ex_cs0),
+ SH_PFC_PIN_GROUP(lbsc_ex_cs1),
+ SH_PFC_PIN_GROUP(lbsc_ex_cs2),
+ SH_PFC_PIN_GROUP(lbsc_ex_cs3),
+ SH_PFC_PIN_GROUP(lbsc_ex_cs4),
+ SH_PFC_PIN_GROUP(lbsc_ex_cs5),
+ SH_PFC_PIN_GROUP(mmc0_data1),
+ SH_PFC_PIN_GROUP(mmc0_data4),
+ SH_PFC_PIN_GROUP(mmc0_data8),
+ SH_PFC_PIN_GROUP(mmc0_ctrl),
+ SH_PFC_PIN_GROUP(mmc1_data1),
+ SH_PFC_PIN_GROUP(mmc1_data4),
+ SH_PFC_PIN_GROUP(mmc1_data8),
+ SH_PFC_PIN_GROUP(mmc1_ctrl),
+ SH_PFC_PIN_GROUP(scif0_data),
+ SH_PFC_PIN_GROUP(scif0_clk),
+ SH_PFC_PIN_GROUP(scif0_ctrl),
+ SH_PFC_PIN_GROUP(scif0_data_b),
+ SH_PFC_PIN_GROUP(scif0_clk_b),
+ SH_PFC_PIN_GROUP(scif0_ctrl_b),
+ SH_PFC_PIN_GROUP(scif0_data_c),
+ SH_PFC_PIN_GROUP(scif0_clk_c),
+ SH_PFC_PIN_GROUP(scif0_ctrl_c),
+ SH_PFC_PIN_GROUP(scif0_data_d),
+ SH_PFC_PIN_GROUP(scif0_clk_d),
+ SH_PFC_PIN_GROUP(scif0_ctrl_d),
+ SH_PFC_PIN_GROUP(scif1_data),
+ SH_PFC_PIN_GROUP(scif1_clk),
+ SH_PFC_PIN_GROUP(scif1_ctrl),
+ SH_PFC_PIN_GROUP(scif1_data_b),
+ SH_PFC_PIN_GROUP(scif1_clk_b),
+ SH_PFC_PIN_GROUP(scif1_ctrl_b),
+ SH_PFC_PIN_GROUP(scif1_data_c),
+ SH_PFC_PIN_GROUP(scif1_clk_c),
+ SH_PFC_PIN_GROUP(scif1_ctrl_c),
+ SH_PFC_PIN_GROUP(scif2_data),
+ SH_PFC_PIN_GROUP(scif2_clk),
+ SH_PFC_PIN_GROUP(scif2_data_b),
+ SH_PFC_PIN_GROUP(scif2_clk_b),
+ SH_PFC_PIN_GROUP(scif2_data_c),
+ SH_PFC_PIN_GROUP(scif2_clk_c),
+ SH_PFC_PIN_GROUP(scif2_data_d),
+ SH_PFC_PIN_GROUP(scif2_clk_d),
+ SH_PFC_PIN_GROUP(scif2_data_e),
+ SH_PFC_PIN_GROUP(scif3_data),
+ SH_PFC_PIN_GROUP(scif3_clk),
+ SH_PFC_PIN_GROUP(scif3_data_b),
+ SH_PFC_PIN_GROUP(scif3_data_c),
+ SH_PFC_PIN_GROUP(scif3_data_d),
+ SH_PFC_PIN_GROUP(scif3_data_e),
+ SH_PFC_PIN_GROUP(scif3_clk_e),
+ SH_PFC_PIN_GROUP(scif4_data),
+ SH_PFC_PIN_GROUP(scif4_clk),
+ SH_PFC_PIN_GROUP(scif4_data_b),
+ SH_PFC_PIN_GROUP(scif4_clk_b),
+ SH_PFC_PIN_GROUP(scif4_data_c),
+ SH_PFC_PIN_GROUP(scif4_data_d),
+ SH_PFC_PIN_GROUP(scif5_data),
+ SH_PFC_PIN_GROUP(scif5_clk),
+ SH_PFC_PIN_GROUP(scif5_data_b),
+ SH_PFC_PIN_GROUP(scif5_clk_b),
+ SH_PFC_PIN_GROUP(scif5_data_c),
+ SH_PFC_PIN_GROUP(scif5_clk_c),
+ SH_PFC_PIN_GROUP(scif5_data_d),
+ SH_PFC_PIN_GROUP(scif5_clk_d),
+ SH_PFC_PIN_GROUP(sdhi0_data1),
+ SH_PFC_PIN_GROUP(sdhi0_data4),
+ SH_PFC_PIN_GROUP(sdhi0_ctrl),
+ SH_PFC_PIN_GROUP(sdhi0_cd),
+ SH_PFC_PIN_GROUP(sdhi0_wp),
+ SH_PFC_PIN_GROUP(sdhi1_data1),
+ SH_PFC_PIN_GROUP(sdhi1_data4),
+ SH_PFC_PIN_GROUP(sdhi1_ctrl),
+ SH_PFC_PIN_GROUP(sdhi1_cd),
+ SH_PFC_PIN_GROUP(sdhi1_wp),
+ SH_PFC_PIN_GROUP(sdhi2_data1),
+ SH_PFC_PIN_GROUP(sdhi2_data4),
+ SH_PFC_PIN_GROUP(sdhi2_ctrl),
+ SH_PFC_PIN_GROUP(sdhi2_cd),
+ SH_PFC_PIN_GROUP(sdhi2_wp),
+ SH_PFC_PIN_GROUP(sdhi3_data1),
+ SH_PFC_PIN_GROUP(sdhi3_data4),
+ SH_PFC_PIN_GROUP(sdhi3_ctrl),
+ SH_PFC_PIN_GROUP(sdhi3_cd),
+ SH_PFC_PIN_GROUP(sdhi3_wp),
+ SH_PFC_PIN_GROUP(usb0),
+ SH_PFC_PIN_GROUP(usb1),
+ SH_PFC_PIN_GROUP(usb2),
+};
- /* IPSR3 */
- GPIO_FN(DU0_DG1), GPIO_FN(LCDOUT9), GPIO_FN(DACK1), GPIO_FN(SDA2),
- GPIO_FN(AUDATA3), GPIO_FN(DU0_DG2), GPIO_FN(LCDOUT10),
- GPIO_FN(DU0_DG3), GPIO_FN(LCDOUT11), GPIO_FN(DU0_DG4),
- GPIO_FN(LCDOUT12), GPIO_FN(DU0_DG5), GPIO_FN(LCDOUT13),
- GPIO_FN(DU0_DG6), GPIO_FN(LCDOUT14), GPIO_FN(DU0_DG7),
- GPIO_FN(LCDOUT15), GPIO_FN(DU0_DB0), GPIO_FN(LCDOUT16),
- GPIO_FN(EX_WAIT1), GPIO_FN(SCL1), GPIO_FN(TCLK1), GPIO_FN(AUDATA4),
- GPIO_FN(DU0_DB1), GPIO_FN(LCDOUT17), GPIO_FN(EX_WAIT2), GPIO_FN(SDA1),
- GPIO_FN(GPS_MAG_B), GPIO_FN(AUDATA5), GPIO_FN(SCK5_C),
- GPIO_FN(DU0_DB2), GPIO_FN(LCDOUT18), GPIO_FN(DU0_DB3),
- GPIO_FN(LCDOUT19), GPIO_FN(DU0_DB4), GPIO_FN(LCDOUT20),
- GPIO_FN(DU0_DB5), GPIO_FN(LCDOUT21), GPIO_FN(DU0_DB6),
- GPIO_FN(LCDOUT22), GPIO_FN(DU0_DB7), GPIO_FN(LCDOUT23),
- GPIO_FN(DU0_DOTCLKIN), GPIO_FN(QSTVA_QVS), GPIO_FN(TX3_D_IRDA_TX_D),
- GPIO_FN(SCL3_B), GPIO_FN(DU0_DOTCLKOUT0), GPIO_FN(QCLK),
- GPIO_FN(DU0_DOTCLKOUT1), GPIO_FN(QSTVB_QVE), GPIO_FN(RX3_D_IRDA_RX_D),
- GPIO_FN(SDA3_B), GPIO_FN(SDA2_C), GPIO_FN(DACK0_B), GPIO_FN(DRACK0_B),
- GPIO_FN(DU0_EXHSYNC_DU0_HSYNC), GPIO_FN(QSTH_QHS),
- GPIO_FN(DU0_EXVSYNC_DU0_VSYNC), GPIO_FN(QSTB_QHE),
- GPIO_FN(DU0_EXODDF_DU0_ODDF_DISP_CDE), GPIO_FN(QCPV_QDE),
- GPIO_FN(CAN1_TX), GPIO_FN(TX2_C), GPIO_FN(SCL2_C), GPIO_FN(REMOCON),
+static const char * const du0_groups[] = {
+ "du0_rgb666",
+ "du0_rgb888",
+ "du0_clk_in",
+ "du0_clk_out_0",
+ "du0_clk_out_1",
+ "du0_sync_0",
+ "du0_sync_1",
+ "du0_oddf",
+ "du0_cde",
+};
- /* IPSR4 */
- GPIO_FN(DU0_DISP), GPIO_FN(QPOLA), GPIO_FN(CAN_CLK_C), GPIO_FN(SCK2_C),
- GPIO_FN(DU0_CDE), GPIO_FN(QPOLB), GPIO_FN(CAN1_RX), GPIO_FN(RX2_C),
- GPIO_FN(DREQ0_B), GPIO_FN(SSI_SCK78_B), GPIO_FN(SCK0_B),
- GPIO_FN(DU1_DR0), GPIO_FN(VI2_DATA0_VI2_B0), GPIO_FN(PWM6),
- GPIO_FN(SD3_CLK), GPIO_FN(TX3_E_IRDA_TX_E), GPIO_FN(AUDCK),
- GPIO_FN(PWMFSW0_B), GPIO_FN(DU1_DR1), GPIO_FN(VI2_DATA1_VI2_B1),
- GPIO_FN(PWM0), GPIO_FN(SD3_CMD), GPIO_FN(RX3_E_IRDA_RX_E),
- GPIO_FN(AUDSYNC), GPIO_FN(CTS0_D), GPIO_FN(DU1_DR2), GPIO_FN(VI2_G0),
- GPIO_FN(DU1_DR3), GPIO_FN(VI2_G1), GPIO_FN(DU1_DR4), GPIO_FN(VI2_G2),
- GPIO_FN(DU1_DR5), GPIO_FN(VI2_G3), GPIO_FN(DU1_DR6), GPIO_FN(VI2_G4),
- GPIO_FN(DU1_DR7), GPIO_FN(VI2_G5), GPIO_FN(DU1_DG0),
- GPIO_FN(VI2_DATA2_VI2_B2), GPIO_FN(SCL1_B), GPIO_FN(SD3_DAT2),
- GPIO_FN(SCK3_E), GPIO_FN(AUDATA6), GPIO_FN(TX0_D), GPIO_FN(DU1_DG1),
- GPIO_FN(VI2_DATA3_VI2_B3), GPIO_FN(SDA1_B), GPIO_FN(SD3_DAT3),
- GPIO_FN(SCK5), GPIO_FN(AUDATA7), GPIO_FN(RX0_D), GPIO_FN(DU1_DG2),
- GPIO_FN(VI2_G6), GPIO_FN(DU1_DG3), GPIO_FN(VI2_G7), GPIO_FN(DU1_DG4),
- GPIO_FN(VI2_R0), GPIO_FN(DU1_DG5), GPIO_FN(VI2_R1), GPIO_FN(DU1_DG6),
- GPIO_FN(VI2_R2), GPIO_FN(DU1_DG7), GPIO_FN(VI2_R3), GPIO_FN(DU1_DB0),
- GPIO_FN(VI2_DATA4_VI2_B4), GPIO_FN(SCL2_B), GPIO_FN(SD3_DAT0),
- GPIO_FN(TX5), GPIO_FN(SCK0_D),
+static const char * const du1_groups[] = {
+ "du1_rgb666",
+ "du1_rgb888",
+ "du1_clk_in",
+ "du1_clk_out",
+ "du1_sync_0",
+ "du1_sync_1",
+ "du1_oddf",
+ "du1_cde",
+};
- /* IPSR5 */
- GPIO_FN(DU1_DB1), GPIO_FN(VI2_DATA5_VI2_B5), GPIO_FN(SDA2_B),
- GPIO_FN(SD3_DAT1), GPIO_FN(RX5), GPIO_FN(RTS0_D_TANS_D),
- GPIO_FN(DU1_DB2), GPIO_FN(VI2_R4), GPIO_FN(DU1_DB3), GPIO_FN(VI2_R5),
- GPIO_FN(DU1_DB4), GPIO_FN(VI2_R6), GPIO_FN(DU1_DB5), GPIO_FN(VI2_R7),
- GPIO_FN(DU1_DB6), GPIO_FN(SCL2_D), GPIO_FN(DU1_DB7), GPIO_FN(SDA2_D),
- GPIO_FN(DU1_DOTCLKIN), GPIO_FN(VI2_CLKENB), GPIO_FN(HSPI_CS1),
- GPIO_FN(SCL1_D), GPIO_FN(DU1_DOTCLKOUT), GPIO_FN(VI2_FIELD),
- GPIO_FN(SDA1_D), GPIO_FN(DU1_EXHSYNC_DU1_HSYNC), GPIO_FN(VI2_HSYNC),
- GPIO_FN(VI3_HSYNC), GPIO_FN(DU1_EXVSYNC_DU1_VSYNC), GPIO_FN(VI2_VSYNC),
- GPIO_FN(VI3_VSYNC), GPIO_FN(DU1_EXODDF_DU1_ODDF_DISP_CDE),
- GPIO_FN(VI2_CLK), GPIO_FN(TX3_B_IRDA_TX_B), GPIO_FN(SD3_CD),
- GPIO_FN(HSPI_TX1), GPIO_FN(VI1_CLKENB), GPIO_FN(VI3_CLKENB),
- GPIO_FN(AUDIO_CLKC), GPIO_FN(TX2_D), GPIO_FN(SPEEDIN),
- GPIO_FN(GPS_SIGN_D), GPIO_FN(DU1_DISP), GPIO_FN(VI2_DATA6_VI2_B6),
- GPIO_FN(TCLK0), GPIO_FN(QSTVA_B_QVS_B), GPIO_FN(HSPI_CLK1),
- GPIO_FN(SCK2_D), GPIO_FN(AUDIO_CLKOUT_B), GPIO_FN(GPS_MAG_D),
- GPIO_FN(DU1_CDE), GPIO_FN(VI2_DATA7_VI2_B7), GPIO_FN(RX3_B_IRDA_RX_B),
- GPIO_FN(SD3_WP), GPIO_FN(HSPI_RX1), GPIO_FN(VI1_FIELD),
- GPIO_FN(VI3_FIELD), GPIO_FN(AUDIO_CLKOUT), GPIO_FN(RX2_D),
- GPIO_FN(GPS_CLK_C), GPIO_FN(GPS_CLK_D), GPIO_FN(AUDIO_CLKA),
- GPIO_FN(CAN_TXCLK), GPIO_FN(AUDIO_CLKB), GPIO_FN(USB_OVC2),
- GPIO_FN(CAN_DEBUGOUT0), GPIO_FN(MOUT0),
+static const char * const hspi0_groups[] = {
+ "hspi0",
+};
- /* IPSR6 */
- GPIO_FN(SSI_SCK0129), GPIO_FN(CAN_DEBUGOUT1), GPIO_FN(MOUT1),
- GPIO_FN(SSI_WS0129), GPIO_FN(CAN_DEBUGOUT2), GPIO_FN(MOUT2),
- GPIO_FN(SSI_SDATA0), GPIO_FN(CAN_DEBUGOUT3), GPIO_FN(MOUT5),
- GPIO_FN(SSI_SDATA1), GPIO_FN(CAN_DEBUGOUT4), GPIO_FN(MOUT6),
- GPIO_FN(SSI_SDATA2), GPIO_FN(CAN_DEBUGOUT5), GPIO_FN(SSI_SCK34),
- GPIO_FN(CAN_DEBUGOUT6), GPIO_FN(CAN0_TX_B), GPIO_FN(IERX),
- GPIO_FN(SSI_SCK9_C), GPIO_FN(SSI_WS34), GPIO_FN(CAN_DEBUGOUT7),
- GPIO_FN(CAN0_RX_B), GPIO_FN(IETX), GPIO_FN(SSI_WS9_C),
- GPIO_FN(SSI_SDATA3), GPIO_FN(PWM0_C), GPIO_FN(CAN_DEBUGOUT8),
- GPIO_FN(CAN_CLK_B), GPIO_FN(IECLK), GPIO_FN(SCIF_CLK_B),
- GPIO_FN(TCLK0_B), GPIO_FN(SSI_SDATA4), GPIO_FN(CAN_DEBUGOUT9),
- GPIO_FN(SSI_SDATA9_C), GPIO_FN(SSI_SCK5), GPIO_FN(ADICLK),
- GPIO_FN(CAN_DEBUGOUT10), GPIO_FN(SCK3), GPIO_FN(TCLK0_D),
- GPIO_FN(SSI_WS5), GPIO_FN(ADICS_SAMP), GPIO_FN(CAN_DEBUGOUT11),
- GPIO_FN(TX3_IRDA_TX), GPIO_FN(SSI_SDATA5), GPIO_FN(ADIDATA),
- GPIO_FN(CAN_DEBUGOUT12), GPIO_FN(RX3_IRDA_RX), GPIO_FN(SSI_SCK6),
- GPIO_FN(ADICHS0), GPIO_FN(CAN0_TX), GPIO_FN(IERX_B),
+static const char * const hspi1_groups[] = {
+ "hspi1",
+ "hspi1_b",
+ "hspi1_c",
+ "hspi1_d",
+};
- /* IPSR7 */
- GPIO_FN(SSI_WS6), GPIO_FN(ADICHS1), GPIO_FN(CAN0_RX), GPIO_FN(IETX_B),
- GPIO_FN(SSI_SDATA6), GPIO_FN(ADICHS2), GPIO_FN(CAN_CLK),
- GPIO_FN(IECLK_B), GPIO_FN(SSI_SCK78), GPIO_FN(CAN_DEBUGOUT13),
- GPIO_FN(IRQ0_B), GPIO_FN(SSI_SCK9_B), GPIO_FN(HSPI_CLK1_C),
- GPIO_FN(SSI_WS78), GPIO_FN(CAN_DEBUGOUT14), GPIO_FN(IRQ1_B),
- GPIO_FN(SSI_WS9_B), GPIO_FN(HSPI_CS1_C), GPIO_FN(SSI_SDATA7),
- GPIO_FN(CAN_DEBUGOUT15), GPIO_FN(IRQ2_B), GPIO_FN(TCLK1_C),
- GPIO_FN(HSPI_TX1_C), GPIO_FN(SSI_SDATA8), GPIO_FN(VSP),
- GPIO_FN(IRQ3_B), GPIO_FN(HSPI_RX1_C), GPIO_FN(SD0_CLK),
- GPIO_FN(ATACS01), GPIO_FN(SCK1_B), GPIO_FN(SD0_CMD), GPIO_FN(ATACS11),
- GPIO_FN(TX1_B), GPIO_FN(CC5_TDO), GPIO_FN(SD0_DAT0), GPIO_FN(ATADIR1),
- GPIO_FN(RX1_B), GPIO_FN(CC5_TRST), GPIO_FN(SD0_DAT1), GPIO_FN(ATAG1),
- GPIO_FN(SCK2_B), GPIO_FN(CC5_TMS), GPIO_FN(SD0_DAT2), GPIO_FN(ATARD1),
- GPIO_FN(TX2_B), GPIO_FN(CC5_TCK), GPIO_FN(SD0_DAT3), GPIO_FN(ATAWR1),
- GPIO_FN(RX2_B), GPIO_FN(CC5_TDI), GPIO_FN(SD0_CD), GPIO_FN(DREQ2),
- GPIO_FN(RTS1_B_TANS_B), GPIO_FN(SD0_WP), GPIO_FN(DACK2),
- GPIO_FN(CTS1_B),
+static const char * const hspi2_groups[] = {
+ "hspi2",
+ "hspi2_b",
+};
- /* IPSR8 */
- GPIO_FN(HSPI_CLK0), GPIO_FN(CTS0), GPIO_FN(USB_OVC0), GPIO_FN(AD_CLK),
- GPIO_FN(CC5_STATE4), GPIO_FN(CC5_STATE12), GPIO_FN(CC5_STATE20),
- GPIO_FN(CC5_STATE28), GPIO_FN(CC5_STATE36), GPIO_FN(HSPI_CS0),
- GPIO_FN(RTS0_TANS), GPIO_FN(USB_OVC1), GPIO_FN(AD_DI),
- GPIO_FN(CC5_STATE5), GPIO_FN(CC5_STATE13), GPIO_FN(CC5_STATE21),
- GPIO_FN(CC5_STATE29), GPIO_FN(CC5_STATE37), GPIO_FN(HSPI_TX0),
- GPIO_FN(TX0), GPIO_FN(CAN_DEBUG_HW_TRIGGER), GPIO_FN(AD_DO),
- GPIO_FN(CC5_STATE6), GPIO_FN(CC5_STATE14), GPIO_FN(CC5_STATE22),
- GPIO_FN(CC5_STATE30), GPIO_FN(CC5_STATE38), GPIO_FN(HSPI_RX0),
- GPIO_FN(RX0), GPIO_FN(CAN_STEP0), GPIO_FN(AD_NCS), GPIO_FN(CC5_STATE7),
- GPIO_FN(CC5_STATE15), GPIO_FN(CC5_STATE23), GPIO_FN(CC5_STATE31),
- GPIO_FN(CC5_STATE39), GPIO_FN(FMCLK), GPIO_FN(RDS_CLK), GPIO_FN(PCMOE),
- GPIO_FN(BPFCLK), GPIO_FN(PCMWE), GPIO_FN(FMIN), GPIO_FN(RDS_DATA),
- GPIO_FN(VI0_CLK), GPIO_FN(MMC1_CLK), GPIO_FN(VI0_CLKENB),
- GPIO_FN(TX1_C), GPIO_FN(HTX1_B), GPIO_FN(MT1_SYNC),
- GPIO_FN(VI0_FIELD), GPIO_FN(RX1_C), GPIO_FN(HRX1_B),
- GPIO_FN(VI0_HSYNC), GPIO_FN(VI0_DATA0_B_VI0_B0_B), GPIO_FN(CTS1_C),
- GPIO_FN(TX4_D), GPIO_FN(MMC1_CMD), GPIO_FN(HSCK1_B),
- GPIO_FN(VI0_VSYNC), GPIO_FN(VI0_DATA1_B_VI0_B1_B),
- GPIO_FN(RTS1_C_TANS_C), GPIO_FN(RX4_D), GPIO_FN(PWMFSW0_C),
+static const char * const intc_groups[] = {
+ "intc_irq0",
+ "intc_irq0_b",
+ "intc_irq1",
+ "intc_irq1_b",
+ "intc_irq2",
+ "intc_irq2_b",
+ "intc_irq3",
+ "intc_irq3_b",
+};
- /* IPSR9 */
- GPIO_FN(VI0_DATA0_VI0_B0), GPIO_FN(HRTS1_B), GPIO_FN(MT1_VCXO),
- GPIO_FN(VI0_DATA1_VI0_B1), GPIO_FN(HCTS1_B), GPIO_FN(MT1_PWM),
- GPIO_FN(VI0_DATA2_VI0_B2), GPIO_FN(MMC1_D0), GPIO_FN(VI0_DATA3_VI0_B3),
- GPIO_FN(MMC1_D1), GPIO_FN(VI0_DATA4_VI0_B4), GPIO_FN(MMC1_D2),
- GPIO_FN(VI0_DATA5_VI0_B5), GPIO_FN(MMC1_D3), GPIO_FN(VI0_DATA6_VI0_B6),
- GPIO_FN(MMC1_D4), GPIO_FN(ARM_TRACEDATA_0), GPIO_FN(VI0_DATA7_VI0_B7),
- GPIO_FN(MMC1_D5), GPIO_FN(ARM_TRACEDATA_1), GPIO_FN(VI0_G0),
- GPIO_FN(SSI_SCK78_C), GPIO_FN(IRQ0), GPIO_FN(ARM_TRACEDATA_2),
- GPIO_FN(VI0_G1), GPIO_FN(SSI_WS78_C), GPIO_FN(IRQ1),
- GPIO_FN(ARM_TRACEDATA_3), GPIO_FN(VI0_G2), GPIO_FN(ETH_TXD1),
- GPIO_FN(MMC1_D6), GPIO_FN(ARM_TRACEDATA_4), GPIO_FN(TS_SPSYNC0),
- GPIO_FN(VI0_G3), GPIO_FN(ETH_CRS_DV), GPIO_FN(MMC1_D7),
- GPIO_FN(ARM_TRACEDATA_5), GPIO_FN(TS_SDAT0), GPIO_FN(VI0_G4),
- GPIO_FN(ETH_TX_EN), GPIO_FN(SD2_DAT0_B), GPIO_FN(ARM_TRACEDATA_6),
- GPIO_FN(VI0_G5), GPIO_FN(ETH_RX_ER), GPIO_FN(SD2_DAT1_B),
- GPIO_FN(ARM_TRACEDATA_7), GPIO_FN(VI0_G6), GPIO_FN(ETH_RXD0),
- GPIO_FN(SD2_DAT2_B), GPIO_FN(ARM_TRACEDATA_8), GPIO_FN(VI0_G7),
- GPIO_FN(ETH_RXD1), GPIO_FN(SD2_DAT3_B), GPIO_FN(ARM_TRACEDATA_9),
+static const char * const lbsc_groups[] = {
+ "lbsc_cs0",
+ "lbsc_cs1",
+ "lbsc_ex_cs0",
+ "lbsc_ex_cs1",
+ "lbsc_ex_cs2",
+ "lbsc_ex_cs3",
+ "lbsc_ex_cs4",
+ "lbsc_ex_cs5",
+};
- /* IPSR10 */
- GPIO_FN(VI0_R0), GPIO_FN(SSI_SDATA7_C), GPIO_FN(SCK1_C),
- GPIO_FN(DREQ1_B), GPIO_FN(ARM_TRACEDATA_10), GPIO_FN(DREQ0_C),
- GPIO_FN(VI0_R1), GPIO_FN(SSI_SDATA8_C), GPIO_FN(DACK1_B),
- GPIO_FN(ARM_TRACEDATA_11), GPIO_FN(DACK0_C), GPIO_FN(DRACK0_C),
- GPIO_FN(VI0_R2), GPIO_FN(ETH_LINK), GPIO_FN(SD2_CLK_B), GPIO_FN(IRQ2),
- GPIO_FN(ARM_TRACEDATA_12), GPIO_FN(VI0_R3), GPIO_FN(ETH_MAGIC),
- GPIO_FN(SD2_CMD_B), GPIO_FN(IRQ3), GPIO_FN(ARM_TRACEDATA_13),
- GPIO_FN(VI0_R4), GPIO_FN(ETH_REFCLK), GPIO_FN(SD2_CD_B),
- GPIO_FN(HSPI_CLK1_B), GPIO_FN(ARM_TRACEDATA_14), GPIO_FN(MT1_CLK),
- GPIO_FN(TS_SCK0), GPIO_FN(VI0_R5), GPIO_FN(ETH_TXD0),
- GPIO_FN(SD2_WP_B), GPIO_FN(HSPI_CS1_B), GPIO_FN(ARM_TRACEDATA_15),
- GPIO_FN(MT1_D), GPIO_FN(TS_SDEN0), GPIO_FN(VI0_R6), GPIO_FN(ETH_MDC),
- GPIO_FN(DREQ2_C), GPIO_FN(HSPI_TX1_B), GPIO_FN(TRACECLK),
- GPIO_FN(MT1_BEN), GPIO_FN(PWMFSW0_D), GPIO_FN(VI0_R7),
- GPIO_FN(ETH_MDIO), GPIO_FN(DACK2_C), GPIO_FN(HSPI_RX1_B),
- GPIO_FN(SCIF_CLK_D), GPIO_FN(TRACECTL), GPIO_FN(MT1_PEN),
- GPIO_FN(VI1_CLK), GPIO_FN(SIM_D), GPIO_FN(SDA3), GPIO_FN(VI1_HSYNC),
- GPIO_FN(VI3_CLK), GPIO_FN(SSI_SCK4), GPIO_FN(GPS_SIGN_C),
- GPIO_FN(PWMFSW0_E), GPIO_FN(VI1_VSYNC), GPIO_FN(AUDIO_CLKOUT_C),
- GPIO_FN(SSI_WS4), GPIO_FN(SIM_CLK), GPIO_FN(GPS_MAG_C),
- GPIO_FN(SPV_TRST), GPIO_FN(SCL3),
+static const char * const mmc0_groups[] = {
+ "mmc0_data1",
+ "mmc0_data4",
+ "mmc0_data8",
+ "mmc0_ctrl",
+};
- /* IPSR11 */
- GPIO_FN(VI1_DATA0_VI1_B0), GPIO_FN(SD2_DAT0), GPIO_FN(SIM_RST),
- GPIO_FN(SPV_TCK), GPIO_FN(ADICLK_B), GPIO_FN(VI1_DATA1_VI1_B1),
- GPIO_FN(SD2_DAT1), GPIO_FN(MT0_CLK), GPIO_FN(SPV_TMS),
- GPIO_FN(ADICS_B_SAMP_B), GPIO_FN(VI1_DATA2_VI1_B2), GPIO_FN(SD2_DAT2),
- GPIO_FN(MT0_D), GPIO_FN(SPVTDI), GPIO_FN(ADIDATA_B),
- GPIO_FN(VI1_DATA3_VI1_B3), GPIO_FN(SD2_DAT3), GPIO_FN(MT0_BEN),
- GPIO_FN(SPV_TDO), GPIO_FN(ADICHS0_B), GPIO_FN(VI1_DATA4_VI1_B4),
- GPIO_FN(SD2_CLK), GPIO_FN(MT0_PEN), GPIO_FN(SPA_TRST),
- GPIO_FN(HSPI_CLK1_D), GPIO_FN(ADICHS1_B), GPIO_FN(VI1_DATA5_VI1_B5),
- GPIO_FN(SD2_CMD), GPIO_FN(MT0_SYNC), GPIO_FN(SPA_TCK),
- GPIO_FN(HSPI_CS1_D), GPIO_FN(ADICHS2_B), GPIO_FN(VI1_DATA6_VI1_B6),
- GPIO_FN(SD2_CD), GPIO_FN(MT0_VCXO), GPIO_FN(SPA_TMS),
- GPIO_FN(HSPI_TX1_D), GPIO_FN(VI1_DATA7_VI1_B7), GPIO_FN(SD2_WP),
- GPIO_FN(MT0_PWM), GPIO_FN(SPA_TDI), GPIO_FN(HSPI_RX1_D),
- GPIO_FN(VI1_G0), GPIO_FN(VI3_DATA0), GPIO_FN(DU1_DOTCLKOUT1),
- GPIO_FN(TS_SCK1), GPIO_FN(DREQ2_B), GPIO_FN(TX2), GPIO_FN(SPA_TDO),
- GPIO_FN(HCTS0_B), GPIO_FN(VI1_G1), GPIO_FN(VI3_DATA1),
- GPIO_FN(SSI_SCK1), GPIO_FN(TS_SDEN1), GPIO_FN(DACK2_B), GPIO_FN(RX2),
- GPIO_FN(HRTS0_B),
+static const char * const mmc1_groups[] = {
+ "mmc1_data1",
+ "mmc1_data4",
+ "mmc1_data8",
+ "mmc1_ctrl",
+};
- /* IPSR12 */
- GPIO_FN(VI1_G2), GPIO_FN(VI3_DATA2), GPIO_FN(SSI_WS1),
- GPIO_FN(TS_SPSYNC1), GPIO_FN(SCK2), GPIO_FN(HSCK0_B), GPIO_FN(VI1_G3),
- GPIO_FN(VI3_DATA3), GPIO_FN(SSI_SCK2), GPIO_FN(TS_SDAT1),
- GPIO_FN(SCL1_C), GPIO_FN(HTX0_B), GPIO_FN(VI1_G4), GPIO_FN(VI3_DATA4),
- GPIO_FN(SSI_WS2), GPIO_FN(SDA1_C), GPIO_FN(SIM_RST_B),
- GPIO_FN(HRX0_B), GPIO_FN(VI1_G5), GPIO_FN(VI3_DATA5),
- GPIO_FN(GPS_CLK), GPIO_FN(FSE), GPIO_FN(TX4_B), GPIO_FN(SIM_D_B),
- GPIO_FN(VI1_G6), GPIO_FN(VI3_DATA6), GPIO_FN(GPS_SIGN), GPIO_FN(FRB),
- GPIO_FN(RX4_B), GPIO_FN(SIM_CLK_B), GPIO_FN(VI1_G7),
- GPIO_FN(VI3_DATA7), GPIO_FN(GPS_MAG), GPIO_FN(FCE), GPIO_FN(SCK4_B),
-};
-
-static struct pinmux_cfg_reg pinmux_config_regs[] = {
+static const char * const scif0_groups[] = {
+ "scif0_data",
+ "scif0_clk",
+ "scif0_ctrl",
+ "scif0_data_b",
+ "scif0_clk_b",
+ "scif0_ctrl_b",
+ "scif0_data_c",
+ "scif0_clk_c",
+ "scif0_ctrl_c",
+ "scif0_data_d",
+ "scif0_clk_d",
+ "scif0_ctrl_d",
+};
+
+static const char * const scif1_groups[] = {
+ "scif1_data",
+ "scif1_clk",
+ "scif1_ctrl",
+ "scif1_data_b",
+ "scif1_clk_b",
+ "scif1_ctrl_b",
+ "scif1_data_c",
+ "scif1_clk_c",
+ "scif1_ctrl_c",
+};
+
+static const char * const scif2_groups[] = {
+ "scif2_data",
+ "scif2_clk",
+ "scif2_data_b",
+ "scif2_clk_b",
+ "scif2_data_c",
+ "scif2_clk_c",
+ "scif2_data_d",
+ "scif2_clk_d",
+ "scif2_data_e",
+};
+
+static const char * const scif3_groups[] = {
+ "scif3_data",
+ "scif3_clk",
+ "scif3_data_b",
+ "scif3_data_c",
+ "scif3_data_d",
+ "scif3_data_e",
+ "scif3_clk_e",
+};
+
+static const char * const scif4_groups[] = {
+ "scif4_data",
+ "scif4_clk",
+ "scif4_data_b",
+ "scif4_clk_b",
+ "scif4_data_c",
+ "scif4_data_d",
+};
+
+static const char * const scif5_groups[] = {
+ "scif5_data",
+ "scif5_clk",
+ "scif5_data_b",
+ "scif5_clk_b",
+ "scif5_data_c",
+ "scif5_clk_c",
+ "scif5_data_d",
+ "scif5_clk_d",
+};
+
+static const char * const sdhi0_groups[] = {
+ "sdhi0_data1",
+ "sdhi0_data4",
+ "sdhi0_ctrl",
+ "sdhi0_cd",
+ "sdhi0_wp",
+};
+
+static const char * const sdhi1_groups[] = {
+ "sdhi1_data1",
+ "sdhi1_data4",
+ "sdhi1_ctrl",
+ "sdhi1_cd",
+ "sdhi1_wp",
+};
+
+static const char * const sdhi2_groups[] = {
+ "sdhi2_data1",
+ "sdhi2_data4",
+ "sdhi2_ctrl",
+ "sdhi2_cd",
+ "sdhi2_wp",
+};
+
+static const char * const sdhi3_groups[] = {
+ "sdhi3_data1",
+ "sdhi3_data4",
+ "sdhi3_ctrl",
+ "sdhi3_cd",
+ "sdhi3_wp",
+};
+
+static const char * const usb0_groups[] = {
+ "usb0",
+};
+
+static const char * const usb1_groups[] = {
+ "usb1",
+};
+
+static const char * const usb2_groups[] = {
+ "usb2",
+};
+
+static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(du0),
+ SH_PFC_FUNCTION(du1),
+ SH_PFC_FUNCTION(hspi0),
+ SH_PFC_FUNCTION(hspi1),
+ SH_PFC_FUNCTION(hspi2),
+ SH_PFC_FUNCTION(intc),
+ SH_PFC_FUNCTION(lbsc),
+ SH_PFC_FUNCTION(mmc0),
+ SH_PFC_FUNCTION(mmc1),
+ SH_PFC_FUNCTION(sdhi0),
+ SH_PFC_FUNCTION(sdhi1),
+ SH_PFC_FUNCTION(sdhi2),
+ SH_PFC_FUNCTION(sdhi3),
+ SH_PFC_FUNCTION(scif0),
+ SH_PFC_FUNCTION(scif1),
+ SH_PFC_FUNCTION(scif2),
+ SH_PFC_FUNCTION(scif3),
+ SH_PFC_FUNCTION(scif4),
+ SH_PFC_FUNCTION(scif5),
+ SH_PFC_FUNCTION(usb0),
+ SH_PFC_FUNCTION(usb1),
+ SH_PFC_FUNCTION(usb2),
+};
+
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("GPSR0", 0xfffc0004, 32, 1) {
GP_0_31_FN, FN_IP3_31_29,
GP_0_30_FN, FN_IP3_26_24,
@@ -2412,7 +3407,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_VI1_G1, FN_VI3_DATA1, FN_SSI_SCK1, FN_TS_SDEN1,
FN_DACK2_B, FN_RX2, FN_HRTS0_B, 0,
/* IP11_26_24 [3] */
- FN_VI1_G0, FN_VI3_DATA0, FN_DU1_DOTCLKOUT1, FN_TS_SCK1,
+ FN_VI1_G0, FN_VI3_DATA0, 0, FN_TS_SCK1,
FN_DREQ2_B, FN_TX2, FN_SPA_TDO, FN_HCTS0_B,
/* IP11_23_21 [3] */
FN_VI1_DATA7_VI1_B7, FN_SD2_WP, FN_MT0_PWM, FN_SPA_TDI,
@@ -2558,66 +3553,24 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_I2C1 [2] */
FN_SEL_I2C1_0, FN_SEL_I2C1_1, FN_SEL_I2C1_2, FN_SEL_I2C1_3 }
},
- { PINMUX_CFG_REG("INOUTSEL0", 0xffc40004, 32, 1) { GP_INOUTSEL(0) } },
- { PINMUX_CFG_REG("INOUTSEL1", 0xffc41004, 32, 1) { GP_INOUTSEL(1) } },
- { PINMUX_CFG_REG("INOUTSEL2", 0xffc42004, 32, 1) { GP_INOUTSEL(2) } },
- { PINMUX_CFG_REG("INOUTSEL3", 0xffc43004, 32, 1) { GP_INOUTSEL(3) } },
- { PINMUX_CFG_REG("INOUTSEL4", 0xffc44004, 32, 1) { GP_INOUTSEL(4) } },
- { PINMUX_CFG_REG("INOUTSEL5", 0xffc45004, 32, 1) { GP_INOUTSEL(5) } },
- { PINMUX_CFG_REG("INOUTSEL6", 0xffc46004, 32, 1) {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- GP_6_8_IN, GP_6_8_OUT,
- GP_6_7_IN, GP_6_7_OUT,
- GP_6_6_IN, GP_6_6_OUT,
- GP_6_5_IN, GP_6_5_OUT,
- GP_6_4_IN, GP_6_4_OUT,
- GP_6_3_IN, GP_6_3_OUT,
- GP_6_2_IN, GP_6_2_OUT,
- GP_6_1_IN, GP_6_1_OUT,
- GP_6_0_IN, GP_6_0_OUT, }
- },
- { },
-};
-
-static struct pinmux_data_reg pinmux_data_regs[] = {
- { PINMUX_DATA_REG("INDT0", 0xffc40008, 32) { GP_INDT(0) } },
- { PINMUX_DATA_REG("INDT1", 0xffc41008, 32) { GP_INDT(1) } },
- { PINMUX_DATA_REG("INDT2", 0xffc42008, 32) { GP_INDT(2) } },
- { PINMUX_DATA_REG("INDT3", 0xffc43008, 32) { GP_INDT(3) } },
- { PINMUX_DATA_REG("INDT4", 0xffc44008, 32) { GP_INDT(4) } },
- { PINMUX_DATA_REG("INDT5", 0xffc45008, 32) { GP_INDT(5) } },
- { PINMUX_DATA_REG("INDT6", 0xffc46008, 32) {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, GP_6_8_DATA,
- GP_6_7_DATA, GP_6_6_DATA, GP_6_5_DATA, GP_6_4_DATA,
- GP_6_3_DATA, GP_6_2_DATA, GP_6_1_DATA, GP_6_0_DATA }
- },
{ },
};
-struct sh_pfc_soc_info r8a7779_pinmux_info = {
+const struct sh_pfc_soc_info r8a7779_pinmux_info = {
.name = "r8a7779_pfc",
.unlock_reg = 0xfffc0000, /* PMMR */
- .reserved_id = PINMUX_RESERVED,
- .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
- .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
- .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
- .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
- .first_gpio = GPIO_GP_0_0,
- .last_gpio = GPIO_FN_SCK4_B,
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups,
+ .nr_groups = ARRAY_SIZE(pinmux_groups),
+ .functions = pinmux_functions,
+ .nr_functions = ARRAY_SIZE(pinmux_functions),
- .gpios = pinmux_gpios,
.cfg_regs = pinmux_config_regs,
- .data_regs = pinmux_data_regs,
.gpio_data = pinmux_data,
.gpio_data_size = ARRAY_SIZE(pinmux_data),
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7203.c b/drivers/pinctrl/sh-pfc/pfc-sh7203.c
index 01b425d..f63d51d 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7203.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7203.c
@@ -272,7 +272,7 @@ enum {
PINMUX_MARK_END,
};
-static pinmux_enum_t pinmux_data[] = {
+static const pinmux_enum_t pinmux_data[] = {
/* PA */
PINMUX_DATA(PA7_DATA, PA7_IN),
@@ -703,7 +703,7 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(SSCK0_PF_MARK, PF0MD_11),
};
-static struct pinmux_gpio pinmux_gpios[] = {
+static struct sh_pfc_pin pinmux_pins[] = {
/* PA */
PINMUX_GPIO(GPIO_PA7, PA7_DATA),
@@ -815,265 +815,269 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_PF2, PF2_DATA),
PINMUX_GPIO(GPIO_PF1, PF1_DATA),
PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+};
+
+#define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins)
+static const struct pinmux_func pinmux_func_gpios[] = {
/* INTC */
- PINMUX_GPIO(GPIO_FN_PINT7_PB, PINT7_PB_MARK),
- PINMUX_GPIO(GPIO_FN_PINT6_PB, PINT6_PB_MARK),
- PINMUX_GPIO(GPIO_FN_PINT5_PB, PINT5_PB_MARK),
- PINMUX_GPIO(GPIO_FN_PINT4_PB, PINT4_PB_MARK),
- PINMUX_GPIO(GPIO_FN_PINT3_PB, PINT3_PB_MARK),
- PINMUX_GPIO(GPIO_FN_PINT2_PB, PINT2_PB_MARK),
- PINMUX_GPIO(GPIO_FN_PINT1_PB, PINT1_PB_MARK),
- PINMUX_GPIO(GPIO_FN_PINT0_PB, PINT0_PB_MARK),
- PINMUX_GPIO(GPIO_FN_PINT7_PD, PINT7_PD_MARK),
- PINMUX_GPIO(GPIO_FN_PINT6_PD, PINT6_PD_MARK),
- PINMUX_GPIO(GPIO_FN_PINT5_PD, PINT5_PD_MARK),
- PINMUX_GPIO(GPIO_FN_PINT4_PD, PINT4_PD_MARK),
- PINMUX_GPIO(GPIO_FN_PINT3_PD, PINT3_PD_MARK),
- PINMUX_GPIO(GPIO_FN_PINT2_PD, PINT2_PD_MARK),
- PINMUX_GPIO(GPIO_FN_PINT1_PD, PINT1_PD_MARK),
- PINMUX_GPIO(GPIO_FN_PINT0_PD, PINT0_PD_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ7_PB, IRQ7_PB_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ6_PB, IRQ6_PB_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ5_PB, IRQ5_PB_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ4_PB, IRQ4_PB_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ3_PB, IRQ3_PB_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ2_PB, IRQ2_PB_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ1_PB, IRQ1_PB_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ0_PB, IRQ0_PB_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ7_PD, IRQ7_PD_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ6_PD, IRQ6_PD_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ5_PD, IRQ5_PD_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ4_PD, IRQ4_PD_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ3_PD, IRQ3_PD_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ2_PD, IRQ2_PD_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ1_PD, IRQ1_PD_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ0_PD, IRQ0_PD_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ7_PE, IRQ7_PE_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ6_PE, IRQ6_PE_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ5_PE, IRQ5_PE_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ4_PE, IRQ4_PE_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ3_PE, IRQ3_PE_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ2_PE, IRQ2_PE_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ1_PE, IRQ1_PE_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ0_PE, IRQ0_PE_MARK),
-
- PINMUX_GPIO(GPIO_FN_WDTOVF, WDTOVF_MARK),
- PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK),
- PINMUX_GPIO(GPIO_FN_REFOUT, REFOUT_MARK),
- PINMUX_GPIO(GPIO_FN_IRQOUT_REFOUT, IRQOUT_REFOUT_MARK),
- PINMUX_GPIO(GPIO_FN_UBCTRG, UBCTRG_MARK),
+ GPIO_FN(PINT7_PB),
+ GPIO_FN(PINT6_PB),
+ GPIO_FN(PINT5_PB),
+ GPIO_FN(PINT4_PB),
+ GPIO_FN(PINT3_PB),
+ GPIO_FN(PINT2_PB),
+ GPIO_FN(PINT1_PB),
+ GPIO_FN(PINT0_PB),
+ GPIO_FN(PINT7_PD),
+ GPIO_FN(PINT6_PD),
+ GPIO_FN(PINT5_PD),
+ GPIO_FN(PINT4_PD),
+ GPIO_FN(PINT3_PD),
+ GPIO_FN(PINT2_PD),
+ GPIO_FN(PINT1_PD),
+ GPIO_FN(PINT0_PD),
+ GPIO_FN(IRQ7_PB),
+ GPIO_FN(IRQ6_PB),
+ GPIO_FN(IRQ5_PB),
+ GPIO_FN(IRQ4_PB),
+ GPIO_FN(IRQ3_PB),
+ GPIO_FN(IRQ2_PB),
+ GPIO_FN(IRQ1_PB),
+ GPIO_FN(IRQ0_PB),
+ GPIO_FN(IRQ7_PD),
+ GPIO_FN(IRQ6_PD),
+ GPIO_FN(IRQ5_PD),
+ GPIO_FN(IRQ4_PD),
+ GPIO_FN(IRQ3_PD),
+ GPIO_FN(IRQ2_PD),
+ GPIO_FN(IRQ1_PD),
+ GPIO_FN(IRQ0_PD),
+ GPIO_FN(IRQ7_PE),
+ GPIO_FN(IRQ6_PE),
+ GPIO_FN(IRQ5_PE),
+ GPIO_FN(IRQ4_PE),
+ GPIO_FN(IRQ3_PE),
+ GPIO_FN(IRQ2_PE),
+ GPIO_FN(IRQ1_PE),
+ GPIO_FN(IRQ0_PE),
+
+ GPIO_FN(WDTOVF),
+ GPIO_FN(IRQOUT),
+ GPIO_FN(REFOUT),
+ GPIO_FN(IRQOUT_REFOUT),
+ GPIO_FN(UBCTRG),
/* CAN */
- PINMUX_GPIO(GPIO_FN_CTX1, CTX1_MARK),
- PINMUX_GPIO(GPIO_FN_CRX1, CRX1_MARK),
- PINMUX_GPIO(GPIO_FN_CTX0, CTX0_MARK),
- PINMUX_GPIO(GPIO_FN_CTX0_CTX1, CTX0_CTX1_MARK),
- PINMUX_GPIO(GPIO_FN_CRX0, CRX0_MARK),
- PINMUX_GPIO(GPIO_FN_CRX0_CRX1, CRX0_CRX1_MARK),
+ GPIO_FN(CTX1),
+ GPIO_FN(CRX1),
+ GPIO_FN(CTX0),
+ GPIO_FN(CTX0_CTX1),
+ GPIO_FN(CRX0),
+ GPIO_FN(CRX0_CRX1),
/* IIC3 */
- PINMUX_GPIO(GPIO_FN_SDA3, SDA3_MARK),
- PINMUX_GPIO(GPIO_FN_SCL3, SCL3_MARK),
- PINMUX_GPIO(GPIO_FN_SDA2, SDA2_MARK),
- PINMUX_GPIO(GPIO_FN_SCL2, SCL2_MARK),
- PINMUX_GPIO(GPIO_FN_SDA1, SDA1_MARK),
- PINMUX_GPIO(GPIO_FN_SCL1, SCL1_MARK),
- PINMUX_GPIO(GPIO_FN_SDA0, SDA0_MARK),
- PINMUX_GPIO(GPIO_FN_SCL0, SCL0_MARK),
+ GPIO_FN(SDA3),
+ GPIO_FN(SCL3),
+ GPIO_FN(SDA2),
+ GPIO_FN(SCL2),
+ GPIO_FN(SDA1),
+ GPIO_FN(SCL1),
+ GPIO_FN(SDA0),
+ GPIO_FN(SCL0),
/* DMAC */
- PINMUX_GPIO(GPIO_FN_TEND0_PD, TEND0_PD_MARK),
- PINMUX_GPIO(GPIO_FN_TEND0_PE, TEND0_PE_MARK),
- PINMUX_GPIO(GPIO_FN_DACK0_PD, DACK0_PD_MARK),
- PINMUX_GPIO(GPIO_FN_DACK0_PE, DACK0_PE_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ0_PD, DREQ0_PD_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ0_PE, DREQ0_PE_MARK),
- PINMUX_GPIO(GPIO_FN_TEND1_PD, TEND1_PD_MARK),
- PINMUX_GPIO(GPIO_FN_TEND1_PE, TEND1_PE_MARK),
- PINMUX_GPIO(GPIO_FN_DACK1_PD, DACK1_PD_MARK),
- PINMUX_GPIO(GPIO_FN_DACK1_PE, DACK1_PE_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ1_PD, DREQ1_PD_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ1_PE, DREQ1_PE_MARK),
- PINMUX_GPIO(GPIO_FN_DACK2, DACK2_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ2, DREQ2_MARK),
- PINMUX_GPIO(GPIO_FN_DACK3, DACK3_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ3, DREQ3_MARK),
+ GPIO_FN(TEND0_PD),
+ GPIO_FN(TEND0_PE),
+ GPIO_FN(DACK0_PD),
+ GPIO_FN(DACK0_PE),
+ GPIO_FN(DREQ0_PD),
+ GPIO_FN(DREQ0_PE),
+ GPIO_FN(TEND1_PD),
+ GPIO_FN(TEND1_PE),
+ GPIO_FN(DACK1_PD),
+ GPIO_FN(DACK1_PE),
+ GPIO_FN(DREQ1_PD),
+ GPIO_FN(DREQ1_PE),
+ GPIO_FN(DACK2),
+ GPIO_FN(DREQ2),
+ GPIO_FN(DACK3),
+ GPIO_FN(DREQ3),
/* ADC */
- PINMUX_GPIO(GPIO_FN_ADTRG_PD, ADTRG_PD_MARK),
- PINMUX_GPIO(GPIO_FN_ADTRG_PE, ADTRG_PE_MARK),
+ GPIO_FN(ADTRG_PD),
+ GPIO_FN(ADTRG_PE),
/* BSC */
- PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
- PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
- PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
- PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
- PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
- PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
- PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
- PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
- PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
- PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
- PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
- PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
- PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
- PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
- PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
- PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
- PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
- PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
- PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
- PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
- PINMUX_GPIO(GPIO_FN_A21, A21_MARK),
- PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
- PINMUX_GPIO(GPIO_FN_MRES, MRES_MARK),
- PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
- PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
- PINMUX_GPIO(GPIO_FN_CS1, CS1_MARK),
- PINMUX_GPIO(GPIO_FN_CS6_CE1B, CS6_CE1B_MARK),
- PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
- PINMUX_GPIO(GPIO_FN_CS5_CE1A, CS5_CE1A_MARK),
- PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
- PINMUX_GPIO(GPIO_FN_FRAME, FRAME_MARK),
- PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
- PINMUX_GPIO(GPIO_FN_RDWR, RDWR_MARK),
- PINMUX_GPIO(GPIO_FN_CKE, CKE_MARK),
- PINMUX_GPIO(GPIO_FN_CASU, CASU_MARK),
- PINMUX_GPIO(GPIO_FN_BREQ, BREQ_MARK),
- PINMUX_GPIO(GPIO_FN_RASU, RASU_MARK),
- PINMUX_GPIO(GPIO_FN_BACK, BACK_MARK),
- PINMUX_GPIO(GPIO_FN_CASL, CASL_MARK),
- PINMUX_GPIO(GPIO_FN_RASL, RASL_MARK),
- PINMUX_GPIO(GPIO_FN_WE3_DQMUU_AH_ICIO_WR, WE3_DQMUU_AH_ICIO_WR_MARK),
- PINMUX_GPIO(GPIO_FN_WE2_DQMUL_ICIORD, WE2_DQMUL_ICIORD_MARK),
- PINMUX_GPIO(GPIO_FN_WE1_DQMLU_WE, WE1_DQMLU_WE_MARK),
- PINMUX_GPIO(GPIO_FN_WE0_DQMLL, WE0_DQMLL_MARK),
- PINMUX_GPIO(GPIO_FN_CS3, CS3_MARK),
- PINMUX_GPIO(GPIO_FN_CS2, CS2_MARK),
- PINMUX_GPIO(GPIO_FN_A1, A1_MARK),
- PINMUX_GPIO(GPIO_FN_A0, A0_MARK),
- PINMUX_GPIO(GPIO_FN_CS7, CS7_MARK),
+ GPIO_FN(D31),
+ GPIO_FN(D30),
+ GPIO_FN(D29),
+ GPIO_FN(D28),
+ GPIO_FN(D27),
+ GPIO_FN(D26),
+ GPIO_FN(D25),
+ GPIO_FN(D24),
+ GPIO_FN(D23),
+ GPIO_FN(D22),
+ GPIO_FN(D21),
+ GPIO_FN(D20),
+ GPIO_FN(D19),
+ GPIO_FN(D18),
+ GPIO_FN(D17),
+ GPIO_FN(D16),
+ GPIO_FN(A25),
+ GPIO_FN(A24),
+ GPIO_FN(A23),
+ GPIO_FN(A22),
+ GPIO_FN(A21),
+ GPIO_FN(CS4),
+ GPIO_FN(MRES),
+ GPIO_FN(BS),
+ GPIO_FN(IOIS16),
+ GPIO_FN(CS1),
+ GPIO_FN(CS6_CE1B),
+ GPIO_FN(CE2B),
+ GPIO_FN(CS5_CE1A),
+ GPIO_FN(CE2A),
+ GPIO_FN(FRAME),
+ GPIO_FN(WAIT),
+ GPIO_FN(RDWR),
+ GPIO_FN(CKE),
+ GPIO_FN(CASU),
+ GPIO_FN(BREQ),
+ GPIO_FN(RASU),
+ GPIO_FN(BACK),
+ GPIO_FN(CASL),
+ GPIO_FN(RASL),
+ GPIO_FN(WE3_DQMUU_AH_ICIO_WR),
+ GPIO_FN(WE2_DQMUL_ICIORD),
+ GPIO_FN(WE1_DQMLU_WE),
+ GPIO_FN(WE0_DQMLL),
+ GPIO_FN(CS3),
+ GPIO_FN(CS2),
+ GPIO_FN(A1),
+ GPIO_FN(A0),
+ GPIO_FN(CS7),
/* TMU */
- PINMUX_GPIO(GPIO_FN_TIOC4D, TIOC4D_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC4C, TIOC4C_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC4B, TIOC4B_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC4A, TIOC4A_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC3D, TIOC3D_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC3C, TIOC3C_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC3B, TIOC3B_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC3A, TIOC3A_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC2B, TIOC2B_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC1B, TIOC1B_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC2A, TIOC2A_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC1A, TIOC1A_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC0D, TIOC0D_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC0C, TIOC0C_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC0B, TIOC0B_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC0A, TIOC0A_MARK),
- PINMUX_GPIO(GPIO_FN_TCLKD_PD, TCLKD_PD_MARK),
- PINMUX_GPIO(GPIO_FN_TCLKC_PD, TCLKC_PD_MARK),
- PINMUX_GPIO(GPIO_FN_TCLKB_PD, TCLKB_PD_MARK),
- PINMUX_GPIO(GPIO_FN_TCLKA_PD, TCLKA_PD_MARK),
- PINMUX_GPIO(GPIO_FN_TCLKD_PF, TCLKD_PF_MARK),
- PINMUX_GPIO(GPIO_FN_TCLKC_PF, TCLKC_PF_MARK),
- PINMUX_GPIO(GPIO_FN_TCLKB_PF, TCLKB_PF_MARK),
- PINMUX_GPIO(GPIO_FN_TCLKA_PF, TCLKA_PF_MARK),
+ GPIO_FN(TIOC4D),
+ GPIO_FN(TIOC4C),
+ GPIO_FN(TIOC4B),
+ GPIO_FN(TIOC4A),
+ GPIO_FN(TIOC3D),
+ GPIO_FN(TIOC3C),
+ GPIO_FN(TIOC3B),
+ GPIO_FN(TIOC3A),
+ GPIO_FN(TIOC2B),
+ GPIO_FN(TIOC1B),
+ GPIO_FN(TIOC2A),
+ GPIO_FN(TIOC1A),
+ GPIO_FN(TIOC0D),
+ GPIO_FN(TIOC0C),
+ GPIO_FN(TIOC0B),
+ GPIO_FN(TIOC0A),
+ GPIO_FN(TCLKD_PD),
+ GPIO_FN(TCLKC_PD),
+ GPIO_FN(TCLKB_PD),
+ GPIO_FN(TCLKA_PD),
+ GPIO_FN(TCLKD_PF),
+ GPIO_FN(TCLKC_PF),
+ GPIO_FN(TCLKB_PF),
+ GPIO_FN(TCLKA_PF),
/* SSU */
- PINMUX_GPIO(GPIO_FN_SCS0_PD, SCS0_PD_MARK),
- PINMUX_GPIO(GPIO_FN_SSO0_PD, SSO0_PD_MARK),
- PINMUX_GPIO(GPIO_FN_SSI0_PD, SSI0_PD_MARK),
- PINMUX_GPIO(GPIO_FN_SSCK0_PD, SSCK0_PD_MARK),
- PINMUX_GPIO(GPIO_FN_SCS0_PF, SCS0_PF_MARK),
- PINMUX_GPIO(GPIO_FN_SSO0_PF, SSO0_PF_MARK),
- PINMUX_GPIO(GPIO_FN_SSI0_PF, SSI0_PF_MARK),
- PINMUX_GPIO(GPIO_FN_SSCK0_PF, SSCK0_PF_MARK),
- PINMUX_GPIO(GPIO_FN_SCS1_PD, SCS1_PD_MARK),
- PINMUX_GPIO(GPIO_FN_SSO1_PD, SSO1_PD_MARK),
- PINMUX_GPIO(GPIO_FN_SSI1_PD, SSI1_PD_MARK),
- PINMUX_GPIO(GPIO_FN_SSCK1_PD, SSCK1_PD_MARK),
- PINMUX_GPIO(GPIO_FN_SCS1_PF, SCS1_PF_MARK),
- PINMUX_GPIO(GPIO_FN_SSO1_PF, SSO1_PF_MARK),
- PINMUX_GPIO(GPIO_FN_SSI1_PF, SSI1_PF_MARK),
- PINMUX_GPIO(GPIO_FN_SSCK1_PF, SSCK1_PF_MARK),
+ GPIO_FN(SCS0_PD),
+ GPIO_FN(SSO0_PD),
+ GPIO_FN(SSI0_PD),
+ GPIO_FN(SSCK0_PD),
+ GPIO_FN(SCS0_PF),
+ GPIO_FN(SSO0_PF),
+ GPIO_FN(SSI0_PF),
+ GPIO_FN(SSCK0_PF),
+ GPIO_FN(SCS1_PD),
+ GPIO_FN(SSO1_PD),
+ GPIO_FN(SSI1_PD),
+ GPIO_FN(SSCK1_PD),
+ GPIO_FN(SCS1_PF),
+ GPIO_FN(SSO1_PF),
+ GPIO_FN(SSI1_PF),
+ GPIO_FN(SSCK1_PF),
/* SCIF */
- PINMUX_GPIO(GPIO_FN_TXD0, TXD0_MARK),
- PINMUX_GPIO(GPIO_FN_RXD0, RXD0_MARK),
- PINMUX_GPIO(GPIO_FN_SCK0, SCK0_MARK),
- PINMUX_GPIO(GPIO_FN_TXD1, TXD1_MARK),
- PINMUX_GPIO(GPIO_FN_RXD1, RXD1_MARK),
- PINMUX_GPIO(GPIO_FN_SCK1, SCK1_MARK),
- PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
- PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
- PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
- PINMUX_GPIO(GPIO_FN_RTS3, RTS3_MARK),
- PINMUX_GPIO(GPIO_FN_CTS3, CTS3_MARK),
- PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK),
- PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
- PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
+ GPIO_FN(TXD0),
+ GPIO_FN(RXD0),
+ GPIO_FN(SCK0),
+ GPIO_FN(TXD1),
+ GPIO_FN(RXD1),
+ GPIO_FN(SCK1),
+ GPIO_FN(TXD2),
+ GPIO_FN(RXD2),
+ GPIO_FN(SCK2),
+ GPIO_FN(RTS3),
+ GPIO_FN(CTS3),
+ GPIO_FN(TXD3),
+ GPIO_FN(RXD3),
+ GPIO_FN(SCK3),
/* SSI */
- PINMUX_GPIO(GPIO_FN_AUDIO_CLK, AUDIO_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_SSIDATA3, SSIDATA3_MARK),
- PINMUX_GPIO(GPIO_FN_SSIWS3, SSIWS3_MARK),
- PINMUX_GPIO(GPIO_FN_SSISCK3, SSISCK3_MARK),
- PINMUX_GPIO(GPIO_FN_SSIDATA2, SSIDATA2_MARK),
- PINMUX_GPIO(GPIO_FN_SSIWS2, SSIWS2_MARK),
- PINMUX_GPIO(GPIO_FN_SSISCK2, SSISCK2_MARK),
- PINMUX_GPIO(GPIO_FN_SSIDATA1, SSIDATA1_MARK),
- PINMUX_GPIO(GPIO_FN_SSIWS1, SSIWS1_MARK),
- PINMUX_GPIO(GPIO_FN_SSISCK1, SSISCK1_MARK),
- PINMUX_GPIO(GPIO_FN_SSIDATA0, SSIDATA0_MARK),
- PINMUX_GPIO(GPIO_FN_SSIWS0, SSIWS0_MARK),
- PINMUX_GPIO(GPIO_FN_SSISCK0, SSISCK0_MARK),
+ GPIO_FN(AUDIO_CLK),
+ GPIO_FN(SSIDATA3),
+ GPIO_FN(SSIWS3),
+ GPIO_FN(SSISCK3),
+ GPIO_FN(SSIDATA2),
+ GPIO_FN(SSIWS2),
+ GPIO_FN(SSISCK2),
+ GPIO_FN(SSIDATA1),
+ GPIO_FN(SSIWS1),
+ GPIO_FN(SSISCK1),
+ GPIO_FN(SSIDATA0),
+ GPIO_FN(SSIWS0),
+ GPIO_FN(SSISCK0),
/* FLCTL */
- PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
- PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
- PINMUX_GPIO(GPIO_FN_NAF7, NAF7_MARK),
- PINMUX_GPIO(GPIO_FN_NAF6, NAF6_MARK),
- PINMUX_GPIO(GPIO_FN_NAF5, NAF5_MARK),
- PINMUX_GPIO(GPIO_FN_NAF4, NAF4_MARK),
- PINMUX_GPIO(GPIO_FN_NAF3, NAF3_MARK),
- PINMUX_GPIO(GPIO_FN_NAF2, NAF2_MARK),
- PINMUX_GPIO(GPIO_FN_NAF1, NAF1_MARK),
- PINMUX_GPIO(GPIO_FN_NAF0, NAF0_MARK),
- PINMUX_GPIO(GPIO_FN_FSC, FSC_MARK),
- PINMUX_GPIO(GPIO_FN_FOE, FOE_MARK),
- PINMUX_GPIO(GPIO_FN_FCDE, FCDE_MARK),
- PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK),
+ GPIO_FN(FCE),
+ GPIO_FN(FRB),
+ GPIO_FN(NAF7),
+ GPIO_FN(NAF6),
+ GPIO_FN(NAF5),
+ GPIO_FN(NAF4),
+ GPIO_FN(NAF3),
+ GPIO_FN(NAF2),
+ GPIO_FN(NAF1),
+ GPIO_FN(NAF0),
+ GPIO_FN(FSC),
+ GPIO_FN(FOE),
+ GPIO_FN(FCDE),
+ GPIO_FN(FWE),
/* LCDC */
- PINMUX_GPIO(GPIO_FN_LCD_VEPWC, LCD_VEPWC_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_VCPWC, LCD_VCPWC_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_CLK, LCD_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_FLM, LCD_FLM_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_CL2, LCD_CL2_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_CL1, LCD_CL1_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DON, LCD_DON_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA15, LCD_DATA15_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA14, LCD_DATA14_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA13, LCD_DATA13_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA12, LCD_DATA12_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA11, LCD_DATA11_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA10, LCD_DATA10_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA9, LCD_DATA9_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA8, LCD_DATA8_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA7, LCD_DATA7_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA6, LCD_DATA6_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA5, LCD_DATA5_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA4, LCD_DATA4_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA3, LCD_DATA3_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA2, LCD_DATA2_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA1, LCD_DATA1_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA0, LCD_DATA0_MARK),
+ GPIO_FN(LCD_VEPWC),
+ GPIO_FN(LCD_VCPWC),
+ GPIO_FN(LCD_CLK),
+ GPIO_FN(LCD_FLM),
+ GPIO_FN(LCD_M_DISP),
+ GPIO_FN(LCD_CL2),
+ GPIO_FN(LCD_CL1),
+ GPIO_FN(LCD_DON),
+ GPIO_FN(LCD_DATA15),
+ GPIO_FN(LCD_DATA14),
+ GPIO_FN(LCD_DATA13),
+ GPIO_FN(LCD_DATA12),
+ GPIO_FN(LCD_DATA11),
+ GPIO_FN(LCD_DATA10),
+ GPIO_FN(LCD_DATA9),
+ GPIO_FN(LCD_DATA8),
+ GPIO_FN(LCD_DATA7),
+ GPIO_FN(LCD_DATA6),
+ GPIO_FN(LCD_DATA5),
+ GPIO_FN(LCD_DATA4),
+ GPIO_FN(LCD_DATA3),
+ GPIO_FN(LCD_DATA2),
+ GPIO_FN(LCD_DATA1),
+ GPIO_FN(LCD_DATA0),
};
-static struct pinmux_cfg_reg pinmux_config_regs[] = {
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("PBIORL", 0xfffe3886, 16, 1) {
0, 0,
0, 0,
@@ -1525,7 +1529,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
{}
};
-static struct pinmux_data_reg pinmux_data_regs[] = {
+static const struct pinmux_data_reg pinmux_data_regs[] = {
{ PINMUX_DATA_REG("PADRL", 0xfffe3802, 16) {
0, 0, 0, 0,
0, 0, 0, 0,
@@ -1571,19 +1575,17 @@ static struct pinmux_data_reg pinmux_data_regs[] = {
{ },
};
-struct sh_pfc_soc_info sh7203_pinmux_info = {
+const struct sh_pfc_soc_info sh7203_pinmux_info = {
.name = "sh7203_pfc",
- .reserved_id = PINMUX_RESERVED,
- .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END, FORCE_IN },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END, FORCE_OUT },
- .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
- .first_gpio = GPIO_PA7,
- .last_gpio = GPIO_FN_LCD_DATA0,
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .func_gpios = pinmux_func_gpios,
+ .nr_func_gpios = ARRAY_SIZE(pinmux_func_gpios),
- .gpios = pinmux_gpios,
.cfg_regs = pinmux_config_regs,
.data_regs = pinmux_data_regs,
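
The conversion above follows one pattern repeated across all of these SoC files: the single pinmux_gpios[] table is split into pinmux_pins[] (real pins, struct sh_pfc_pin) and pinmux_func_gpios[] (function GPIOs, struct pinmux_func), with PINMUX_FN_BASE defined as ARRAY_SIZE(pinmux_pins) so that the function GPIO numbers continue directly after the physical pin numbers. The toy program below (plain userspace C, not kernel code; the names, counts and helper are made up purely for illustration) sketches that numbering scheme under those assumptions:

/* Toy model of the split-table numbering: physical pins occupy GPIO
 * numbers 0..NR_PINS-1 and function GPIOs follow from FN_BASE, which
 * plays the role of PINMUX_FN_BASE above. No bounds checking; this is
 * an illustration only, not the sh-pfc implementation.
 */
#include <stdio.h>

#define NR_PINS 3		/* stand-in for ARRAY_SIZE(pinmux_pins) */
#define FN_BASE NR_PINS		/* first function-GPIO number */

static const char * const pin_names[NR_PINS] = { "PA2", "PA1", "PA0" };
static const char * const func_names[]       = { "TXD0", "RXD0", "SCK0" };

static const char *gpio_name(unsigned int gpio)
{
	if (gpio < NR_PINS)
		return pin_names[gpio];		/* physical pin */
	return func_names[gpio - FN_BASE];	/* function GPIO */
}

int main(void)
{
	/* prints "PA1 RXD0" */
	printf("%s %s\n", gpio_name(1), gpio_name(FN_BASE + 1));
	return 0;
}
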
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7264.c b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
index 2ba5639..2846752 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7264.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
@@ -604,7 +604,7 @@ enum {
PINMUX_MARK_END,
};
-static pinmux_enum_t pinmux_data[] = {
+static const pinmux_enum_t pinmux_data[] = {
/* Port A */
PINMUX_DATA(PA3_DATA, PA3_IN),
@@ -1072,7 +1072,7 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(SD_D2_MARK, PK0MD_10),
};
-static struct pinmux_gpio pinmux_gpios[] = {
+static struct sh_pfc_pin pinmux_pins[] = {
/* Port A */
PINMUX_GPIO(GPIO_PA3, PA3_DATA),
@@ -1216,257 +1216,261 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_PK2, PK2_DATA),
PINMUX_GPIO(GPIO_PK1, PK1_DATA),
PINMUX_GPIO(GPIO_PK0, PK0_DATA),
+};
+
+#define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins)
+static const struct pinmux_func pinmux_func_gpios[] = {
/* INTC */
- PINMUX_GPIO(GPIO_FN_PINT7_PG, PINT7_PG_MARK),
- PINMUX_GPIO(GPIO_FN_PINT6_PG, PINT6_PG_MARK),
- PINMUX_GPIO(GPIO_FN_PINT5_PG, PINT5_PG_MARK),
- PINMUX_GPIO(GPIO_FN_PINT4_PG, PINT4_PG_MARK),
- PINMUX_GPIO(GPIO_FN_PINT3_PG, PINT3_PG_MARK),
- PINMUX_GPIO(GPIO_FN_PINT2_PG, PINT2_PG_MARK),
- PINMUX_GPIO(GPIO_FN_PINT1_PG, PINT1_PG_MARK),
-
- PINMUX_GPIO(GPIO_FN_IRQ7_PC, IRQ7_PC_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ6_PC, IRQ6_PC_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ5_PC, IRQ5_PC_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ4_PC, IRQ4_PC_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ3_PG, IRQ3_PG_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ2_PG, IRQ2_PG_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ1_PJ, IRQ1_PJ_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ0_PJ, IRQ0_PJ_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ3_PE, IRQ3_PE_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ2_PE, IRQ2_PE_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ1_PE, IRQ1_PE_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ0_PE, IRQ0_PE_MARK),
+ GPIO_FN(PINT7_PG),
+ GPIO_FN(PINT6_PG),
+ GPIO_FN(PINT5_PG),
+ GPIO_FN(PINT4_PG),
+ GPIO_FN(PINT3_PG),
+ GPIO_FN(PINT2_PG),
+ GPIO_FN(PINT1_PG),
+
+ GPIO_FN(IRQ7_PC),
+ GPIO_FN(IRQ6_PC),
+ GPIO_FN(IRQ5_PC),
+ GPIO_FN(IRQ4_PC),
+ GPIO_FN(IRQ3_PG),
+ GPIO_FN(IRQ2_PG),
+ GPIO_FN(IRQ1_PJ),
+ GPIO_FN(IRQ0_PJ),
+ GPIO_FN(IRQ3_PE),
+ GPIO_FN(IRQ2_PE),
+ GPIO_FN(IRQ1_PE),
+ GPIO_FN(IRQ0_PE),
/* WDT */
- PINMUX_GPIO(GPIO_FN_WDTOVF, WDTOVF_MARK),
+ GPIO_FN(WDTOVF),
/* CAN */
- PINMUX_GPIO(GPIO_FN_CTX1, CTX1_MARK),
- PINMUX_GPIO(GPIO_FN_CRX1, CRX1_MARK),
- PINMUX_GPIO(GPIO_FN_CTX0, CTX0_MARK),
- PINMUX_GPIO(GPIO_FN_CRX0, CRX0_MARK),
- PINMUX_GPIO(GPIO_FN_CRX0_CRX1, CRX0_CRX1_MARK),
+ GPIO_FN(CTX1),
+ GPIO_FN(CRX1),
+ GPIO_FN(CTX0),
+ GPIO_FN(CRX0),
+ GPIO_FN(CRX0_CRX1),
/* DMAC */
- PINMUX_GPIO(GPIO_FN_TEND0, TEND0_MARK),
- PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
- PINMUX_GPIO(GPIO_FN_TEND1, TEND1_MARK),
- PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+ GPIO_FN(TEND0),
+ GPIO_FN(DACK0),
+ GPIO_FN(DREQ0),
+ GPIO_FN(TEND1),
+ GPIO_FN(DACK1),
+ GPIO_FN(DREQ1),
/* ADC */
- PINMUX_GPIO(GPIO_FN_ADTRG, ADTRG_MARK),
+ GPIO_FN(ADTRG),
/* BSCh */
- PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
- PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
- PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
- PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
- PINMUX_GPIO(GPIO_FN_A21, A21_MARK),
- PINMUX_GPIO(GPIO_FN_A20, A20_MARK),
- PINMUX_GPIO(GPIO_FN_A19, A19_MARK),
- PINMUX_GPIO(GPIO_FN_A18, A18_MARK),
- PINMUX_GPIO(GPIO_FN_A17, A17_MARK),
- PINMUX_GPIO(GPIO_FN_A16, A16_MARK),
- PINMUX_GPIO(GPIO_FN_A15, A15_MARK),
- PINMUX_GPIO(GPIO_FN_A14, A14_MARK),
- PINMUX_GPIO(GPIO_FN_A13, A13_MARK),
- PINMUX_GPIO(GPIO_FN_A12, A12_MARK),
- PINMUX_GPIO(GPIO_FN_A11, A11_MARK),
- PINMUX_GPIO(GPIO_FN_A10, A10_MARK),
- PINMUX_GPIO(GPIO_FN_A9, A9_MARK),
- PINMUX_GPIO(GPIO_FN_A8, A8_MARK),
- PINMUX_GPIO(GPIO_FN_A7, A7_MARK),
- PINMUX_GPIO(GPIO_FN_A6, A6_MARK),
- PINMUX_GPIO(GPIO_FN_A5, A5_MARK),
- PINMUX_GPIO(GPIO_FN_A4, A4_MARK),
- PINMUX_GPIO(GPIO_FN_A3, A3_MARK),
- PINMUX_GPIO(GPIO_FN_A2, A2_MARK),
- PINMUX_GPIO(GPIO_FN_A1, A1_MARK),
- PINMUX_GPIO(GPIO_FN_A0, A0_MARK),
-
- PINMUX_GPIO(GPIO_FN_D15, D15_MARK),
- PINMUX_GPIO(GPIO_FN_D14, D14_MARK),
- PINMUX_GPIO(GPIO_FN_D13, D13_MARK),
- PINMUX_GPIO(GPIO_FN_D12, D12_MARK),
- PINMUX_GPIO(GPIO_FN_D11, D11_MARK),
- PINMUX_GPIO(GPIO_FN_D10, D10_MARK),
- PINMUX_GPIO(GPIO_FN_D9, D9_MARK),
- PINMUX_GPIO(GPIO_FN_D8, D8_MARK),
- PINMUX_GPIO(GPIO_FN_D7, D7_MARK),
- PINMUX_GPIO(GPIO_FN_D6, D6_MARK),
- PINMUX_GPIO(GPIO_FN_D5, D5_MARK),
- PINMUX_GPIO(GPIO_FN_D4, D4_MARK),
- PINMUX_GPIO(GPIO_FN_D3, D3_MARK),
- PINMUX_GPIO(GPIO_FN_D2, D2_MARK),
- PINMUX_GPIO(GPIO_FN_D1, D1_MARK),
- PINMUX_GPIO(GPIO_FN_D0, D0_MARK),
-
- PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
- PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
- PINMUX_GPIO(GPIO_FN_CS3, CS3_MARK),
- PINMUX_GPIO(GPIO_FN_CS2, CS2_MARK),
- PINMUX_GPIO(GPIO_FN_CS1, CS1_MARK),
- PINMUX_GPIO(GPIO_FN_CS0, CS0_MARK),
- PINMUX_GPIO(GPIO_FN_CS6CE1B, CS6CE1B_MARK),
- PINMUX_GPIO(GPIO_FN_CS5CE1A, CS5CE1A_MARK),
- PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
- PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
- PINMUX_GPIO(GPIO_FN_RD, RD_MARK),
- PINMUX_GPIO(GPIO_FN_RDWR, RDWR_MARK),
- PINMUX_GPIO(GPIO_FN_ICIOWRAH, ICIOWRAH_MARK),
- PINMUX_GPIO(GPIO_FN_ICIORD, ICIORD_MARK),
- PINMUX_GPIO(GPIO_FN_WE1DQMUWE, WE1DQMUWE_MARK),
- PINMUX_GPIO(GPIO_FN_WE0DQML, WE0DQML_MARK),
- PINMUX_GPIO(GPIO_FN_RAS, RAS_MARK),
- PINMUX_GPIO(GPIO_FN_CAS, CAS_MARK),
- PINMUX_GPIO(GPIO_FN_CKE, CKE_MARK),
- PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
- PINMUX_GPIO(GPIO_FN_BREQ, BREQ_MARK),
- PINMUX_GPIO(GPIO_FN_BACK, BACK_MARK),
- PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ GPIO_FN(A25),
+ GPIO_FN(A24),
+ GPIO_FN(A23),
+ GPIO_FN(A22),
+ GPIO_FN(A21),
+ GPIO_FN(A20),
+ GPIO_FN(A19),
+ GPIO_FN(A18),
+ GPIO_FN(A17),
+ GPIO_FN(A16),
+ GPIO_FN(A15),
+ GPIO_FN(A14),
+ GPIO_FN(A13),
+ GPIO_FN(A12),
+ GPIO_FN(A11),
+ GPIO_FN(A10),
+ GPIO_FN(A9),
+ GPIO_FN(A8),
+ GPIO_FN(A7),
+ GPIO_FN(A6),
+ GPIO_FN(A5),
+ GPIO_FN(A4),
+ GPIO_FN(A3),
+ GPIO_FN(A2),
+ GPIO_FN(A1),
+ GPIO_FN(A0),
+
+ GPIO_FN(D15),
+ GPIO_FN(D14),
+ GPIO_FN(D13),
+ GPIO_FN(D12),
+ GPIO_FN(D11),
+ GPIO_FN(D10),
+ GPIO_FN(D9),
+ GPIO_FN(D8),
+ GPIO_FN(D7),
+ GPIO_FN(D6),
+ GPIO_FN(D5),
+ GPIO_FN(D4),
+ GPIO_FN(D3),
+ GPIO_FN(D2),
+ GPIO_FN(D1),
+ GPIO_FN(D0),
+
+ GPIO_FN(BS),
+ GPIO_FN(CS4),
+ GPIO_FN(CS3),
+ GPIO_FN(CS2),
+ GPIO_FN(CS1),
+ GPIO_FN(CS0),
+ GPIO_FN(CS6CE1B),
+ GPIO_FN(CS5CE1A),
+ GPIO_FN(CE2A),
+ GPIO_FN(CE2B),
+ GPIO_FN(RD),
+ GPIO_FN(RDWR),
+ GPIO_FN(ICIOWRAH),
+ GPIO_FN(ICIORD),
+ GPIO_FN(WE1DQMUWE),
+ GPIO_FN(WE0DQML),
+ GPIO_FN(RAS),
+ GPIO_FN(CAS),
+ GPIO_FN(CKE),
+ GPIO_FN(WAIT),
+ GPIO_FN(BREQ),
+ GPIO_FN(BACK),
+ GPIO_FN(IOIS16),
/* TMU */
- PINMUX_GPIO(GPIO_FN_TIOC4D, TIOC4D_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC4C, TIOC4C_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC4B, TIOC4B_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC4A, TIOC4A_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC3D, TIOC3D_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC3C, TIOC3C_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC3B, TIOC3B_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC3A, TIOC3A_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC2B, TIOC2B_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC1B, TIOC1B_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC2A, TIOC2A_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC1A, TIOC1A_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC0D, TIOC0D_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC0C, TIOC0C_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC0B, TIOC0B_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC0A, TIOC0A_MARK),
- PINMUX_GPIO(GPIO_FN_TCLKD, TCLKD_MARK),
- PINMUX_GPIO(GPIO_FN_TCLKC, TCLKC_MARK),
- PINMUX_GPIO(GPIO_FN_TCLKB, TCLKB_MARK),
- PINMUX_GPIO(GPIO_FN_TCLKA, TCLKA_MARK),
+ GPIO_FN(TIOC4D),
+ GPIO_FN(TIOC4C),
+ GPIO_FN(TIOC4B),
+ GPIO_FN(TIOC4A),
+ GPIO_FN(TIOC3D),
+ GPIO_FN(TIOC3C),
+ GPIO_FN(TIOC3B),
+ GPIO_FN(TIOC3A),
+ GPIO_FN(TIOC2B),
+ GPIO_FN(TIOC1B),
+ GPIO_FN(TIOC2A),
+ GPIO_FN(TIOC1A),
+ GPIO_FN(TIOC0D),
+ GPIO_FN(TIOC0C),
+ GPIO_FN(TIOC0B),
+ GPIO_FN(TIOC0A),
+ GPIO_FN(TCLKD),
+ GPIO_FN(TCLKC),
+ GPIO_FN(TCLKB),
+ GPIO_FN(TCLKA),
/* SCIF */
- PINMUX_GPIO(GPIO_FN_TXD0, TXD0_MARK),
- PINMUX_GPIO(GPIO_FN_RXD0, RXD0_MARK),
- PINMUX_GPIO(GPIO_FN_SCK0, SCK0_MARK),
- PINMUX_GPIO(GPIO_FN_TXD1, TXD1_MARK),
- PINMUX_GPIO(GPIO_FN_RXD1, RXD1_MARK),
- PINMUX_GPIO(GPIO_FN_SCK1, SCK1_MARK),
- PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
- PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
- PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
- PINMUX_GPIO(GPIO_FN_RTS3, RTS3_MARK),
- PINMUX_GPIO(GPIO_FN_CTS3, CTS3_MARK),
- PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK),
- PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
- PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
- PINMUX_GPIO(GPIO_FN_TXD4, TXD4_MARK),
- PINMUX_GPIO(GPIO_FN_RXD4, RXD4_MARK),
- PINMUX_GPIO(GPIO_FN_TXD5, TXD5_MARK),
- PINMUX_GPIO(GPIO_FN_RXD5, RXD5_MARK),
- PINMUX_GPIO(GPIO_FN_TXD6, TXD6_MARK),
- PINMUX_GPIO(GPIO_FN_RXD6, RXD6_MARK),
- PINMUX_GPIO(GPIO_FN_TXD7, TXD7_MARK),
- PINMUX_GPIO(GPIO_FN_RXD7, RXD7_MARK),
- PINMUX_GPIO(GPIO_FN_RTS1, RTS1_MARK),
- PINMUX_GPIO(GPIO_FN_CTS1, CTS1_MARK),
+ GPIO_FN(TXD0),
+ GPIO_FN(RXD0),
+ GPIO_FN(SCK0),
+ GPIO_FN(TXD1),
+ GPIO_FN(RXD1),
+ GPIO_FN(SCK1),
+ GPIO_FN(TXD2),
+ GPIO_FN(RXD2),
+ GPIO_FN(SCK2),
+ GPIO_FN(RTS3),
+ GPIO_FN(CTS3),
+ GPIO_FN(TXD3),
+ GPIO_FN(RXD3),
+ GPIO_FN(SCK3),
+ GPIO_FN(TXD4),
+ GPIO_FN(RXD4),
+ GPIO_FN(TXD5),
+ GPIO_FN(RXD5),
+ GPIO_FN(TXD6),
+ GPIO_FN(RXD6),
+ GPIO_FN(TXD7),
+ GPIO_FN(RXD7),
+ GPIO_FN(RTS1),
+ GPIO_FN(CTS1),
/* RSPI */
- PINMUX_GPIO(GPIO_FN_RSPCK0, RSPCK0_MARK),
- PINMUX_GPIO(GPIO_FN_MOSI0, MOSI0_MARK),
- PINMUX_GPIO(GPIO_FN_MISO0_PF12, MISO0_PF12_MARK),
- PINMUX_GPIO(GPIO_FN_MISO1, MISO1_MARK),
- PINMUX_GPIO(GPIO_FN_SSL00, SSL00_MARK),
- PINMUX_GPIO(GPIO_FN_RSPCK1, RSPCK1_MARK),
- PINMUX_GPIO(GPIO_FN_MOSI1, MOSI1_MARK),
- PINMUX_GPIO(GPIO_FN_MISO1_PG19, MISO1_PG19_MARK),
- PINMUX_GPIO(GPIO_FN_SSL10, SSL10_MARK),
+ GPIO_FN(RSPCK0),
+ GPIO_FN(MOSI0),
+ GPIO_FN(MISO0_PF12),
+ GPIO_FN(MISO1),
+ GPIO_FN(SSL00),
+ GPIO_FN(RSPCK1),
+ GPIO_FN(MOSI1),
+ GPIO_FN(MISO1_PG19),
+ GPIO_FN(SSL10),
/* IIC3 */
- PINMUX_GPIO(GPIO_FN_SCL0, SCL0_MARK),
- PINMUX_GPIO(GPIO_FN_SCL1, SCL1_MARK),
- PINMUX_GPIO(GPIO_FN_SCL2, SCL2_MARK),
- PINMUX_GPIO(GPIO_FN_SDA0, SDA0_MARK),
- PINMUX_GPIO(GPIO_FN_SDA1, SDA1_MARK),
- PINMUX_GPIO(GPIO_FN_SDA2, SDA2_MARK),
+ GPIO_FN(SCL0),
+ GPIO_FN(SCL1),
+ GPIO_FN(SCL2),
+ GPIO_FN(SDA0),
+ GPIO_FN(SDA1),
+ GPIO_FN(SDA2),
/* SSI */
- PINMUX_GPIO(GPIO_FN_SSISCK0, SSISCK0_MARK),
- PINMUX_GPIO(GPIO_FN_SSIWS0, SSIWS0_MARK),
- PINMUX_GPIO(GPIO_FN_SSITXD0, SSITXD0_MARK),
- PINMUX_GPIO(GPIO_FN_SSIRXD0, SSIRXD0_MARK),
- PINMUX_GPIO(GPIO_FN_SSIWS1, SSIWS1_MARK),
- PINMUX_GPIO(GPIO_FN_SSIWS2, SSIWS2_MARK),
- PINMUX_GPIO(GPIO_FN_SSIWS3, SSIWS3_MARK),
- PINMUX_GPIO(GPIO_FN_SSISCK1, SSISCK1_MARK),
- PINMUX_GPIO(GPIO_FN_SSISCK2, SSISCK2_MARK),
- PINMUX_GPIO(GPIO_FN_SSISCK3, SSISCK3_MARK),
- PINMUX_GPIO(GPIO_FN_SSIDATA1, SSIDATA1_MARK),
- PINMUX_GPIO(GPIO_FN_SSIDATA2, SSIDATA2_MARK),
- PINMUX_GPIO(GPIO_FN_SSIDATA3, SSIDATA3_MARK),
- PINMUX_GPIO(GPIO_FN_AUDIO_CLK, AUDIO_CLK_MARK),
+ GPIO_FN(SSISCK0),
+ GPIO_FN(SSIWS0),
+ GPIO_FN(SSITXD0),
+ GPIO_FN(SSIRXD0),
+ GPIO_FN(SSIWS1),
+ GPIO_FN(SSIWS2),
+ GPIO_FN(SSIWS3),
+ GPIO_FN(SSISCK1),
+ GPIO_FN(SSISCK2),
+ GPIO_FN(SSISCK3),
+ GPIO_FN(SSIDATA1),
+ GPIO_FN(SSIDATA2),
+ GPIO_FN(SSIDATA3),
+ GPIO_FN(AUDIO_CLK),
/* SIOF */ /* NOTE Shares AUDIO_CLK with SSI */
- PINMUX_GPIO(GPIO_FN_SIOFTXD, SIOFTXD_MARK),
- PINMUX_GPIO(GPIO_FN_SIOFRXD, SIOFRXD_MARK),
- PINMUX_GPIO(GPIO_FN_SIOFSYNC, SIOFSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_SIOFSCK, SIOFSCK_MARK),
+ GPIO_FN(SIOFTXD),
+ GPIO_FN(SIOFRXD),
+ GPIO_FN(SIOFSYNC),
+ GPIO_FN(SIOFSCK),
/* SPDIF */ /* NOTE Shares AUDIO_CLK with SSI */
- PINMUX_GPIO(GPIO_FN_SPDIF_IN, SPDIF_IN_MARK),
- PINMUX_GPIO(GPIO_FN_SPDIF_OUT, SPDIF_OUT_MARK),
+ GPIO_FN(SPDIF_IN),
+ GPIO_FN(SPDIF_OUT),
/* NANDFMC */ /* NOTE Controller is not available in boot mode 0 */
- PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
- PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+ GPIO_FN(FCE),
+ GPIO_FN(FRB),
/* VDC3 */
- PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK),
-
- PINMUX_GPIO(GPIO_FN_DV_DATA7, DV_DATA7_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA6, DV_DATA6_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA5, DV_DATA5_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA4, DV_DATA4_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA3, DV_DATA3_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA2, DV_DATA2_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA1, DV_DATA1_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA0, DV_DATA0_MARK),
-
- PINMUX_GPIO(GPIO_FN_LCD_CLK, LCD_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_EXTCLK, LCD_EXTCLK_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_VSYNC, LCD_VSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_HSYNC, LCD_HSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DE, LCD_DE_MARK),
-
- PINMUX_GPIO(GPIO_FN_LCD_DATA15, LCD_DATA15_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA14, LCD_DATA14_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA13, LCD_DATA13_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA12, LCD_DATA12_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA11, LCD_DATA11_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA10, LCD_DATA10_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA9, LCD_DATA9_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA8, LCD_DATA8_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA7, LCD_DATA7_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA6, LCD_DATA6_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA5, LCD_DATA5_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA4, LCD_DATA4_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA3, LCD_DATA3_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA2, LCD_DATA2_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA1, LCD_DATA1_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA0, LCD_DATA0_MARK),
-
- PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK),
+ GPIO_FN(DV_CLK),
+ GPIO_FN(DV_VSYNC),
+ GPIO_FN(DV_HSYNC),
+
+ GPIO_FN(DV_DATA7),
+ GPIO_FN(DV_DATA6),
+ GPIO_FN(DV_DATA5),
+ GPIO_FN(DV_DATA4),
+ GPIO_FN(DV_DATA3),
+ GPIO_FN(DV_DATA2),
+ GPIO_FN(DV_DATA1),
+ GPIO_FN(DV_DATA0),
+
+ GPIO_FN(LCD_CLK),
+ GPIO_FN(LCD_EXTCLK),
+ GPIO_FN(LCD_VSYNC),
+ GPIO_FN(LCD_HSYNC),
+ GPIO_FN(LCD_DE),
+
+ GPIO_FN(LCD_DATA15),
+ GPIO_FN(LCD_DATA14),
+ GPIO_FN(LCD_DATA13),
+ GPIO_FN(LCD_DATA12),
+ GPIO_FN(LCD_DATA11),
+ GPIO_FN(LCD_DATA10),
+ GPIO_FN(LCD_DATA9),
+ GPIO_FN(LCD_DATA8),
+ GPIO_FN(LCD_DATA7),
+ GPIO_FN(LCD_DATA6),
+ GPIO_FN(LCD_DATA5),
+ GPIO_FN(LCD_DATA4),
+ GPIO_FN(LCD_DATA3),
+ GPIO_FN(LCD_DATA2),
+ GPIO_FN(LCD_DATA1),
+ GPIO_FN(LCD_DATA0),
+
+ GPIO_FN(LCD_M_DISP),
};
-static struct pinmux_cfg_reg pinmux_config_regs[] = {
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("PAIOR0", 0xfffe3812, 16, 1) {
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2032,7 +2036,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
{}
};
-static struct pinmux_data_reg pinmux_data_regs[] = {
+static const struct pinmux_data_reg pinmux_data_regs[] = {
{ PINMUX_DATA_REG("PADR1", 0xfffe3814, 16) {
0, 0, 0, 0, 0, 0, 0, PA3_DATA,
0, 0, 0, 0, 0, 0, 0, PA2_DATA }
@@ -2110,19 +2114,17 @@ static struct pinmux_data_reg pinmux_data_regs[] = {
{ }
};
-struct sh_pfc_soc_info sh7264_pinmux_info = {
+const struct sh_pfc_soc_info sh7264_pinmux_info = {
.name = "sh7264_pfc",
- .reserved_id = PINMUX_RESERVED,
- .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END, FORCE_IN },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END, FORCE_OUT },
- .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
- .first_gpio = GPIO_PA3,
- .last_gpio = GPIO_FN_LCD_M_DISP,
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .func_gpios = pinmux_func_gpios,
+ .nr_func_gpios = ARRAY_SIZE(pinmux_func_gpios),
- .gpios = pinmux_gpios,
.cfg_regs = pinmux_config_regs,
.data_regs = pinmux_data_regs,
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7269.c b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
index b1b5d6d..4c401a7 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7269.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
@@ -781,7 +781,7 @@ enum {
PINMUX_MARK_END,
};
-static pinmux_enum_t pinmux_data[] = {
+static const pinmux_enum_t pinmux_data[] = {
/* Port A */
PINMUX_DATA(PA1_DATA, PA1_IN),
@@ -1452,7 +1452,7 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PWM1A_MARK, PJ0MD_100),
};
-static struct pinmux_gpio pinmux_gpios[] = {
+static struct sh_pfc_pin pinmux_pins[] = {
/* Port A */
PINMUX_GPIO(GPIO_PA1, PA1_DATA),
PINMUX_GPIO(GPIO_PA0, PA0_DATA),
@@ -1613,339 +1613,343 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_PJ2, PJ2_DATA),
PINMUX_GPIO(GPIO_PJ1, PJ1_DATA),
PINMUX_GPIO(GPIO_PJ0, PJ0_DATA),
+};
+
+#define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins)
+static const struct pinmux_func pinmux_func_gpios[] = {
/* INTC */
- PINMUX_GPIO(GPIO_FN_IRQ7_PG, IRQ7_PG_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ6_PG, IRQ6_PG_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ5_PG, IRQ5_PG_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ4_PG, IRQ4_PG_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ3_PG, IRQ3_PG_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ2_PG, IRQ2_PG_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ1_PG, IRQ1_PG_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ0_PG, IRQ0_PG_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ7_PF, IRQ7_PF_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ6_PF, IRQ6_PF_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ5_PF, IRQ5_PF_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ4_PF, IRQ4_PF_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ3_PJ, IRQ3_PJ_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ2_PJ, IRQ2_PJ_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ1_PJ, IRQ1_PJ_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ0_PJ, IRQ0_PJ_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ1_PC, IRQ1_PC_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ0_PC, IRQ0_PC_MARK),
-
- PINMUX_GPIO(GPIO_FN_PINT7_PG, PINT7_PG_MARK),
- PINMUX_GPIO(GPIO_FN_PINT6_PG, PINT6_PG_MARK),
- PINMUX_GPIO(GPIO_FN_PINT5_PG, PINT5_PG_MARK),
- PINMUX_GPIO(GPIO_FN_PINT4_PG, PINT4_PG_MARK),
- PINMUX_GPIO(GPIO_FN_PINT3_PG, PINT3_PG_MARK),
- PINMUX_GPIO(GPIO_FN_PINT2_PG, PINT2_PG_MARK),
- PINMUX_GPIO(GPIO_FN_PINT1_PG, PINT1_PG_MARK),
- PINMUX_GPIO(GPIO_FN_PINT0_PG, PINT0_PG_MARK),
- PINMUX_GPIO(GPIO_FN_PINT7_PH, PINT7_PH_MARK),
- PINMUX_GPIO(GPIO_FN_PINT6_PH, PINT6_PH_MARK),
- PINMUX_GPIO(GPIO_FN_PINT5_PH, PINT5_PH_MARK),
- PINMUX_GPIO(GPIO_FN_PINT4_PH, PINT4_PH_MARK),
- PINMUX_GPIO(GPIO_FN_PINT3_PH, PINT3_PH_MARK),
- PINMUX_GPIO(GPIO_FN_PINT2_PH, PINT2_PH_MARK),
- PINMUX_GPIO(GPIO_FN_PINT1_PH, PINT1_PH_MARK),
- PINMUX_GPIO(GPIO_FN_PINT0_PH, PINT0_PH_MARK),
- PINMUX_GPIO(GPIO_FN_PINT7_PJ, PINT7_PJ_MARK),
- PINMUX_GPIO(GPIO_FN_PINT6_PJ, PINT6_PJ_MARK),
- PINMUX_GPIO(GPIO_FN_PINT5_PJ, PINT5_PJ_MARK),
- PINMUX_GPIO(GPIO_FN_PINT4_PJ, PINT4_PJ_MARK),
- PINMUX_GPIO(GPIO_FN_PINT3_PJ, PINT3_PJ_MARK),
- PINMUX_GPIO(GPIO_FN_PINT2_PJ, PINT2_PJ_MARK),
- PINMUX_GPIO(GPIO_FN_PINT1_PJ, PINT1_PJ_MARK),
- PINMUX_GPIO(GPIO_FN_PINT0_PJ, PINT0_PJ_MARK),
+ GPIO_FN(IRQ7_PG),
+ GPIO_FN(IRQ6_PG),
+ GPIO_FN(IRQ5_PG),
+ GPIO_FN(IRQ4_PG),
+ GPIO_FN(IRQ3_PG),
+ GPIO_FN(IRQ2_PG),
+ GPIO_FN(IRQ1_PG),
+ GPIO_FN(IRQ0_PG),
+ GPIO_FN(IRQ7_PF),
+ GPIO_FN(IRQ6_PF),
+ GPIO_FN(IRQ5_PF),
+ GPIO_FN(IRQ4_PF),
+ GPIO_FN(IRQ3_PJ),
+ GPIO_FN(IRQ2_PJ),
+ GPIO_FN(IRQ1_PJ),
+ GPIO_FN(IRQ0_PJ),
+ GPIO_FN(IRQ1_PC),
+ GPIO_FN(IRQ0_PC),
+
+ GPIO_FN(PINT7_PG),
+ GPIO_FN(PINT6_PG),
+ GPIO_FN(PINT5_PG),
+ GPIO_FN(PINT4_PG),
+ GPIO_FN(PINT3_PG),
+ GPIO_FN(PINT2_PG),
+ GPIO_FN(PINT1_PG),
+ GPIO_FN(PINT0_PG),
+ GPIO_FN(PINT7_PH),
+ GPIO_FN(PINT6_PH),
+ GPIO_FN(PINT5_PH),
+ GPIO_FN(PINT4_PH),
+ GPIO_FN(PINT3_PH),
+ GPIO_FN(PINT2_PH),
+ GPIO_FN(PINT1_PH),
+ GPIO_FN(PINT0_PH),
+ GPIO_FN(PINT7_PJ),
+ GPIO_FN(PINT6_PJ),
+ GPIO_FN(PINT5_PJ),
+ GPIO_FN(PINT4_PJ),
+ GPIO_FN(PINT3_PJ),
+ GPIO_FN(PINT2_PJ),
+ GPIO_FN(PINT1_PJ),
+ GPIO_FN(PINT0_PJ),
/* WDT */
- PINMUX_GPIO(GPIO_FN_WDTOVF, WDTOVF_MARK),
+ GPIO_FN(WDTOVF),
/* CAN */
- PINMUX_GPIO(GPIO_FN_CTX1, CTX1_MARK),
- PINMUX_GPIO(GPIO_FN_CRX1, CRX1_MARK),
- PINMUX_GPIO(GPIO_FN_CTX0, CTX0_MARK),
- PINMUX_GPIO(GPIO_FN_CRX0, CRX0_MARK),
- PINMUX_GPIO(GPIO_FN_CRX0_CRX1, CRX0_CRX1_MARK),
- PINMUX_GPIO(GPIO_FN_CRX0_CRX1_CRX2, CRX0_CRX1_CRX2_MARK),
+ GPIO_FN(CTX1),
+ GPIO_FN(CRX1),
+ GPIO_FN(CTX0),
+ GPIO_FN(CRX0),
+ GPIO_FN(CRX0_CRX1),
+ GPIO_FN(CRX0_CRX1_CRX2),
/* DMAC */
- PINMUX_GPIO(GPIO_FN_TEND0, TEND0_MARK),
- PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
- PINMUX_GPIO(GPIO_FN_TEND1, TEND1_MARK),
- PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+ GPIO_FN(TEND0),
+ GPIO_FN(DACK0),
+ GPIO_FN(DREQ0),
+ GPIO_FN(TEND1),
+ GPIO_FN(DACK1),
+ GPIO_FN(DREQ1),
/* ADC */
- PINMUX_GPIO(GPIO_FN_ADTRG, ADTRG_MARK),
+ GPIO_FN(ADTRG),
/* BSCh */
- PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
- PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
- PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
- PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
- PINMUX_GPIO(GPIO_FN_A21, A21_MARK),
- PINMUX_GPIO(GPIO_FN_A20, A20_MARK),
- PINMUX_GPIO(GPIO_FN_A19, A19_MARK),
- PINMUX_GPIO(GPIO_FN_A18, A18_MARK),
- PINMUX_GPIO(GPIO_FN_A17, A17_MARK),
- PINMUX_GPIO(GPIO_FN_A16, A16_MARK),
- PINMUX_GPIO(GPIO_FN_A15, A15_MARK),
- PINMUX_GPIO(GPIO_FN_A14, A14_MARK),
- PINMUX_GPIO(GPIO_FN_A13, A13_MARK),
- PINMUX_GPIO(GPIO_FN_A12, A12_MARK),
- PINMUX_GPIO(GPIO_FN_A11, A11_MARK),
- PINMUX_GPIO(GPIO_FN_A10, A10_MARK),
- PINMUX_GPIO(GPIO_FN_A9, A9_MARK),
- PINMUX_GPIO(GPIO_FN_A8, A8_MARK),
- PINMUX_GPIO(GPIO_FN_A7, A7_MARK),
- PINMUX_GPIO(GPIO_FN_A6, A6_MARK),
- PINMUX_GPIO(GPIO_FN_A5, A5_MARK),
- PINMUX_GPIO(GPIO_FN_A4, A4_MARK),
- PINMUX_GPIO(GPIO_FN_A3, A3_MARK),
- PINMUX_GPIO(GPIO_FN_A2, A2_MARK),
- PINMUX_GPIO(GPIO_FN_A1, A1_MARK),
- PINMUX_GPIO(GPIO_FN_A0, A0_MARK),
-
- PINMUX_GPIO(GPIO_FN_D15, D15_MARK),
- PINMUX_GPIO(GPIO_FN_D14, D14_MARK),
- PINMUX_GPIO(GPIO_FN_D13, D13_MARK),
- PINMUX_GPIO(GPIO_FN_D12, D12_MARK),
- PINMUX_GPIO(GPIO_FN_D11, D11_MARK),
- PINMUX_GPIO(GPIO_FN_D10, D10_MARK),
- PINMUX_GPIO(GPIO_FN_D9, D9_MARK),
- PINMUX_GPIO(GPIO_FN_D8, D8_MARK),
- PINMUX_GPIO(GPIO_FN_D7, D7_MARK),
- PINMUX_GPIO(GPIO_FN_D6, D6_MARK),
- PINMUX_GPIO(GPIO_FN_D5, D5_MARK),
- PINMUX_GPIO(GPIO_FN_D4, D4_MARK),
- PINMUX_GPIO(GPIO_FN_D3, D3_MARK),
- PINMUX_GPIO(GPIO_FN_D2, D2_MARK),
- PINMUX_GPIO(GPIO_FN_D1, D1_MARK),
- PINMUX_GPIO(GPIO_FN_D0, D0_MARK),
-
- PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
- PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
- PINMUX_GPIO(GPIO_FN_CS3, CS3_MARK),
- PINMUX_GPIO(GPIO_FN_CS2, CS2_MARK),
- PINMUX_GPIO(GPIO_FN_CS1, CS1_MARK),
- PINMUX_GPIO(GPIO_FN_CS0, CS0_MARK),
- PINMUX_GPIO(GPIO_FN_CS5CE1A, CS5CE1A_MARK),
- PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
- PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
- PINMUX_GPIO(GPIO_FN_RD, RD_MARK),
- PINMUX_GPIO(GPIO_FN_RDWR, RDWR_MARK),
- PINMUX_GPIO(GPIO_FN_WE3ICIOWRAHDQMUU, WE3ICIOWRAHDQMUU_MARK),
- PINMUX_GPIO(GPIO_FN_WE2ICIORDDQMUL, WE2ICIORDDQMUL_MARK),
- PINMUX_GPIO(GPIO_FN_WE1DQMUWE, WE1DQMUWE_MARK),
- PINMUX_GPIO(GPIO_FN_WE0DQML, WE0DQML_MARK),
- PINMUX_GPIO(GPIO_FN_RAS, RAS_MARK),
- PINMUX_GPIO(GPIO_FN_CAS, CAS_MARK),
- PINMUX_GPIO(GPIO_FN_CKE, CKE_MARK),
- PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
- PINMUX_GPIO(GPIO_FN_BREQ, BREQ_MARK),
- PINMUX_GPIO(GPIO_FN_BACK, BACK_MARK),
- PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ GPIO_FN(A25),
+ GPIO_FN(A24),
+ GPIO_FN(A23),
+ GPIO_FN(A22),
+ GPIO_FN(A21),
+ GPIO_FN(A20),
+ GPIO_FN(A19),
+ GPIO_FN(A18),
+ GPIO_FN(A17),
+ GPIO_FN(A16),
+ GPIO_FN(A15),
+ GPIO_FN(A14),
+ GPIO_FN(A13),
+ GPIO_FN(A12),
+ GPIO_FN(A11),
+ GPIO_FN(A10),
+ GPIO_FN(A9),
+ GPIO_FN(A8),
+ GPIO_FN(A7),
+ GPIO_FN(A6),
+ GPIO_FN(A5),
+ GPIO_FN(A4),
+ GPIO_FN(A3),
+ GPIO_FN(A2),
+ GPIO_FN(A1),
+ GPIO_FN(A0),
+
+ GPIO_FN(D15),
+ GPIO_FN(D14),
+ GPIO_FN(D13),
+ GPIO_FN(D12),
+ GPIO_FN(D11),
+ GPIO_FN(D10),
+ GPIO_FN(D9),
+ GPIO_FN(D8),
+ GPIO_FN(D7),
+ GPIO_FN(D6),
+ GPIO_FN(D5),
+ GPIO_FN(D4),
+ GPIO_FN(D3),
+ GPIO_FN(D2),
+ GPIO_FN(D1),
+ GPIO_FN(D0),
+
+ GPIO_FN(BS),
+ GPIO_FN(CS4),
+ GPIO_FN(CS3),
+ GPIO_FN(CS2),
+ GPIO_FN(CS1),
+ GPIO_FN(CS0),
+ GPIO_FN(CS5CE1A),
+ GPIO_FN(CE2A),
+ GPIO_FN(CE2B),
+ GPIO_FN(RD),
+ GPIO_FN(RDWR),
+ GPIO_FN(WE3ICIOWRAHDQMUU),
+ GPIO_FN(WE2ICIORDDQMUL),
+ GPIO_FN(WE1DQMUWE),
+ GPIO_FN(WE0DQML),
+ GPIO_FN(RAS),
+ GPIO_FN(CAS),
+ GPIO_FN(CKE),
+ GPIO_FN(WAIT),
+ GPIO_FN(BREQ),
+ GPIO_FN(BACK),
+ GPIO_FN(IOIS16),
/* TMU */
- PINMUX_GPIO(GPIO_FN_TIOC4D, TIOC4D_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC4C, TIOC4C_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC4B, TIOC4B_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC4A, TIOC4A_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC3D, TIOC3D_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC3C, TIOC3C_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC3B, TIOC3B_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC3A, TIOC3A_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC2B, TIOC2B_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC1B, TIOC1B_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC2A, TIOC2A_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC1A, TIOC1A_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC0D, TIOC0D_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC0C, TIOC0C_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC0B, TIOC0B_MARK),
- PINMUX_GPIO(GPIO_FN_TIOC0A, TIOC0A_MARK),
- PINMUX_GPIO(GPIO_FN_TCLKD, TCLKD_MARK),
- PINMUX_GPIO(GPIO_FN_TCLKC, TCLKC_MARK),
- PINMUX_GPIO(GPIO_FN_TCLKB, TCLKB_MARK),
- PINMUX_GPIO(GPIO_FN_TCLKA, TCLKA_MARK),
+ GPIO_FN(TIOC4D),
+ GPIO_FN(TIOC4C),
+ GPIO_FN(TIOC4B),
+ GPIO_FN(TIOC4A),
+ GPIO_FN(TIOC3D),
+ GPIO_FN(TIOC3C),
+ GPIO_FN(TIOC3B),
+ GPIO_FN(TIOC3A),
+ GPIO_FN(TIOC2B),
+ GPIO_FN(TIOC1B),
+ GPIO_FN(TIOC2A),
+ GPIO_FN(TIOC1A),
+ GPIO_FN(TIOC0D),
+ GPIO_FN(TIOC0C),
+ GPIO_FN(TIOC0B),
+ GPIO_FN(TIOC0A),
+ GPIO_FN(TCLKD),
+ GPIO_FN(TCLKC),
+ GPIO_FN(TCLKB),
+ GPIO_FN(TCLKA),
/* SCIF */
- PINMUX_GPIO(GPIO_FN_SCK0, SCK0_MARK),
- PINMUX_GPIO(GPIO_FN_TXD0, TXD0_MARK),
- PINMUX_GPIO(GPIO_FN_RXD0, RXD0_MARK),
- PINMUX_GPIO(GPIO_FN_SCK1, SCK1_MARK),
- PINMUX_GPIO(GPIO_FN_TXD1, TXD1_MARK),
- PINMUX_GPIO(GPIO_FN_RXD1, RXD1_MARK),
- PINMUX_GPIO(GPIO_FN_RTS1, RTS1_MARK),
- PINMUX_GPIO(GPIO_FN_CTS1, CTS1_MARK),
- PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
- PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
- PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
- PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
- PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK),
- PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
- PINMUX_GPIO(GPIO_FN_SCK4, SCK4_MARK),
- PINMUX_GPIO(GPIO_FN_TXD4, TXD4_MARK),
- PINMUX_GPIO(GPIO_FN_RXD4, RXD4_MARK),
- PINMUX_GPIO(GPIO_FN_SCK5, SCK5_MARK),
- PINMUX_GPIO(GPIO_FN_TXD5, TXD5_MARK),
- PINMUX_GPIO(GPIO_FN_RXD5, RXD5_MARK),
- PINMUX_GPIO(GPIO_FN_RTS5, RTS5_MARK),
- PINMUX_GPIO(GPIO_FN_CTS5, CTS5_MARK),
- PINMUX_GPIO(GPIO_FN_SCK6, SCK6_MARK),
- PINMUX_GPIO(GPIO_FN_TXD6, TXD6_MARK),
- PINMUX_GPIO(GPIO_FN_RXD6, RXD6_MARK),
- PINMUX_GPIO(GPIO_FN_SCK7, SCK7_MARK),
- PINMUX_GPIO(GPIO_FN_TXD7, TXD7_MARK),
- PINMUX_GPIO(GPIO_FN_RXD7, RXD7_MARK),
- PINMUX_GPIO(GPIO_FN_RTS7, RTS7_MARK),
- PINMUX_GPIO(GPIO_FN_CTS7, CTS7_MARK),
+ GPIO_FN(SCK0),
+ GPIO_FN(TXD0),
+ GPIO_FN(RXD0),
+ GPIO_FN(SCK1),
+ GPIO_FN(TXD1),
+ GPIO_FN(RXD1),
+ GPIO_FN(RTS1),
+ GPIO_FN(CTS1),
+ GPIO_FN(SCK2),
+ GPIO_FN(TXD2),
+ GPIO_FN(RXD2),
+ GPIO_FN(SCK3),
+ GPIO_FN(TXD3),
+ GPIO_FN(RXD3),
+ GPIO_FN(SCK4),
+ GPIO_FN(TXD4),
+ GPIO_FN(RXD4),
+ GPIO_FN(SCK5),
+ GPIO_FN(TXD5),
+ GPIO_FN(RXD5),
+ GPIO_FN(RTS5),
+ GPIO_FN(CTS5),
+ GPIO_FN(SCK6),
+ GPIO_FN(TXD6),
+ GPIO_FN(RXD6),
+ GPIO_FN(SCK7),
+ GPIO_FN(TXD7),
+ GPIO_FN(RXD7),
+ GPIO_FN(RTS7),
+ GPIO_FN(CTS7),
/* RSPI */
- PINMUX_GPIO(GPIO_FN_RSPCK0_PJ16, RSPCK0_PJ16_MARK),
- PINMUX_GPIO(GPIO_FN_SSL00_PJ17, SSL00_PJ17_MARK),
- PINMUX_GPIO(GPIO_FN_MOSI0_PJ18, MOSI0_PJ18_MARK),
- PINMUX_GPIO(GPIO_FN_MISO0_PJ19, MISO0_PJ19_MARK),
- PINMUX_GPIO(GPIO_FN_RSPCK0_PB17, RSPCK0_PB17_MARK),
- PINMUX_GPIO(GPIO_FN_SSL00_PB18, SSL00_PB18_MARK),
- PINMUX_GPIO(GPIO_FN_MOSI0_PB19, MOSI0_PB19_MARK),
- PINMUX_GPIO(GPIO_FN_MISO0_PB20, MISO0_PB20_MARK),
- PINMUX_GPIO(GPIO_FN_RSPCK1, RSPCK1_MARK),
- PINMUX_GPIO(GPIO_FN_MOSI1, MOSI1_MARK),
- PINMUX_GPIO(GPIO_FN_MISO1, MISO1_MARK),
- PINMUX_GPIO(GPIO_FN_SSL10, SSL10_MARK),
+ GPIO_FN(RSPCK0_PJ16),
+ GPIO_FN(SSL00_PJ17),
+ GPIO_FN(MOSI0_PJ18),
+ GPIO_FN(MISO0_PJ19),
+ GPIO_FN(RSPCK0_PB17),
+ GPIO_FN(SSL00_PB18),
+ GPIO_FN(MOSI0_PB19),
+ GPIO_FN(MISO0_PB20),
+ GPIO_FN(RSPCK1),
+ GPIO_FN(MOSI1),
+ GPIO_FN(MISO1),
+ GPIO_FN(SSL10),
/* IIC3 */
- PINMUX_GPIO(GPIO_FN_SCL0, SCL0_MARK),
- PINMUX_GPIO(GPIO_FN_SCL1, SCL1_MARK),
- PINMUX_GPIO(GPIO_FN_SCL2, SCL2_MARK),
- PINMUX_GPIO(GPIO_FN_SDA0, SDA0_MARK),
- PINMUX_GPIO(GPIO_FN_SDA1, SDA1_MARK),
- PINMUX_GPIO(GPIO_FN_SDA2, SDA2_MARK),
+ GPIO_FN(SCL0),
+ GPIO_FN(SCL1),
+ GPIO_FN(SCL2),
+ GPIO_FN(SDA0),
+ GPIO_FN(SDA1),
+ GPIO_FN(SDA2),
/* SSI */
- PINMUX_GPIO(GPIO_FN_SSISCK0, SSISCK0_MARK),
- PINMUX_GPIO(GPIO_FN_SSIWS0, SSIWS0_MARK),
- PINMUX_GPIO(GPIO_FN_SSITXD0, SSITXD0_MARK),
- PINMUX_GPIO(GPIO_FN_SSIRXD0, SSIRXD0_MARK),
- PINMUX_GPIO(GPIO_FN_SSIWS1, SSIWS1_MARK),
- PINMUX_GPIO(GPIO_FN_SSIWS2, SSIWS2_MARK),
- PINMUX_GPIO(GPIO_FN_SSIWS3, SSIWS3_MARK),
- PINMUX_GPIO(GPIO_FN_SSISCK1, SSISCK1_MARK),
- PINMUX_GPIO(GPIO_FN_SSISCK2, SSISCK2_MARK),
- PINMUX_GPIO(GPIO_FN_SSISCK3, SSISCK3_MARK),
- PINMUX_GPIO(GPIO_FN_SSIDATA1, SSIDATA1_MARK),
- PINMUX_GPIO(GPIO_FN_SSIDATA2, SSIDATA2_MARK),
- PINMUX_GPIO(GPIO_FN_SSIDATA3, SSIDATA3_MARK),
- PINMUX_GPIO(GPIO_FN_AUDIO_CLK, AUDIO_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_AUDIO_XOUT, AUDIO_XOUT_MARK),
+ GPIO_FN(SSISCK0),
+ GPIO_FN(SSIWS0),
+ GPIO_FN(SSITXD0),
+ GPIO_FN(SSIRXD0),
+ GPIO_FN(SSIWS1),
+ GPIO_FN(SSIWS2),
+ GPIO_FN(SSIWS3),
+ GPIO_FN(SSISCK1),
+ GPIO_FN(SSISCK2),
+ GPIO_FN(SSISCK3),
+ GPIO_FN(SSIDATA1),
+ GPIO_FN(SSIDATA2),
+ GPIO_FN(SSIDATA3),
+ GPIO_FN(AUDIO_CLK),
+ GPIO_FN(AUDIO_XOUT),
/* SIOF */ /* NOTE Shares AUDIO_CLK with SSI */
- PINMUX_GPIO(GPIO_FN_SIOFTXD, SIOFTXD_MARK),
- PINMUX_GPIO(GPIO_FN_SIOFRXD, SIOFRXD_MARK),
- PINMUX_GPIO(GPIO_FN_SIOFSYNC, SIOFSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_SIOFSCK, SIOFSCK_MARK),
+ GPIO_FN(SIOFTXD),
+ GPIO_FN(SIOFRXD),
+ GPIO_FN(SIOFSYNC),
+ GPIO_FN(SIOFSCK),
/* SPDIF */ /* NOTE Shares AUDIO_CLK with SSI */
- PINMUX_GPIO(GPIO_FN_SPDIF_IN, SPDIF_IN_MARK),
- PINMUX_GPIO(GPIO_FN_SPDIF_OUT, SPDIF_OUT_MARK),
+ GPIO_FN(SPDIF_IN),
+ GPIO_FN(SPDIF_OUT),
/* NANDFMC */ /* NOTE Controller is not available in boot mode 0 */
- PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
- PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+ GPIO_FN(FCE),
+ GPIO_FN(FRB),
/* VDC3 */
- PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK),
-
- PINMUX_GPIO(GPIO_FN_DV_DATA23, DV_DATA23_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA22, DV_DATA22_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA21, DV_DATA21_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA20, DV_DATA20_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA19, DV_DATA19_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA18, DV_DATA18_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA17, DV_DATA17_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA16, DV_DATA16_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA15, DV_DATA15_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA14, DV_DATA14_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA13, DV_DATA13_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA12, DV_DATA12_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA11, DV_DATA11_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA10, DV_DATA10_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA9, DV_DATA9_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA8, DV_DATA8_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA7, DV_DATA7_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA6, DV_DATA6_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA5, DV_DATA5_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA4, DV_DATA4_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA3, DV_DATA3_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA2, DV_DATA2_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA1, DV_DATA1_MARK),
- PINMUX_GPIO(GPIO_FN_DV_DATA0, DV_DATA0_MARK),
-
- PINMUX_GPIO(GPIO_FN_LCD_CLK, LCD_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_EXTCLK, LCD_EXTCLK_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_VSYNC, LCD_VSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_HSYNC, LCD_HSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DE, LCD_DE_MARK),
-
- PINMUX_GPIO(GPIO_FN_LCD_DATA23_PG23, LCD_DATA23_PG23_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA22_PG22, LCD_DATA22_PG22_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA21_PG21, LCD_DATA21_PG21_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA20_PG20, LCD_DATA20_PG20_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA19_PG19, LCD_DATA19_PG19_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA18_PG18, LCD_DATA18_PG18_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA17_PG17, LCD_DATA17_PG17_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA16_PG16, LCD_DATA16_PG16_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA15_PG15, LCD_DATA15_PG15_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA14_PG14, LCD_DATA14_PG14_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA13_PG13, LCD_DATA13_PG13_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA12_PG12, LCD_DATA12_PG12_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA11_PG11, LCD_DATA11_PG11_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA10_PG10, LCD_DATA10_PG10_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA9_PG9, LCD_DATA9_PG9_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA8_PG8, LCD_DATA8_PG8_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA7_PG7, LCD_DATA7_PG7_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA6_PG6, LCD_DATA6_PG6_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA5_PG5, LCD_DATA5_PG5_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA4_PG4, LCD_DATA4_PG4_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA3_PG3, LCD_DATA3_PG3_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA2_PG2, LCD_DATA2_PG2_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA1_PG1, LCD_DATA1_PG1_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA0_PG0, LCD_DATA0_PG0_MARK),
-
- PINMUX_GPIO(GPIO_FN_LCD_DATA23_PJ23, LCD_DATA23_PJ23_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA22_PJ22, LCD_DATA22_PJ22_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA21_PJ21, LCD_DATA21_PJ21_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA20_PJ20, LCD_DATA20_PJ20_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA19_PJ19, LCD_DATA19_PJ19_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA18_PJ18, LCD_DATA18_PJ18_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA17_PJ17, LCD_DATA17_PJ17_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA16_PJ16, LCD_DATA16_PJ16_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA15_PJ15, LCD_DATA15_PJ15_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA14_PJ14, LCD_DATA14_PJ14_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA13_PJ13, LCD_DATA13_PJ13_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA12_PJ12, LCD_DATA12_PJ12_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA11_PJ11, LCD_DATA11_PJ11_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA10_PJ10, LCD_DATA10_PJ10_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA9_PJ9, LCD_DATA9_PJ9_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA8_PJ8, LCD_DATA8_PJ8_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA7_PJ7, LCD_DATA7_PJ7_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA6_PJ6, LCD_DATA6_PJ6_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA5_PJ5, LCD_DATA5_PJ5_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA4_PJ4, LCD_DATA4_PJ4_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA3_PJ3, LCD_DATA3_PJ3_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA2_PJ2, LCD_DATA2_PJ2_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA1_PJ1, LCD_DATA1_PJ1_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA0_PJ0, LCD_DATA0_PJ0_MARK),
-
- PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK),
+ GPIO_FN(DV_CLK),
+ GPIO_FN(DV_VSYNC),
+ GPIO_FN(DV_HSYNC),
+
+ GPIO_FN(DV_DATA23),
+ GPIO_FN(DV_DATA22),
+ GPIO_FN(DV_DATA21),
+ GPIO_FN(DV_DATA20),
+ GPIO_FN(DV_DATA19),
+ GPIO_FN(DV_DATA18),
+ GPIO_FN(DV_DATA17),
+ GPIO_FN(DV_DATA16),
+ GPIO_FN(DV_DATA15),
+ GPIO_FN(DV_DATA14),
+ GPIO_FN(DV_DATA13),
+ GPIO_FN(DV_DATA12),
+ GPIO_FN(DV_DATA11),
+ GPIO_FN(DV_DATA10),
+ GPIO_FN(DV_DATA9),
+ GPIO_FN(DV_DATA8),
+ GPIO_FN(DV_DATA7),
+ GPIO_FN(DV_DATA6),
+ GPIO_FN(DV_DATA5),
+ GPIO_FN(DV_DATA4),
+ GPIO_FN(DV_DATA3),
+ GPIO_FN(DV_DATA2),
+ GPIO_FN(DV_DATA1),
+ GPIO_FN(DV_DATA0),
+
+ GPIO_FN(LCD_CLK),
+ GPIO_FN(LCD_EXTCLK),
+ GPIO_FN(LCD_VSYNC),
+ GPIO_FN(LCD_HSYNC),
+ GPIO_FN(LCD_DE),
+
+ GPIO_FN(LCD_DATA23_PG23),
+ GPIO_FN(LCD_DATA22_PG22),
+ GPIO_FN(LCD_DATA21_PG21),
+ GPIO_FN(LCD_DATA20_PG20),
+ GPIO_FN(LCD_DATA19_PG19),
+ GPIO_FN(LCD_DATA18_PG18),
+ GPIO_FN(LCD_DATA17_PG17),
+ GPIO_FN(LCD_DATA16_PG16),
+ GPIO_FN(LCD_DATA15_PG15),
+ GPIO_FN(LCD_DATA14_PG14),
+ GPIO_FN(LCD_DATA13_PG13),
+ GPIO_FN(LCD_DATA12_PG12),
+ GPIO_FN(LCD_DATA11_PG11),
+ GPIO_FN(LCD_DATA10_PG10),
+ GPIO_FN(LCD_DATA9_PG9),
+ GPIO_FN(LCD_DATA8_PG8),
+ GPIO_FN(LCD_DATA7_PG7),
+ GPIO_FN(LCD_DATA6_PG6),
+ GPIO_FN(LCD_DATA5_PG5),
+ GPIO_FN(LCD_DATA4_PG4),
+ GPIO_FN(LCD_DATA3_PG3),
+ GPIO_FN(LCD_DATA2_PG2),
+ GPIO_FN(LCD_DATA1_PG1),
+ GPIO_FN(LCD_DATA0_PG0),
+
+ GPIO_FN(LCD_DATA23_PJ23),
+ GPIO_FN(LCD_DATA22_PJ22),
+ GPIO_FN(LCD_DATA21_PJ21),
+ GPIO_FN(LCD_DATA20_PJ20),
+ GPIO_FN(LCD_DATA19_PJ19),
+ GPIO_FN(LCD_DATA18_PJ18),
+ GPIO_FN(LCD_DATA17_PJ17),
+ GPIO_FN(LCD_DATA16_PJ16),
+ GPIO_FN(LCD_DATA15_PJ15),
+ GPIO_FN(LCD_DATA14_PJ14),
+ GPIO_FN(LCD_DATA13_PJ13),
+ GPIO_FN(LCD_DATA12_PJ12),
+ GPIO_FN(LCD_DATA11_PJ11),
+ GPIO_FN(LCD_DATA10_PJ10),
+ GPIO_FN(LCD_DATA9_PJ9),
+ GPIO_FN(LCD_DATA8_PJ8),
+ GPIO_FN(LCD_DATA7_PJ7),
+ GPIO_FN(LCD_DATA6_PJ6),
+ GPIO_FN(LCD_DATA5_PJ5),
+ GPIO_FN(LCD_DATA4_PJ4),
+ GPIO_FN(LCD_DATA3_PJ3),
+ GPIO_FN(LCD_DATA2_PJ2),
+ GPIO_FN(LCD_DATA1_PJ1),
+ GPIO_FN(LCD_DATA0_PJ0),
+
+ GPIO_FN(LCD_M_DISP),
};
-static struct pinmux_cfg_reg pinmux_config_regs[] = {
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* "name" addr register_size Field_Width */
 /* where Field_Width is 1 for single mode registers or 4 for up to 16
@@ -2734,7 +2738,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
{}
};
-static struct pinmux_data_reg pinmux_data_regs[] = {
+static const struct pinmux_data_reg pinmux_data_regs[] = {
{ PINMUX_DATA_REG("PADR0", 0xfffe3816, 16) {
0, 0, 0, 0, 0, 0, 0, PA1_DATA,
0, 0, 0, 0, 0, 0, 0, PA0_DATA }
@@ -2813,19 +2817,17 @@ static struct pinmux_data_reg pinmux_data_regs[] = {
{ }
};
-struct sh_pfc_soc_info sh7269_pinmux_info = {
+const struct sh_pfc_soc_info sh7269_pinmux_info = {
.name = "sh7269_pfc",
- .reserved_id = PINMUX_RESERVED,
- .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END, FORCE_IN },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END, FORCE_OUT },
- .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
- .first_gpio = GPIO_PA1,
- .last_gpio = GPIO_FN_LCD_M_DISP,
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .func_gpios = pinmux_func_gpios,
+ .nr_func_gpios = ARRAY_SIZE(pinmux_func_gpios),
- .gpios = pinmux_gpios,
.cfg_regs = pinmux_config_regs,
.data_regs = pinmux_data_regs,
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7372.c b/drivers/pinctrl/sh-pfc/pfc-sh7372.c
index d44e7f0..df0ae21 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7372.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7372.c
@@ -368,7 +368,7 @@ enum {
PINMUX_MARK_END,
};
-static pinmux_enum_t pinmux_data[] = {
+static const pinmux_enum_t pinmux_data[] = {
/* specify valid pin states for each pin in GPIO mode */
PORT_DATA_IO_PD(0), PORT_DATA_IO_PD(1),
@@ -929,11 +929,214 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(MFIv4_MARK, MSEL4CR_6_1),
};
-static struct pinmux_gpio pinmux_gpios[] = {
-
- /* PORT */
+static struct sh_pfc_pin pinmux_pins[] = {
GPIO_PORT_ALL(),
+};
+/* - MMCIF ------------------------------------------------------------------ */
+static const unsigned int mmc0_data1_0_pins[] = {
+ /* D[0] */
+ 84,
+};
+static const unsigned int mmc0_data1_0_mux[] = {
+ MMCD0_0_MARK,
+};
+static const unsigned int mmc0_data4_0_pins[] = {
+ /* D[0:3] */
+ 84, 85, 86, 87,
+};
+static const unsigned int mmc0_data4_0_mux[] = {
+ MMCD0_0_MARK, MMCD0_1_MARK, MMCD0_2_MARK, MMCD0_3_MARK,
+};
+static const unsigned int mmc0_data8_0_pins[] = {
+ /* D[0:7] */
+ 84, 85, 86, 87, 88, 89, 90, 91,
+};
+static const unsigned int mmc0_data8_0_mux[] = {
+ MMCD0_0_MARK, MMCD0_1_MARK, MMCD0_2_MARK, MMCD0_3_MARK,
+ MMCD0_4_MARK, MMCD0_5_MARK, MMCD0_6_MARK, MMCD0_7_MARK,
+};
+static const unsigned int mmc0_ctrl_0_pins[] = {
+ /* CMD, CLK */
+ 92, 99,
+};
+static const unsigned int mmc0_ctrl_0_mux[] = {
+ MMCCMD0_MARK, MMCCLK0_MARK,
+};
+
+static const unsigned int mmc0_data1_1_pins[] = {
+ /* D[0] */
+ 54,
+};
+static const unsigned int mmc0_data1_1_mux[] = {
+ MMCD1_0_MARK,
+};
+static const unsigned int mmc0_data4_1_pins[] = {
+ /* D[0:3] */
+ 54, 55, 56, 57,
+};
+static const unsigned int mmc0_data4_1_mux[] = {
+ MMCD1_0_MARK, MMCD1_1_MARK, MMCD1_2_MARK, MMCD1_3_MARK,
+};
+static const unsigned int mmc0_data8_1_pins[] = {
+ /* D[0:7] */
+ 54, 55, 56, 57, 58, 59, 60, 61,
+};
+static const unsigned int mmc0_data8_1_mux[] = {
+ MMCD1_0_MARK, MMCD1_1_MARK, MMCD1_2_MARK, MMCD1_3_MARK,
+ MMCD1_4_MARK, MMCD1_5_MARK, MMCD1_6_MARK, MMCD1_7_MARK,
+};
+static const unsigned int mmc0_ctrl_1_pins[] = {
+ /* CMD, CLK */
+ 67, 66,
+};
+static const unsigned int mmc0_ctrl_1_mux[] = {
+ MMCCMD1_MARK, MMCCLK1_MARK,
+};
+/* - SDHI0 ------------------------------------------------------------------ */
+static const unsigned int sdhi0_data1_pins[] = {
+ /* D0 */
+ 173,
+};
+static const unsigned int sdhi0_data1_mux[] = {
+ SDHID0_0_MARK,
+};
+static const unsigned int sdhi0_data4_pins[] = {
+ /* D[0:3] */
+ 173, 174, 175, 176,
+};
+static const unsigned int sdhi0_data4_mux[] = {
+ SDHID0_0_MARK, SDHID0_1_MARK, SDHID0_2_MARK, SDHID0_3_MARK,
+};
+static const unsigned int sdhi0_ctrl_pins[] = {
+ /* CMD, CLK */
+ 177, 171,
+};
+static const unsigned int sdhi0_ctrl_mux[] = {
+ SDHICMD0_MARK, SDHICLK0_MARK,
+};
+static const unsigned int sdhi0_cd_pins[] = {
+ /* CD */
+ 172,
+};
+static const unsigned int sdhi0_cd_mux[] = {
+ SDHICD0_MARK,
+};
+static const unsigned int sdhi0_wp_pins[] = {
+ /* WP */
+ 178,
+};
+static const unsigned int sdhi0_wp_mux[] = {
+ SDHIWP0_MARK,
+};
+/* - SDHI1 ------------------------------------------------------------------ */
+static const unsigned int sdhi1_data1_pins[] = {
+ /* D0 */
+ 180,
+};
+static const unsigned int sdhi1_data1_mux[] = {
+ SDHID1_0_MARK,
+};
+static const unsigned int sdhi1_data4_pins[] = {
+ /* D[0:3] */
+ 180, 181, 182, 183,
+};
+static const unsigned int sdhi1_data4_mux[] = {
+ SDHID1_0_MARK, SDHID1_1_MARK, SDHID1_2_MARK, SDHID1_3_MARK,
+};
+static const unsigned int sdhi1_ctrl_pins[] = {
+ /* CMD, CLK */
+ 184, 179,
+};
+static const unsigned int sdhi1_ctrl_mux[] = {
+ SDHICMD1_MARK, SDHICLK1_MARK,
+};
+
+static const unsigned int sdhi2_data1_pins[] = {
+ /* D0 */
+ 186,
+};
+static const unsigned int sdhi2_data1_mux[] = {
+ SDHID2_0_MARK,
+};
+static const unsigned int sdhi2_data4_pins[] = {
+ /* D[0:3] */
+ 186, 187, 188, 189,
+};
+static const unsigned int sdhi2_data4_mux[] = {
+ SDHID2_0_MARK, SDHID2_1_MARK, SDHID2_2_MARK, SDHID2_3_MARK,
+};
+static const unsigned int sdhi2_ctrl_pins[] = {
+ /* CMD, CLK */
+ 190, 185,
+};
+static const unsigned int sdhi2_ctrl_mux[] = {
+ SDHICMD2_MARK, SDHICLK2_MARK,
+};
+
+static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(mmc0_data1_0),
+ SH_PFC_PIN_GROUP(mmc0_data4_0),
+ SH_PFC_PIN_GROUP(mmc0_data8_0),
+ SH_PFC_PIN_GROUP(mmc0_ctrl_0),
+ SH_PFC_PIN_GROUP(mmc0_data1_1),
+ SH_PFC_PIN_GROUP(mmc0_data4_1),
+ SH_PFC_PIN_GROUP(mmc0_data8_1),
+ SH_PFC_PIN_GROUP(mmc0_ctrl_1),
+ SH_PFC_PIN_GROUP(sdhi0_data1),
+ SH_PFC_PIN_GROUP(sdhi0_data4),
+ SH_PFC_PIN_GROUP(sdhi0_ctrl),
+ SH_PFC_PIN_GROUP(sdhi0_cd),
+ SH_PFC_PIN_GROUP(sdhi0_wp),
+ SH_PFC_PIN_GROUP(sdhi1_data1),
+ SH_PFC_PIN_GROUP(sdhi1_data4),
+ SH_PFC_PIN_GROUP(sdhi1_ctrl),
+ SH_PFC_PIN_GROUP(sdhi2_data1),
+ SH_PFC_PIN_GROUP(sdhi2_data4),
+ SH_PFC_PIN_GROUP(sdhi2_ctrl),
+};
+
+static const char * const mmc0_groups[] = {
+ "mmc0_data1_0",
+ "mmc0_data4_0",
+ "mmc0_data8_0",
+ "mmc0_ctrl_0",
+ "mmc0_data1_1",
+ "mmc0_data4_1",
+ "mmc0_data8_1",
+ "mmc0_ctrl_1",
+};
+
+static const char * const sdhi0_groups[] = {
+ "sdhi0_data1",
+ "sdhi0_data4",
+ "sdhi0_ctrl",
+ "sdhi0_cd",
+ "sdhi0_wp",
+};
+
+static const char * const sdhi1_groups[] = {
+ "sdhi1_data1",
+ "sdhi1_data4",
+ "sdhi1_ctrl",
+};
+
+static const char * const sdhi2_groups[] = {
+ "sdhi2_data1",
+ "sdhi2_data4",
+ "sdhi2_ctrl",
+};
+
+static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(mmc0),
+ SH_PFC_FUNCTION(sdhi0),
+ SH_PFC_FUNCTION(sdhi1),
+ SH_PFC_FUNCTION(sdhi2),
+};
+
+#define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins)
+
+static const struct pinmux_func pinmux_func_gpios[] = {
/* IRQ */
GPIO_FN(IRQ0_6), GPIO_FN(IRQ0_162), GPIO_FN(IRQ1),
GPIO_FN(IRQ2_4), GPIO_FN(IRQ2_5), GPIO_FN(IRQ3_8),
@@ -1074,18 +1277,6 @@ static struct pinmux_gpio pinmux_gpios[] = {
GPIO_FN(D11_NAF11), GPIO_FN(D12_NAF12), GPIO_FN(D13_NAF13),
GPIO_FN(D14_NAF14), GPIO_FN(D15_NAF15),
- /* MMCIF(1) */
- GPIO_FN(MMCD0_0), GPIO_FN(MMCD0_1), GPIO_FN(MMCD0_2),
- GPIO_FN(MMCD0_3), GPIO_FN(MMCD0_4), GPIO_FN(MMCD0_5),
- GPIO_FN(MMCD0_6), GPIO_FN(MMCD0_7), GPIO_FN(MMCCMD0),
- GPIO_FN(MMCCLK0),
-
- /* MMCIF(2) */
- GPIO_FN(MMCD1_0), GPIO_FN(MMCD1_1), GPIO_FN(MMCD1_2),
- GPIO_FN(MMCD1_3), GPIO_FN(MMCD1_4), GPIO_FN(MMCD1_5),
- GPIO_FN(MMCD1_6), GPIO_FN(MMCD1_7), GPIO_FN(MMCCLK1),
- GPIO_FN(MMCCMD1),
-
/* SPU2 */
GPIO_FN(VINT_I),
@@ -1182,25 +1373,12 @@ static struct pinmux_gpio pinmux_gpios[] = {
/* HDMI */
GPIO_FN(HDMI_HPD), GPIO_FN(HDMI_CEC),
- /* SDHI0 */
- GPIO_FN(SDHICLK0), GPIO_FN(SDHICD0), GPIO_FN(SDHICMD0),
- GPIO_FN(SDHIWP0), GPIO_FN(SDHID0_0), GPIO_FN(SDHID0_1),
- GPIO_FN(SDHID0_2), GPIO_FN(SDHID0_3),
-
- /* SDHI1 */
- GPIO_FN(SDHICLK1), GPIO_FN(SDHICMD1), GPIO_FN(SDHID1_0),
- GPIO_FN(SDHID1_1), GPIO_FN(SDHID1_2), GPIO_FN(SDHID1_3),
-
- /* SDHI2 */
- GPIO_FN(SDHICLK2), GPIO_FN(SDHICMD2), GPIO_FN(SDHID2_0),
- GPIO_FN(SDHID2_1), GPIO_FN(SDHID2_2), GPIO_FN(SDHID2_3),
-
/* SDENC */
GPIO_FN(SDENC_CPG),
GPIO_FN(SDENC_DV_CLKI),
};
-static struct pinmux_cfg_reg pinmux_config_regs[] = {
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PORTCR(0, 0xE6051000), /* PORT0CR */
PORTCR(1, 0xE6051001), /* PORT1CR */
PORTCR(2, 0xE6051002), /* PORT2CR */
@@ -1472,7 +1650,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
{ },
};
-static struct pinmux_data_reg pinmux_data_regs[] = {
+static const struct pinmux_data_reg pinmux_data_regs[] = {
{ PINMUX_DATA_REG("PORTL095_064DR", 0xE6054008, 32) {
PORT95_DATA, PORT94_DATA, PORT93_DATA, PORT92_DATA,
PORT91_DATA, PORT90_DATA, PORT89_DATA, PORT88_DATA,
@@ -1597,56 +1775,59 @@ static struct pinmux_data_reg pinmux_data_regs[] = {
#define EXT_IRQ16L(n) evt2irq(0x200 + ((n) << 5))
#define EXT_IRQ16H(n) evt2irq(0x3200 + (((n) - 16) << 5))
-static struct pinmux_irq pinmux_irqs[] = {
- PINMUX_IRQ(EXT_IRQ16L(0), PORT6_FN0, PORT162_FN0),
- PINMUX_IRQ(EXT_IRQ16L(1), PORT12_FN0),
- PINMUX_IRQ(EXT_IRQ16L(2), PORT4_FN0, PORT5_FN0),
- PINMUX_IRQ(EXT_IRQ16L(3), PORT8_FN0, PORT16_FN0),
- PINMUX_IRQ(EXT_IRQ16L(4), PORT17_FN0, PORT163_FN0),
- PINMUX_IRQ(EXT_IRQ16L(5), PORT18_FN0),
- PINMUX_IRQ(EXT_IRQ16L(6), PORT39_FN0, PORT164_FN0),
- PINMUX_IRQ(EXT_IRQ16L(7), PORT40_FN0, PORT167_FN0),
- PINMUX_IRQ(EXT_IRQ16L(8), PORT41_FN0, PORT168_FN0),
- PINMUX_IRQ(EXT_IRQ16L(9), PORT42_FN0, PORT169_FN0),
- PINMUX_IRQ(EXT_IRQ16L(10), PORT65_FN0),
- PINMUX_IRQ(EXT_IRQ16L(11), PORT67_FN0),
- PINMUX_IRQ(EXT_IRQ16L(12), PORT80_FN0, PORT137_FN0),
- PINMUX_IRQ(EXT_IRQ16L(13), PORT81_FN0, PORT145_FN0),
- PINMUX_IRQ(EXT_IRQ16L(14), PORT82_FN0, PORT146_FN0),
- PINMUX_IRQ(EXT_IRQ16L(15), PORT83_FN0, PORT147_FN0),
- PINMUX_IRQ(EXT_IRQ16H(16), PORT84_FN0, PORT170_FN0),
- PINMUX_IRQ(EXT_IRQ16H(17), PORT85_FN0),
- PINMUX_IRQ(EXT_IRQ16H(18), PORT86_FN0),
- PINMUX_IRQ(EXT_IRQ16H(19), PORT87_FN0),
- PINMUX_IRQ(EXT_IRQ16H(20), PORT92_FN0),
- PINMUX_IRQ(EXT_IRQ16H(21), PORT93_FN0),
- PINMUX_IRQ(EXT_IRQ16H(22), PORT94_FN0),
- PINMUX_IRQ(EXT_IRQ16H(23), PORT95_FN0),
- PINMUX_IRQ(EXT_IRQ16H(24), PORT112_FN0),
- PINMUX_IRQ(EXT_IRQ16H(25), PORT119_FN0),
- PINMUX_IRQ(EXT_IRQ16H(26), PORT121_FN0, PORT172_FN0),
- PINMUX_IRQ(EXT_IRQ16H(27), PORT122_FN0, PORT180_FN0),
- PINMUX_IRQ(EXT_IRQ16H(28), PORT123_FN0, PORT181_FN0),
- PINMUX_IRQ(EXT_IRQ16H(29), PORT129_FN0, PORT182_FN0),
- PINMUX_IRQ(EXT_IRQ16H(30), PORT130_FN0, PORT183_FN0),
- PINMUX_IRQ(EXT_IRQ16H(31), PORT138_FN0, PORT184_FN0),
+static const struct pinmux_irq pinmux_irqs[] = {
+ PINMUX_IRQ(EXT_IRQ16L(0), GPIO_PORT6, GPIO_PORT162),
+ PINMUX_IRQ(EXT_IRQ16L(1), GPIO_PORT12),
+ PINMUX_IRQ(EXT_IRQ16L(2), GPIO_PORT4, GPIO_PORT5),
+ PINMUX_IRQ(EXT_IRQ16L(3), GPIO_PORT8, GPIO_PORT16),
+ PINMUX_IRQ(EXT_IRQ16L(4), GPIO_PORT17, GPIO_PORT163),
+ PINMUX_IRQ(EXT_IRQ16L(5), GPIO_PORT18),
+ PINMUX_IRQ(EXT_IRQ16L(6), GPIO_PORT39, GPIO_PORT164),
+ PINMUX_IRQ(EXT_IRQ16L(7), GPIO_PORT40, GPIO_PORT167),
+ PINMUX_IRQ(EXT_IRQ16L(8), GPIO_PORT41, GPIO_PORT168),
+ PINMUX_IRQ(EXT_IRQ16L(9), GPIO_PORT42, GPIO_PORT169),
+ PINMUX_IRQ(EXT_IRQ16L(10), GPIO_PORT65),
+ PINMUX_IRQ(EXT_IRQ16L(11), GPIO_PORT67),
+ PINMUX_IRQ(EXT_IRQ16L(12), GPIO_PORT80, GPIO_PORT137),
+ PINMUX_IRQ(EXT_IRQ16L(13), GPIO_PORT81, GPIO_PORT145),
+ PINMUX_IRQ(EXT_IRQ16L(14), GPIO_PORT82, GPIO_PORT146),
+ PINMUX_IRQ(EXT_IRQ16L(15), GPIO_PORT83, GPIO_PORT147),
+ PINMUX_IRQ(EXT_IRQ16H(16), GPIO_PORT84, GPIO_PORT170),
+ PINMUX_IRQ(EXT_IRQ16H(17), GPIO_PORT85),
+ PINMUX_IRQ(EXT_IRQ16H(18), GPIO_PORT86),
+ PINMUX_IRQ(EXT_IRQ16H(19), GPIO_PORT87),
+ PINMUX_IRQ(EXT_IRQ16H(20), GPIO_PORT92),
+ PINMUX_IRQ(EXT_IRQ16H(21), GPIO_PORT93),
+ PINMUX_IRQ(EXT_IRQ16H(22), GPIO_PORT94),
+ PINMUX_IRQ(EXT_IRQ16H(23), GPIO_PORT95),
+ PINMUX_IRQ(EXT_IRQ16H(24), GPIO_PORT112),
+ PINMUX_IRQ(EXT_IRQ16H(25), GPIO_PORT119),
+ PINMUX_IRQ(EXT_IRQ16H(26), GPIO_PORT121, GPIO_PORT172),
+ PINMUX_IRQ(EXT_IRQ16H(27), GPIO_PORT122, GPIO_PORT180),
+ PINMUX_IRQ(EXT_IRQ16H(28), GPIO_PORT123, GPIO_PORT181),
+ PINMUX_IRQ(EXT_IRQ16H(29), GPIO_PORT129, GPIO_PORT182),
+ PINMUX_IRQ(EXT_IRQ16H(30), GPIO_PORT130, GPIO_PORT183),
+ PINMUX_IRQ(EXT_IRQ16H(31), GPIO_PORT138, GPIO_PORT184),
};
-struct sh_pfc_soc_info sh7372_pinmux_info = {
+const struct sh_pfc_soc_info sh7372_pinmux_info = {
.name = "sh7372_pfc",
- .reserved_id = PINMUX_RESERVED,
- .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
.input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
.input_pd = { PINMUX_INPUT_PULLDOWN_BEGIN, PINMUX_INPUT_PULLDOWN_END },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
- .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
- .first_gpio = GPIO_PORT0,
- .last_gpio = GPIO_FN_SDENC_DV_CLKI,
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups,
+ .nr_groups = ARRAY_SIZE(pinmux_groups),
+ .functions = pinmux_functions,
+ .nr_functions = ARRAY_SIZE(pinmux_functions),
+
+ .func_gpios = pinmux_func_gpios,
+ .nr_func_gpios = ARRAY_SIZE(pinmux_func_gpios),
- .gpios = pinmux_gpios,
.cfg_regs = pinmux_config_regs,
.data_regs = pinmux_data_regs,
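
The sh7372 change above goes one step further than the earlier files: the MMCIF and SDHI function GPIOs are dropped from pinmux_func_gpios[] and replaced by pin groups (SH_PFC_PIN_GROUP) and functions (SH_PFC_FUNCTION), which the generic pinctrl framework selects by (function, group) name pairs. The sketch below shows how a board file could consume those names through the standard pinctrl mapping API; the group and function names are taken from this diff, but the consumer device name, the pinctrl device name and the init hook are placeholders, not something this patch defines.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinctrl-state.h>

/* Placeholder mapping: route SDHI0's 4-bit data and ctrl groups to the
 * "sdhi0" function for a hypothetical "sh_mobile_sdhi.0" device.
 */
static const struct pinctrl_map example_pinctrl_map[] __initconst = {
	PIN_MAP_MUX_GROUP("sh_mobile_sdhi.0", PINCTRL_STATE_DEFAULT,
			  "pfc-sh7372", "sdhi0_data4", "sdhi0"),
	PIN_MAP_MUX_GROUP("sh_mobile_sdhi.0", PINCTRL_STATE_DEFAULT,
			  "pfc-sh7372", "sdhi0_ctrl", "sdhi0"),
};

static int __init example_board_pinmux_init(void)
{
	return pinctrl_register_mappings(example_pinctrl_map,
					 ARRAY_SIZE(example_pinctrl_map));
}
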
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
index 709008e..587f777 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
@@ -18,18 +18,18 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/pinctrl/pinconf-generic.h>
+
#include <mach/sh73a0.h>
#include <mach/irqs.h>
+#include "core.h"
#include "sh_pfc.h"
#define CPU_ALL_PORT(fn, pfx, sfx) \
- PORT_10(fn, pfx, sfx), PORT_10(fn, pfx##1, sfx), \
- PORT_10(fn, pfx##2, sfx), PORT_10(fn, pfx##3, sfx), \
- PORT_10(fn, pfx##4, sfx), PORT_10(fn, pfx##5, sfx), \
- PORT_10(fn, pfx##6, sfx), PORT_10(fn, pfx##7, sfx), \
- PORT_10(fn, pfx##8, sfx), PORT_10(fn, pfx##9, sfx), \
+ PORT_10(fn, pfx, sfx), PORT_90(fn, pfx, sfx), \
PORT_10(fn, pfx##10, sfx), \
PORT_1(fn, pfx##110, sfx), PORT_1(fn, pfx##111, sfx), \
PORT_1(fn, pfx##112, sfx), PORT_1(fn, pfx##113, sfx), \
@@ -66,14 +66,6 @@ enum {
PORT_ALL(IN), /* PORT0_IN -> PORT309_IN */
PINMUX_INPUT_END,
- PINMUX_INPUT_PULLUP_BEGIN,
- PORT_ALL(IN_PU), /* PORT0_IN_PU -> PORT309_IN_PU */
- PINMUX_INPUT_PULLUP_END,
-
- PINMUX_INPUT_PULLDOWN_BEGIN,
- PORT_ALL(IN_PD), /* PORT0_IN_PD -> PORT309_IN_PD */
- PINMUX_INPUT_PULLDOWN_END,
-
PINMUX_OUTPUT_BEGIN,
PORT_ALL(OUT), /* PORT0_OUT -> PORT309_OUT */
PINMUX_OUTPUT_END,
@@ -468,328 +460,15 @@ enum {
EDBGREQ_PD_MARK,
EDBGREQ_PU_MARK,
- /* Functions with pull-ups */
- KEYIN0_PU_MARK,
- KEYIN1_PU_MARK,
- KEYIN2_PU_MARK,
- KEYIN3_PU_MARK,
- KEYIN4_PU_MARK,
- KEYIN5_PU_MARK,
- KEYIN6_PU_MARK,
- KEYIN7_PU_MARK,
- SDHICD0_PU_MARK,
- SDHID0_0_PU_MARK,
- SDHID0_1_PU_MARK,
- SDHID0_2_PU_MARK,
- SDHID0_3_PU_MARK,
- SDHICMD0_PU_MARK,
- SDHIWP0_PU_MARK,
- SDHID1_0_PU_MARK,
- SDHID1_1_PU_MARK,
- SDHID1_2_PU_MARK,
- SDHID1_3_PU_MARK,
- SDHICMD1_PU_MARK,
- SDHID2_0_PU_MARK,
- SDHID2_1_PU_MARK,
- SDHID2_2_PU_MARK,
- SDHID2_3_PU_MARK,
- SDHICMD2_PU_MARK,
- MMCCMD0_PU_MARK,
- MMCCMD1_PU_MARK,
- MMCD0_0_PU_MARK,
- MMCD0_1_PU_MARK,
- MMCD0_2_PU_MARK,
- MMCD0_3_PU_MARK,
- MMCD0_4_PU_MARK,
- MMCD0_5_PU_MARK,
- MMCD0_6_PU_MARK,
- MMCD0_7_PU_MARK,
- FSIBISLD_PU_MARK,
- FSIACK_PU_MARK,
- FSIAILR_PU_MARK,
- FSIAIBT_PU_MARK,
- FSIAISLD_PU_MARK,
-
PINMUX_MARK_END,
};
-static pinmux_enum_t pinmux_data[] = {
- /* specify valid pin states for each pin in GPIO mode */
+#define _PORT_DATA(pfx, sfx) PORT_DATA_IO(pfx)
+#define PINMUX_DATA_GP_ALL() CPU_ALL_PORT(_PORT_DATA, , unused)
- /* Table 25-1 (I/O and Pull U/D) */
- PORT_DATA_I_PD(0),
- PORT_DATA_I_PU(1),
- PORT_DATA_I_PU(2),
- PORT_DATA_I_PU(3),
- PORT_DATA_I_PU(4),
- PORT_DATA_I_PU(5),
- PORT_DATA_I_PU(6),
- PORT_DATA_I_PU(7),
- PORT_DATA_I_PU(8),
- PORT_DATA_I_PD(9),
- PORT_DATA_I_PD(10),
- PORT_DATA_I_PU_PD(11),
- PORT_DATA_IO_PU_PD(12),
- PORT_DATA_IO_PU_PD(13),
- PORT_DATA_IO_PU_PD(14),
- PORT_DATA_IO_PU_PD(15),
- PORT_DATA_IO_PD(16),
- PORT_DATA_IO_PD(17),
- PORT_DATA_IO_PU(18),
- PORT_DATA_IO_PU(19),
- PORT_DATA_O(20),
- PORT_DATA_O(21),
- PORT_DATA_O(22),
- PORT_DATA_O(23),
- PORT_DATA_O(24),
- PORT_DATA_I_PD(25),
- PORT_DATA_I_PD(26),
- PORT_DATA_IO_PU(27),
- PORT_DATA_IO_PU(28),
- PORT_DATA_IO_PD(29),
- PORT_DATA_IO_PD(30),
- PORT_DATA_IO_PU(31),
- PORT_DATA_IO_PD(32),
- PORT_DATA_I_PU_PD(33),
- PORT_DATA_IO_PD(34),
- PORT_DATA_I_PU_PD(35),
- PORT_DATA_IO_PD(36),
- PORT_DATA_IO(37),
- PORT_DATA_O(38),
- PORT_DATA_I_PU(39),
- PORT_DATA_I_PU_PD(40),
- PORT_DATA_O(41),
- PORT_DATA_IO_PD(42),
- PORT_DATA_IO_PU_PD(43),
- PORT_DATA_IO_PU_PD(44),
- PORT_DATA_IO_PD(45),
- PORT_DATA_IO_PD(46),
- PORT_DATA_IO_PD(47),
- PORT_DATA_I_PD(48),
- PORT_DATA_IO_PU_PD(49),
- PORT_DATA_IO_PD(50),
-
- PORT_DATA_IO_PD(51),
- PORT_DATA_O(52),
- PORT_DATA_IO_PU_PD(53),
- PORT_DATA_IO_PU_PD(54),
- PORT_DATA_IO_PD(55),
- PORT_DATA_I_PU_PD(56),
- PORT_DATA_IO(57),
- PORT_DATA_IO(58),
- PORT_DATA_IO(59),
- PORT_DATA_IO(60),
- PORT_DATA_IO(61),
- PORT_DATA_IO_PD(62),
- PORT_DATA_IO_PD(63),
- PORT_DATA_IO_PU_PD(64),
- PORT_DATA_IO_PD(65),
- PORT_DATA_IO_PU_PD(66),
- PORT_DATA_IO_PU_PD(67),
- PORT_DATA_IO_PU_PD(68),
- PORT_DATA_IO_PU_PD(69),
- PORT_DATA_IO_PU_PD(70),
- PORT_DATA_IO_PU_PD(71),
- PORT_DATA_IO_PU_PD(72),
- PORT_DATA_I_PU_PD(73),
- PORT_DATA_IO_PU(74),
- PORT_DATA_IO_PU(75),
- PORT_DATA_IO_PU(76),
- PORT_DATA_IO_PU(77),
- PORT_DATA_IO_PU(78),
- PORT_DATA_IO_PU(79),
- PORT_DATA_IO_PU(80),
- PORT_DATA_IO_PU(81),
- PORT_DATA_IO_PU(82),
- PORT_DATA_IO_PU(83),
- PORT_DATA_IO_PU(84),
- PORT_DATA_IO_PU(85),
- PORT_DATA_IO_PU(86),
- PORT_DATA_IO_PU(87),
- PORT_DATA_IO_PU(88),
- PORT_DATA_IO_PU(89),
- PORT_DATA_O(90),
- PORT_DATA_IO_PU(91),
- PORT_DATA_O(92),
- PORT_DATA_IO_PU(93),
- PORT_DATA_O(94),
- PORT_DATA_I_PU_PD(95),
- PORT_DATA_IO(96),
- PORT_DATA_IO(97),
- PORT_DATA_IO(98),
- PORT_DATA_I_PU(99),
- PORT_DATA_O(100),
- PORT_DATA_O(101),
- PORT_DATA_I_PU(102),
- PORT_DATA_IO_PD(103),
- PORT_DATA_I_PU_PD(104),
- PORT_DATA_I_PD(105),
- PORT_DATA_I_PD(106),
- PORT_DATA_I_PU_PD(107),
- PORT_DATA_I_PU_PD(108),
- PORT_DATA_IO_PD(109),
- PORT_DATA_IO_PD(110),
- PORT_DATA_IO_PU_PD(111),
- PORT_DATA_IO_PU_PD(112),
- PORT_DATA_IO_PU_PD(113),
- PORT_DATA_IO_PD(114),
- PORT_DATA_IO_PU(115),
- PORT_DATA_IO_PU(116),
- PORT_DATA_IO_PU_PD(117),
- PORT_DATA_IO_PU_PD(118),
- PORT_DATA_IO_PD(128),
-
- PORT_DATA_IO_PD(129),
- PORT_DATA_IO_PU_PD(130),
- PORT_DATA_IO_PD(131),
- PORT_DATA_IO_PD(132),
- PORT_DATA_IO_PD(133),
- PORT_DATA_IO_PU_PD(134),
- PORT_DATA_IO_PU_PD(135),
- PORT_DATA_IO_PU_PD(136),
- PORT_DATA_IO_PU_PD(137),
- PORT_DATA_IO_PD(138),
- PORT_DATA_IO_PD(139),
- PORT_DATA_IO_PD(140),
- PORT_DATA_IO_PD(141),
- PORT_DATA_IO_PD(142),
- PORT_DATA_IO_PD(143),
- PORT_DATA_IO_PU_PD(144),
- PORT_DATA_IO_PD(145),
- PORT_DATA_IO_PU_PD(146),
- PORT_DATA_IO_PU_PD(147),
- PORT_DATA_IO_PU_PD(148),
- PORT_DATA_IO_PU_PD(149),
- PORT_DATA_I_PU_PD(150),
- PORT_DATA_IO_PU_PD(151),
- PORT_DATA_IO_PU_PD(152),
- PORT_DATA_IO_PD(153),
- PORT_DATA_IO_PD(154),
- PORT_DATA_I_PU_PD(155),
- PORT_DATA_IO_PU_PD(156),
- PORT_DATA_I_PD(157),
- PORT_DATA_IO_PD(158),
- PORT_DATA_IO_PU_PD(159),
- PORT_DATA_IO_PU_PD(160),
- PORT_DATA_I_PU_PD(161),
- PORT_DATA_I_PU_PD(162),
- PORT_DATA_IO_PU_PD(163),
- PORT_DATA_I_PU_PD(164),
- PORT_DATA_IO_PD(192),
- PORT_DATA_IO_PU_PD(193),
- PORT_DATA_IO_PD(194),
- PORT_DATA_IO_PU_PD(195),
- PORT_DATA_IO_PD(196),
- PORT_DATA_IO_PD(197),
- PORT_DATA_IO_PD(198),
- PORT_DATA_IO_PD(199),
- PORT_DATA_IO_PU_PD(200),
- PORT_DATA_IO_PU_PD(201),
- PORT_DATA_IO_PU_PD(202),
- PORT_DATA_IO_PU_PD(203),
- PORT_DATA_IO_PU_PD(204),
- PORT_DATA_IO_PU_PD(205),
- PORT_DATA_IO_PU_PD(206),
- PORT_DATA_IO_PD(207),
- PORT_DATA_IO_PD(208),
- PORT_DATA_IO_PD(209),
- PORT_DATA_IO_PD(210),
- PORT_DATA_IO_PD(211),
- PORT_DATA_IO_PD(212),
- PORT_DATA_IO_PD(213),
- PORT_DATA_IO_PU_PD(214),
- PORT_DATA_IO_PU_PD(215),
- PORT_DATA_IO_PD(216),
- PORT_DATA_IO_PD(217),
- PORT_DATA_O(218),
- PORT_DATA_IO_PD(219),
- PORT_DATA_IO_PD(220),
- PORT_DATA_IO_PU_PD(221),
- PORT_DATA_IO_PU_PD(222),
- PORT_DATA_I_PU_PD(223),
- PORT_DATA_I_PU_PD(224),
-
- PORT_DATA_IO_PU_PD(225),
- PORT_DATA_O(226),
- PORT_DATA_IO_PU_PD(227),
- PORT_DATA_I_PU_PD(228),
- PORT_DATA_I_PD(229),
- PORT_DATA_IO(230),
- PORT_DATA_IO_PU_PD(231),
- PORT_DATA_IO_PU_PD(232),
- PORT_DATA_I_PU_PD(233),
- PORT_DATA_IO_PU_PD(234),
- PORT_DATA_IO_PU_PD(235),
- PORT_DATA_IO_PU_PD(236),
- PORT_DATA_IO_PD(237),
- PORT_DATA_IO_PU_PD(238),
- PORT_DATA_IO_PU_PD(239),
- PORT_DATA_IO_PU_PD(240),
- PORT_DATA_O(241),
- PORT_DATA_I_PD(242),
- PORT_DATA_IO_PU_PD(243),
- PORT_DATA_IO_PU_PD(244),
- PORT_DATA_IO_PU_PD(245),
- PORT_DATA_IO_PU_PD(246),
- PORT_DATA_IO_PU_PD(247),
- PORT_DATA_IO_PU_PD(248),
- PORT_DATA_IO_PU_PD(249),
- PORT_DATA_IO_PU_PD(250),
- PORT_DATA_IO_PU_PD(251),
- PORT_DATA_IO_PU_PD(252),
- PORT_DATA_IO_PU_PD(253),
- PORT_DATA_IO_PU_PD(254),
- PORT_DATA_IO_PU_PD(255),
- PORT_DATA_IO_PU_PD(256),
- PORT_DATA_IO_PU_PD(257),
- PORT_DATA_IO_PU_PD(258),
- PORT_DATA_IO_PU_PD(259),
- PORT_DATA_IO_PU_PD(260),
- PORT_DATA_IO_PU_PD(261),
- PORT_DATA_IO_PU_PD(262),
- PORT_DATA_IO_PU_PD(263),
- PORT_DATA_IO_PU_PD(264),
- PORT_DATA_IO_PU_PD(265),
- PORT_DATA_IO_PU_PD(266),
- PORT_DATA_IO_PU_PD(267),
- PORT_DATA_IO_PU_PD(268),
- PORT_DATA_IO_PU_PD(269),
- PORT_DATA_IO_PU_PD(270),
- PORT_DATA_IO_PU_PD(271),
- PORT_DATA_IO_PU_PD(272),
- PORT_DATA_IO_PU_PD(273),
- PORT_DATA_IO_PU_PD(274),
- PORT_DATA_IO_PU_PD(275),
- PORT_DATA_IO_PU_PD(276),
- PORT_DATA_IO_PU_PD(277),
- PORT_DATA_IO_PU_PD(278),
- PORT_DATA_IO_PU_PD(279),
- PORT_DATA_IO_PU_PD(280),
- PORT_DATA_O(281),
- PORT_DATA_O(282),
- PORT_DATA_I_PU(288),
- PORT_DATA_IO_PU_PD(289),
- PORT_DATA_IO_PU_PD(290),
- PORT_DATA_IO_PU_PD(291),
- PORT_DATA_IO_PU_PD(292),
- PORT_DATA_IO_PU_PD(293),
- PORT_DATA_IO_PU_PD(294),
- PORT_DATA_IO_PU_PD(295),
- PORT_DATA_IO_PU_PD(296),
- PORT_DATA_IO_PU_PD(297),
- PORT_DATA_IO_PU_PD(298),
-
- PORT_DATA_IO_PU_PD(299),
- PORT_DATA_IO_PU_PD(300),
- PORT_DATA_IO_PU_PD(301),
- PORT_DATA_IO_PU_PD(302),
- PORT_DATA_IO_PU_PD(303),
- PORT_DATA_IO_PU_PD(304),
- PORT_DATA_IO_PU_PD(305),
- PORT_DATA_O(306),
- PORT_DATA_O(307),
- PORT_DATA_I_PU(308),
- PORT_DATA_O(309),
+static const pinmux_enum_t pinmux_data[] = {
+ /* specify valid pin states for each pin in GPIO mode */
+ PINMUX_DATA_GP_ALL(),
/* Table 25-1 (Function 0-7) */
PINMUX_DATA(VBUS_0_MARK, PORT0_FN1),
@@ -1358,28 +1037,19 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(TS_SCK4_MARK, PORT268_FN3),
PINMUX_DATA(SDHICMD2_MARK, PORT269_FN1),
PINMUX_DATA(MMCCLK0_MARK, PORT270_FN1, MSEL4CR_MSEL15_0),
- PINMUX_DATA(MMCD0_0_MARK, PORT271_FN1, PORT271_IN_PU,
- MSEL4CR_MSEL15_0),
- PINMUX_DATA(MMCD0_1_MARK, PORT272_FN1, PORT272_IN_PU,
- MSEL4CR_MSEL15_0),
- PINMUX_DATA(MMCD0_2_MARK, PORT273_FN1, PORT273_IN_PU,
- MSEL4CR_MSEL15_0),
- PINMUX_DATA(MMCD0_3_MARK, PORT274_FN1, PORT274_IN_PU,
- MSEL4CR_MSEL15_0),
- PINMUX_DATA(MMCD0_4_MARK, PORT275_FN1, PORT275_IN_PU,
- MSEL4CR_MSEL15_0), \
+ PINMUX_DATA(MMCD0_0_MARK, PORT271_FN1, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_1_MARK, PORT272_FN1, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_2_MARK, PORT273_FN1, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_3_MARK, PORT274_FN1, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_4_MARK, PORT275_FN1, MSEL4CR_MSEL15_0),
PINMUX_DATA(TS_SPSYNC5_MARK, PORT275_FN3),
- PINMUX_DATA(MMCD0_5_MARK, PORT276_FN1, PORT276_IN_PU,
- MSEL4CR_MSEL15_0), \
+ PINMUX_DATA(MMCD0_5_MARK, PORT276_FN1, MSEL4CR_MSEL15_0),
PINMUX_DATA(TS_SDAT5_MARK, PORT276_FN3),
- PINMUX_DATA(MMCD0_6_MARK, PORT277_FN1, PORT277_IN_PU,
- MSEL4CR_MSEL15_0), \
+ PINMUX_DATA(MMCD0_6_MARK, PORT277_FN1, MSEL4CR_MSEL15_0),
PINMUX_DATA(TS_SDEN5_MARK, PORT277_FN3),
- PINMUX_DATA(MMCD0_7_MARK, PORT278_FN1, PORT278_IN_PU,
- MSEL4CR_MSEL15_0), \
+ PINMUX_DATA(MMCD0_7_MARK, PORT278_FN1, MSEL4CR_MSEL15_0),
PINMUX_DATA(TS_SCK5_MARK, PORT278_FN3),
- PINMUX_DATA(MMCCMD0_MARK, PORT279_FN1, PORT279_IN_PU,
- MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCCMD0_MARK, PORT279_FN1, MSEL4CR_MSEL15_0),
PINMUX_DATA(RESETOUTS__MARK, PORT281_FN1), \
PINMUX_DATA(EXTAL2OUT_MARK, PORT281_FN2),
PINMUX_DATA(MCP_WAIT__MCP_FRB_MARK, PORT288_FN1),
@@ -1485,69 +1155,1791 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(RESETA_N_PU_OFF_MARK, MSEL4CR_MSEL4_1),
PINMUX_DATA(EDBGREQ_PD_MARK, MSEL4CR_MSEL1_0),
PINMUX_DATA(EDBGREQ_PU_MARK, MSEL4CR_MSEL1_1),
+};
+
+#define SH73A0_PIN(pin, cfgs) \
+ { \
+ .name = __stringify(PORT##pin), \
+ .enum_id = PORT##pin##_DATA, \
+ .configs = cfgs, \
+ }
+
+#define __I (SH_PFC_PIN_CFG_INPUT)
+#define __O (SH_PFC_PIN_CFG_OUTPUT)
+#define __IO (SH_PFC_PIN_CFG_INPUT | SH_PFC_PIN_CFG_OUTPUT)
+#define __PD (SH_PFC_PIN_CFG_PULL_DOWN)
+#define __PU (SH_PFC_PIN_CFG_PULL_UP)
+#define __PUD (SH_PFC_PIN_CFG_PULL_DOWN | SH_PFC_PIN_CFG_PULL_UP)
+
+#define SH73A0_PIN_I_PD(pin) SH73A0_PIN(pin, __I | __PD)
+#define SH73A0_PIN_I_PU(pin) SH73A0_PIN(pin, __I | __PU)
+#define SH73A0_PIN_I_PU_PD(pin) SH73A0_PIN(pin, __I | __PUD)
+#define SH73A0_PIN_IO(pin) SH73A0_PIN(pin, __IO)
+#define SH73A0_PIN_IO_PD(pin) SH73A0_PIN(pin, __IO | __PD)
+#define SH73A0_PIN_IO_PU(pin) SH73A0_PIN(pin, __IO | __PU)
+#define SH73A0_PIN_IO_PU_PD(pin) SH73A0_PIN(pin, __IO | __PUD)
+#define SH73A0_PIN_O(pin) SH73A0_PIN(pin, __O)
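+
+/*
+ * For reference, SH73A0_PIN_IO_PU_PD(12), for example, expands through
+ * __stringify() and token pasting to
+ *   { .name = "PORT12", .enum_id = PORT12_DATA,
+ *     .configs = SH_PFC_PIN_CFG_INPUT | SH_PFC_PIN_CFG_OUTPUT |
+ *                SH_PFC_PIN_CFG_PULL_DOWN | SH_PFC_PIN_CFG_PULL_UP }
+ */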
+
+static struct sh_pfc_pin pinmux_pins[] = {
+ /* Table 25-1 (I/O and Pull U/D) */
+ SH73A0_PIN_I_PD(0),
+ SH73A0_PIN_I_PU(1),
+ SH73A0_PIN_I_PU(2),
+ SH73A0_PIN_I_PU(3),
+ SH73A0_PIN_I_PU(4),
+ SH73A0_PIN_I_PU(5),
+ SH73A0_PIN_I_PU(6),
+ SH73A0_PIN_I_PU(7),
+ SH73A0_PIN_I_PU(8),
+ SH73A0_PIN_I_PD(9),
+ SH73A0_PIN_I_PD(10),
+ SH73A0_PIN_I_PU_PD(11),
+ SH73A0_PIN_IO_PU_PD(12),
+ SH73A0_PIN_IO_PU_PD(13),
+ SH73A0_PIN_IO_PU_PD(14),
+ SH73A0_PIN_IO_PU_PD(15),
+ SH73A0_PIN_IO_PD(16),
+ SH73A0_PIN_IO_PD(17),
+ SH73A0_PIN_IO_PU(18),
+ SH73A0_PIN_IO_PU(19),
+ SH73A0_PIN_O(20),
+ SH73A0_PIN_O(21),
+ SH73A0_PIN_O(22),
+ SH73A0_PIN_O(23),
+ SH73A0_PIN_O(24),
+ SH73A0_PIN_I_PD(25),
+ SH73A0_PIN_I_PD(26),
+ SH73A0_PIN_IO_PU(27),
+ SH73A0_PIN_IO_PU(28),
+ SH73A0_PIN_IO_PD(29),
+ SH73A0_PIN_IO_PD(30),
+ SH73A0_PIN_IO_PU(31),
+ SH73A0_PIN_IO_PD(32),
+ SH73A0_PIN_I_PU_PD(33),
+ SH73A0_PIN_IO_PD(34),
+ SH73A0_PIN_I_PU_PD(35),
+ SH73A0_PIN_IO_PD(36),
+ SH73A0_PIN_IO(37),
+ SH73A0_PIN_O(38),
+ SH73A0_PIN_I_PU(39),
+ SH73A0_PIN_I_PU_PD(40),
+ SH73A0_PIN_O(41),
+ SH73A0_PIN_IO_PD(42),
+ SH73A0_PIN_IO_PU_PD(43),
+ SH73A0_PIN_IO_PU_PD(44),
+ SH73A0_PIN_IO_PD(45),
+ SH73A0_PIN_IO_PD(46),
+ SH73A0_PIN_IO_PD(47),
+ SH73A0_PIN_I_PD(48),
+ SH73A0_PIN_IO_PU_PD(49),
+ SH73A0_PIN_IO_PD(50),
+ SH73A0_PIN_IO_PD(51),
+ SH73A0_PIN_O(52),
+ SH73A0_PIN_IO_PU_PD(53),
+ SH73A0_PIN_IO_PU_PD(54),
+ SH73A0_PIN_IO_PD(55),
+ SH73A0_PIN_I_PU_PD(56),
+ SH73A0_PIN_IO(57),
+ SH73A0_PIN_IO(58),
+ SH73A0_PIN_IO(59),
+ SH73A0_PIN_IO(60),
+ SH73A0_PIN_IO(61),
+ SH73A0_PIN_IO_PD(62),
+ SH73A0_PIN_IO_PD(63),
+ SH73A0_PIN_IO_PU_PD(64),
+ SH73A0_PIN_IO_PD(65),
+ SH73A0_PIN_IO_PU_PD(66),
+ SH73A0_PIN_IO_PU_PD(67),
+ SH73A0_PIN_IO_PU_PD(68),
+ SH73A0_PIN_IO_PU_PD(69),
+ SH73A0_PIN_IO_PU_PD(70),
+ SH73A0_PIN_IO_PU_PD(71),
+ SH73A0_PIN_IO_PU_PD(72),
+ SH73A0_PIN_I_PU_PD(73),
+ SH73A0_PIN_IO_PU(74),
+ SH73A0_PIN_IO_PU(75),
+ SH73A0_PIN_IO_PU(76),
+ SH73A0_PIN_IO_PU(77),
+ SH73A0_PIN_IO_PU(78),
+ SH73A0_PIN_IO_PU(79),
+ SH73A0_PIN_IO_PU(80),
+ SH73A0_PIN_IO_PU(81),
+ SH73A0_PIN_IO_PU(82),
+ SH73A0_PIN_IO_PU(83),
+ SH73A0_PIN_IO_PU(84),
+ SH73A0_PIN_IO_PU(85),
+ SH73A0_PIN_IO_PU(86),
+ SH73A0_PIN_IO_PU(87),
+ SH73A0_PIN_IO_PU(88),
+ SH73A0_PIN_IO_PU(89),
+ SH73A0_PIN_O(90),
+ SH73A0_PIN_IO_PU(91),
+ SH73A0_PIN_O(92),
+ SH73A0_PIN_IO_PU(93),
+ SH73A0_PIN_O(94),
+ SH73A0_PIN_I_PU_PD(95),
+ SH73A0_PIN_IO(96),
+ SH73A0_PIN_IO(97),
+ SH73A0_PIN_IO(98),
+ SH73A0_PIN_I_PU(99),
+ SH73A0_PIN_O(100),
+ SH73A0_PIN_O(101),
+ SH73A0_PIN_I_PU(102),
+ SH73A0_PIN_IO_PD(103),
+ SH73A0_PIN_I_PU_PD(104),
+ SH73A0_PIN_I_PD(105),
+ SH73A0_PIN_I_PD(106),
+ SH73A0_PIN_I_PU_PD(107),
+ SH73A0_PIN_I_PU_PD(108),
+ SH73A0_PIN_IO_PD(109),
+ SH73A0_PIN_IO_PD(110),
+ SH73A0_PIN_IO_PU_PD(111),
+ SH73A0_PIN_IO_PU_PD(112),
+ SH73A0_PIN_IO_PU_PD(113),
+ SH73A0_PIN_IO_PD(114),
+ SH73A0_PIN_IO_PU(115),
+ SH73A0_PIN_IO_PU(116),
+ SH73A0_PIN_IO_PU_PD(117),
+ SH73A0_PIN_IO_PU_PD(118),
+ SH73A0_PIN_IO_PD(128),
+ SH73A0_PIN_IO_PD(129),
+ SH73A0_PIN_IO_PU_PD(130),
+ SH73A0_PIN_IO_PD(131),
+ SH73A0_PIN_IO_PD(132),
+ SH73A0_PIN_IO_PD(133),
+ SH73A0_PIN_IO_PU_PD(134),
+ SH73A0_PIN_IO_PU_PD(135),
+ SH73A0_PIN_IO_PU_PD(136),
+ SH73A0_PIN_IO_PU_PD(137),
+ SH73A0_PIN_IO_PD(138),
+ SH73A0_PIN_IO_PD(139),
+ SH73A0_PIN_IO_PD(140),
+ SH73A0_PIN_IO_PD(141),
+ SH73A0_PIN_IO_PD(142),
+ SH73A0_PIN_IO_PD(143),
+ SH73A0_PIN_IO_PU_PD(144),
+ SH73A0_PIN_IO_PD(145),
+ SH73A0_PIN_IO_PU_PD(146),
+ SH73A0_PIN_IO_PU_PD(147),
+ SH73A0_PIN_IO_PU_PD(148),
+ SH73A0_PIN_IO_PU_PD(149),
+ SH73A0_PIN_I_PU_PD(150),
+ SH73A0_PIN_IO_PU_PD(151),
+ SH73A0_PIN_IO_PU_PD(152),
+ SH73A0_PIN_IO_PD(153),
+ SH73A0_PIN_IO_PD(154),
+ SH73A0_PIN_I_PU_PD(155),
+ SH73A0_PIN_IO_PU_PD(156),
+ SH73A0_PIN_I_PD(157),
+ SH73A0_PIN_IO_PD(158),
+ SH73A0_PIN_IO_PU_PD(159),
+ SH73A0_PIN_IO_PU_PD(160),
+ SH73A0_PIN_I_PU_PD(161),
+ SH73A0_PIN_I_PU_PD(162),
+ SH73A0_PIN_IO_PU_PD(163),
+ SH73A0_PIN_I_PU_PD(164),
+ SH73A0_PIN_IO_PD(192),
+ SH73A0_PIN_IO_PU_PD(193),
+ SH73A0_PIN_IO_PD(194),
+ SH73A0_PIN_IO_PU_PD(195),
+ SH73A0_PIN_IO_PD(196),
+ SH73A0_PIN_IO_PD(197),
+ SH73A0_PIN_IO_PD(198),
+ SH73A0_PIN_IO_PD(199),
+ SH73A0_PIN_IO_PU_PD(200),
+ SH73A0_PIN_IO_PU_PD(201),
+ SH73A0_PIN_IO_PU_PD(202),
+ SH73A0_PIN_IO_PU_PD(203),
+ SH73A0_PIN_IO_PU_PD(204),
+ SH73A0_PIN_IO_PU_PD(205),
+ SH73A0_PIN_IO_PU_PD(206),
+ SH73A0_PIN_IO_PD(207),
+ SH73A0_PIN_IO_PD(208),
+ SH73A0_PIN_IO_PD(209),
+ SH73A0_PIN_IO_PD(210),
+ SH73A0_PIN_IO_PD(211),
+ SH73A0_PIN_IO_PD(212),
+ SH73A0_PIN_IO_PD(213),
+ SH73A0_PIN_IO_PU_PD(214),
+ SH73A0_PIN_IO_PU_PD(215),
+ SH73A0_PIN_IO_PD(216),
+ SH73A0_PIN_IO_PD(217),
+ SH73A0_PIN_O(218),
+ SH73A0_PIN_IO_PD(219),
+ SH73A0_PIN_IO_PD(220),
+ SH73A0_PIN_IO_PU_PD(221),
+ SH73A0_PIN_IO_PU_PD(222),
+ SH73A0_PIN_I_PU_PD(223),
+ SH73A0_PIN_I_PU_PD(224),
+ SH73A0_PIN_IO_PU_PD(225),
+ SH73A0_PIN_O(226),
+ SH73A0_PIN_IO_PU_PD(227),
+ SH73A0_PIN_I_PU_PD(228),
+ SH73A0_PIN_I_PD(229),
+ SH73A0_PIN_IO(230),
+ SH73A0_PIN_IO_PU_PD(231),
+ SH73A0_PIN_IO_PU_PD(232),
+ SH73A0_PIN_I_PU_PD(233),
+ SH73A0_PIN_IO_PU_PD(234),
+ SH73A0_PIN_IO_PU_PD(235),
+ SH73A0_PIN_IO_PU_PD(236),
+ SH73A0_PIN_IO_PD(237),
+ SH73A0_PIN_IO_PU_PD(238),
+ SH73A0_PIN_IO_PU_PD(239),
+ SH73A0_PIN_IO_PU_PD(240),
+ SH73A0_PIN_O(241),
+ SH73A0_PIN_I_PD(242),
+ SH73A0_PIN_IO_PU_PD(243),
+ SH73A0_PIN_IO_PU_PD(244),
+ SH73A0_PIN_IO_PU_PD(245),
+ SH73A0_PIN_IO_PU_PD(246),
+ SH73A0_PIN_IO_PU_PD(247),
+ SH73A0_PIN_IO_PU_PD(248),
+ SH73A0_PIN_IO_PU_PD(249),
+ SH73A0_PIN_IO_PU_PD(250),
+ SH73A0_PIN_IO_PU_PD(251),
+ SH73A0_PIN_IO_PU_PD(252),
+ SH73A0_PIN_IO_PU_PD(253),
+ SH73A0_PIN_IO_PU_PD(254),
+ SH73A0_PIN_IO_PU_PD(255),
+ SH73A0_PIN_IO_PU_PD(256),
+ SH73A0_PIN_IO_PU_PD(257),
+ SH73A0_PIN_IO_PU_PD(258),
+ SH73A0_PIN_IO_PU_PD(259),
+ SH73A0_PIN_IO_PU_PD(260),
+ SH73A0_PIN_IO_PU_PD(261),
+ SH73A0_PIN_IO_PU_PD(262),
+ SH73A0_PIN_IO_PU_PD(263),
+ SH73A0_PIN_IO_PU_PD(264),
+ SH73A0_PIN_IO_PU_PD(265),
+ SH73A0_PIN_IO_PU_PD(266),
+ SH73A0_PIN_IO_PU_PD(267),
+ SH73A0_PIN_IO_PU_PD(268),
+ SH73A0_PIN_IO_PU_PD(269),
+ SH73A0_PIN_IO_PU_PD(270),
+ SH73A0_PIN_IO_PU_PD(271),
+ SH73A0_PIN_IO_PU_PD(272),
+ SH73A0_PIN_IO_PU_PD(273),
+ SH73A0_PIN_IO_PU_PD(274),
+ SH73A0_PIN_IO_PU_PD(275),
+ SH73A0_PIN_IO_PU_PD(276),
+ SH73A0_PIN_IO_PU_PD(277),
+ SH73A0_PIN_IO_PU_PD(278),
+ SH73A0_PIN_IO_PU_PD(279),
+ SH73A0_PIN_IO_PU_PD(280),
+ SH73A0_PIN_O(281),
+ SH73A0_PIN_O(282),
+ SH73A0_PIN_I_PU(288),
+ SH73A0_PIN_IO_PU_PD(289),
+ SH73A0_PIN_IO_PU_PD(290),
+ SH73A0_PIN_IO_PU_PD(291),
+ SH73A0_PIN_IO_PU_PD(292),
+ SH73A0_PIN_IO_PU_PD(293),
+ SH73A0_PIN_IO_PU_PD(294),
+ SH73A0_PIN_IO_PU_PD(295),
+ SH73A0_PIN_IO_PU_PD(296),
+ SH73A0_PIN_IO_PU_PD(297),
+ SH73A0_PIN_IO_PU_PD(298),
+ SH73A0_PIN_IO_PU_PD(299),
+ SH73A0_PIN_IO_PU_PD(300),
+ SH73A0_PIN_IO_PU_PD(301),
+ SH73A0_PIN_IO_PU_PD(302),
+ SH73A0_PIN_IO_PU_PD(303),
+ SH73A0_PIN_IO_PU_PD(304),
+ SH73A0_PIN_IO_PU_PD(305),
+ SH73A0_PIN_O(306),
+ SH73A0_PIN_O(307),
+ SH73A0_PIN_I_PU(308),
+ SH73A0_PIN_O(309),
+};
+
+static const struct pinmux_range pinmux_ranges[] = {
+ {.begin = 0, .end = 118,},
+ {.begin = 128, .end = 164,},
+ {.begin = 192, .end = 282,},
+ {.begin = 288, .end = 309,},
+};
+
+/* Pin numbers for pins without a corresponding GPIO port number are computed
+ * from the row and column numbers with a 1000 offset to avoid collisions with
+ * GPIO port numbers.
+ */
+#define PIN_NUMBER(row, col) (1000+((row)-1)*34+(col)-1)
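+
+/*
+ * For example, the keysc_out8_0 group below uses PIN_NUMBER(6, 26), which
+ * evaluates to 1000 + (6 - 1) * 34 + 26 - 1 = 1195 and thus stays clear of
+ * the 0-309 GPIO port number space.
+ */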
+
+/* - BSC -------------------------------------------------------------------- */
+static const unsigned int bsc_data_0_7_pins[] = {
+ /* D[0:7] */
+ 74, 75, 76, 77, 78, 79, 80, 81,
+};
+static const unsigned int bsc_data_0_7_mux[] = {
+ D0_NAF0_MARK, D1_NAF1_MARK, D2_NAF2_MARK, D3_NAF3_MARK,
+ D4_NAF4_MARK, D5_NAF5_MARK, D6_NAF6_MARK, D7_NAF7_MARK,
+};
+static const unsigned int bsc_data_8_15_pins[] = {
+ /* D[8:15] */
+ 82, 83, 84, 85, 86, 87, 88, 89,
+};
+static const unsigned int bsc_data_8_15_mux[] = {
+ D8_NAF8_MARK, D9_NAF9_MARK, D10_NAF10_MARK, D11_NAF11_MARK,
+ D12_NAF12_MARK, D13_NAF13_MARK, D14_NAF14_MARK, D15_NAF15_MARK,
+};
+static const unsigned int bsc_cs4_pins[] = {
+ /* CS */
+ 90,
+};
+static const unsigned int bsc_cs4_mux[] = {
+ CS4__MARK,
+};
+static const unsigned int bsc_cs5_a_pins[] = {
+ /* CS */
+ 91,
+};
+static const unsigned int bsc_cs5_a_mux[] = {
+ CS5A__MARK,
+};
+static const unsigned int bsc_cs5_b_pins[] = {
+ /* CS */
+ 92,
+};
+static const unsigned int bsc_cs5_b_mux[] = {
+ CS5B__MARK,
+};
+static const unsigned int bsc_cs6_a_pins[] = {
+ /* CS */
+ 94,
+};
+static const unsigned int bsc_cs6_a_mux[] = {
+ CS6A__MARK,
+};
+static const unsigned int bsc_cs6_b_pins[] = {
+ /* CS */
+ 93,
+};
+static const unsigned int bsc_cs6_b_mux[] = {
+ CS6B__MARK,
+};
+static const unsigned int bsc_rd_pins[] = {
+ /* RD */
+ 96,
+};
+static const unsigned int bsc_rd_mux[] = {
+ RD__FSC_MARK,
+};
+static const unsigned int bsc_rdwr_0_pins[] = {
+ /* RDWR */
+ 91,
+};
+static const unsigned int bsc_rdwr_0_mux[] = {
+ PORT91_RDWR_MARK,
+};
+static const unsigned int bsc_rdwr_1_pins[] = {
+ /* RDWR */
+ 97,
+};
+static const unsigned int bsc_rdwr_1_mux[] = {
+ RDWR_FWE_MARK,
+};
+static const unsigned int bsc_rdwr_2_pins[] = {
+ /* RDWR */
+ 149,
+};
+static const unsigned int bsc_rdwr_2_mux[] = {
+ PORT149_RDWR_MARK,
+};
+static const unsigned int bsc_we0_pins[] = {
+ /* WE0 */
+ 97,
+};
+static const unsigned int bsc_we0_mux[] = {
+ WE0__FWE_MARK,
+};
+static const unsigned int bsc_we1_pins[] = {
+ /* WE1 */
+ 98,
+};
+static const unsigned int bsc_we1_mux[] = {
+ WE1__MARK,
+};
+/* - FSIA ------------------------------------------------------------------- */
+static const unsigned int fsia_mclk_in_pins[] = {
+ /* CK */
+ 49,
+};
+static const unsigned int fsia_mclk_in_mux[] = {
+ FSIACK_MARK,
+};
+static const unsigned int fsia_mclk_out_pins[] = {
+ /* OMC */
+ 49,
+};
+static const unsigned int fsia_mclk_out_mux[] = {
+ FSIAOMC_MARK,
+};
+static const unsigned int fsia_sclk_in_pins[] = {
+ /* ILR, IBT */
+ 50, 51,
+};
+static const unsigned int fsia_sclk_in_mux[] = {
+ FSIAILR_MARK, FSIAIBT_MARK,
+};
+static const unsigned int fsia_sclk_out_pins[] = {
+ /* OLR, OBT */
+ 50, 51,
+};
+static const unsigned int fsia_sclk_out_mux[] = {
+ FSIAOLR_MARK, FSIAOBT_MARK,
+};
+static const unsigned int fsia_data_in_pins[] = {
+ /* ISLD */
+ 55,
+};
+static const unsigned int fsia_data_in_mux[] = {
+ FSIAISLD_MARK,
+};
+static const unsigned int fsia_data_out_pins[] = {
+ /* OSLD */
+ 52,
+};
+static const unsigned int fsia_data_out_mux[] = {
+ FSIAOSLD_MARK,
+};
+static const unsigned int fsia_spdif_pins[] = {
+ /* SPDIF */
+ 53,
+};
+static const unsigned int fsia_spdif_mux[] = {
+ FSIASPDIF_MARK,
+};
+/* - FSIB ------------------------------------------------------------------- */
+static const unsigned int fsib_mclk_in_pins[] = {
+ /* CK */
+ 54,
+};
+static const unsigned int fsib_mclk_in_mux[] = {
+ FSIBCK_MARK,
+};
+static const unsigned int fsib_mclk_out_pins[] = {
+ /* OMC */
+ 54,
+};
+static const unsigned int fsib_mclk_out_mux[] = {
+ FSIBOMC_MARK,
+};
+static const unsigned int fsib_sclk_in_pins[] = {
+ /* ILR, IBT */
+ 37, 36,
+};
+static const unsigned int fsib_sclk_in_mux[] = {
+ FSIBILR_MARK, FSIBIBT_MARK,
+};
+static const unsigned int fsib_sclk_out_pins[] = {
+ /* OLR, OBT */
+ 37, 36,
+};
+static const unsigned int fsib_sclk_out_mux[] = {
+ FSIBOLR_MARK, FSIBOBT_MARK,
+};
+static const unsigned int fsib_data_in_pins[] = {
+ /* ISLD */
+ 39,
+};
+static const unsigned int fsib_data_in_mux[] = {
+ FSIBISLD_MARK,
+};
+static const unsigned int fsib_data_out_pins[] = {
+ /* OSLD */
+ 38,
+};
+static const unsigned int fsib_data_out_mux[] = {
+ FSIBOSLD_MARK,
+};
+static const unsigned int fsib_spdif_pins[] = {
+ /* SPDIF */
+ 53,
+};
+static const unsigned int fsib_spdif_mux[] = {
+ FSIBSPDIF_MARK,
+};
+/* - FSIC ------------------------------------------------------------------- */
+static const unsigned int fsic_mclk_in_pins[] = {
+ /* CK */
+ 54,
+};
+static const unsigned int fsic_mclk_in_mux[] = {
+ FSICCK_MARK,
+};
+static const unsigned int fsic_mclk_out_pins[] = {
+ /* OMC */
+ 54,
+};
+static const unsigned int fsic_mclk_out_mux[] = {
+ FSICOMC_MARK,
+};
+static const unsigned int fsic_sclk_in_pins[] = {
+ /* ILR, IBT */
+ 46, 45,
+};
+static const unsigned int fsic_sclk_in_mux[] = {
+ FSICILR_MARK, FSICIBT_MARK,
+};
+static const unsigned int fsic_sclk_out_pins[] = {
+ /* OLR, OBT */
+ 46, 45,
+};
+static const unsigned int fsic_sclk_out_mux[] = {
+ FSICOLR_MARK, FSICOBT_MARK,
+};
+static const unsigned int fsic_data_in_pins[] = {
+ /* ISLD */
+ 48,
+};
+static const unsigned int fsic_data_in_mux[] = {
+ FSICISLD_MARK,
+};
+static const unsigned int fsic_data_out_pins[] = {
+ /* OSLD, OSLDT1, OSLDT2, OSLDT3 */
+ 47, 44, 42, 16,
+};
+static const unsigned int fsic_data_out_mux[] = {
+ FSICOSLD_MARK, FSICOSLDT1_MARK, FSICOSLDT2_MARK, FSICOSLDT3_MARK,
+};
+static const unsigned int fsic_spdif_0_pins[] = {
+ /* SPDIF */
+ 53,
+};
+static const unsigned int fsic_spdif_0_mux[] = {
+ PORT53_FSICSPDIF_MARK,
+};
+static const unsigned int fsic_spdif_1_pins[] = {
+ /* SPDIF */
+ 47,
+};
+static const unsigned int fsic_spdif_1_mux[] = {
+ PORT47_FSICSPDIF_MARK,
+};
+/* - FSID ------------------------------------------------------------------- */
+static const unsigned int fsid_sclk_in_pins[] = {
+ /* ILR, IBT */
+ 46, 45,
+};
+static const unsigned int fsid_sclk_in_mux[] = {
+ FSIDILR_MARK, FSIDIBT_MARK,
+};
+static const unsigned int fsid_sclk_out_pins[] = {
+ /* OLR, OBT */
+ 46, 45,
+};
+static const unsigned int fsid_sclk_out_mux[] = {
+ FSIDOLR_MARK, FSIDOBT_MARK,
+};
+static const unsigned int fsid_data_in_pins[] = {
+ /* ISLD */
+ 48,
+};
+static const unsigned int fsid_data_in_mux[] = {
+ FSIDISLD_MARK,
+};
+/* - I2C2 ------------------------------------------------------------------- */
+static const unsigned int i2c2_0_pins[] = {
+ /* SCL, SDA */
+ 237, 236,
+};
+static const unsigned int i2c2_0_mux[] = {
+ PORT237_I2C_SCL2_MARK, PORT236_I2C_SDA2_MARK,
+};
+static const unsigned int i2c2_1_pins[] = {
+ /* SCL, SDA */
+ 27, 28,
+};
+static const unsigned int i2c2_1_mux[] = {
+ PORT27_I2C_SCL2_MARK, PORT28_I2C_SDA2_MARK,
+};
+static const unsigned int i2c2_2_pins[] = {
+ /* SCL, SDA */
+ 115, 116,
+};
+static const unsigned int i2c2_2_mux[] = {
+ PORT115_I2C_SCL2_MARK, PORT116_I2C_SDA2_MARK,
+};
+/* - I2C3 ------------------------------------------------------------------- */
+static const unsigned int i2c3_0_pins[] = {
+ /* SCL, SDA */
+ 248, 249,
+};
+static const unsigned int i2c3_0_mux[] = {
+ PORT248_I2C_SCL3_MARK, PORT249_I2C_SDA3_MARK,
+};
+static const unsigned int i2c3_1_pins[] = {
+ /* SCL, SDA */
+ 27, 28,
+};
+static const unsigned int i2c3_1_mux[] = {
+ PORT27_I2C_SCL3_MARK, PORT28_I2C_SDA3_MARK,
+};
+static const unsigned int i2c3_2_pins[] = {
+ /* SCL, SDA */
+ 115, 116,
+};
+static const unsigned int i2c3_2_mux[] = {
+ PORT115_I2C_SCL3_MARK, PORT116_I2C_SDA3_MARK,
+};
+/* - IrDA ------------------------------------------------------------------- */
+static const unsigned int irda_0_pins[] = {
+ /* OUT, IN, FIRSEL */
+ 241, 242, 243,
+};
+static const unsigned int irda_0_mux[] = {
+ PORT241_IRDA_OUT_MARK, PORT242_IRDA_IN_MARK, PORT243_IRDA_FIRSEL_MARK,
+};
+static const unsigned int irda_1_pins[] = {
+ /* OUT, IN, FIRSEL */
+ 49, 53, 54,
+};
+static const unsigned int irda_1_mux[] = {
+ PORT49_IRDA_OUT_MARK, PORT53_IRDA_IN_MARK, PORT54_IRDA_FIRSEL_MARK,
+};
+/* - KEYSC ------------------------------------------------------------------ */
+static const unsigned int keysc_in5_pins[] = {
+ /* KEYIN[0:4] */
+ 66, 67, 68, 69, 70,
+};
+static const unsigned int keysc_in5_mux[] = {
+ KEYIN0_MARK, KEYIN1_MARK, KEYIN2_MARK, KEYIN3_MARK,
+ KEYIN4_MARK,
+};
+static const unsigned int keysc_in6_pins[] = {
+ /* KEYIN[0:5] */
+ 66, 67, 68, 69, 70, 71,
+};
+static const unsigned int keysc_in6_mux[] = {
+ KEYIN0_MARK, KEYIN1_MARK, KEYIN2_MARK, KEYIN3_MARK,
+ KEYIN4_MARK, KEYIN5_MARK,
+};
+static const unsigned int keysc_in7_pins[] = {
+ /* KEYIN[0:6] */
+ 66, 67, 68, 69, 70, 71, 72,
+};
+static const unsigned int keysc_in7_mux[] = {
+ KEYIN0_MARK, KEYIN1_MARK, KEYIN2_MARK, KEYIN3_MARK,
+ KEYIN4_MARK, KEYIN5_MARK, KEYIN6_MARK,
+};
+static const unsigned int keysc_in8_pins[] = {
+ /* KEYIN[0:7] */
+ 66, 67, 68, 69, 70, 71, 72, 73,
+};
+static const unsigned int keysc_in8_mux[] = {
+ KEYIN0_MARK, KEYIN1_MARK, KEYIN2_MARK, KEYIN3_MARK,
+ KEYIN4_MARK, KEYIN5_MARK, KEYIN6_MARK, KEYIN7_MARK,
+};
+static const unsigned int keysc_out04_pins[] = {
+ /* KEYOUT[0:4] */
+ 65, 64, 63, 62, 61,
+};
+static const unsigned int keysc_out04_mux[] = {
+ KEYOUT0_MARK, KEYOUT1_MARK, KEYOUT2_MARK, KEYOUT3_MARK, KEYOUT4_MARK,
+};
+static const unsigned int keysc_out5_pins[] = {
+ /* KEYOUT5 */
+ 60,
+};
+static const unsigned int keysc_out5_mux[] = {
+ KEYOUT5_MARK,
+};
+static const unsigned int keysc_out6_0_pins[] = {
+ /* KEYOUT6 */
+ 59,
+};
+static const unsigned int keysc_out6_0_mux[] = {
+ PORT59_KEYOUT6_MARK,
+};
+static const unsigned int keysc_out6_1_pins[] = {
+ /* KEYOUT6 */
+ 131,
+};
+static const unsigned int keysc_out6_1_mux[] = {
+ PORT131_KEYOUT6_MARK,
+};
+static const unsigned int keysc_out6_2_pins[] = {
+ /* KEYOUT6 */
+ 143,
+};
+static const unsigned int keysc_out6_2_mux[] = {
+ PORT143_KEYOUT6_MARK,
+};
+static const unsigned int keysc_out7_0_pins[] = {
+ /* KEYOUT7 */
+ 58,
+};
+static const unsigned int keysc_out7_0_mux[] = {
+ PORT58_KEYOUT7_MARK,
+};
+static const unsigned int keysc_out7_1_pins[] = {
+ /* KEYOUT7 */
+ 132,
+};
+static const unsigned int keysc_out7_1_mux[] = {
+ PORT132_KEYOUT7_MARK,
+};
+static const unsigned int keysc_out7_2_pins[] = {
+ /* KEYOUT7 */
+ 144,
+};
+static const unsigned int keysc_out7_2_mux[] = {
+ PORT144_KEYOUT7_MARK,
+};
+static const unsigned int keysc_out8_0_pins[] = {
+ /* KEYOUT8 */
+ PIN_NUMBER(6, 26),
+};
+static const unsigned int keysc_out8_0_mux[] = {
+ KEYOUT8_MARK,
+};
+static const unsigned int keysc_out8_1_pins[] = {
+ /* KEYOUT8 */
+ 136,
+};
+static const unsigned int keysc_out8_1_mux[] = {
+ PORT136_KEYOUT8_MARK,
+};
+static const unsigned int keysc_out8_2_pins[] = {
+ /* KEYOUT8 */
+ 138,
+};
+static const unsigned int keysc_out8_2_mux[] = {
+ PORT138_KEYOUT8_MARK,
+};
+static const unsigned int keysc_out9_0_pins[] = {
+ /* KEYOUT9 */
+ 137,
+};
+static const unsigned int keysc_out9_0_mux[] = {
+ PORT137_KEYOUT9_MARK,
+};
+static const unsigned int keysc_out9_1_pins[] = {
+ /* KEYOUT9 */
+ 139,
+};
+static const unsigned int keysc_out9_1_mux[] = {
+ PORT139_KEYOUT9_MARK,
+};
+static const unsigned int keysc_out9_2_pins[] = {
+ /* KEYOUT9 */
+ 149,
+};
+static const unsigned int keysc_out9_2_mux[] = {
+ PORT149_KEYOUT9_MARK,
+};
+static const unsigned int keysc_out10_0_pins[] = {
+ /* KEYOUT10 */
+ 132,
+};
+static const unsigned int keysc_out10_0_mux[] = {
+ PORT132_KEYOUT10_MARK,
+};
+static const unsigned int keysc_out10_1_pins[] = {
+ /* KEYOUT10 */
+ 142,
+};
+static const unsigned int keysc_out10_1_mux[] = {
+ PORT142_KEYOUT10_MARK,
+};
+static const unsigned int keysc_out11_0_pins[] = {
+ /* KEYOUT11 */
+ 131,
+};
+static const unsigned int keysc_out11_0_mux[] = {
+ PORT131_KEYOUT11_MARK,
+};
+static const unsigned int keysc_out11_1_pins[] = {
+ /* KEYOUT11 */
+ 143,
+};
+static const unsigned int keysc_out11_1_mux[] = {
+ PORT143_KEYOUT11_MARK,
+};
+/* - LCD -------------------------------------------------------------------- */
+static const unsigned int lcd_data8_pins[] = {
+ /* D[0:7] */
+ 192, 193, 194, 195, 196, 197, 198, 199,
+};
+static const unsigned int lcd_data8_mux[] = {
+ LCDD0_MARK, LCDD1_MARK, LCDD2_MARK, LCDD3_MARK,
+ LCDD4_MARK, LCDD5_MARK, LCDD6_MARK, LCDD7_MARK,
+};
+static const unsigned int lcd_data9_pins[] = {
+ /* D[0:8] */
+ 192, 193, 194, 195, 196, 197, 198, 199,
+ 200,
+};
+static const unsigned int lcd_data9_mux[] = {
+ LCDD0_MARK, LCDD1_MARK, LCDD2_MARK, LCDD3_MARK,
+ LCDD4_MARK, LCDD5_MARK, LCDD6_MARK, LCDD7_MARK,
+ LCDD8_MARK,
+};
+static const unsigned int lcd_data12_pins[] = {
+ /* D[0:11] */
+ 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203,
+};
+static const unsigned int lcd_data12_mux[] = {
+ LCDD0_MARK, LCDD1_MARK, LCDD2_MARK, LCDD3_MARK,
+ LCDD4_MARK, LCDD5_MARK, LCDD6_MARK, LCDD7_MARK,
+ LCDD8_MARK, LCDD9_MARK, LCDD10_MARK, LCDD11_MARK,
+};
+static const unsigned int lcd_data16_pins[] = {
+ /* D[0:15] */
+ 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207,
+};
+static const unsigned int lcd_data16_mux[] = {
+ LCDD0_MARK, LCDD1_MARK, LCDD2_MARK, LCDD3_MARK,
+ LCDD4_MARK, LCDD5_MARK, LCDD6_MARK, LCDD7_MARK,
+ LCDD8_MARK, LCDD9_MARK, LCDD10_MARK, LCDD11_MARK,
+ LCDD12_MARK, LCDD13_MARK, LCDD14_MARK, LCDD15_MARK,
+};
+static const unsigned int lcd_data18_pins[] = {
+ /* D[0:17] */
+ 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209,
+};
+static const unsigned int lcd_data18_mux[] = {
+ LCDD0_MARK, LCDD1_MARK, LCDD2_MARK, LCDD3_MARK,
+ LCDD4_MARK, LCDD5_MARK, LCDD6_MARK, LCDD7_MARK,
+ LCDD8_MARK, LCDD9_MARK, LCDD10_MARK, LCDD11_MARK,
+ LCDD12_MARK, LCDD13_MARK, LCDD14_MARK, LCDD15_MARK,
+ LCDD16_MARK, LCDD17_MARK,
+};
+static const unsigned int lcd_data24_pins[] = {
+ /* D[0:23] */
+ 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215
+};
+static const unsigned int lcd_data24_mux[] = {
+ LCDD0_MARK, LCDD1_MARK, LCDD2_MARK, LCDD3_MARK,
+ LCDD4_MARK, LCDD5_MARK, LCDD6_MARK, LCDD7_MARK,
+ LCDD8_MARK, LCDD9_MARK, LCDD10_MARK, LCDD11_MARK,
+ LCDD12_MARK, LCDD13_MARK, LCDD14_MARK, LCDD15_MARK,
+ LCDD16_MARK, LCDD17_MARK, LCDD18_MARK, LCDD19_MARK,
+ LCDD20_MARK, LCDD21_MARK, LCDD22_MARK, LCDD23_MARK,
+};
+static const unsigned int lcd_display_pins[] = {
+ /* DON */
+ 222,
+};
+static const unsigned int lcd_display_mux[] = {
+ LCDDON_MARK,
+};
+static const unsigned int lcd_lclk_pins[] = {
+ /* LCLK */
+ 221,
+};
+static const unsigned int lcd_lclk_mux[] = {
+ LCDLCLK_MARK,
+};
+static const unsigned int lcd_sync_pins[] = {
+ /* VSYN, HSYN, DCK, DISP */
+ 220, 218, 216, 219,
+};
+static const unsigned int lcd_sync_mux[] = {
+ LCDVSYN_MARK, LCDHSYN_MARK, LCDDCK_MARK, LCDDISP_MARK,
+};
+static const unsigned int lcd_sys_pins[] = {
+ /* CS, WR, RD, RS */
+ 218, 216, 217, 219,
+};
+static const unsigned int lcd_sys_mux[] = {
+ LCDCS__MARK, LCDWR__MARK, LCDRD__MARK, LCDRS_MARK,
+};
+/* - LCD2 ------------------------------------------------------------------- */
+static const unsigned int lcd2_data8_pins[] = {
+ /* D[0:7] */
+ 128, 129, 142, 143, 144, 145, 138, 139,
+};
+static const unsigned int lcd2_data8_mux[] = {
+ LCD2D0_MARK, LCD2D1_MARK, LCD2D2_MARK, LCD2D3_MARK,
+ LCD2D4_MARK, LCD2D5_MARK, LCD2D6_MARK, LCD2D7_MARK,
+};
+static const unsigned int lcd2_data9_pins[] = {
+ /* D[0:8] */
+ 128, 129, 142, 143, 144, 145, 138, 139,
+ 140,
+};
+static const unsigned int lcd2_data9_mux[] = {
+ LCD2D0_MARK, LCD2D1_MARK, LCD2D2_MARK, LCD2D3_MARK,
+ LCD2D4_MARK, LCD2D5_MARK, LCD2D6_MARK, LCD2D7_MARK,
+ LCD2D8_MARK,
+};
+static const unsigned int lcd2_data12_pins[] = {
+	/* D[0:11] */
+ 128, 129, 142, 143, 144, 145, 138, 139,
+ 140, 141, 130, 131,
+};
+static const unsigned int lcd2_data12_mux[] = {
+ LCD2D0_MARK, LCD2D1_MARK, LCD2D2_MARK, LCD2D3_MARK,
+ LCD2D4_MARK, LCD2D5_MARK, LCD2D6_MARK, LCD2D7_MARK,
+ LCD2D8_MARK, LCD2D9_MARK, LCD2D10_MARK, LCD2D11_MARK,
+};
+static const unsigned int lcd2_data16_pins[] = {
+ /* D[0:15] */
+ 128, 129, 142, 143, 144, 145, 138, 139,
+ 140, 141, 130, 131, 132, 133, 134, 135,
+};
+static const unsigned int lcd2_data16_mux[] = {
+ LCD2D0_MARK, LCD2D1_MARK, LCD2D2_MARK, LCD2D3_MARK,
+ LCD2D4_MARK, LCD2D5_MARK, LCD2D6_MARK, LCD2D7_MARK,
+ LCD2D8_MARK, LCD2D9_MARK, LCD2D10_MARK, LCD2D11_MARK,
+ LCD2D12_MARK, LCD2D13_MARK, LCD2D14_MARK, LCD2D15_MARK,
+};
+static const unsigned int lcd2_data18_pins[] = {
+ /* D[0:17] */
+ 128, 129, 142, 143, 144, 145, 138, 139,
+ 140, 141, 130, 131, 132, 133, 134, 135,
+ 136, 137,
+};
+static const unsigned int lcd2_data18_mux[] = {
+ LCD2D0_MARK, LCD2D1_MARK, LCD2D2_MARK, LCD2D3_MARK,
+ LCD2D4_MARK, LCD2D5_MARK, LCD2D6_MARK, LCD2D7_MARK,
+ LCD2D8_MARK, LCD2D9_MARK, LCD2D10_MARK, LCD2D11_MARK,
+ LCD2D12_MARK, LCD2D13_MARK, LCD2D14_MARK, LCD2D15_MARK,
+ LCD2D16_MARK, LCD2D17_MARK,
+};
+static const unsigned int lcd2_data24_pins[] = {
+ /* D[0:23] */
+ 128, 129, 142, 143, 144, 145, 138, 139,
+ 140, 141, 130, 131, 132, 133, 134, 135,
+ 136, 137, 146, 147, 234, 235, 238, 239
+};
+static const unsigned int lcd2_data24_mux[] = {
+ LCD2D0_MARK, LCD2D1_MARK, LCD2D2_MARK, LCD2D3_MARK,
+ LCD2D4_MARK, LCD2D5_MARK, LCD2D6_MARK, LCD2D7_MARK,
+ LCD2D8_MARK, LCD2D9_MARK, LCD2D10_MARK, LCD2D11_MARK,
+ LCD2D12_MARK, LCD2D13_MARK, LCD2D14_MARK, LCD2D15_MARK,
+ LCD2D16_MARK, LCD2D17_MARK, LCD2D18_MARK, LCD2D19_MARK,
+ LCD2D20_MARK, LCD2D21_MARK, LCD2D22_MARK, LCD2D23_MARK,
+};
+static const unsigned int lcd2_sync_0_pins[] = {
+ /* VSYN, HSYN, DCK, DISP */
+ 128, 129, 146, 145,
+};
+static const unsigned int lcd2_sync_0_mux[] = {
+ PORT128_LCD2VSYN_MARK, PORT129_LCD2HSYN_MARK,
+ LCD2DCK_MARK, PORT145_LCD2DISP_MARK,
+};
+static const unsigned int lcd2_sync_1_pins[] = {
+ /* VSYN, HSYN, DCK, DISP */
+ 222, 221, 219, 217,
+};
+static const unsigned int lcd2_sync_1_mux[] = {
+ PORT222_LCD2VSYN_MARK, PORT221_LCD2HSYN_MARK,
+ LCD2DCK_2_MARK, PORT217_LCD2DISP_MARK,
+};
+static const unsigned int lcd2_sys_0_pins[] = {
+ /* CS, WR, RD, RS */
+ 129, 146, 147, 145,
+};
+static const unsigned int lcd2_sys_0_mux[] = {
+ PORT129_LCD2CS__MARK, PORT146_LCD2WR__MARK,
+ LCD2RD__MARK, PORT145_LCD2RS_MARK,
+};
+static const unsigned int lcd2_sys_1_pins[] = {
+ /* CS, WR, RD, RS */
+ 221, 219, 147, 217,
+};
+static const unsigned int lcd2_sys_1_mux[] = {
+ PORT221_LCD2CS__MARK, PORT219_LCD2WR__MARK,
+ LCD2RD__MARK, PORT217_LCD2RS_MARK,
+};
+/* - MMCIF ------------------------------------------------------------------ */
+static const unsigned int mmc0_data1_0_pins[] = {
+ /* D[0] */
+ 271,
+};
+static const unsigned int mmc0_data1_0_mux[] = {
+ MMCD0_0_MARK,
+};
+static const unsigned int mmc0_data4_0_pins[] = {
+ /* D[0:3] */
+ 271, 272, 273, 274,
+};
+static const unsigned int mmc0_data4_0_mux[] = {
+ MMCD0_0_MARK, MMCD0_1_MARK, MMCD0_2_MARK, MMCD0_3_MARK,
+};
+static const unsigned int mmc0_data8_0_pins[] = {
+ /* D[0:7] */
+ 271, 272, 273, 274, 275, 276, 277, 278,
+};
+static const unsigned int mmc0_data8_0_mux[] = {
+ MMCD0_0_MARK, MMCD0_1_MARK, MMCD0_2_MARK, MMCD0_3_MARK,
+ MMCD0_4_MARK, MMCD0_5_MARK, MMCD0_6_MARK, MMCD0_7_MARK,
+};
+static const unsigned int mmc0_ctrl_0_pins[] = {
+ /* CMD, CLK */
+ 279, 270,
+};
+static const unsigned int mmc0_ctrl_0_mux[] = {
+ MMCCMD0_MARK, MMCCLK0_MARK,
+};
- /* Functions with pull-ups */
- PINMUX_DATA(KEYIN0_PU_MARK, PORT66_FN2, PORT66_IN_PU),
- PINMUX_DATA(KEYIN1_PU_MARK, PORT67_FN2, PORT67_IN_PU),
- PINMUX_DATA(KEYIN2_PU_MARK, PORT68_FN2, PORT68_IN_PU),
- PINMUX_DATA(KEYIN3_PU_MARK, PORT69_FN2, PORT69_IN_PU),
- PINMUX_DATA(KEYIN4_PU_MARK, PORT70_FN2, PORT70_IN_PU),
- PINMUX_DATA(KEYIN5_PU_MARK, PORT71_FN2, PORT71_IN_PU),
- PINMUX_DATA(KEYIN6_PU_MARK, PORT72_FN2, PORT72_IN_PU),
- PINMUX_DATA(KEYIN7_PU_MARK, PORT73_FN2, PORT73_IN_PU),
-
- PINMUX_DATA(SDHICD0_PU_MARK, PORT251_FN1, PORT251_IN_PU),
- PINMUX_DATA(SDHID0_0_PU_MARK, PORT252_FN1, PORT252_IN_PU),
- PINMUX_DATA(SDHID0_1_PU_MARK, PORT253_FN1, PORT253_IN_PU),
- PINMUX_DATA(SDHID0_2_PU_MARK, PORT254_FN1, PORT254_IN_PU),
- PINMUX_DATA(SDHID0_3_PU_MARK, PORT255_FN1, PORT255_IN_PU),
- PINMUX_DATA(SDHICMD0_PU_MARK, PORT256_FN1, PORT256_IN_PU),
- PINMUX_DATA(SDHIWP0_PU_MARK, PORT257_FN1, PORT256_IN_PU),
- PINMUX_DATA(SDHID1_0_PU_MARK, PORT259_FN1, PORT259_IN_PU),
- PINMUX_DATA(SDHID1_1_PU_MARK, PORT260_FN1, PORT260_IN_PU),
- PINMUX_DATA(SDHID1_2_PU_MARK, PORT261_FN1, PORT261_IN_PU),
- PINMUX_DATA(SDHID1_3_PU_MARK, PORT262_FN1, PORT262_IN_PU),
- PINMUX_DATA(SDHICMD1_PU_MARK, PORT263_FN1, PORT263_IN_PU),
- PINMUX_DATA(SDHID2_0_PU_MARK, PORT265_FN1, PORT265_IN_PU),
- PINMUX_DATA(SDHID2_1_PU_MARK, PORT266_FN1, PORT266_IN_PU),
- PINMUX_DATA(SDHID2_2_PU_MARK, PORT267_FN1, PORT267_IN_PU),
- PINMUX_DATA(SDHID2_3_PU_MARK, PORT268_FN1, PORT268_IN_PU),
- PINMUX_DATA(SDHICMD2_PU_MARK, PORT269_FN1, PORT269_IN_PU),
-
- PINMUX_DATA(MMCCMD0_PU_MARK, PORT279_FN1, PORT279_IN_PU,
- MSEL4CR_MSEL15_0),
- PINMUX_DATA(MMCCMD1_PU_MARK, PORT297_FN2, PORT297_IN_PU,
- MSEL4CR_MSEL15_1),
-
- PINMUX_DATA(MMCD0_0_PU_MARK,
- PORT271_FN1, PORT271_IN_PU, MSEL4CR_MSEL15_0),
- PINMUX_DATA(MMCD0_1_PU_MARK,
- PORT272_FN1, PORT272_IN_PU, MSEL4CR_MSEL15_0),
- PINMUX_DATA(MMCD0_2_PU_MARK,
- PORT273_FN1, PORT273_IN_PU, MSEL4CR_MSEL15_0),
- PINMUX_DATA(MMCD0_3_PU_MARK,
- PORT274_FN1, PORT274_IN_PU, MSEL4CR_MSEL15_0),
- PINMUX_DATA(MMCD0_4_PU_MARK,
- PORT275_FN1, PORT275_IN_PU, MSEL4CR_MSEL15_0),
- PINMUX_DATA(MMCD0_5_PU_MARK,
- PORT276_FN1, PORT276_IN_PU, MSEL4CR_MSEL15_0),
- PINMUX_DATA(MMCD0_6_PU_MARK,
- PORT277_FN1, PORT277_IN_PU, MSEL4CR_MSEL15_0),
- PINMUX_DATA(MMCD0_7_PU_MARK,
- PORT278_FN1, PORT278_IN_PU, MSEL4CR_MSEL15_0),
-
- PINMUX_DATA(FSIBISLD_PU_MARK, PORT39_FN1, PORT39_IN_PU),
- PINMUX_DATA(FSIACK_PU_MARK, PORT49_FN1, PORT49_IN_PU),
- PINMUX_DATA(FSIAILR_PU_MARK, PORT50_FN5, PORT50_IN_PU),
- PINMUX_DATA(FSIAIBT_PU_MARK, PORT51_FN5, PORT51_IN_PU),
- PINMUX_DATA(FSIAISLD_PU_MARK, PORT55_FN1, PORT55_IN_PU),
-};
-
-static struct pinmux_gpio pinmux_gpios[] = {
- GPIO_PORT_ALL(),
+static const unsigned int mmc0_data1_1_pins[] = {
+ /* D[0] */
+ 305,
+};
+static const unsigned int mmc0_data1_1_mux[] = {
+ MMCD1_0_MARK,
+};
+static const unsigned int mmc0_data4_1_pins[] = {
+ /* D[0:3] */
+ 305, 304, 303, 302,
+};
+static const unsigned int mmc0_data4_1_mux[] = {
+ MMCD1_0_MARK, MMCD1_1_MARK, MMCD1_2_MARK, MMCD1_3_MARK,
+};
+static const unsigned int mmc0_data8_1_pins[] = {
+ /* D[0:7] */
+ 305, 304, 303, 302, 301, 300, 299, 298,
+};
+static const unsigned int mmc0_data8_1_mux[] = {
+ MMCD1_0_MARK, MMCD1_1_MARK, MMCD1_2_MARK, MMCD1_3_MARK,
+ MMCD1_4_MARK, MMCD1_5_MARK, MMCD1_6_MARK, MMCD1_7_MARK,
+};
+static const unsigned int mmc0_ctrl_1_pins[] = {
+ /* CMD, CLK */
+ 297, 289,
+};
+static const unsigned int mmc0_ctrl_1_mux[] = {
+ MMCCMD1_MARK, MMCCLK1_MARK,
+};
+/* - SCIFA0 ----------------------------------------------------------------- */
+static const unsigned int scifa0_data_pins[] = {
+ /* RXD, TXD */
+ 43, 17,
+};
+static const unsigned int scifa0_data_mux[] = {
+ SCIFA0_RXD_MARK, SCIFA0_TXD_MARK,
+};
+static const unsigned int scifa0_clk_pins[] = {
+ /* SCK */
+ 16,
+};
+static const unsigned int scifa0_clk_mux[] = {
+ SCIFA0_SCK_MARK,
+};
+static const unsigned int scifa0_ctrl_pins[] = {
+ /* RTS, CTS */
+ 42, 44,
+};
+static const unsigned int scifa0_ctrl_mux[] = {
+ SCIFA0_RTS__MARK, SCIFA0_CTS__MARK,
+};
+/* - SCIFA1 ----------------------------------------------------------------- */
+static const unsigned int scifa1_data_pins[] = {
+ /* RXD, TXD */
+ 228, 225,
+};
+static const unsigned int scifa1_data_mux[] = {
+ SCIFA1_RXD_MARK, SCIFA1_TXD_MARK,
+};
+static const unsigned int scifa1_clk_pins[] = {
+ /* SCK */
+ 226,
+};
+static const unsigned int scifa1_clk_mux[] = {
+ SCIFA1_SCK_MARK,
+};
+static const unsigned int scifa1_ctrl_pins[] = {
+ /* RTS, CTS */
+ 227, 229,
+};
+static const unsigned int scifa1_ctrl_mux[] = {
+ SCIFA1_RTS__MARK, SCIFA1_CTS__MARK,
+};
+/* - SCIFA2 ----------------------------------------------------------------- */
+static const unsigned int scifa2_data_0_pins[] = {
+ /* RXD, TXD */
+ 155, 154,
+};
+static const unsigned int scifa2_data_0_mux[] = {
+ SCIFA2_RXD1_MARK, SCIFA2_TXD1_MARK,
+};
+static const unsigned int scifa2_clk_0_pins[] = {
+ /* SCK */
+ 158,
+};
+static const unsigned int scifa2_clk_0_mux[] = {
+ SCIFA2_SCK1_MARK,
+};
+static const unsigned int scifa2_ctrl_0_pins[] = {
+ /* RTS, CTS */
+ 156, 157,
+};
+static const unsigned int scifa2_ctrl_0_mux[] = {
+ SCIFA2_RTS1__MARK, SCIFA2_CTS1__MARK,
+};
+static const unsigned int scifa2_data_1_pins[] = {
+ /* RXD, TXD */
+ 233, 230,
+};
+static const unsigned int scifa2_data_1_mux[] = {
+ SCIFA2_RXD2_MARK, SCIFA2_TXD2_MARK,
+};
+static const unsigned int scifa2_clk_1_pins[] = {
+ /* SCK */
+ 232,
+};
+static const unsigned int scifa2_clk_1_mux[] = {
+ SCIFA2_SCK2_MARK,
+};
+static const unsigned int scifa2_ctrl_1_pins[] = {
+ /* RTS, CTS */
+ 234, 231,
+};
+static const unsigned int scifa2_ctrl_1_mux[] = {
+ SCIFA2_RTS2__MARK, SCIFA2_CTS2__MARK,
+};
+/* - SCIFA3 ----------------------------------------------------------------- */
+static const unsigned int scifa3_data_pins[] = {
+ /* RXD, TXD */
+ 108, 110,
+};
+static const unsigned int scifa3_data_mux[] = {
+ SCIFA3_RXD_MARK, SCIFA3_TXD_MARK,
+};
+static const unsigned int scifa3_ctrl_pins[] = {
+ /* RTS, CTS */
+ 109, 107,
+};
+static const unsigned int scifa3_ctrl_mux[] = {
+ SCIFA3_RTS__MARK, SCIFA3_CTS__MARK,
+};
+/* - SCIFA4 ----------------------------------------------------------------- */
+static const unsigned int scifa4_data_pins[] = {
+ /* RXD, TXD */
+ 33, 32,
+};
+static const unsigned int scifa4_data_mux[] = {
+ SCIFA4_RXD_MARK, SCIFA4_TXD_MARK,
+};
+static const unsigned int scifa4_ctrl_pins[] = {
+ /* RTS, CTS */
+ 34, 35,
+};
+static const unsigned int scifa4_ctrl_mux[] = {
+ SCIFA4_RTS__MARK, SCIFA4_CTS__MARK,
+};
+/* - SCIFA5 ----------------------------------------------------------------- */
+static const unsigned int scifa5_data_0_pins[] = {
+ /* RXD, TXD */
+ 246, 247,
+};
+static const unsigned int scifa5_data_0_mux[] = {
+ PORT246_SCIFA5_RXD_MARK, PORT247_SCIFA5_TXD_MARK,
+};
+static const unsigned int scifa5_clk_0_pins[] = {
+ /* SCK */
+ 248,
+};
+static const unsigned int scifa5_clk_0_mux[] = {
+ PORT248_SCIFA5_SCK_MARK,
+};
+static const unsigned int scifa5_ctrl_0_pins[] = {
+ /* RTS, CTS */
+ 245, 244,
+};
+static const unsigned int scifa5_ctrl_0_mux[] = {
+ PORT245_SCIFA5_RTS__MARK, PORT244_SCIFA5_CTS__MARK,
+};
+static const unsigned int scifa5_data_1_pins[] = {
+ /* RXD, TXD */
+ 195, 196,
+};
+static const unsigned int scifa5_data_1_mux[] = {
+ PORT195_SCIFA5_RXD_MARK, PORT196_SCIFA5_TXD_MARK,
+};
+static const unsigned int scifa5_clk_1_pins[] = {
+ /* SCK */
+ 197,
+};
+static const unsigned int scifa5_clk_1_mux[] = {
+ PORT197_SCIFA5_SCK_MARK,
+};
+static const unsigned int scifa5_ctrl_1_pins[] = {
+ /* RTS, CTS */
+ 194, 193,
+};
+static const unsigned int scifa5_ctrl_1_mux[] = {
+ PORT194_SCIFA5_RTS__MARK, PORT193_SCIFA5_CTS__MARK,
+};
+static const unsigned int scifa5_data_2_pins[] = {
+ /* RXD, TXD */
+ 162, 160,
+};
+static const unsigned int scifa5_data_2_mux[] = {
+ PORT162_SCIFA5_RXD_MARK, PORT160_SCIFA5_TXD_MARK,
+};
+static const unsigned int scifa5_clk_2_pins[] = {
+ /* SCK */
+ 159,
+};
+static const unsigned int scifa5_clk_2_mux[] = {
+ PORT159_SCIFA5_SCK_MARK,
+};
+static const unsigned int scifa5_ctrl_2_pins[] = {
+ /* RTS, CTS */
+ 163, 161,
+};
+static const unsigned int scifa5_ctrl_2_mux[] = {
+ PORT163_SCIFA5_RTS__MARK, PORT161_SCIFA5_CTS__MARK,
+};
+/* - SCIFA6 ----------------------------------------------------------------- */
+static const unsigned int scifa6_pins[] = {
+ /* TXD */
+ 240,
+};
+static const unsigned int scifa6_mux[] = {
+ SCIFA6_TXD_MARK,
+};
+/* - SCIFA7 ----------------------------------------------------------------- */
+static const unsigned int scifa7_data_pins[] = {
+ /* RXD, TXD */
+ 12, 18,
+};
+static const unsigned int scifa7_data_mux[] = {
+ SCIFA7_RXD_MARK, SCIFA7_TXD_MARK,
+};
+static const unsigned int scifa7_ctrl_pins[] = {
+ /* RTS, CTS */
+ 19, 13,
+};
+static const unsigned int scifa7_ctrl_mux[] = {
+ SCIFA7_RTS__MARK, SCIFA7_CTS__MARK,
+};
+/* - SCIFB ------------------------------------------------------------------ */
+static const unsigned int scifb_data_0_pins[] = {
+ /* RXD, TXD */
+ 162, 160,
+};
+static const unsigned int scifb_data_0_mux[] = {
+ PORT162_SCIFB_RXD_MARK, PORT160_SCIFB_TXD_MARK,
+};
+static const unsigned int scifb_clk_0_pins[] = {
+ /* SCK */
+ 159,
+};
+static const unsigned int scifb_clk_0_mux[] = {
+ PORT159_SCIFB_SCK_MARK,
+};
+static const unsigned int scifb_ctrl_0_pins[] = {
+ /* RTS, CTS */
+ 163, 161,
+};
+static const unsigned int scifb_ctrl_0_mux[] = {
+ PORT163_SCIFB_RTS__MARK, PORT161_SCIFB_CTS__MARK,
+};
+static const unsigned int scifb_data_1_pins[] = {
+ /* RXD, TXD */
+ 246, 247,
+};
+static const unsigned int scifb_data_1_mux[] = {
+ PORT246_SCIFB_RXD_MARK, PORT247_SCIFB_TXD_MARK,
+};
+static const unsigned int scifb_clk_1_pins[] = {
+ /* SCK */
+ 248,
+};
+static const unsigned int scifb_clk_1_mux[] = {
+ PORT248_SCIFB_SCK_MARK,
+};
+static const unsigned int scifb_ctrl_1_pins[] = {
+ /* RTS, CTS */
+ 245, 244,
+};
+static const unsigned int scifb_ctrl_1_mux[] = {
+ PORT245_SCIFB_RTS__MARK, PORT244_SCIFB_CTS__MARK,
+};
+/* - SDHI0 ------------------------------------------------------------------ */
+static const unsigned int sdhi0_data1_pins[] = {
+ /* D0 */
+ 252,
+};
+static const unsigned int sdhi0_data1_mux[] = {
+ SDHID0_0_MARK,
+};
+static const unsigned int sdhi0_data4_pins[] = {
+ /* D[0:3] */
+ 252, 253, 254, 255,
+};
+static const unsigned int sdhi0_data4_mux[] = {
+ SDHID0_0_MARK, SDHID0_1_MARK, SDHID0_2_MARK, SDHID0_3_MARK,
+};
+static const unsigned int sdhi0_ctrl_pins[] = {
+ /* CMD, CLK */
+ 256, 250,
+};
+static const unsigned int sdhi0_ctrl_mux[] = {
+ SDHICMD0_MARK, SDHICLK0_MARK,
+};
+static const unsigned int sdhi0_cd_pins[] = {
+ /* CD */
+ 251,
+};
+static const unsigned int sdhi0_cd_mux[] = {
+ SDHICD0_MARK,
+};
+static const unsigned int sdhi0_wp_pins[] = {
+ /* WP */
+ 257,
+};
+static const unsigned int sdhi0_wp_mux[] = {
+ SDHIWP0_MARK,
+};
+/* - SDHI1 ------------------------------------------------------------------ */
+static const unsigned int sdhi1_data1_pins[] = {
+ /* D0 */
+ 259,
+};
+static const unsigned int sdhi1_data1_mux[] = {
+ SDHID1_0_MARK,
+};
+static const unsigned int sdhi1_data4_pins[] = {
+ /* D[0:3] */
+ 259, 260, 261, 262,
+};
+static const unsigned int sdhi1_data4_mux[] = {
+ SDHID1_0_MARK, SDHID1_1_MARK, SDHID1_2_MARK, SDHID1_3_MARK,
+};
+static const unsigned int sdhi1_ctrl_pins[] = {
+ /* CMD, CLK */
+ 263, 258,
+};
+static const unsigned int sdhi1_ctrl_mux[] = {
+ SDHICMD1_MARK, SDHICLK1_MARK,
+};
+/* - SDHI2 ------------------------------------------------------------------ */
+static const unsigned int sdhi2_data1_pins[] = {
+ /* D0 */
+ 265,
+};
+static const unsigned int sdhi2_data1_mux[] = {
+ SDHID2_0_MARK,
+};
+static const unsigned int sdhi2_data4_pins[] = {
+ /* D[0:3] */
+ 265, 266, 267, 268,
+};
+static const unsigned int sdhi2_data4_mux[] = {
+ SDHID2_0_MARK, SDHID2_1_MARK, SDHID2_2_MARK, SDHID2_3_MARK,
+};
+static const unsigned int sdhi2_ctrl_pins[] = {
+ /* CMD, CLK */
+ 269, 264,
+};
+static const unsigned int sdhi2_ctrl_mux[] = {
+ SDHICMD2_MARK, SDHICLK2_MARK,
+};
+/* - USB -------------------------------------------------------------------- */
+static const unsigned int usb_vbus_pins[] = {
+ /* VBUS */
+ 0,
+};
+static const unsigned int usb_vbus_mux[] = {
+ VBUS_0_MARK,
+};
+
+static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(bsc_data_0_7),
+ SH_PFC_PIN_GROUP(bsc_data_8_15),
+ SH_PFC_PIN_GROUP(bsc_cs4),
+ SH_PFC_PIN_GROUP(bsc_cs5_a),
+ SH_PFC_PIN_GROUP(bsc_cs5_b),
+ SH_PFC_PIN_GROUP(bsc_cs6_a),
+ SH_PFC_PIN_GROUP(bsc_cs6_b),
+ SH_PFC_PIN_GROUP(bsc_rd),
+ SH_PFC_PIN_GROUP(bsc_rdwr_0),
+ SH_PFC_PIN_GROUP(bsc_rdwr_1),
+ SH_PFC_PIN_GROUP(bsc_rdwr_2),
+ SH_PFC_PIN_GROUP(bsc_we0),
+ SH_PFC_PIN_GROUP(bsc_we1),
+ SH_PFC_PIN_GROUP(fsia_mclk_in),
+ SH_PFC_PIN_GROUP(fsia_mclk_out),
+ SH_PFC_PIN_GROUP(fsia_sclk_in),
+ SH_PFC_PIN_GROUP(fsia_sclk_out),
+ SH_PFC_PIN_GROUP(fsia_data_in),
+ SH_PFC_PIN_GROUP(fsia_data_out),
+ SH_PFC_PIN_GROUP(fsia_spdif),
+ SH_PFC_PIN_GROUP(fsib_mclk_in),
+ SH_PFC_PIN_GROUP(fsib_mclk_out),
+ SH_PFC_PIN_GROUP(fsib_sclk_in),
+ SH_PFC_PIN_GROUP(fsib_sclk_out),
+ SH_PFC_PIN_GROUP(fsib_data_in),
+ SH_PFC_PIN_GROUP(fsib_data_out),
+ SH_PFC_PIN_GROUP(fsib_spdif),
+ SH_PFC_PIN_GROUP(fsic_mclk_in),
+ SH_PFC_PIN_GROUP(fsic_mclk_out),
+ SH_PFC_PIN_GROUP(fsic_sclk_in),
+ SH_PFC_PIN_GROUP(fsic_sclk_out),
+ SH_PFC_PIN_GROUP(fsic_data_in),
+ SH_PFC_PIN_GROUP(fsic_data_out),
+ SH_PFC_PIN_GROUP(fsic_spdif_0),
+ SH_PFC_PIN_GROUP(fsic_spdif_1),
+ SH_PFC_PIN_GROUP(fsid_sclk_in),
+ SH_PFC_PIN_GROUP(fsid_sclk_out),
+ SH_PFC_PIN_GROUP(fsid_data_in),
+ SH_PFC_PIN_GROUP(i2c2_0),
+ SH_PFC_PIN_GROUP(i2c2_1),
+ SH_PFC_PIN_GROUP(i2c2_2),
+ SH_PFC_PIN_GROUP(i2c3_0),
+ SH_PFC_PIN_GROUP(i2c3_1),
+ SH_PFC_PIN_GROUP(i2c3_2),
+ SH_PFC_PIN_GROUP(irda_0),
+ SH_PFC_PIN_GROUP(irda_1),
+ SH_PFC_PIN_GROUP(keysc_in5),
+ SH_PFC_PIN_GROUP(keysc_in6),
+ SH_PFC_PIN_GROUP(keysc_in7),
+ SH_PFC_PIN_GROUP(keysc_in8),
+ SH_PFC_PIN_GROUP(keysc_out04),
+ SH_PFC_PIN_GROUP(keysc_out5),
+ SH_PFC_PIN_GROUP(keysc_out6_0),
+ SH_PFC_PIN_GROUP(keysc_out6_1),
+ SH_PFC_PIN_GROUP(keysc_out6_2),
+ SH_PFC_PIN_GROUP(keysc_out7_0),
+ SH_PFC_PIN_GROUP(keysc_out7_1),
+ SH_PFC_PIN_GROUP(keysc_out7_2),
+ SH_PFC_PIN_GROUP(keysc_out8_0),
+ SH_PFC_PIN_GROUP(keysc_out8_1),
+ SH_PFC_PIN_GROUP(keysc_out8_2),
+ SH_PFC_PIN_GROUP(keysc_out9_0),
+ SH_PFC_PIN_GROUP(keysc_out9_1),
+ SH_PFC_PIN_GROUP(keysc_out9_2),
+ SH_PFC_PIN_GROUP(keysc_out10_0),
+ SH_PFC_PIN_GROUP(keysc_out10_1),
+ SH_PFC_PIN_GROUP(keysc_out11_0),
+ SH_PFC_PIN_GROUP(keysc_out11_1),
+ SH_PFC_PIN_GROUP(lcd_data8),
+ SH_PFC_PIN_GROUP(lcd_data9),
+ SH_PFC_PIN_GROUP(lcd_data12),
+ SH_PFC_PIN_GROUP(lcd_data16),
+ SH_PFC_PIN_GROUP(lcd_data18),
+ SH_PFC_PIN_GROUP(lcd_data24),
+ SH_PFC_PIN_GROUP(lcd_display),
+ SH_PFC_PIN_GROUP(lcd_lclk),
+ SH_PFC_PIN_GROUP(lcd_sync),
+ SH_PFC_PIN_GROUP(lcd_sys),
+ SH_PFC_PIN_GROUP(lcd2_data8),
+ SH_PFC_PIN_GROUP(lcd2_data9),
+ SH_PFC_PIN_GROUP(lcd2_data12),
+ SH_PFC_PIN_GROUP(lcd2_data16),
+ SH_PFC_PIN_GROUP(lcd2_data18),
+ SH_PFC_PIN_GROUP(lcd2_data24),
+ SH_PFC_PIN_GROUP(lcd2_sync_0),
+ SH_PFC_PIN_GROUP(lcd2_sync_1),
+ SH_PFC_PIN_GROUP(lcd2_sys_0),
+ SH_PFC_PIN_GROUP(lcd2_sys_1),
+ SH_PFC_PIN_GROUP(mmc0_data1_0),
+ SH_PFC_PIN_GROUP(mmc0_data4_0),
+ SH_PFC_PIN_GROUP(mmc0_data8_0),
+ SH_PFC_PIN_GROUP(mmc0_ctrl_0),
+ SH_PFC_PIN_GROUP(mmc0_data1_1),
+ SH_PFC_PIN_GROUP(mmc0_data4_1),
+ SH_PFC_PIN_GROUP(mmc0_data8_1),
+ SH_PFC_PIN_GROUP(mmc0_ctrl_1),
+ SH_PFC_PIN_GROUP(scifa0_data),
+ SH_PFC_PIN_GROUP(scifa0_clk),
+ SH_PFC_PIN_GROUP(scifa0_ctrl),
+ SH_PFC_PIN_GROUP(scifa1_data),
+ SH_PFC_PIN_GROUP(scifa1_clk),
+ SH_PFC_PIN_GROUP(scifa1_ctrl),
+ SH_PFC_PIN_GROUP(scifa2_data_0),
+ SH_PFC_PIN_GROUP(scifa2_clk_0),
+ SH_PFC_PIN_GROUP(scifa2_ctrl_0),
+ SH_PFC_PIN_GROUP(scifa2_data_1),
+ SH_PFC_PIN_GROUP(scifa2_clk_1),
+ SH_PFC_PIN_GROUP(scifa2_ctrl_1),
+ SH_PFC_PIN_GROUP(scifa3_data),
+ SH_PFC_PIN_GROUP(scifa3_ctrl),
+ SH_PFC_PIN_GROUP(scifa4_data),
+ SH_PFC_PIN_GROUP(scifa4_ctrl),
+ SH_PFC_PIN_GROUP(scifa5_data_0),
+ SH_PFC_PIN_GROUP(scifa5_clk_0),
+ SH_PFC_PIN_GROUP(scifa5_ctrl_0),
+ SH_PFC_PIN_GROUP(scifa5_data_1),
+ SH_PFC_PIN_GROUP(scifa5_clk_1),
+ SH_PFC_PIN_GROUP(scifa5_ctrl_1),
+ SH_PFC_PIN_GROUP(scifa5_data_2),
+ SH_PFC_PIN_GROUP(scifa5_clk_2),
+ SH_PFC_PIN_GROUP(scifa5_ctrl_2),
+ SH_PFC_PIN_GROUP(scifa6),
+ SH_PFC_PIN_GROUP(scifa7_data),
+ SH_PFC_PIN_GROUP(scifa7_ctrl),
+ SH_PFC_PIN_GROUP(scifb_data_0),
+ SH_PFC_PIN_GROUP(scifb_clk_0),
+ SH_PFC_PIN_GROUP(scifb_ctrl_0),
+ SH_PFC_PIN_GROUP(scifb_data_1),
+ SH_PFC_PIN_GROUP(scifb_clk_1),
+ SH_PFC_PIN_GROUP(scifb_ctrl_1),
+ SH_PFC_PIN_GROUP(sdhi0_data1),
+ SH_PFC_PIN_GROUP(sdhi0_data4),
+ SH_PFC_PIN_GROUP(sdhi0_ctrl),
+ SH_PFC_PIN_GROUP(sdhi0_cd),
+ SH_PFC_PIN_GROUP(sdhi0_wp),
+ SH_PFC_PIN_GROUP(sdhi1_data1),
+ SH_PFC_PIN_GROUP(sdhi1_data4),
+ SH_PFC_PIN_GROUP(sdhi1_ctrl),
+ SH_PFC_PIN_GROUP(sdhi2_data1),
+ SH_PFC_PIN_GROUP(sdhi2_data4),
+ SH_PFC_PIN_GROUP(sdhi2_ctrl),
+ SH_PFC_PIN_GROUP(usb_vbus),
+};
+
+static const char * const bsc_groups[] = {
+ "bsc_data_0_7",
+ "bsc_data_8_15",
+ "bsc_cs4",
+ "bsc_cs5_a",
+ "bsc_cs5_b",
+ "bsc_cs6_a",
+ "bsc_cs6_b",
+ "bsc_rd",
+ "bsc_rdwr_0",
+ "bsc_rdwr_1",
+ "bsc_rdwr_2",
+ "bsc_we0",
+ "bsc_we1",
+};
+
+static const char * const fsia_groups[] = {
+ "fsia_mclk_in",
+ "fsia_mclk_out",
+ "fsia_sclk_in",
+ "fsia_sclk_out",
+ "fsia_data_in",
+ "fsia_data_out",
+ "fsia_spdif",
+};
+
+static const char * const fsib_groups[] = {
+ "fsib_mclk_in",
+ "fsib_mclk_out",
+ "fsib_sclk_in",
+ "fsib_sclk_out",
+ "fsib_data_in",
+ "fsib_data_out",
+ "fsib_spdif",
+};
+
+static const char * const fsic_groups[] = {
+ "fsic_mclk_in",
+ "fsic_mclk_out",
+ "fsic_sclk_in",
+ "fsic_sclk_out",
+ "fsic_data_in",
+ "fsic_data_out",
+	"fsic_spdif_0",
+	"fsic_spdif_1",
+};
+
+static const char * const fsid_groups[] = {
+ "fsid_sclk_in",
+ "fsid_sclk_out",
+ "fsid_data_in",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2_0",
+ "i2c2_1",
+ "i2c2_2",
+};
+
+static const char * const i2c3_groups[] = {
+ "i2c3_0",
+ "i2c3_1",
+ "i2c3_2",
+};
+
+static const char * const irda_groups[] = {
+ "irda_0",
+ "irda_1",
+};
+static const char * const keysc_groups[] = {
+ "keysc_in5",
+ "keysc_in6",
+ "keysc_in7",
+ "keysc_in8",
+ "keysc_out04",
+ "keysc_out5",
+ "keysc_out6_0",
+ "keysc_out6_1",
+ "keysc_out6_2",
+ "keysc_out7_0",
+ "keysc_out7_1",
+ "keysc_out7_2",
+ "keysc_out8_0",
+ "keysc_out8_1",
+ "keysc_out8_2",
+ "keysc_out9_0",
+ "keysc_out9_1",
+ "keysc_out9_2",
+ "keysc_out10_0",
+ "keysc_out10_1",
+ "keysc_out11_0",
+ "keysc_out11_1",
+};
+
+static const char * const lcd_groups[] = {
+ "lcd_data8",
+ "lcd_data9",
+ "lcd_data12",
+ "lcd_data16",
+ "lcd_data18",
+ "lcd_data24",
+ "lcd_display",
+ "lcd_lclk",
+ "lcd_sync",
+ "lcd_sys",
+};
+
+static const char * const lcd2_groups[] = {
+ "lcd2_data8",
+ "lcd2_data9",
+ "lcd2_data12",
+ "lcd2_data16",
+ "lcd2_data18",
+ "lcd2_data24",
+ "lcd2_sync_0",
+ "lcd2_sync_1",
+ "lcd2_sys_0",
+ "lcd2_sys_1",
+};
+
+static const char * const mmc0_groups[] = {
+ "mmc0_data1_0",
+ "mmc0_data4_0",
+ "mmc0_data8_0",
+ "mmc0_ctrl_0",
+ "mmc0_data1_1",
+ "mmc0_data4_1",
+ "mmc0_data8_1",
+ "mmc0_ctrl_1",
+};
+
+static const char * const scifa0_groups[] = {
+ "scifa0_data",
+ "scifa0_clk",
+ "scifa0_ctrl",
+};
+
+static const char * const scifa1_groups[] = {
+ "scifa1_data",
+ "scifa1_clk",
+ "scifa1_ctrl",
+};
+
+static const char * const scifa2_groups[] = {
+ "scifa2_data_0",
+ "scifa2_clk_0",
+ "scifa2_ctrl_0",
+ "scifa2_data_1",
+ "scifa2_clk_1",
+ "scifa2_ctrl_1",
+};
+
+static const char * const scifa3_groups[] = {
+ "scifa3_data",
+ "scifa3_ctrl",
+};
+
+static const char * const scifa4_groups[] = {
+ "scifa4_data",
+ "scifa4_ctrl",
+};
+
+static const char * const scifa5_groups[] = {
+ "scifa5_data_0",
+ "scifa5_clk_0",
+ "scifa5_ctrl_0",
+ "scifa5_data_1",
+ "scifa5_clk_1",
+ "scifa5_ctrl_1",
+ "scifa5_data_2",
+ "scifa5_clk_2",
+ "scifa5_ctrl_2",
+};
+
+static const char * const scifa6_groups[] = {
+ "scifa6",
+};
+
+static const char * const scifa7_groups[] = {
+ "scifa7_data",
+ "scifa7_ctrl",
+};
+
+static const char * const scifb_groups[] = {
+ "scifb_data_0",
+ "scifb_clk_0",
+ "scifb_ctrl_0",
+ "scifb_data_1",
+ "scifb_clk_1",
+ "scifb_ctrl_1",
+};
+
+static const char * const sdhi0_groups[] = {
+ "sdhi0_data1",
+ "sdhi0_data4",
+ "sdhi0_ctrl",
+ "sdhi0_cd",
+ "sdhi0_wp",
+};
+
+static const char * const sdhi1_groups[] = {
+ "sdhi1_data1",
+ "sdhi1_data4",
+ "sdhi1_ctrl",
+};
+
+static const char * const sdhi2_groups[] = {
+ "sdhi2_data1",
+ "sdhi2_data4",
+ "sdhi2_ctrl",
+};
+
+static const char * const usb_groups[] = {
+ "usb_vbus",
+};
+
+static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(bsc),
+ SH_PFC_FUNCTION(fsia),
+ SH_PFC_FUNCTION(fsib),
+ SH_PFC_FUNCTION(fsic),
+ SH_PFC_FUNCTION(fsid),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c3),
+ SH_PFC_FUNCTION(irda),
+ SH_PFC_FUNCTION(keysc),
+ SH_PFC_FUNCTION(lcd),
+ SH_PFC_FUNCTION(lcd2),
+ SH_PFC_FUNCTION(mmc0),
+ SH_PFC_FUNCTION(scifa0),
+ SH_PFC_FUNCTION(scifa1),
+ SH_PFC_FUNCTION(scifa2),
+ SH_PFC_FUNCTION(scifa3),
+ SH_PFC_FUNCTION(scifa4),
+ SH_PFC_FUNCTION(scifa5),
+ SH_PFC_FUNCTION(scifa6),
+ SH_PFC_FUNCTION(scifa7),
+ SH_PFC_FUNCTION(scifb),
+ SH_PFC_FUNCTION(sdhi0),
+ SH_PFC_FUNCTION(sdhi1),
+ SH_PFC_FUNCTION(sdhi2),
+ SH_PFC_FUNCTION(usb),
+};
+
+#define PINMUX_FN_BASE GPIO_FN_GPI0
+
+static const struct pinmux_func pinmux_func_gpios[] = {
/* Table 25-1 (Functions 0-7) */
- GPIO_FN(VBUS_0),
GPIO_FN(GPI0),
GPIO_FN(GPI1),
GPIO_FN(GPI2),
@@ -1556,19 +2948,12 @@ static struct pinmux_gpio pinmux_gpios[] = {
GPIO_FN(GPI5),
GPIO_FN(GPI6),
GPIO_FN(GPI7),
- GPIO_FN(SCIFA7_RXD),
- GPIO_FN(SCIFA7_CTS_),
GPIO_FN(GPO7), \
GPIO_FN(MFG0_OUT2),
GPIO_FN(GPO6), \
GPIO_FN(MFG1_OUT2),
GPIO_FN(GPO5), \
- GPIO_FN(SCIFA0_SCK), \
- GPIO_FN(FSICOSLDT3), \
GPIO_FN(PORT16_VIO_CKOR),
- GPIO_FN(SCIFA0_TXD),
- GPIO_FN(SCIFA7_TXD),
- GPIO_FN(SCIFA7_RTS_), \
GPIO_FN(PORT19_VIO_CKO2),
GPIO_FN(GPO0),
GPIO_FN(GPO1),
@@ -1581,13 +2966,9 @@ static struct pinmux_gpio pinmux_gpios[] = {
GPIO_FN(VINT),
GPIO_FN(TCKON),
GPIO_FN(XDVFS1), \
- GPIO_FN(PORT27_I2C_SCL2), \
- GPIO_FN(PORT27_I2C_SCL3), \
GPIO_FN(MFG0_OUT1), \
GPIO_FN(PORT27_IROUT),
GPIO_FN(XDVFS2), \
- GPIO_FN(PORT28_I2C_SDA2), \
- GPIO_FN(PORT28_I2C_SDA3), \
GPIO_FN(PORT28_TPU1TO1),
GPIO_FN(SIM_RST), \
GPIO_FN(PORT29_TPU1TO1),
@@ -1595,140 +2976,53 @@ static struct pinmux_gpio pinmux_gpios[] = {
GPIO_FN(PORT30_VIO_CKOR),
GPIO_FN(SIM_D), \
GPIO_FN(PORT31_IROUT),
- GPIO_FN(SCIFA4_TXD),
- GPIO_FN(SCIFA4_RXD), \
GPIO_FN(XWUP),
- GPIO_FN(SCIFA4_RTS_),
- GPIO_FN(SCIFA4_CTS_),
- GPIO_FN(FSIBOBT), \
- GPIO_FN(FSIBIBT),
- GPIO_FN(FSIBOLR), \
- GPIO_FN(FSIBILR),
- GPIO_FN(FSIBOSLD),
- GPIO_FN(FSIBISLD),
GPIO_FN(VACK),
GPIO_FN(XTAL1L),
- GPIO_FN(SCIFA0_RTS_), \
- GPIO_FN(FSICOSLDT2),
- GPIO_FN(SCIFA0_RXD),
- GPIO_FN(SCIFA0_CTS_), \
- GPIO_FN(FSICOSLDT1),
- GPIO_FN(FSICOBT), \
- GPIO_FN(FSICIBT), \
- GPIO_FN(FSIDOBT), \
- GPIO_FN(FSIDIBT),
- GPIO_FN(FSICOLR), \
- GPIO_FN(FSICILR), \
- GPIO_FN(FSIDOLR), \
- GPIO_FN(FSIDILR),
- GPIO_FN(FSICOSLD), \
- GPIO_FN(PORT47_FSICSPDIF),
- GPIO_FN(FSICISLD), \
- GPIO_FN(FSIDISLD),
- GPIO_FN(FSIACK), \
- GPIO_FN(PORT49_IRDA_OUT), \
GPIO_FN(PORT49_IROUT), \
- GPIO_FN(FSIAOMC),
- GPIO_FN(FSIAOLR), \
GPIO_FN(BBIF2_TSYNC2), \
GPIO_FN(TPU2TO2), \
- GPIO_FN(FSIAILR),
- GPIO_FN(FSIAOBT), \
GPIO_FN(BBIF2_TSCK2), \
GPIO_FN(TPU2TO3), \
- GPIO_FN(FSIAIBT),
- GPIO_FN(FSIAOSLD), \
GPIO_FN(BBIF2_TXD2),
- GPIO_FN(FSIASPDIF), \
- GPIO_FN(PORT53_IRDA_IN), \
GPIO_FN(TPU3TO3), \
- GPIO_FN(FSIBSPDIF), \
- GPIO_FN(PORT53_FSICSPDIF),
- GPIO_FN(FSIBCK), \
- GPIO_FN(PORT54_IRDA_FIRSEL), \
GPIO_FN(TPU3TO2), \
- GPIO_FN(FSIBOMC), \
- GPIO_FN(FSICCK), \
- GPIO_FN(FSICOMC),
- GPIO_FN(FSIAISLD), \
GPIO_FN(TPU0TO0),
GPIO_FN(A0), \
GPIO_FN(BS_),
GPIO_FN(A12), \
- GPIO_FN(PORT58_KEYOUT7), \
GPIO_FN(TPU4TO2),
GPIO_FN(A13), \
- GPIO_FN(PORT59_KEYOUT6), \
GPIO_FN(TPU0TO1),
GPIO_FN(A14), \
- GPIO_FN(KEYOUT5),
GPIO_FN(A15), \
- GPIO_FN(KEYOUT4),
GPIO_FN(A16), \
- GPIO_FN(KEYOUT3), \
GPIO_FN(MSIOF0_SS1),
GPIO_FN(A17), \
- GPIO_FN(KEYOUT2), \
GPIO_FN(MSIOF0_TSYNC),
GPIO_FN(A18), \
- GPIO_FN(KEYOUT1), \
GPIO_FN(MSIOF0_TSCK),
GPIO_FN(A19), \
- GPIO_FN(KEYOUT0), \
GPIO_FN(MSIOF0_TXD),
GPIO_FN(A20), \
- GPIO_FN(KEYIN0), \
GPIO_FN(MSIOF0_RSCK),
GPIO_FN(A21), \
- GPIO_FN(KEYIN1), \
GPIO_FN(MSIOF0_RSYNC),
GPIO_FN(A22), \
- GPIO_FN(KEYIN2), \
GPIO_FN(MSIOF0_MCK0),
GPIO_FN(A23), \
- GPIO_FN(KEYIN3), \
GPIO_FN(MSIOF0_MCK1),
GPIO_FN(A24), \
- GPIO_FN(KEYIN4), \
GPIO_FN(MSIOF0_RXD),
GPIO_FN(A25), \
- GPIO_FN(KEYIN5), \
GPIO_FN(MSIOF0_SS2),
GPIO_FN(A26), \
- GPIO_FN(KEYIN6),
- GPIO_FN(KEYIN7),
- GPIO_FN(D0_NAF0),
- GPIO_FN(D1_NAF1),
- GPIO_FN(D2_NAF2),
- GPIO_FN(D3_NAF3),
- GPIO_FN(D4_NAF4),
- GPIO_FN(D5_NAF5),
- GPIO_FN(D6_NAF6),
- GPIO_FN(D7_NAF7),
- GPIO_FN(D8_NAF8),
- GPIO_FN(D9_NAF9),
- GPIO_FN(D10_NAF10),
- GPIO_FN(D11_NAF11),
- GPIO_FN(D12_NAF12),
- GPIO_FN(D13_NAF13),
- GPIO_FN(D14_NAF14),
- GPIO_FN(D15_NAF15),
- GPIO_FN(CS4_),
- GPIO_FN(CS5A_), \
- GPIO_FN(PORT91_RDWR),
- GPIO_FN(CS5B_), \
GPIO_FN(FCE1_),
- GPIO_FN(CS6B_), \
GPIO_FN(DACK0),
GPIO_FN(FCE0_), \
- GPIO_FN(CS6A_),
GPIO_FN(WAIT_), \
GPIO_FN(DREQ0),
- GPIO_FN(RD__FSC),
- GPIO_FN(WE0__FWE), \
- GPIO_FN(RDWR_FWE),
- GPIO_FN(WE1_),
GPIO_FN(FRB),
GPIO_FN(CKO),
GPIO_FN(NBRSTOUT_),
@@ -1737,14 +3031,10 @@ static struct pinmux_gpio pinmux_gpios[] = {
GPIO_FN(BBIF2_RXD),
GPIO_FN(BBIF2_SYNC),
GPIO_FN(BBIF2_SCK),
- GPIO_FN(SCIFA3_CTS_), \
GPIO_FN(MFG3_IN2),
- GPIO_FN(SCIFA3_RXD), \
GPIO_FN(MFG3_IN1),
GPIO_FN(BBIF1_SS2), \
- GPIO_FN(SCIFA3_RTS_), \
GPIO_FN(MFG3_OUT1),
- GPIO_FN(SCIFA3_TXD),
GPIO_FN(HSI_RX_DATA), \
GPIO_FN(BBIF1_RXD),
GPIO_FN(HSI_TX_WAKE), \
@@ -1755,103 +3045,57 @@ static struct pinmux_gpio pinmux_gpios[] = {
GPIO_FN(BBIF1_TXD),
GPIO_FN(HSI_RX_READY), \
GPIO_FN(BBIF1_RSCK), \
- GPIO_FN(PORT115_I2C_SCL2), \
- GPIO_FN(PORT115_I2C_SCL3),
GPIO_FN(HSI_RX_WAKE), \
GPIO_FN(BBIF1_RSYNC), \
- GPIO_FN(PORT116_I2C_SDA2), \
- GPIO_FN(PORT116_I2C_SDA3),
GPIO_FN(HSI_RX_FLAG), \
GPIO_FN(BBIF1_SS1), \
GPIO_FN(BBIF1_FLOW),
GPIO_FN(HSI_TX_FLAG),
GPIO_FN(VIO_VD), \
- GPIO_FN(PORT128_LCD2VSYN), \
GPIO_FN(VIO2_VD), \
- GPIO_FN(LCD2D0),
GPIO_FN(VIO_HD), \
- GPIO_FN(PORT129_LCD2HSYN), \
- GPIO_FN(PORT129_LCD2CS_), \
GPIO_FN(VIO2_HD), \
- GPIO_FN(LCD2D1),
GPIO_FN(VIO_D0), \
GPIO_FN(PORT130_MSIOF2_RXD), \
- GPIO_FN(LCD2D10),
GPIO_FN(VIO_D1), \
- GPIO_FN(PORT131_KEYOUT6), \
GPIO_FN(PORT131_MSIOF2_SS1), \
- GPIO_FN(PORT131_KEYOUT11), \
- GPIO_FN(LCD2D11),
GPIO_FN(VIO_D2), \
- GPIO_FN(PORT132_KEYOUT7), \
GPIO_FN(PORT132_MSIOF2_SS2), \
- GPIO_FN(PORT132_KEYOUT10), \
- GPIO_FN(LCD2D12),
GPIO_FN(VIO_D3), \
GPIO_FN(MSIOF2_TSYNC), \
- GPIO_FN(LCD2D13),
GPIO_FN(VIO_D4), \
GPIO_FN(MSIOF2_TXD), \
- GPIO_FN(LCD2D14),
GPIO_FN(VIO_D5), \
GPIO_FN(MSIOF2_TSCK), \
- GPIO_FN(LCD2D15),
GPIO_FN(VIO_D6), \
- GPIO_FN(PORT136_KEYOUT8), \
- GPIO_FN(LCD2D16),
GPIO_FN(VIO_D7), \
- GPIO_FN(PORT137_KEYOUT9), \
- GPIO_FN(LCD2D17),
GPIO_FN(VIO_D8), \
- GPIO_FN(PORT138_KEYOUT8), \
GPIO_FN(VIO2_D0), \
- GPIO_FN(LCD2D6),
GPIO_FN(VIO_D9), \
- GPIO_FN(PORT139_KEYOUT9), \
GPIO_FN(VIO2_D1), \
- GPIO_FN(LCD2D7),
GPIO_FN(VIO_D10), \
GPIO_FN(TPU0TO2), \
GPIO_FN(VIO2_D2), \
- GPIO_FN(LCD2D8),
GPIO_FN(VIO_D11), \
GPIO_FN(TPU0TO3), \
GPIO_FN(VIO2_D3), \
- GPIO_FN(LCD2D9),
GPIO_FN(VIO_D12), \
- GPIO_FN(PORT142_KEYOUT10), \
GPIO_FN(VIO2_D4), \
- GPIO_FN(LCD2D2),
GPIO_FN(VIO_D13), \
- GPIO_FN(PORT143_KEYOUT11), \
- GPIO_FN(PORT143_KEYOUT6), \
GPIO_FN(VIO2_D5), \
- GPIO_FN(LCD2D3),
GPIO_FN(VIO_D14), \
- GPIO_FN(PORT144_KEYOUT7), \
GPIO_FN(VIO2_D6), \
- GPIO_FN(LCD2D4),
GPIO_FN(VIO_D15), \
GPIO_FN(TPU1TO3), \
- GPIO_FN(PORT145_LCD2DISP), \
- GPIO_FN(PORT145_LCD2RS), \
GPIO_FN(VIO2_D7), \
- GPIO_FN(LCD2D5),
GPIO_FN(VIO_CLK), \
- GPIO_FN(LCD2DCK), \
- GPIO_FN(PORT146_LCD2WR_), \
GPIO_FN(VIO2_CLK), \
- GPIO_FN(LCD2D18),
GPIO_FN(VIO_FIELD), \
- GPIO_FN(LCD2RD_), \
GPIO_FN(VIO2_FIELD), \
- GPIO_FN(LCD2D19),
GPIO_FN(VIO_CKO),
GPIO_FN(A27), \
- GPIO_FN(PORT149_RDWR), \
GPIO_FN(MFG0_IN1), \
- GPIO_FN(PORT149_KEYOUT9),
GPIO_FN(MFG0_IN2),
GPIO_FN(TS_SPSYNC3), \
GPIO_FN(MSIOF2_RSCK),
@@ -1860,201 +3104,105 @@ static struct pinmux_gpio pinmux_gpios[] = {
GPIO_FN(TPU1TO2), \
GPIO_FN(TS_SDEN3), \
GPIO_FN(PORT153_MSIOF2_SS1),
- GPIO_FN(SCIFA2_TXD1), \
GPIO_FN(MSIOF2_MCK0),
- GPIO_FN(SCIFA2_RXD1), \
GPIO_FN(MSIOF2_MCK1),
- GPIO_FN(SCIFA2_RTS1_), \
GPIO_FN(PORT156_MSIOF2_SS2),
- GPIO_FN(SCIFA2_CTS1_), \
GPIO_FN(PORT157_MSIOF2_RXD),
GPIO_FN(DINT_), \
- GPIO_FN(SCIFA2_SCK1), \
GPIO_FN(TS_SCK3),
- GPIO_FN(PORT159_SCIFB_SCK), \
- GPIO_FN(PORT159_SCIFA5_SCK), \
GPIO_FN(NMI),
- GPIO_FN(PORT160_SCIFB_TXD), \
- GPIO_FN(PORT160_SCIFA5_TXD),
- GPIO_FN(PORT161_SCIFB_CTS_), \
- GPIO_FN(PORT161_SCIFA5_CTS_),
- GPIO_FN(PORT162_SCIFB_RXD), \
- GPIO_FN(PORT162_SCIFA5_RXD),
- GPIO_FN(PORT163_SCIFB_RTS_), \
- GPIO_FN(PORT163_SCIFA5_RTS_), \
GPIO_FN(TPU3TO0),
- GPIO_FN(LCDD0),
- GPIO_FN(LCDD1), \
- GPIO_FN(PORT193_SCIFA5_CTS_), \
GPIO_FN(BBIF2_TSYNC1),
- GPIO_FN(LCDD2), \
- GPIO_FN(PORT194_SCIFA5_RTS_), \
GPIO_FN(BBIF2_TSCK1),
- GPIO_FN(LCDD3), \
- GPIO_FN(PORT195_SCIFA5_RXD), \
GPIO_FN(BBIF2_TXD1),
- GPIO_FN(LCDD4), \
- GPIO_FN(PORT196_SCIFA5_TXD),
- GPIO_FN(LCDD5), \
- GPIO_FN(PORT197_SCIFA5_SCK), \
GPIO_FN(MFG2_OUT2), \
GPIO_FN(TPU2TO1),
- GPIO_FN(LCDD6),
- GPIO_FN(LCDD7), \
GPIO_FN(TPU4TO1), \
GPIO_FN(MFG4_OUT2),
- GPIO_FN(LCDD8), \
GPIO_FN(D16),
- GPIO_FN(LCDD9), \
GPIO_FN(D17),
- GPIO_FN(LCDD10), \
GPIO_FN(D18),
- GPIO_FN(LCDD11), \
GPIO_FN(D19),
- GPIO_FN(LCDD12), \
GPIO_FN(D20),
- GPIO_FN(LCDD13), \
GPIO_FN(D21),
- GPIO_FN(LCDD14), \
GPIO_FN(D22),
- GPIO_FN(LCDD15), \
GPIO_FN(PORT207_MSIOF0L_SS1), \
GPIO_FN(D23),
- GPIO_FN(LCDD16), \
GPIO_FN(PORT208_MSIOF0L_SS2), \
GPIO_FN(D24),
- GPIO_FN(LCDD17), \
GPIO_FN(D25),
- GPIO_FN(LCDD18), \
GPIO_FN(DREQ2), \
GPIO_FN(PORT210_MSIOF0L_SS1), \
GPIO_FN(D26),
- GPIO_FN(LCDD19), \
GPIO_FN(PORT211_MSIOF0L_SS2), \
GPIO_FN(D27),
- GPIO_FN(LCDD20), \
GPIO_FN(TS_SPSYNC1), \
GPIO_FN(MSIOF0L_MCK0), \
GPIO_FN(D28),
- GPIO_FN(LCDD21), \
GPIO_FN(TS_SDAT1), \
GPIO_FN(MSIOF0L_MCK1), \
GPIO_FN(D29),
- GPIO_FN(LCDD22), \
GPIO_FN(TS_SDEN1), \
GPIO_FN(MSIOF0L_RSCK), \
GPIO_FN(D30),
- GPIO_FN(LCDD23), \
GPIO_FN(TS_SCK1), \
GPIO_FN(MSIOF0L_RSYNC), \
GPIO_FN(D31),
- GPIO_FN(LCDDCK), \
- GPIO_FN(LCDWR_),
- GPIO_FN(LCDRD_), \
GPIO_FN(DACK2), \
- GPIO_FN(PORT217_LCD2RS), \
GPIO_FN(MSIOF0L_TSYNC), \
GPIO_FN(VIO2_FIELD3), \
- GPIO_FN(PORT217_LCD2DISP),
- GPIO_FN(LCDHSYN), \
- GPIO_FN(LCDCS_), \
- GPIO_FN(LCDCS2_), \
GPIO_FN(DACK3), \
GPIO_FN(PORT218_VIO_CKOR),
- GPIO_FN(LCDDISP), \
- GPIO_FN(LCDRS), \
- GPIO_FN(PORT219_LCD2WR_), \
GPIO_FN(DREQ3), \
GPIO_FN(MSIOF0L_TSCK), \
GPIO_FN(VIO2_CLK3), \
- GPIO_FN(LCD2DCK_2),
- GPIO_FN(LCDVSYN), \
- GPIO_FN(LCDVSYN2),
- GPIO_FN(LCDLCLK), \
GPIO_FN(DREQ1), \
- GPIO_FN(PORT221_LCD2CS_), \
GPIO_FN(PWEN), \
GPIO_FN(MSIOF0L_RXD), \
GPIO_FN(VIO2_HD3), \
- GPIO_FN(PORT221_LCD2HSYN),
- GPIO_FN(LCDDON), \
- GPIO_FN(LCDDON2), \
GPIO_FN(DACK1), \
GPIO_FN(OVCN), \
GPIO_FN(MSIOF0L_TXD), \
GPIO_FN(VIO2_VD3), \
- GPIO_FN(PORT222_LCD2VSYN),
- GPIO_FN(SCIFA1_TXD), \
GPIO_FN(OVCN2),
GPIO_FN(EXTLP), \
- GPIO_FN(SCIFA1_SCK), \
GPIO_FN(PORT226_VIO_CKO2),
- GPIO_FN(SCIFA1_RTS_), \
GPIO_FN(IDIN),
- GPIO_FN(SCIFA1_RXD),
- GPIO_FN(SCIFA1_CTS_), \
GPIO_FN(MFG1_IN1),
GPIO_FN(MSIOF1_TXD), \
- GPIO_FN(SCIFA2_TXD2),
GPIO_FN(MSIOF1_TSYNC), \
- GPIO_FN(SCIFA2_CTS2_),
GPIO_FN(MSIOF1_TSCK), \
- GPIO_FN(SCIFA2_SCK2),
GPIO_FN(MSIOF1_RXD), \
- GPIO_FN(SCIFA2_RXD2),
GPIO_FN(MSIOF1_RSCK), \
- GPIO_FN(SCIFA2_RTS2_), \
GPIO_FN(VIO2_CLK2), \
- GPIO_FN(LCD2D20),
GPIO_FN(MSIOF1_RSYNC), \
GPIO_FN(MFG1_IN2), \
GPIO_FN(VIO2_VD2), \
- GPIO_FN(LCD2D21),
GPIO_FN(MSIOF1_MCK0), \
- GPIO_FN(PORT236_I2C_SDA2),
GPIO_FN(MSIOF1_MCK1), \
- GPIO_FN(PORT237_I2C_SCL2),
GPIO_FN(MSIOF1_SS1), \
GPIO_FN(VIO2_FIELD2), \
- GPIO_FN(LCD2D22),
GPIO_FN(MSIOF1_SS2), \
GPIO_FN(VIO2_HD2), \
- GPIO_FN(LCD2D23),
- GPIO_FN(SCIFA6_TXD),
- GPIO_FN(PORT241_IRDA_OUT), \
GPIO_FN(PORT241_IROUT), \
GPIO_FN(MFG4_OUT1), \
GPIO_FN(TPU4TO0),
- GPIO_FN(PORT242_IRDA_IN), \
GPIO_FN(MFG4_IN2),
- GPIO_FN(PORT243_IRDA_FIRSEL), \
GPIO_FN(PORT243_VIO_CKO2),
- GPIO_FN(PORT244_SCIFA5_CTS_), \
GPIO_FN(MFG2_IN1), \
- GPIO_FN(PORT244_SCIFB_CTS_), \
GPIO_FN(MSIOF2R_RXD),
- GPIO_FN(PORT245_SCIFA5_RTS_), \
GPIO_FN(MFG2_IN2), \
- GPIO_FN(PORT245_SCIFB_RTS_), \
GPIO_FN(MSIOF2R_TXD),
- GPIO_FN(PORT246_SCIFA5_RXD), \
GPIO_FN(MFG1_OUT1), \
- GPIO_FN(PORT246_SCIFB_RXD), \
GPIO_FN(TPU1TO0),
- GPIO_FN(PORT247_SCIFA5_TXD), \
GPIO_FN(MFG3_OUT2), \
- GPIO_FN(PORT247_SCIFB_TXD), \
GPIO_FN(TPU3TO1),
- GPIO_FN(PORT248_SCIFA5_SCK), \
GPIO_FN(MFG2_OUT1), \
- GPIO_FN(PORT248_SCIFB_SCK), \
GPIO_FN(TPU2TO0), \
- GPIO_FN(PORT248_I2C_SCL3), \
GPIO_FN(MSIOF2R_TSCK),
GPIO_FN(PORT249_IROUT), \
GPIO_FN(MFG4_IN1), \
- GPIO_FN(PORT249_I2C_SDA3), \
GPIO_FN(MSIOF2R_TSYNC),
GPIO_FN(SDHICLK0),
GPIO_FN(SDHICD0),
@@ -2172,56 +3320,24 @@ static struct pinmux_gpio pinmux_gpios[] = {
GPIO_FN(IRQ9_MEM_INT),
GPIO_FN(IRQ9_MCP_INT),
GPIO_FN(A11),
- GPIO_FN(KEYOUT8),
GPIO_FN(TPU4TO3),
GPIO_FN(RESETA_N_PU_ON),
GPIO_FN(RESETA_N_PU_OFF),
GPIO_FN(EDBGREQ_PD),
GPIO_FN(EDBGREQ_PU),
+};
- /* Functions with pull-ups */
- GPIO_FN(KEYIN0_PU),
- GPIO_FN(KEYIN1_PU),
- GPIO_FN(KEYIN2_PU),
- GPIO_FN(KEYIN3_PU),
- GPIO_FN(KEYIN4_PU),
- GPIO_FN(KEYIN5_PU),
- GPIO_FN(KEYIN6_PU),
- GPIO_FN(KEYIN7_PU),
- GPIO_FN(SDHICD0_PU),
- GPIO_FN(SDHID0_0_PU),
- GPIO_FN(SDHID0_1_PU),
- GPIO_FN(SDHID0_2_PU),
- GPIO_FN(SDHID0_3_PU),
- GPIO_FN(SDHICMD0_PU),
- GPIO_FN(SDHIWP0_PU),
- GPIO_FN(SDHID1_0_PU),
- GPIO_FN(SDHID1_1_PU),
- GPIO_FN(SDHID1_2_PU),
- GPIO_FN(SDHID1_3_PU),
- GPIO_FN(SDHICMD1_PU),
- GPIO_FN(SDHID2_0_PU),
- GPIO_FN(SDHID2_1_PU),
- GPIO_FN(SDHID2_2_PU),
- GPIO_FN(SDHID2_3_PU),
- GPIO_FN(SDHICMD2_PU),
- GPIO_FN(MMCCMD0_PU),
- GPIO_FN(MMCCMD1_PU),
- GPIO_FN(MMCD0_0_PU),
- GPIO_FN(MMCD0_1_PU),
- GPIO_FN(MMCD0_2_PU),
- GPIO_FN(MMCD0_3_PU),
- GPIO_FN(MMCD0_4_PU),
- GPIO_FN(MMCD0_5_PU),
- GPIO_FN(MMCD0_6_PU),
- GPIO_FN(MMCD0_7_PU),
- GPIO_FN(FSIACK_PU),
- GPIO_FN(FSIAILR_PU),
- GPIO_FN(FSIAIBT_PU),
- GPIO_FN(FSIAISLD_PU),
-};
-
-static struct pinmux_cfg_reg pinmux_config_regs[] = {
+#undef PORTCR
+#define PORTCR(nr, reg) \
+ { \
+ PINMUX_CFG_REG("PORT" nr "CR", reg, 8, 4) { \
+ _PCRH(PORT##nr##_IN, 0, 0, PORT##nr##_OUT), \
+ PORT##nr##_FN0, PORT##nr##_FN1, \
+ PORT##nr##_FN2, PORT##nr##_FN3, \
+ PORT##nr##_FN4, PORT##nr##_FN5, \
+ PORT##nr##_FN6, PORT##nr##_FN7 } \
+ }
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PORTCR(0, 0xe6050000), /* PORT0CR */
PORTCR(1, 0xe6050001), /* PORT1CR */
PORTCR(2, 0xe6050002), /* PORT2CR */
@@ -2629,7 +3745,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
{ },
};
-static struct pinmux_data_reg pinmux_data_regs[] = {
+static const struct pinmux_data_reg pinmux_data_regs[] = {
{ PINMUX_DATA_REG("PORTL031_000DR", 0xe6054000, 32) {
PORT31_DATA, PORT30_DATA, PORT29_DATA, PORT28_DATA,
PORT27_DATA, PORT26_DATA, PORT25_DATA, PORT24_DATA,
@@ -2733,60 +3849,116 @@ static struct pinmux_data_reg pinmux_data_regs[] = {
{ },
};
-/* IRQ pins through INTCS with IRQ0->15 from 0x200 and IRQ16-31 from 0x3200 */
-#define EXT_IRQ16L(n) intcs_evt2irq(0x200 + ((n) << 5))
-#define EXT_IRQ16H(n) intcs_evt2irq(0x3200 + ((n - 16) << 5))
-
-static struct pinmux_irq pinmux_irqs[] = {
- PINMUX_IRQ(EXT_IRQ16H(19), PORT9_FN0),
- PINMUX_IRQ(EXT_IRQ16L(1), PORT10_FN0),
- PINMUX_IRQ(EXT_IRQ16L(0), PORT11_FN0),
- PINMUX_IRQ(EXT_IRQ16H(18), PORT13_FN0),
- PINMUX_IRQ(EXT_IRQ16H(20), PORT14_FN0),
- PINMUX_IRQ(EXT_IRQ16H(21), PORT15_FN0),
- PINMUX_IRQ(EXT_IRQ16H(31), PORT26_FN0),
- PINMUX_IRQ(EXT_IRQ16H(30), PORT27_FN0),
- PINMUX_IRQ(EXT_IRQ16H(29), PORT28_FN0),
- PINMUX_IRQ(EXT_IRQ16H(22), PORT40_FN0),
- PINMUX_IRQ(EXT_IRQ16H(23), PORT53_FN0),
- PINMUX_IRQ(EXT_IRQ16L(10), PORT54_FN0),
- PINMUX_IRQ(EXT_IRQ16L(9), PORT56_FN0),
- PINMUX_IRQ(EXT_IRQ16H(26), PORT115_FN0),
- PINMUX_IRQ(EXT_IRQ16H(27), PORT116_FN0),
- PINMUX_IRQ(EXT_IRQ16H(28), PORT117_FN0),
- PINMUX_IRQ(EXT_IRQ16H(24), PORT118_FN0),
- PINMUX_IRQ(EXT_IRQ16L(6), PORT147_FN0),
- PINMUX_IRQ(EXT_IRQ16L(2), PORT149_FN0),
- PINMUX_IRQ(EXT_IRQ16L(7), PORT150_FN0),
- PINMUX_IRQ(EXT_IRQ16L(12), PORT156_FN0),
- PINMUX_IRQ(EXT_IRQ16L(4), PORT159_FN0),
- PINMUX_IRQ(EXT_IRQ16H(25), PORT164_FN0),
- PINMUX_IRQ(EXT_IRQ16L(8), PORT223_FN0),
- PINMUX_IRQ(EXT_IRQ16L(3), PORT224_FN0),
- PINMUX_IRQ(EXT_IRQ16L(5), PORT227_FN0),
- PINMUX_IRQ(EXT_IRQ16H(17), PORT234_FN0),
- PINMUX_IRQ(EXT_IRQ16L(11), PORT238_FN0),
- PINMUX_IRQ(EXT_IRQ16L(13), PORT239_FN0),
- PINMUX_IRQ(EXT_IRQ16H(16), PORT249_FN0),
- PINMUX_IRQ(EXT_IRQ16L(14), PORT251_FN0),
- PINMUX_IRQ(EXT_IRQ16L(9), PORT308_FN0),
-};
-
-struct sh_pfc_soc_info sh73a0_pinmux_info = {
+/* External IRQ pins mapped at IRQPIN_BASE */
+#define EXT_IRQ16L(n) irq_pin(n)
+#define EXT_IRQ16H(n) irq_pin(n)
+
+static const struct pinmux_irq pinmux_irqs[] = {
+ PINMUX_IRQ(EXT_IRQ16H(19), 9),
+ PINMUX_IRQ(EXT_IRQ16L(1), 10),
+ PINMUX_IRQ(EXT_IRQ16L(0), 11),
+ PINMUX_IRQ(EXT_IRQ16H(18), 13),
+ PINMUX_IRQ(EXT_IRQ16H(20), 14),
+ PINMUX_IRQ(EXT_IRQ16H(21), 15),
+ PINMUX_IRQ(EXT_IRQ16H(31), 26),
+ PINMUX_IRQ(EXT_IRQ16H(30), 27),
+ PINMUX_IRQ(EXT_IRQ16H(29), 28),
+ PINMUX_IRQ(EXT_IRQ16H(22), 40),
+ PINMUX_IRQ(EXT_IRQ16H(23), 53),
+ PINMUX_IRQ(EXT_IRQ16L(10), 54),
+ PINMUX_IRQ(EXT_IRQ16L(9), 56),
+ PINMUX_IRQ(EXT_IRQ16H(26), 115),
+ PINMUX_IRQ(EXT_IRQ16H(27), 116),
+ PINMUX_IRQ(EXT_IRQ16H(28), 117),
+ PINMUX_IRQ(EXT_IRQ16H(24), 118),
+ PINMUX_IRQ(EXT_IRQ16L(6), 147),
+ PINMUX_IRQ(EXT_IRQ16L(2), 149),
+ PINMUX_IRQ(EXT_IRQ16L(7), 150),
+ PINMUX_IRQ(EXT_IRQ16L(12), 156),
+ PINMUX_IRQ(EXT_IRQ16L(4), 159),
+ PINMUX_IRQ(EXT_IRQ16H(25), 164),
+ PINMUX_IRQ(EXT_IRQ16L(8), 223),
+ PINMUX_IRQ(EXT_IRQ16L(3), 224),
+ PINMUX_IRQ(EXT_IRQ16L(5), 227),
+ PINMUX_IRQ(EXT_IRQ16H(17), 234),
+ PINMUX_IRQ(EXT_IRQ16L(11), 238),
+ PINMUX_IRQ(EXT_IRQ16L(13), 239),
+ PINMUX_IRQ(EXT_IRQ16H(16), 249),
+ PINMUX_IRQ(EXT_IRQ16L(14), 251),
+ PINMUX_IRQ(EXT_IRQ16L(9), 308),
+};
+
+#define PORTnCR_PULMD_OFF (0 << 6)
+#define PORTnCR_PULMD_DOWN (2 << 6)
+#define PORTnCR_PULMD_UP (3 << 6)
+#define PORTnCR_PULMD_MASK (3 << 6)
+
+static const unsigned int sh73a0_portcr_offsets[] = {
+ 0x00000000, 0x00001000, 0x00001000, 0x00002000, 0x00002000,
+ 0x00002000, 0x00002000, 0x00003000, 0x00003000, 0x00002000,
+};
+
+static unsigned int sh73a0_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin)
+{
+ void __iomem *addr = pfc->window->virt
+ + sh73a0_portcr_offsets[pin >> 5] + pin;
+ u32 value = ioread8(addr) & PORTnCR_PULMD_MASK;
+
+ switch (value) {
+ case PORTnCR_PULMD_UP:
+ return PIN_CONFIG_BIAS_PULL_UP;
+ case PORTnCR_PULMD_DOWN:
+ return PIN_CONFIG_BIAS_PULL_DOWN;
+ case PORTnCR_PULMD_OFF:
+ default:
+ return PIN_CONFIG_BIAS_DISABLE;
+ }
+}
+
+static void sh73a0_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
+ unsigned int bias)
+{
+ void __iomem *addr = pfc->window->virt
+ + sh73a0_portcr_offsets[pin >> 5] + pin;
+ u32 value = ioread8(addr) & ~PORTnCR_PULMD_MASK;
+
+ switch (bias) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ value |= PORTnCR_PULMD_UP;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ value |= PORTnCR_PULMD_DOWN;
+ break;
+ }
+
+ iowrite8(value, addr);
+}
+
+static const struct sh_pfc_soc_operations sh73a0_pinmux_ops = {
+ .get_bias = sh73a0_pinmux_get_bias,
+ .set_bias = sh73a0_pinmux_set_bias,
+};
+
+const struct sh_pfc_soc_info sh73a0_pinmux_info = {
.name = "sh73a0_pfc",
- .reserved_id = PINMUX_RESERVED,
- .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .ops = &sh73a0_pinmux_ops,
+
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
- .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
- .input_pd = { PINMUX_INPUT_PULLDOWN_BEGIN, PINMUX_INPUT_PULLDOWN_END },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
- .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
- .first_gpio = GPIO_PORT0,
- .last_gpio = GPIO_FN_FSIAISLD_PU,
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .ranges = pinmux_ranges,
+ .nr_ranges = ARRAY_SIZE(pinmux_ranges),
+ .groups = pinmux_groups,
+ .nr_groups = ARRAY_SIZE(pinmux_groups),
+ .functions = pinmux_functions,
+ .nr_functions = ARRAY_SIZE(pinmux_functions),
+
+ .func_gpios = pinmux_func_gpios,
+ .nr_func_gpios = ARRAY_SIZE(pinmux_func_gpios),
- .gpios = pinmux_gpios,
.cfg_regs = pinmux_config_regs,
.data_regs = pinmux_data_regs,
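
Note on the sh73a0 conversion above: the per-function pull-up GPIO entries (KEYIN0_PU and friends) are dropped in favour of runtime bias control through the new get_bias/set_bias operations, which read and rewrite the PULMD field (bits 7:6) of the pin's PORTnCR register; the register for pin n sits at the window base plus sh73a0_portcr_offsets[n >> 5] + n. Below is a minimal standalone sketch of that field encoding, using the PULMD values added in the hunk above; the helper names, the bias enum and the main() harness are illustrative only and are not part of the driver.

	#include <stdint.h>
	#include <stdio.h>

	/* PULMD field of PORTnCR, bits 7:6 (values as added in the hunk above). */
	#define PORTnCR_PULMD_OFF	(0 << 6)
	#define PORTnCR_PULMD_DOWN	(2 << 6)
	#define PORTnCR_PULMD_UP	(3 << 6)
	#define PORTnCR_PULMD_MASK	(3 << 6)

	enum bias { BIAS_DISABLE, BIAS_PULL_DOWN, BIAS_PULL_UP };

	/* Decode the bias currently programmed in a PORTnCR register value. */
	static enum bias portcr_to_bias(uint8_t cr)
	{
		switch (cr & PORTnCR_PULMD_MASK) {
		case PORTnCR_PULMD_UP:
			return BIAS_PULL_UP;
		case PORTnCR_PULMD_DOWN:
			return BIAS_PULL_DOWN;
		default:
			return BIAS_DISABLE;
		}
	}

	/* Return the register value rewritten for the requested bias. */
	static uint8_t bias_to_portcr(uint8_t cr, enum bias b)
	{
		cr &= ~PORTnCR_PULMD_MASK;
		if (b == BIAS_PULL_UP)
			cr |= PORTnCR_PULMD_UP;
		else if (b == BIAS_PULL_DOWN)
			cr |= PORTnCR_PULMD_DOWN;
		return cr;
	}

	int main(void)
	{
		uint8_t cr = 0x45;	/* arbitrary example register value */

		cr = bias_to_portcr(cr, BIAS_PULL_UP);
		printf("PULMD now %s\n",
		       portcr_to_bias(cr) == BIAS_PULL_UP ? "pull-up" : "other");
		return 0;
	}
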
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7720.c b/drivers/pinctrl/sh-pfc/pfc-sh7720.c
index 10872ed..52e9f6b 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7720.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7720.c
@@ -262,7 +262,7 @@ enum {
PINMUX_MARK_END,
};
-static pinmux_enum_t pinmux_data[] = {
+static const pinmux_enum_t pinmux_data[] = {
/* PTA GPIO */
PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT, PTA7_IN_PU),
PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT, PTA6_IN_PU),
@@ -606,7 +606,7 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(SIM_CLK_MARK, PSELD_1_0_10, PTV0_FN),
};
-static struct pinmux_gpio pinmux_gpios[] = {
+static struct sh_pfc_pin pinmux_pins[] = {
/* PTA */
PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
@@ -759,202 +759,205 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+};
+
+#define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins)
+static const struct pinmux_func pinmux_func_gpios[] = {
/* BSC */
- PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
- PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
- PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
- PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
- PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
- PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
- PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
- PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
- PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
- PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
- PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
- PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
- PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
- PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
- PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
- PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
- PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
- PINMUX_GPIO(GPIO_FN_RAS, RAS_MARK),
- PINMUX_GPIO(GPIO_FN_CAS, CAS_MARK),
- PINMUX_GPIO(GPIO_FN_CKE, CKE_MARK),
- PINMUX_GPIO(GPIO_FN_CS5B_CE1A, CS5B_CE1A_MARK),
- PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK),
- PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
- PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
- PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
- PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
- PINMUX_GPIO(GPIO_FN_A21, A21_MARK),
- PINMUX_GPIO(GPIO_FN_A20, A20_MARK),
- PINMUX_GPIO(GPIO_FN_A19, A19_MARK),
- PINMUX_GPIO(GPIO_FN_A0, A0_MARK),
- PINMUX_GPIO(GPIO_FN_REFOUT, REFOUT_MARK),
- PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK),
+ GPIO_FN(D31),
+ GPIO_FN(D30),
+ GPIO_FN(D29),
+ GPIO_FN(D28),
+ GPIO_FN(D27),
+ GPIO_FN(D26),
+ GPIO_FN(D25),
+ GPIO_FN(D24),
+ GPIO_FN(D23),
+ GPIO_FN(D22),
+ GPIO_FN(D21),
+ GPIO_FN(D20),
+ GPIO_FN(D19),
+ GPIO_FN(D18),
+ GPIO_FN(D17),
+ GPIO_FN(D16),
+ GPIO_FN(IOIS16),
+ GPIO_FN(RAS),
+ GPIO_FN(CAS),
+ GPIO_FN(CKE),
+ GPIO_FN(CS5B_CE1A),
+ GPIO_FN(CS6B_CE1B),
+ GPIO_FN(A25),
+ GPIO_FN(A24),
+ GPIO_FN(A23),
+ GPIO_FN(A22),
+ GPIO_FN(A21),
+ GPIO_FN(A20),
+ GPIO_FN(A19),
+ GPIO_FN(A0),
+ GPIO_FN(REFOUT),
+ GPIO_FN(IRQOUT),
/* LCDC */
- PINMUX_GPIO(GPIO_FN_LCD_DATA15, LCD_DATA15_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA14, LCD_DATA14_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA13, LCD_DATA13_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA12, LCD_DATA12_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA11, LCD_DATA11_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA10, LCD_DATA10_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA9, LCD_DATA9_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA8, LCD_DATA8_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA7, LCD_DATA7_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA6, LCD_DATA6_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA5, LCD_DATA5_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA4, LCD_DATA4_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA3, LCD_DATA3_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA2, LCD_DATA2_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA1, LCD_DATA1_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA0, LCD_DATA0_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_CL1, LCD_CL1_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_CL2, LCD_CL2_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DON, LCD_DON_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_FLM, LCD_FLM_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_VEPWC, LCD_VEPWC_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_VCPWC, LCD_VCPWC_MARK),
+ GPIO_FN(LCD_DATA15),
+ GPIO_FN(LCD_DATA14),
+ GPIO_FN(LCD_DATA13),
+ GPIO_FN(LCD_DATA12),
+ GPIO_FN(LCD_DATA11),
+ GPIO_FN(LCD_DATA10),
+ GPIO_FN(LCD_DATA9),
+ GPIO_FN(LCD_DATA8),
+ GPIO_FN(LCD_DATA7),
+ GPIO_FN(LCD_DATA6),
+ GPIO_FN(LCD_DATA5),
+ GPIO_FN(LCD_DATA4),
+ GPIO_FN(LCD_DATA3),
+ GPIO_FN(LCD_DATA2),
+ GPIO_FN(LCD_DATA1),
+ GPIO_FN(LCD_DATA0),
+ GPIO_FN(LCD_M_DISP),
+ GPIO_FN(LCD_CL1),
+ GPIO_FN(LCD_CL2),
+ GPIO_FN(LCD_DON),
+ GPIO_FN(LCD_FLM),
+ GPIO_FN(LCD_VEPWC),
+ GPIO_FN(LCD_VCPWC),
/* AFEIF */
- PINMUX_GPIO(GPIO_FN_AFE_RXIN, AFE_RXIN_MARK),
- PINMUX_GPIO(GPIO_FN_AFE_RDET, AFE_RDET_MARK),
- PINMUX_GPIO(GPIO_FN_AFE_FS, AFE_FS_MARK),
- PINMUX_GPIO(GPIO_FN_AFE_TXOUT, AFE_TXOUT_MARK),
- PINMUX_GPIO(GPIO_FN_AFE_SCLK, AFE_SCLK_MARK),
- PINMUX_GPIO(GPIO_FN_AFE_RLYCNT, AFE_RLYCNT_MARK),
- PINMUX_GPIO(GPIO_FN_AFE_HC1, AFE_HC1_MARK),
+ GPIO_FN(AFE_RXIN),
+ GPIO_FN(AFE_RDET),
+ GPIO_FN(AFE_FS),
+ GPIO_FN(AFE_TXOUT),
+ GPIO_FN(AFE_SCLK),
+ GPIO_FN(AFE_RLYCNT),
+ GPIO_FN(AFE_HC1),
/* IIC */
- PINMUX_GPIO(GPIO_FN_IIC_SCL, IIC_SCL_MARK),
- PINMUX_GPIO(GPIO_FN_IIC_SDA, IIC_SDA_MARK),
+ GPIO_FN(IIC_SCL),
+ GPIO_FN(IIC_SDA),
/* DAC */
- PINMUX_GPIO(GPIO_FN_DA1, DA1_MARK),
- PINMUX_GPIO(GPIO_FN_DA0, DA0_MARK),
+ GPIO_FN(DA1),
+ GPIO_FN(DA0),
/* ADC */
- PINMUX_GPIO(GPIO_FN_AN3, AN3_MARK),
- PINMUX_GPIO(GPIO_FN_AN2, AN2_MARK),
- PINMUX_GPIO(GPIO_FN_AN1, AN1_MARK),
- PINMUX_GPIO(GPIO_FN_AN0, AN0_MARK),
- PINMUX_GPIO(GPIO_FN_ADTRG, ADTRG_MARK),
+ GPIO_FN(AN3),
+ GPIO_FN(AN2),
+ GPIO_FN(AN1),
+ GPIO_FN(AN0),
+ GPIO_FN(ADTRG),
/* USB */
- PINMUX_GPIO(GPIO_FN_USB1D_RCV, USB1D_RCV_MARK),
- PINMUX_GPIO(GPIO_FN_USB1D_TXSE0, USB1D_TXSE0_MARK),
- PINMUX_GPIO(GPIO_FN_USB1D_TXDPLS, USB1D_TXDPLS_MARK),
- PINMUX_GPIO(GPIO_FN_USB1D_DMNS, USB1D_DMNS_MARK),
- PINMUX_GPIO(GPIO_FN_USB1D_DPLS, USB1D_DPLS_MARK),
- PINMUX_GPIO(GPIO_FN_USB1D_SPEED, USB1D_SPEED_MARK),
- PINMUX_GPIO(GPIO_FN_USB1D_TXENL, USB1D_TXENL_MARK),
-
- PINMUX_GPIO(GPIO_FN_USB2_PWR_EN, USB2_PWR_EN_MARK),
- PINMUX_GPIO(GPIO_FN_USB1_PWR_EN_USBF_UPLUP,
- USB1_PWR_EN_USBF_UPLUP_MARK),
- PINMUX_GPIO(GPIO_FN_USB1D_SUSPEND, USB1D_SUSPEND_MARK),
+ GPIO_FN(USB1D_RCV),
+ GPIO_FN(USB1D_TXSE0),
+ GPIO_FN(USB1D_TXDPLS),
+ GPIO_FN(USB1D_DMNS),
+ GPIO_FN(USB1D_DPLS),
+ GPIO_FN(USB1D_SPEED),
+ GPIO_FN(USB1D_TXENL),
+
+ GPIO_FN(USB2_PWR_EN),
+ GPIO_FN(USB1_PWR_EN_USBF_UPLUP),
+ GPIO_FN(USB1D_SUSPEND),
/* INTC */
- PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ3_IRL3, IRQ3_IRL3_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ2_IRL2, IRQ2_IRL2_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ1_IRL1, IRQ1_IRL1_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ0_IRL0, IRQ0_IRL0_MARK),
+ GPIO_FN(IRQ5),
+ GPIO_FN(IRQ4),
+ GPIO_FN(IRQ3_IRL3),
+ GPIO_FN(IRQ2_IRL2),
+ GPIO_FN(IRQ1_IRL1),
+ GPIO_FN(IRQ0_IRL0),
/* PCC */
- PINMUX_GPIO(GPIO_FN_PCC_REG, PCC_REG_MARK),
- PINMUX_GPIO(GPIO_FN_PCC_DRV, PCC_DRV_MARK),
- PINMUX_GPIO(GPIO_FN_PCC_BVD2, PCC_BVD2_MARK),
- PINMUX_GPIO(GPIO_FN_PCC_BVD1, PCC_BVD1_MARK),
- PINMUX_GPIO(GPIO_FN_PCC_CD2, PCC_CD2_MARK),
- PINMUX_GPIO(GPIO_FN_PCC_CD1, PCC_CD1_MARK),
- PINMUX_GPIO(GPIO_FN_PCC_RESET, PCC_RESET_MARK),
- PINMUX_GPIO(GPIO_FN_PCC_RDY, PCC_RDY_MARK),
- PINMUX_GPIO(GPIO_FN_PCC_VS2, PCC_VS2_MARK),
- PINMUX_GPIO(GPIO_FN_PCC_VS1, PCC_VS1_MARK),
+ GPIO_FN(PCC_REG),
+ GPIO_FN(PCC_DRV),
+ GPIO_FN(PCC_BVD2),
+ GPIO_FN(PCC_BVD1),
+ GPIO_FN(PCC_CD2),
+ GPIO_FN(PCC_CD1),
+ GPIO_FN(PCC_RESET),
+ GPIO_FN(PCC_RDY),
+ GPIO_FN(PCC_VS2),
+ GPIO_FN(PCC_VS1),
/* HUDI */
- PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
- PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
- PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
- PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
- PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK),
- PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_ASEBRKAK, ASEBRKAK_MARK),
- PINMUX_GPIO(GPIO_FN_TRST, TRST_MARK),
- PINMUX_GPIO(GPIO_FN_TMS, TMS_MARK),
- PINMUX_GPIO(GPIO_FN_TDO, TDO_MARK),
- PINMUX_GPIO(GPIO_FN_TDI, TDI_MARK),
- PINMUX_GPIO(GPIO_FN_TCK, TCK_MARK),
+ GPIO_FN(AUDATA3),
+ GPIO_FN(AUDATA2),
+ GPIO_FN(AUDATA1),
+ GPIO_FN(AUDATA0),
+ GPIO_FN(AUDCK),
+ GPIO_FN(AUDSYNC),
+ GPIO_FN(ASEBRKAK),
+ GPIO_FN(TRST),
+ GPIO_FN(TMS),
+ GPIO_FN(TDO),
+ GPIO_FN(TDI),
+ GPIO_FN(TCK),
/* DMAC */
- PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
- PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
- PINMUX_GPIO(GPIO_FN_TEND1, TEND1_MARK),
- PINMUX_GPIO(GPIO_FN_TEND0, TEND0_MARK),
+ GPIO_FN(DACK1),
+ GPIO_FN(DREQ1),
+ GPIO_FN(DACK0),
+ GPIO_FN(DREQ0),
+ GPIO_FN(TEND1),
+ GPIO_FN(TEND0),
/* SIOF0 */
- PINMUX_GPIO(GPIO_FN_SIOF0_SYNC, SIOF0_SYNC_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF0_MCLK, SIOF0_MCLK_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF0_TXD, SIOF0_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF0_RXD, SIOF0_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF0_SCK, SIOF0_SCK_MARK),
+ GPIO_FN(SIOF0_SYNC),
+ GPIO_FN(SIOF0_MCLK),
+ GPIO_FN(SIOF0_TXD),
+ GPIO_FN(SIOF0_RXD),
+ GPIO_FN(SIOF0_SCK),
/* SIOF1 */
- PINMUX_GPIO(GPIO_FN_SIOF1_SYNC, SIOF1_SYNC_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF1_MCLK, SIOF1_MCLK_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF1_TXD, SIOF1_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF1_RXD, SIOF1_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF1_SCK, SIOF1_SCK_MARK),
+ GPIO_FN(SIOF1_SYNC),
+ GPIO_FN(SIOF1_MCLK),
+ GPIO_FN(SIOF1_TXD),
+ GPIO_FN(SIOF1_RXD),
+ GPIO_FN(SIOF1_SCK),
/* SCIF0 */
- PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_RTS, SCIF0_RTS_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_CTS, SCIF0_CTS_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK),
+ GPIO_FN(SCIF0_TXD),
+ GPIO_FN(SCIF0_RXD),
+ GPIO_FN(SCIF0_RTS),
+ GPIO_FN(SCIF0_CTS),
+ GPIO_FN(SCIF0_SCK),
/* SCIF1 */
- PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF1_RTS, SCIF1_RTS_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF1_CTS, SCIF1_CTS_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK),
+ GPIO_FN(SCIF1_TXD),
+ GPIO_FN(SCIF1_RXD),
+ GPIO_FN(SCIF1_RTS),
+ GPIO_FN(SCIF1_CTS),
+ GPIO_FN(SCIF1_SCK),
/* TPU */
- PINMUX_GPIO(GPIO_FN_TPU_TO1, TPU_TO1_MARK),
- PINMUX_GPIO(GPIO_FN_TPU_TO0, TPU_TO0_MARK),
- PINMUX_GPIO(GPIO_FN_TPU_TI3B, TPU_TI3B_MARK),
- PINMUX_GPIO(GPIO_FN_TPU_TI3A, TPU_TI3A_MARK),
- PINMUX_GPIO(GPIO_FN_TPU_TI2B, TPU_TI2B_MARK),
- PINMUX_GPIO(GPIO_FN_TPU_TI2A, TPU_TI2A_MARK),
- PINMUX_GPIO(GPIO_FN_TPU_TO3, TPU_TO3_MARK),
- PINMUX_GPIO(GPIO_FN_TPU_TO2, TPU_TO2_MARK),
+ GPIO_FN(TPU_TO1),
+ GPIO_FN(TPU_TO0),
+ GPIO_FN(TPU_TI3B),
+ GPIO_FN(TPU_TI3A),
+ GPIO_FN(TPU_TI2B),
+ GPIO_FN(TPU_TI2A),
+ GPIO_FN(TPU_TO3),
+ GPIO_FN(TPU_TO2),
/* SIM */
- PINMUX_GPIO(GPIO_FN_SIM_D, SIM_D_MARK),
- PINMUX_GPIO(GPIO_FN_SIM_CLK, SIM_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_SIM_RST, SIM_RST_MARK),
+ GPIO_FN(SIM_D),
+ GPIO_FN(SIM_CLK),
+ GPIO_FN(SIM_RST),
/* MMC */
- PINMUX_GPIO(GPIO_FN_MMC_DAT, MMC_DAT_MARK),
- PINMUX_GPIO(GPIO_FN_MMC_CMD, MMC_CMD_MARK),
- PINMUX_GPIO(GPIO_FN_MMC_CLK, MMC_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_MMC_VDDON, MMC_VDDON_MARK),
- PINMUX_GPIO(GPIO_FN_MMC_ODMOD, MMC_ODMOD_MARK),
+ GPIO_FN(MMC_DAT),
+ GPIO_FN(MMC_CMD),
+ GPIO_FN(MMC_CLK),
+ GPIO_FN(MMC_VDDON),
+ GPIO_FN(MMC_ODMOD),
/* SYSC */
- PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
- PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK),
+ GPIO_FN(STATUS0),
+ GPIO_FN(STATUS1),
};
-static struct pinmux_cfg_reg pinmux_config_regs[] = {
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
PTA7_FN, PTA7_OUT, PTA7_IN_PU, PTA7_IN,
PTA6_FN, PTA6_OUT, PTA6_IN_PU, PTA6_IN,
@@ -1138,7 +1141,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
{}
};
-static struct pinmux_data_reg pinmux_data_regs[] = {
+static const struct pinmux_data_reg pinmux_data_regs[] = {
{ PINMUX_DATA_REG("PADR", 0xa4050140, 8) {
PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
@@ -1214,20 +1217,18 @@ static struct pinmux_data_reg pinmux_data_regs[] = {
{ },
};
-struct sh_pfc_soc_info sh7720_pinmux_info = {
+const struct sh_pfc_soc_info sh7720_pinmux_info = {
.name = "sh7720_pfc",
- .reserved_id = PINMUX_RESERVED,
- .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
.input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
- .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
- .first_gpio = GPIO_PTA7,
- .last_gpio = GPIO_FN_STATUS1,
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .func_gpios = pinmux_func_gpios,
+ .nr_func_gpios = ARRAY_SIZE(pinmux_func_gpios),
- .gpios = pinmux_gpios,
.cfg_regs = pinmux_config_regs,
.data_regs = pinmux_data_regs,
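
The sh7720 change above follows the pattern applied to every SoC file in this series: the single pinmux_gpios[] table is split into pinmux_pins[] for the physical ports and pinmux_func_gpios[] for the function GPIOs, with PINMUX_FN_BASE defined as ARRAY_SIZE(pinmux_pins) so the function entries keep their numbering directly after the real pins, and the soc_info now points at both tables together with their sizes. A minimal standalone sketch of that two-table numbering scheme follows; the table contents, the FN_BASE macro and the gpio_name() helper are hypothetical, illustrate only the indexing, and are not the kernel's sh-pfc macros.

	#include <stddef.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for the two tables: physical ports on one
	 * side, function GPIOs on the other, sharing one number space. */
	static const char * const pins[] = { "PTA7", "PTA6", "PTA5" };
	static const char * const func_gpios[] = { "SCIF0_TXD", "SCIF0_RXD" };

	/* Plays the role of PINMUX_FN_BASE: first number after the real pins. */
	#define FN_BASE (sizeof(pins) / sizeof(pins[0]))

	/* Resolve a GPIO number spanning both tables: numbers below FN_BASE
	 * are physical pins, numbers from FN_BASE upward index the function
	 * table. */
	static const char *gpio_name(size_t nr)
	{
		if (nr < FN_BASE)
			return pins[nr];
		nr -= FN_BASE;
		if (nr < sizeof(func_gpios) / sizeof(func_gpios[0]))
			return func_gpios[nr];
		return NULL;
	}

	int main(void)
	{
		printf("%s %s\n", gpio_name(1), gpio_name(FN_BASE + 1));
		return 0;
	}
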
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7722.c b/drivers/pinctrl/sh-pfc/pfc-sh7722.c
index 2de0929..3203438 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7722.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7722.c
@@ -296,7 +296,7 @@ enum {
PINMUX_FUNCTION_END,
};
-static pinmux_enum_t pinmux_data[] = {
+static const pinmux_enum_t pinmux_data[] = {
/* PTA */
PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_IN_PD, PTA7_OUT),
PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_IN_PD),
@@ -787,7 +787,7 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(KEYOUT5_IN5_MARK, HIZA14_KEYSC, KEYOUT5_IN5),
};
-static struct pinmux_gpio pinmux_gpios[] = {
+static struct sh_pfc_pin pinmux_pins[] = {
/* PTA */
PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
@@ -982,289 +982,293 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA),
PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
+};
+
+#define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins)
+static const struct pinmux_func pinmux_func_gpios[] = {
/* SCIF0 */
- PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_RTS, SCIF0_RTS_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_CTS, SCIF0_CTS_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK),
+ GPIO_FN(SCIF0_TXD),
+ GPIO_FN(SCIF0_RXD),
+ GPIO_FN(SCIF0_RTS),
+ GPIO_FN(SCIF0_CTS),
+ GPIO_FN(SCIF0_SCK),
/* SCIF1 */
- PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF1_RTS, SCIF1_RTS_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF1_CTS, SCIF1_CTS_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK),
+ GPIO_FN(SCIF1_TXD),
+ GPIO_FN(SCIF1_RXD),
+ GPIO_FN(SCIF1_RTS),
+ GPIO_FN(SCIF1_CTS),
+ GPIO_FN(SCIF1_SCK),
/* SCIF2 */
- PINMUX_GPIO(GPIO_FN_SCIF2_TXD, SCIF2_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF2_RXD, SCIF2_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF2_RTS, SCIF2_RTS_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF2_CTS, SCIF2_CTS_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF2_SCK, SCIF2_SCK_MARK),
+ GPIO_FN(SCIF2_TXD),
+ GPIO_FN(SCIF2_RXD),
+ GPIO_FN(SCIF2_RTS),
+ GPIO_FN(SCIF2_CTS),
+ GPIO_FN(SCIF2_SCK),
/* SIO */
- PINMUX_GPIO(GPIO_FN_SIOTXD, SIOTXD_MARK),
- PINMUX_GPIO(GPIO_FN_SIORXD, SIORXD_MARK),
- PINMUX_GPIO(GPIO_FN_SIOD, SIOD_MARK),
- PINMUX_GPIO(GPIO_FN_SIOSTRB0, SIOSTRB0_MARK),
- PINMUX_GPIO(GPIO_FN_SIOSTRB1, SIOSTRB1_MARK),
- PINMUX_GPIO(GPIO_FN_SIOSCK, SIOSCK_MARK),
- PINMUX_GPIO(GPIO_FN_SIOMCK, SIOMCK_MARK),
+ GPIO_FN(SIOTXD),
+ GPIO_FN(SIORXD),
+ GPIO_FN(SIOD),
+ GPIO_FN(SIOSTRB0),
+ GPIO_FN(SIOSTRB1),
+ GPIO_FN(SIOSCK),
+ GPIO_FN(SIOMCK),
/* CEU */
- PINMUX_GPIO(GPIO_FN_VIO_D15, VIO_D15_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D14, VIO_D14_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D13, VIO_D13_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D12, VIO_D12_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D11, VIO_D11_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D10, VIO_D10_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D9, VIO_D9_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D8, VIO_D8_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D7, VIO_D7_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D6, VIO_D6_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D5, VIO_D5_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D4, VIO_D4_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D3, VIO_D3_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D2, VIO_D2_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D1, VIO_D1_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D0, VIO_D0_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_CLK, VIO_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_VD, VIO_VD_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_HD, VIO_HD_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_FLD, VIO_FLD_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_CKO, VIO_CKO_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_STEX, VIO_STEX_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_STEM, VIO_STEM_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_VD2, VIO_VD2_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_HD2, VIO_HD2_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_CLK2, VIO_CLK2_MARK),
+ GPIO_FN(VIO_D15),
+ GPIO_FN(VIO_D14),
+ GPIO_FN(VIO_D13),
+ GPIO_FN(VIO_D12),
+ GPIO_FN(VIO_D11),
+ GPIO_FN(VIO_D10),
+ GPIO_FN(VIO_D9),
+ GPIO_FN(VIO_D8),
+ GPIO_FN(VIO_D7),
+ GPIO_FN(VIO_D6),
+ GPIO_FN(VIO_D5),
+ GPIO_FN(VIO_D4),
+ GPIO_FN(VIO_D3),
+ GPIO_FN(VIO_D2),
+ GPIO_FN(VIO_D1),
+ GPIO_FN(VIO_D0),
+ GPIO_FN(VIO_CLK),
+ GPIO_FN(VIO_VD),
+ GPIO_FN(VIO_HD),
+ GPIO_FN(VIO_FLD),
+ GPIO_FN(VIO_CKO),
+ GPIO_FN(VIO_STEX),
+ GPIO_FN(VIO_STEM),
+ GPIO_FN(VIO_VD2),
+ GPIO_FN(VIO_HD2),
+ GPIO_FN(VIO_CLK2),
/* LCDC */
- PINMUX_GPIO(GPIO_FN_LCDD23, LCDD23_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD22, LCDD22_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD21, LCDD21_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD20, LCDD20_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD19, LCDD19_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD18, LCDD18_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD17, LCDD17_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD16, LCDD16_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD15, LCDD15_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD14, LCDD14_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD13, LCDD13_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD12, LCDD12_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD11, LCDD11_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD10, LCDD10_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD9, LCDD9_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD8, LCDD8_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD7, LCDD7_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD6, LCDD6_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD5, LCDD5_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD4, LCDD4_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD3, LCDD3_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD2, LCDD2_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD1, LCDD1_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD0, LCDD0_MARK),
- PINMUX_GPIO(GPIO_FN_LCDLCLK, LCDLCLK_MARK),
+ GPIO_FN(LCDD23),
+ GPIO_FN(LCDD22),
+ GPIO_FN(LCDD21),
+ GPIO_FN(LCDD20),
+ GPIO_FN(LCDD19),
+ GPIO_FN(LCDD18),
+ GPIO_FN(LCDD17),
+ GPIO_FN(LCDD16),
+ GPIO_FN(LCDD15),
+ GPIO_FN(LCDD14),
+ GPIO_FN(LCDD13),
+ GPIO_FN(LCDD12),
+ GPIO_FN(LCDD11),
+ GPIO_FN(LCDD10),
+ GPIO_FN(LCDD9),
+ GPIO_FN(LCDD8),
+ GPIO_FN(LCDD7),
+ GPIO_FN(LCDD6),
+ GPIO_FN(LCDD5),
+ GPIO_FN(LCDD4),
+ GPIO_FN(LCDD3),
+ GPIO_FN(LCDD2),
+ GPIO_FN(LCDD1),
+ GPIO_FN(LCDD0),
+ GPIO_FN(LCDLCLK),
/* Main LCD */
- PINMUX_GPIO(GPIO_FN_LCDDON, LCDDON_MARK),
- PINMUX_GPIO(GPIO_FN_LCDVCPWC, LCDVCPWC_MARK),
- PINMUX_GPIO(GPIO_FN_LCDVEPWC, LCDVEPWC_MARK),
- PINMUX_GPIO(GPIO_FN_LCDVSYN, LCDVSYN_MARK),
+ GPIO_FN(LCDDON),
+ GPIO_FN(LCDVCPWC),
+ GPIO_FN(LCDVEPWC),
+ GPIO_FN(LCDVSYN),
/* Main LCD - RGB Mode */
- PINMUX_GPIO(GPIO_FN_LCDDCK, LCDDCK_MARK),
- PINMUX_GPIO(GPIO_FN_LCDHSYN, LCDHSYN_MARK),
- PINMUX_GPIO(GPIO_FN_LCDDISP, LCDDISP_MARK),
+ GPIO_FN(LCDDCK),
+ GPIO_FN(LCDHSYN),
+ GPIO_FN(LCDDISP),
/* Main LCD - SYS Mode */
- PINMUX_GPIO(GPIO_FN_LCDRS, LCDRS_MARK),
- PINMUX_GPIO(GPIO_FN_LCDCS, LCDCS_MARK),
- PINMUX_GPIO(GPIO_FN_LCDWR, LCDWR_MARK),
- PINMUX_GPIO(GPIO_FN_LCDRD, LCDRD_MARK),
+ GPIO_FN(LCDRS),
+ GPIO_FN(LCDCS),
+ GPIO_FN(LCDWR),
+ GPIO_FN(LCDRD),
/* Sub LCD - SYS Mode */
- PINMUX_GPIO(GPIO_FN_LCDDON2, LCDDON2_MARK),
- PINMUX_GPIO(GPIO_FN_LCDVCPWC2, LCDVCPWC2_MARK),
- PINMUX_GPIO(GPIO_FN_LCDVEPWC2, LCDVEPWC2_MARK),
- PINMUX_GPIO(GPIO_FN_LCDVSYN2, LCDVSYN2_MARK),
- PINMUX_GPIO(GPIO_FN_LCDCS2, LCDCS2_MARK),
+ GPIO_FN(LCDDON2),
+ GPIO_FN(LCDVCPWC2),
+ GPIO_FN(LCDVEPWC2),
+ GPIO_FN(LCDVSYN2),
+ GPIO_FN(LCDCS2),
/* BSC */
- PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
- PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
- PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
- PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
- PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
- PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
- PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK),
- PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
- PINMUX_GPIO(GPIO_FN_CS6A_CE2B, CS6A_CE2B_MARK),
+ GPIO_FN(IOIS16),
+ GPIO_FN(A25),
+ GPIO_FN(A24),
+ GPIO_FN(A23),
+ GPIO_FN(A22),
+ GPIO_FN(BS),
+ GPIO_FN(CS6B_CE1B),
+ GPIO_FN(WAIT),
+ GPIO_FN(CS6A_CE2B),
/* SBSC */
- PINMUX_GPIO(GPIO_FN_HPD63, HPD63_MARK),
- PINMUX_GPIO(GPIO_FN_HPD62, HPD62_MARK),
- PINMUX_GPIO(GPIO_FN_HPD61, HPD61_MARK),
- PINMUX_GPIO(GPIO_FN_HPD60, HPD60_MARK),
- PINMUX_GPIO(GPIO_FN_HPD59, HPD59_MARK),
- PINMUX_GPIO(GPIO_FN_HPD58, HPD58_MARK),
- PINMUX_GPIO(GPIO_FN_HPD57, HPD57_MARK),
- PINMUX_GPIO(GPIO_FN_HPD56, HPD56_MARK),
- PINMUX_GPIO(GPIO_FN_HPD55, HPD55_MARK),
- PINMUX_GPIO(GPIO_FN_HPD54, HPD54_MARK),
- PINMUX_GPIO(GPIO_FN_HPD53, HPD53_MARK),
- PINMUX_GPIO(GPIO_FN_HPD52, HPD52_MARK),
- PINMUX_GPIO(GPIO_FN_HPD51, HPD51_MARK),
- PINMUX_GPIO(GPIO_FN_HPD50, HPD50_MARK),
- PINMUX_GPIO(GPIO_FN_HPD49, HPD49_MARK),
- PINMUX_GPIO(GPIO_FN_HPD48, HPD48_MARK),
- PINMUX_GPIO(GPIO_FN_HPDQM7, HPDQM7_MARK),
- PINMUX_GPIO(GPIO_FN_HPDQM6, HPDQM6_MARK),
- PINMUX_GPIO(GPIO_FN_HPDQM5, HPDQM5_MARK),
- PINMUX_GPIO(GPIO_FN_HPDQM4, HPDQM4_MARK),
+ GPIO_FN(HPD63),
+ GPIO_FN(HPD62),
+ GPIO_FN(HPD61),
+ GPIO_FN(HPD60),
+ GPIO_FN(HPD59),
+ GPIO_FN(HPD58),
+ GPIO_FN(HPD57),
+ GPIO_FN(HPD56),
+ GPIO_FN(HPD55),
+ GPIO_FN(HPD54),
+ GPIO_FN(HPD53),
+ GPIO_FN(HPD52),
+ GPIO_FN(HPD51),
+ GPIO_FN(HPD50),
+ GPIO_FN(HPD49),
+ GPIO_FN(HPD48),
+ GPIO_FN(HPDQM7),
+ GPIO_FN(HPDQM6),
+ GPIO_FN(HPDQM5),
+ GPIO_FN(HPDQM4),
/* IRQ */
- PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ6, IRQ6_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ7, IRQ7_MARK),
+ GPIO_FN(IRQ0),
+ GPIO_FN(IRQ1),
+ GPIO_FN(IRQ2),
+ GPIO_FN(IRQ3),
+ GPIO_FN(IRQ4),
+ GPIO_FN(IRQ5),
+ GPIO_FN(IRQ6),
+ GPIO_FN(IRQ7),
/* SDHI */
- PINMUX_GPIO(GPIO_FN_SDHICD, SDHICD_MARK),
- PINMUX_GPIO(GPIO_FN_SDHIWP, SDHIWP_MARK),
- PINMUX_GPIO(GPIO_FN_SDHID3, SDHID3_MARK),
- PINMUX_GPIO(GPIO_FN_SDHID2, SDHID2_MARK),
- PINMUX_GPIO(GPIO_FN_SDHID1, SDHID1_MARK),
- PINMUX_GPIO(GPIO_FN_SDHID0, SDHID0_MARK),
- PINMUX_GPIO(GPIO_FN_SDHICMD, SDHICMD_MARK),
- PINMUX_GPIO(GPIO_FN_SDHICLK, SDHICLK_MARK),
+ GPIO_FN(SDHICD),
+ GPIO_FN(SDHIWP),
+ GPIO_FN(SDHID3),
+ GPIO_FN(SDHID2),
+ GPIO_FN(SDHID1),
+ GPIO_FN(SDHID0),
+ GPIO_FN(SDHICMD),
+ GPIO_FN(SDHICLK),
/* SIU - Port A */
- PINMUX_GPIO(GPIO_FN_SIUAOLR, SIUAOLR_MARK),
- PINMUX_GPIO(GPIO_FN_SIUAOBT, SIUAOBT_MARK),
- PINMUX_GPIO(GPIO_FN_SIUAISLD, SIUAISLD_MARK),
- PINMUX_GPIO(GPIO_FN_SIUAILR, SIUAILR_MARK),
- PINMUX_GPIO(GPIO_FN_SIUAIBT, SIUAIBT_MARK),
- PINMUX_GPIO(GPIO_FN_SIUAOSLD, SIUAOSLD_MARK),
- PINMUX_GPIO(GPIO_FN_SIUMCKA, SIUMCKA_MARK),
- PINMUX_GPIO(GPIO_FN_SIUFCKA, SIUFCKA_MARK),
+ GPIO_FN(SIUAOLR),
+ GPIO_FN(SIUAOBT),
+ GPIO_FN(SIUAISLD),
+ GPIO_FN(SIUAILR),
+ GPIO_FN(SIUAIBT),
+ GPIO_FN(SIUAOSLD),
+ GPIO_FN(SIUMCKA),
+ GPIO_FN(SIUFCKA),
/* SIU - Port B */
- PINMUX_GPIO(GPIO_FN_SIUBOLR, SIUBOLR_MARK),
- PINMUX_GPIO(GPIO_FN_SIUBOBT, SIUBOBT_MARK),
- PINMUX_GPIO(GPIO_FN_SIUBISLD, SIUBISLD_MARK),
- PINMUX_GPIO(GPIO_FN_SIUBILR, SIUBILR_MARK),
- PINMUX_GPIO(GPIO_FN_SIUBIBT, SIUBIBT_MARK),
- PINMUX_GPIO(GPIO_FN_SIUBOSLD, SIUBOSLD_MARK),
- PINMUX_GPIO(GPIO_FN_SIUMCKB, SIUMCKB_MARK),
- PINMUX_GPIO(GPIO_FN_SIUFCKB, SIUFCKB_MARK),
+ GPIO_FN(SIUBOLR),
+ GPIO_FN(SIUBOBT),
+ GPIO_FN(SIUBISLD),
+ GPIO_FN(SIUBILR),
+ GPIO_FN(SIUBIBT),
+ GPIO_FN(SIUBOSLD),
+ GPIO_FN(SIUMCKB),
+ GPIO_FN(SIUFCKB),
/* AUD */
- PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
- PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
- PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
- PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
+ GPIO_FN(AUDSYNC),
+ GPIO_FN(AUDATA3),
+ GPIO_FN(AUDATA2),
+ GPIO_FN(AUDATA1),
+ GPIO_FN(AUDATA0),
/* DMAC */
- PINMUX_GPIO(GPIO_FN_DACK, DACK_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+ GPIO_FN(DACK),
+ GPIO_FN(DREQ0),
/* VOU */
- PINMUX_GPIO(GPIO_FN_DV_CLKI, DV_CLKI_MARK),
- PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D15, DV_D15_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D14, DV_D14_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D13, DV_D13_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D12, DV_D12_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D11, DV_D11_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D10, DV_D10_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D9, DV_D9_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D8, DV_D8_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D7, DV_D7_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D6, DV_D6_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D5, DV_D5_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D4, DV_D4_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D3, DV_D3_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D2, DV_D2_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D1, DV_D1_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D0, DV_D0_MARK),
+ GPIO_FN(DV_CLKI),
+ GPIO_FN(DV_CLK),
+ GPIO_FN(DV_HSYNC),
+ GPIO_FN(DV_VSYNC),
+ GPIO_FN(DV_D15),
+ GPIO_FN(DV_D14),
+ GPIO_FN(DV_D13),
+ GPIO_FN(DV_D12),
+ GPIO_FN(DV_D11),
+ GPIO_FN(DV_D10),
+ GPIO_FN(DV_D9),
+ GPIO_FN(DV_D8),
+ GPIO_FN(DV_D7),
+ GPIO_FN(DV_D6),
+ GPIO_FN(DV_D5),
+ GPIO_FN(DV_D4),
+ GPIO_FN(DV_D3),
+ GPIO_FN(DV_D2),
+ GPIO_FN(DV_D1),
+ GPIO_FN(DV_D0),
/* CPG */
- PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
- PINMUX_GPIO(GPIO_FN_PDSTATUS, PDSTATUS_MARK),
+ GPIO_FN(STATUS0),
+ GPIO_FN(PDSTATUS),
/* SIOF0 */
- PINMUX_GPIO(GPIO_FN_SIOF0_MCK, SIOF0_MCK_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF0_SCK, SIOF0_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF0_SYNC, SIOF0_SYNC_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF0_SS1, SIOF0_SS1_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF0_SS2, SIOF0_SS2_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF0_TXD, SIOF0_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF0_RXD, SIOF0_RXD_MARK),
+ GPIO_FN(SIOF0_MCK),
+ GPIO_FN(SIOF0_SCK),
+ GPIO_FN(SIOF0_SYNC),
+ GPIO_FN(SIOF0_SS1),
+ GPIO_FN(SIOF0_SS2),
+ GPIO_FN(SIOF0_TXD),
+ GPIO_FN(SIOF0_RXD),
/* SIOF1 */
- PINMUX_GPIO(GPIO_FN_SIOF1_MCK, SIOF1_MCK_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF1_SCK, SIOF1_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF1_SYNC, SIOF1_SYNC_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF1_SS1, SIOF1_SS1_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF1_SS2, SIOF1_SS2_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF1_TXD, SIOF1_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF1_RXD, SIOF1_RXD_MARK),
+ GPIO_FN(SIOF1_MCK),
+ GPIO_FN(SIOF1_SCK),
+ GPIO_FN(SIOF1_SYNC),
+ GPIO_FN(SIOF1_SS1),
+ GPIO_FN(SIOF1_SS2),
+ GPIO_FN(SIOF1_TXD),
+ GPIO_FN(SIOF1_RXD),
/* SIM */
- PINMUX_GPIO(GPIO_FN_SIM_D, SIM_D_MARK),
- PINMUX_GPIO(GPIO_FN_SIM_CLK, SIM_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_SIM_RST, SIM_RST_MARK),
+ GPIO_FN(SIM_D),
+ GPIO_FN(SIM_CLK),
+ GPIO_FN(SIM_RST),
/* TSIF */
- PINMUX_GPIO(GPIO_FN_TS_SDAT, TS_SDAT_MARK),
- PINMUX_GPIO(GPIO_FN_TS_SCK, TS_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_TS_SDEN, TS_SDEN_MARK),
- PINMUX_GPIO(GPIO_FN_TS_SPSYNC, TS_SPSYNC_MARK),
+ GPIO_FN(TS_SDAT),
+ GPIO_FN(TS_SCK),
+ GPIO_FN(TS_SDEN),
+ GPIO_FN(TS_SPSYNC),
/* IRDA */
- PINMUX_GPIO(GPIO_FN_IRDA_IN, IRDA_IN_MARK),
- PINMUX_GPIO(GPIO_FN_IRDA_OUT, IRDA_OUT_MARK),
+ GPIO_FN(IRDA_IN),
+ GPIO_FN(IRDA_OUT),
/* TPU */
- PINMUX_GPIO(GPIO_FN_TPUTO, TPUTO_MARK),
+ GPIO_FN(TPUTO),
/* FLCTL */
- PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
- PINMUX_GPIO(GPIO_FN_NAF7, NAF7_MARK),
- PINMUX_GPIO(GPIO_FN_NAF6, NAF6_MARK),
- PINMUX_GPIO(GPIO_FN_NAF5, NAF5_MARK),
- PINMUX_GPIO(GPIO_FN_NAF4, NAF4_MARK),
- PINMUX_GPIO(GPIO_FN_NAF3, NAF3_MARK),
- PINMUX_GPIO(GPIO_FN_NAF2, NAF2_MARK),
- PINMUX_GPIO(GPIO_FN_NAF1, NAF1_MARK),
- PINMUX_GPIO(GPIO_FN_NAF0, NAF0_MARK),
- PINMUX_GPIO(GPIO_FN_FCDE, FCDE_MARK),
- PINMUX_GPIO(GPIO_FN_FOE, FOE_MARK),
- PINMUX_GPIO(GPIO_FN_FSC, FSC_MARK),
- PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK),
- PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+ GPIO_FN(FCE),
+ GPIO_FN(NAF7),
+ GPIO_FN(NAF6),
+ GPIO_FN(NAF5),
+ GPIO_FN(NAF4),
+ GPIO_FN(NAF3),
+ GPIO_FN(NAF2),
+ GPIO_FN(NAF1),
+ GPIO_FN(NAF0),
+ GPIO_FN(FCDE),
+ GPIO_FN(FOE),
+ GPIO_FN(FSC),
+ GPIO_FN(FWE),
+ GPIO_FN(FRB),
/* KEYSC */
- PINMUX_GPIO(GPIO_FN_KEYIN0, KEYIN0_MARK),
- PINMUX_GPIO(GPIO_FN_KEYIN1, KEYIN1_MARK),
- PINMUX_GPIO(GPIO_FN_KEYIN2, KEYIN2_MARK),
- PINMUX_GPIO(GPIO_FN_KEYIN3, KEYIN3_MARK),
- PINMUX_GPIO(GPIO_FN_KEYIN4, KEYIN4_MARK),
- PINMUX_GPIO(GPIO_FN_KEYOUT0, KEYOUT0_MARK),
- PINMUX_GPIO(GPIO_FN_KEYOUT1, KEYOUT1_MARK),
- PINMUX_GPIO(GPIO_FN_KEYOUT2, KEYOUT2_MARK),
- PINMUX_GPIO(GPIO_FN_KEYOUT3, KEYOUT3_MARK),
- PINMUX_GPIO(GPIO_FN_KEYOUT4_IN6, KEYOUT4_IN6_MARK),
- PINMUX_GPIO(GPIO_FN_KEYOUT5_IN5, KEYOUT5_IN5_MARK),
+ GPIO_FN(KEYIN0),
+ GPIO_FN(KEYIN1),
+ GPIO_FN(KEYIN2),
+ GPIO_FN(KEYIN3),
+ GPIO_FN(KEYIN4),
+ GPIO_FN(KEYOUT0),
+ GPIO_FN(KEYOUT1),
+ GPIO_FN(KEYOUT2),
+ GPIO_FN(KEYOUT3),
+ GPIO_FN(KEYOUT4_IN6),
+ GPIO_FN(KEYOUT5_IN5),
};
-static struct pinmux_cfg_reg pinmux_config_regs[] = {
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
VIO_D7_SCIF1_SCK, PTA7_OUT, PTA7_IN_PD, PTA7_IN,
VIO_D6_SCIF1_RXD, 0, PTA6_IN_PD, PTA6_IN,
@@ -1660,7 +1664,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
{}
};
-static struct pinmux_data_reg pinmux_data_regs[] = {
+static const struct pinmux_data_reg pinmux_data_regs[] = {
{ PINMUX_DATA_REG("PADR", 0xa4050120, 8) {
PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
@@ -1756,21 +1760,19 @@ static struct pinmux_data_reg pinmux_data_regs[] = {
{ },
};
-struct sh_pfc_soc_info sh7722_pinmux_info = {
+const struct sh_pfc_soc_info sh7722_pinmux_info = {
.name = "sh7722_pfc",
- .reserved_id = PINMUX_RESERVED,
- .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
.input_pd = { PINMUX_INPUT_PULLDOWN_BEGIN, PINMUX_INPUT_PULLDOWN_END },
.input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
- .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
- .first_gpio = GPIO_PTA7,
- .last_gpio = GPIO_FN_KEYOUT5_IN5,
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .func_gpios = pinmux_func_gpios,
+ .nr_func_gpios = ARRAY_SIZE(pinmux_func_gpios),
- .gpios = pinmux_gpios,
.cfg_regs = pinmux_config_regs,
.data_regs = pinmux_data_regs,
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7723.c b/drivers/pinctrl/sh-pfc/pfc-sh7723.c
index 609673d..07ad1d8 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7723.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7723.c
@@ -350,7 +350,7 @@ enum {
PINMUX_MARK_END,
};
-static pinmux_enum_t pinmux_data[] = {
+static const pinmux_enum_t pinmux_data[] = {
/* PTA GPIO */
PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT),
PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT),
@@ -923,7 +923,7 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(SIUBISLD_MARK, PSD1_PSD0_FN2, PTZ0_FN),
};
-static struct pinmux_gpio pinmux_gpios[] = {
+static struct sh_pfc_pin pinmux_pins[] = {
/* PTA */
PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
@@ -1139,379 +1139,383 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA),
+};
+
+#define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins)
+static const struct pinmux_func pinmux_func_gpios[] = {
/* SCIF0 */
- PINMUX_GPIO(GPIO_FN_SCIF0_PTT_TXD, SCIF0_PTT_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_PTT_RXD, SCIF0_PTT_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_PTT_SCK, SCIF0_PTT_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_PTU_TXD, SCIF0_PTU_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_PTU_RXD, SCIF0_PTU_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_PTU_SCK, SCIF0_PTU_SCK_MARK),
+ GPIO_FN(SCIF0_PTT_TXD),
+ GPIO_FN(SCIF0_PTT_RXD),
+ GPIO_FN(SCIF0_PTT_SCK),
+ GPIO_FN(SCIF0_PTU_TXD),
+ GPIO_FN(SCIF0_PTU_RXD),
+ GPIO_FN(SCIF0_PTU_SCK),
/* SCIF1 */
- PINMUX_GPIO(GPIO_FN_SCIF1_PTS_TXD, SCIF1_PTS_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF1_PTS_RXD, SCIF1_PTS_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF1_PTS_SCK, SCIF1_PTS_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF1_PTV_TXD, SCIF1_PTV_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF1_PTV_RXD, SCIF1_PTV_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF1_PTV_SCK, SCIF1_PTV_SCK_MARK),
+ GPIO_FN(SCIF1_PTS_TXD),
+ GPIO_FN(SCIF1_PTS_RXD),
+ GPIO_FN(SCIF1_PTS_SCK),
+ GPIO_FN(SCIF1_PTV_TXD),
+ GPIO_FN(SCIF1_PTV_RXD),
+ GPIO_FN(SCIF1_PTV_SCK),
/* SCIF2 */
- PINMUX_GPIO(GPIO_FN_SCIF2_PTT_TXD, SCIF2_PTT_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF2_PTT_RXD, SCIF2_PTT_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF2_PTT_SCK, SCIF2_PTT_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF2_PTU_TXD, SCIF2_PTU_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF2_PTU_RXD, SCIF2_PTU_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF2_PTU_SCK, SCIF2_PTU_SCK_MARK),
+ GPIO_FN(SCIF2_PTT_TXD),
+ GPIO_FN(SCIF2_PTT_RXD),
+ GPIO_FN(SCIF2_PTT_SCK),
+ GPIO_FN(SCIF2_PTU_TXD),
+ GPIO_FN(SCIF2_PTU_RXD),
+ GPIO_FN(SCIF2_PTU_SCK),
/* SCIF3 */
- PINMUX_GPIO(GPIO_FN_SCIF3_PTS_TXD, SCIF3_PTS_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF3_PTS_RXD, SCIF3_PTS_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF3_PTS_SCK, SCIF3_PTS_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF3_PTS_RTS, SCIF3_PTS_RTS_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF3_PTS_CTS, SCIF3_PTS_CTS_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF3_PTV_TXD, SCIF3_PTV_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF3_PTV_RXD, SCIF3_PTV_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF3_PTV_SCK, SCIF3_PTV_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF3_PTV_RTS, SCIF3_PTV_RTS_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF3_PTV_CTS, SCIF3_PTV_CTS_MARK),
+ GPIO_FN(SCIF3_PTS_TXD),
+ GPIO_FN(SCIF3_PTS_RXD),
+ GPIO_FN(SCIF3_PTS_SCK),
+ GPIO_FN(SCIF3_PTS_RTS),
+ GPIO_FN(SCIF3_PTS_CTS),
+ GPIO_FN(SCIF3_PTV_TXD),
+ GPIO_FN(SCIF3_PTV_RXD),
+ GPIO_FN(SCIF3_PTV_SCK),
+ GPIO_FN(SCIF3_PTV_RTS),
+ GPIO_FN(SCIF3_PTV_CTS),
/* SCIF4 */
- PINMUX_GPIO(GPIO_FN_SCIF4_PTE_TXD, SCIF4_PTE_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF4_PTE_RXD, SCIF4_PTE_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF4_PTE_SCK, SCIF4_PTE_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF4_PTN_TXD, SCIF4_PTN_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF4_PTN_RXD, SCIF4_PTN_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF4_PTN_SCK, SCIF4_PTN_SCK_MARK),
+ GPIO_FN(SCIF4_PTE_TXD),
+ GPIO_FN(SCIF4_PTE_RXD),
+ GPIO_FN(SCIF4_PTE_SCK),
+ GPIO_FN(SCIF4_PTN_TXD),
+ GPIO_FN(SCIF4_PTN_RXD),
+ GPIO_FN(SCIF4_PTN_SCK),
/* SCIF5 */
- PINMUX_GPIO(GPIO_FN_SCIF5_PTE_TXD, SCIF5_PTE_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF5_PTE_RXD, SCIF5_PTE_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF5_PTE_SCK, SCIF5_PTE_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF5_PTN_TXD, SCIF5_PTN_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF5_PTN_RXD, SCIF5_PTN_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF5_PTN_SCK, SCIF5_PTN_SCK_MARK),
+ GPIO_FN(SCIF5_PTE_TXD),
+ GPIO_FN(SCIF5_PTE_RXD),
+ GPIO_FN(SCIF5_PTE_SCK),
+ GPIO_FN(SCIF5_PTN_TXD),
+ GPIO_FN(SCIF5_PTN_RXD),
+ GPIO_FN(SCIF5_PTN_SCK),
/* CEU */
- PINMUX_GPIO(GPIO_FN_VIO_D15, VIO_D15_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D14, VIO_D14_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D13, VIO_D13_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D12, VIO_D12_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D11, VIO_D11_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D10, VIO_D10_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D9, VIO_D9_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D8, VIO_D8_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D7, VIO_D7_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D6, VIO_D6_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D5, VIO_D5_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D4, VIO_D4_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D3, VIO_D3_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D2, VIO_D2_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D1, VIO_D1_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_D0, VIO_D0_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_CLK1, VIO_CLK1_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_VD1, VIO_VD1_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_HD1, VIO_HD1_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_FLD, VIO_FLD_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_CKO, VIO_CKO_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_VD2, VIO_VD2_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_HD2, VIO_HD2_MARK),
- PINMUX_GPIO(GPIO_FN_VIO_CLK2, VIO_CLK2_MARK),
+ GPIO_FN(VIO_D15),
+ GPIO_FN(VIO_D14),
+ GPIO_FN(VIO_D13),
+ GPIO_FN(VIO_D12),
+ GPIO_FN(VIO_D11),
+ GPIO_FN(VIO_D10),
+ GPIO_FN(VIO_D9),
+ GPIO_FN(VIO_D8),
+ GPIO_FN(VIO_D7),
+ GPIO_FN(VIO_D6),
+ GPIO_FN(VIO_D5),
+ GPIO_FN(VIO_D4),
+ GPIO_FN(VIO_D3),
+ GPIO_FN(VIO_D2),
+ GPIO_FN(VIO_D1),
+ GPIO_FN(VIO_D0),
+ GPIO_FN(VIO_CLK1),
+ GPIO_FN(VIO_VD1),
+ GPIO_FN(VIO_HD1),
+ GPIO_FN(VIO_FLD),
+ GPIO_FN(VIO_CKO),
+ GPIO_FN(VIO_VD2),
+ GPIO_FN(VIO_HD2),
+ GPIO_FN(VIO_CLK2),
/* LCDC */
- PINMUX_GPIO(GPIO_FN_LCDD23, LCDD23_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD22, LCDD22_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD21, LCDD21_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD20, LCDD20_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD19, LCDD19_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD18, LCDD18_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD17, LCDD17_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD16, LCDD16_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD15, LCDD15_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD14, LCDD14_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD13, LCDD13_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD12, LCDD12_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD11, LCDD11_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD10, LCDD10_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD9, LCDD9_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD8, LCDD8_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD7, LCDD7_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD6, LCDD6_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD5, LCDD5_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD4, LCDD4_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD3, LCDD3_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD2, LCDD2_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD1, LCDD1_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD0, LCDD0_MARK),
- PINMUX_GPIO(GPIO_FN_LCDLCLK_PTR, LCDLCLK_PTR_MARK),
- PINMUX_GPIO(GPIO_FN_LCDLCLK_PTW, LCDLCLK_PTW_MARK),
+ GPIO_FN(LCDD23),
+ GPIO_FN(LCDD22),
+ GPIO_FN(LCDD21),
+ GPIO_FN(LCDD20),
+ GPIO_FN(LCDD19),
+ GPIO_FN(LCDD18),
+ GPIO_FN(LCDD17),
+ GPIO_FN(LCDD16),
+ GPIO_FN(LCDD15),
+ GPIO_FN(LCDD14),
+ GPIO_FN(LCDD13),
+ GPIO_FN(LCDD12),
+ GPIO_FN(LCDD11),
+ GPIO_FN(LCDD10),
+ GPIO_FN(LCDD9),
+ GPIO_FN(LCDD8),
+ GPIO_FN(LCDD7),
+ GPIO_FN(LCDD6),
+ GPIO_FN(LCDD5),
+ GPIO_FN(LCDD4),
+ GPIO_FN(LCDD3),
+ GPIO_FN(LCDD2),
+ GPIO_FN(LCDD1),
+ GPIO_FN(LCDD0),
+ GPIO_FN(LCDLCLK_PTR),
+ GPIO_FN(LCDLCLK_PTW),
/* Main LCD */
- PINMUX_GPIO(GPIO_FN_LCDDON, LCDDON_MARK),
- PINMUX_GPIO(GPIO_FN_LCDVCPWC, LCDVCPWC_MARK),
- PINMUX_GPIO(GPIO_FN_LCDVEPWC, LCDVEPWC_MARK),
- PINMUX_GPIO(GPIO_FN_LCDVSYN, LCDVSYN_MARK),
+ GPIO_FN(LCDDON),
+ GPIO_FN(LCDVCPWC),
+ GPIO_FN(LCDVEPWC),
+ GPIO_FN(LCDVSYN),
/* Main LCD - RGB Mode */
- PINMUX_GPIO(GPIO_FN_LCDDCK, LCDDCK_MARK),
- PINMUX_GPIO(GPIO_FN_LCDHSYN, LCDHSYN_MARK),
- PINMUX_GPIO(GPIO_FN_LCDDISP, LCDDISP_MARK),
+ GPIO_FN(LCDDCK),
+ GPIO_FN(LCDHSYN),
+ GPIO_FN(LCDDISP),
/* Main LCD - SYS Mode */
- PINMUX_GPIO(GPIO_FN_LCDRS, LCDRS_MARK),
- PINMUX_GPIO(GPIO_FN_LCDCS, LCDCS_MARK),
- PINMUX_GPIO(GPIO_FN_LCDWR, LCDWR_MARK),
- PINMUX_GPIO(GPIO_FN_LCDRD, LCDRD_MARK),
+ GPIO_FN(LCDRS),
+ GPIO_FN(LCDCS),
+ GPIO_FN(LCDWR),
+ GPIO_FN(LCDRD),
/* IRQ */
- PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ6, IRQ6_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ7, IRQ7_MARK),
+ GPIO_FN(IRQ0),
+ GPIO_FN(IRQ1),
+ GPIO_FN(IRQ2),
+ GPIO_FN(IRQ3),
+ GPIO_FN(IRQ4),
+ GPIO_FN(IRQ5),
+ GPIO_FN(IRQ6),
+ GPIO_FN(IRQ7),
/* AUD */
- PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK),
- PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
- PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
- PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
- PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
+ GPIO_FN(AUDCK),
+ GPIO_FN(AUDSYNC),
+ GPIO_FN(AUDATA3),
+ GPIO_FN(AUDATA2),
+ GPIO_FN(AUDATA1),
+ GPIO_FN(AUDATA0),
/* SDHI0 (PTD) */
- PINMUX_GPIO(GPIO_FN_SDHI0CD_PTD, SDHI0CD_PTD_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI0WP_PTD, SDHI0WP_PTD_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI0D3_PTD, SDHI0D3_PTD_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI0D2_PTD, SDHI0D2_PTD_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI0D1_PTD, SDHI0D1_PTD_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI0D0_PTD, SDHI0D0_PTD_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI0CMD_PTD, SDHI0CMD_PTD_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI0CLK_PTD, SDHI0CLK_PTD_MARK),
+ GPIO_FN(SDHI0CD_PTD),
+ GPIO_FN(SDHI0WP_PTD),
+ GPIO_FN(SDHI0D3_PTD),
+ GPIO_FN(SDHI0D2_PTD),
+ GPIO_FN(SDHI0D1_PTD),
+ GPIO_FN(SDHI0D0_PTD),
+ GPIO_FN(SDHI0CMD_PTD),
+ GPIO_FN(SDHI0CLK_PTD),
/* SDHI0 (PTS) */
- PINMUX_GPIO(GPIO_FN_SDHI0CD_PTS, SDHI0CD_PTS_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI0WP_PTS, SDHI0WP_PTS_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI0D3_PTS, SDHI0D3_PTS_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI0D2_PTS, SDHI0D2_PTS_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI0D1_PTS, SDHI0D1_PTS_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI0D0_PTS, SDHI0D0_PTS_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI0CMD_PTS, SDHI0CMD_PTS_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI0CLK_PTS, SDHI0CLK_PTS_MARK),
+ GPIO_FN(SDHI0CD_PTS),
+ GPIO_FN(SDHI0WP_PTS),
+ GPIO_FN(SDHI0D3_PTS),
+ GPIO_FN(SDHI0D2_PTS),
+ GPIO_FN(SDHI0D1_PTS),
+ GPIO_FN(SDHI0D0_PTS),
+ GPIO_FN(SDHI0CMD_PTS),
+ GPIO_FN(SDHI0CLK_PTS),
/* SDHI1 */
- PINMUX_GPIO(GPIO_FN_SDHI1CD, SDHI1CD_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI1WP, SDHI1WP_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI1D3, SDHI1D3_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI1D2, SDHI1D2_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI1D1, SDHI1D1_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI1D0, SDHI1D0_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI1CMD, SDHI1CMD_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI1CLK, SDHI1CLK_MARK),
+ GPIO_FN(SDHI1CD),
+ GPIO_FN(SDHI1WP),
+ GPIO_FN(SDHI1D3),
+ GPIO_FN(SDHI1D2),
+ GPIO_FN(SDHI1D1),
+ GPIO_FN(SDHI1D0),
+ GPIO_FN(SDHI1CMD),
+ GPIO_FN(SDHI1CLK),
/* SIUA */
- PINMUX_GPIO(GPIO_FN_SIUAFCK, SIUAFCK_MARK),
- PINMUX_GPIO(GPIO_FN_SIUAILR, SIUAILR_MARK),
- PINMUX_GPIO(GPIO_FN_SIUAIBT, SIUAIBT_MARK),
- PINMUX_GPIO(GPIO_FN_SIUAISLD, SIUAISLD_MARK),
- PINMUX_GPIO(GPIO_FN_SIUAOLR, SIUAOLR_MARK),
- PINMUX_GPIO(GPIO_FN_SIUAOBT, SIUAOBT_MARK),
- PINMUX_GPIO(GPIO_FN_SIUAOSLD, SIUAOSLD_MARK),
- PINMUX_GPIO(GPIO_FN_SIUAMCK, SIUAMCK_MARK),
- PINMUX_GPIO(GPIO_FN_SIUAISPD, SIUAISPD_MARK),
- PINMUX_GPIO(GPIO_FN_SIUAOSPD, SIUAOSPD_MARK),
+ GPIO_FN(SIUAFCK),
+ GPIO_FN(SIUAILR),
+ GPIO_FN(SIUAIBT),
+ GPIO_FN(SIUAISLD),
+ GPIO_FN(SIUAOLR),
+ GPIO_FN(SIUAOBT),
+ GPIO_FN(SIUAOSLD),
+ GPIO_FN(SIUAMCK),
+ GPIO_FN(SIUAISPD),
+ GPIO_FN(SIUAOSPD),
/* SIUB */
- PINMUX_GPIO(GPIO_FN_SIUBFCK, SIUBFCK_MARK),
- PINMUX_GPIO(GPIO_FN_SIUBILR, SIUBILR_MARK),
- PINMUX_GPIO(GPIO_FN_SIUBIBT, SIUBIBT_MARK),
- PINMUX_GPIO(GPIO_FN_SIUBISLD, SIUBISLD_MARK),
- PINMUX_GPIO(GPIO_FN_SIUBOLR, SIUBOLR_MARK),
- PINMUX_GPIO(GPIO_FN_SIUBOBT, SIUBOBT_MARK),
- PINMUX_GPIO(GPIO_FN_SIUBOSLD, SIUBOSLD_MARK),
- PINMUX_GPIO(GPIO_FN_SIUBMCK, SIUBMCK_MARK),
+ GPIO_FN(SIUBFCK),
+ GPIO_FN(SIUBILR),
+ GPIO_FN(SIUBIBT),
+ GPIO_FN(SIUBISLD),
+ GPIO_FN(SIUBOLR),
+ GPIO_FN(SIUBOBT),
+ GPIO_FN(SIUBOSLD),
+ GPIO_FN(SIUBMCK),
/* IRDA */
- PINMUX_GPIO(GPIO_FN_IRDA_IN, IRDA_IN_MARK),
- PINMUX_GPIO(GPIO_FN_IRDA_OUT, IRDA_OUT_MARK),
+ GPIO_FN(IRDA_IN),
+ GPIO_FN(IRDA_OUT),
/* VOU */
- PINMUX_GPIO(GPIO_FN_DV_CLKI, DV_CLKI_MARK),
- PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D15, DV_D15_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D14, DV_D14_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D13, DV_D13_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D12, DV_D12_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D11, DV_D11_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D10, DV_D10_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D9, DV_D9_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D8, DV_D8_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D7, DV_D7_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D6, DV_D6_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D5, DV_D5_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D4, DV_D4_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D3, DV_D3_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D2, DV_D2_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D1, DV_D1_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D0, DV_D0_MARK),
+ GPIO_FN(DV_CLKI),
+ GPIO_FN(DV_CLK),
+ GPIO_FN(DV_HSYNC),
+ GPIO_FN(DV_VSYNC),
+ GPIO_FN(DV_D15),
+ GPIO_FN(DV_D14),
+ GPIO_FN(DV_D13),
+ GPIO_FN(DV_D12),
+ GPIO_FN(DV_D11),
+ GPIO_FN(DV_D10),
+ GPIO_FN(DV_D9),
+ GPIO_FN(DV_D8),
+ GPIO_FN(DV_D7),
+ GPIO_FN(DV_D6),
+ GPIO_FN(DV_D5),
+ GPIO_FN(DV_D4),
+ GPIO_FN(DV_D3),
+ GPIO_FN(DV_D2),
+ GPIO_FN(DV_D1),
+ GPIO_FN(DV_D0),
/* KEYSC */
- PINMUX_GPIO(GPIO_FN_KEYIN0, KEYIN0_MARK),
- PINMUX_GPIO(GPIO_FN_KEYIN1, KEYIN1_MARK),
- PINMUX_GPIO(GPIO_FN_KEYIN2, KEYIN2_MARK),
- PINMUX_GPIO(GPIO_FN_KEYIN3, KEYIN3_MARK),
- PINMUX_GPIO(GPIO_FN_KEYIN4, KEYIN4_MARK),
- PINMUX_GPIO(GPIO_FN_KEYOUT0, KEYOUT0_MARK),
- PINMUX_GPIO(GPIO_FN_KEYOUT1, KEYOUT1_MARK),
- PINMUX_GPIO(GPIO_FN_KEYOUT2, KEYOUT2_MARK),
- PINMUX_GPIO(GPIO_FN_KEYOUT3, KEYOUT3_MARK),
- PINMUX_GPIO(GPIO_FN_KEYOUT4_IN6, KEYOUT4_IN6_MARK),
- PINMUX_GPIO(GPIO_FN_KEYOUT5_IN5, KEYOUT5_IN5_MARK),
+ GPIO_FN(KEYIN0),
+ GPIO_FN(KEYIN1),
+ GPIO_FN(KEYIN2),
+ GPIO_FN(KEYIN3),
+ GPIO_FN(KEYIN4),
+ GPIO_FN(KEYOUT0),
+ GPIO_FN(KEYOUT1),
+ GPIO_FN(KEYOUT2),
+ GPIO_FN(KEYOUT3),
+ GPIO_FN(KEYOUT4_IN6),
+ GPIO_FN(KEYOUT5_IN5),
/* MSIOF0 (PTF) */
- PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_TXD, MSIOF0_PTF_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_RXD, MSIOF0_PTF_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_MCK, MSIOF0_PTF_MCK_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_TSYNC, MSIOF0_PTF_TSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_TSCK, MSIOF0_PTF_TSCK_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_RSYNC, MSIOF0_PTF_RSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_RSCK, MSIOF0_PTF_RSCK_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_SS1, MSIOF0_PTF_SS1_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_SS2, MSIOF0_PTF_SS2_MARK),
+ GPIO_FN(MSIOF0_PTF_TXD),
+ GPIO_FN(MSIOF0_PTF_RXD),
+ GPIO_FN(MSIOF0_PTF_MCK),
+ GPIO_FN(MSIOF0_PTF_TSYNC),
+ GPIO_FN(MSIOF0_PTF_TSCK),
+ GPIO_FN(MSIOF0_PTF_RSYNC),
+ GPIO_FN(MSIOF0_PTF_RSCK),
+ GPIO_FN(MSIOF0_PTF_SS1),
+ GPIO_FN(MSIOF0_PTF_SS2),
/* MSIOF0 (PTT+PTX) */
- PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_TXD, MSIOF0_PTT_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_RXD, MSIOF0_PTT_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF0_PTX_MCK, MSIOF0_PTX_MCK_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_TSYNC, MSIOF0_PTT_TSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_TSCK, MSIOF0_PTT_TSCK_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_RSYNC, MSIOF0_PTT_RSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_RSCK, MSIOF0_PTT_RSCK_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_SS1, MSIOF0_PTT_SS1_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_SS2, MSIOF0_PTT_SS2_MARK),
+ GPIO_FN(MSIOF0_PTT_TXD),
+ GPIO_FN(MSIOF0_PTT_RXD),
+ GPIO_FN(MSIOF0_PTX_MCK),
+ GPIO_FN(MSIOF0_PTT_TSYNC),
+ GPIO_FN(MSIOF0_PTT_TSCK),
+ GPIO_FN(MSIOF0_PTT_RSYNC),
+ GPIO_FN(MSIOF0_PTT_RSCK),
+ GPIO_FN(MSIOF0_PTT_SS1),
+ GPIO_FN(MSIOF0_PTT_SS2),
/* MSIOF1 */
- PINMUX_GPIO(GPIO_FN_MSIOF1_TXD, MSIOF1_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF1_RXD, MSIOF1_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF1_MCK, MSIOF1_MCK_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF1_TSYNC, MSIOF1_TSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF1_TSCK, MSIOF1_TSCK_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF1_RSYNC, MSIOF1_RSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF1_RSCK, MSIOF1_RSCK_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF1_SS1, MSIOF1_SS1_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF1_SS2, MSIOF1_SS2_MARK),
+ GPIO_FN(MSIOF1_TXD),
+ GPIO_FN(MSIOF1_RXD),
+ GPIO_FN(MSIOF1_MCK),
+ GPIO_FN(MSIOF1_TSYNC),
+ GPIO_FN(MSIOF1_TSCK),
+ GPIO_FN(MSIOF1_RSYNC),
+ GPIO_FN(MSIOF1_RSCK),
+ GPIO_FN(MSIOF1_SS1),
+ GPIO_FN(MSIOF1_SS2),
/* TSIF */
- PINMUX_GPIO(GPIO_FN_TS0_SDAT, TS0_SDAT_MARK),
- PINMUX_GPIO(GPIO_FN_TS0_SCK, TS0_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_TS0_SDEN, TS0_SDEN_MARK),
- PINMUX_GPIO(GPIO_FN_TS0_SPSYNC, TS0_SPSYNC_MARK),
+ GPIO_FN(TS0_SDAT),
+ GPIO_FN(TS0_SCK),
+ GPIO_FN(TS0_SDEN),
+ GPIO_FN(TS0_SPSYNC),
/* FLCTL */
- PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
- PINMUX_GPIO(GPIO_FN_NAF7, NAF7_MARK),
- PINMUX_GPIO(GPIO_FN_NAF6, NAF6_MARK),
- PINMUX_GPIO(GPIO_FN_NAF5, NAF5_MARK),
- PINMUX_GPIO(GPIO_FN_NAF4, NAF4_MARK),
- PINMUX_GPIO(GPIO_FN_NAF3, NAF3_MARK),
- PINMUX_GPIO(GPIO_FN_NAF2, NAF2_MARK),
- PINMUX_GPIO(GPIO_FN_NAF1, NAF1_MARK),
- PINMUX_GPIO(GPIO_FN_NAF0, NAF0_MARK),
- PINMUX_GPIO(GPIO_FN_FCDE, FCDE_MARK),
- PINMUX_GPIO(GPIO_FN_FOE, FOE_MARK),
- PINMUX_GPIO(GPIO_FN_FSC, FSC_MARK),
- PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK),
- PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+ GPIO_FN(FCE),
+ GPIO_FN(NAF7),
+ GPIO_FN(NAF6),
+ GPIO_FN(NAF5),
+ GPIO_FN(NAF4),
+ GPIO_FN(NAF3),
+ GPIO_FN(NAF2),
+ GPIO_FN(NAF1),
+ GPIO_FN(NAF0),
+ GPIO_FN(FCDE),
+ GPIO_FN(FOE),
+ GPIO_FN(FSC),
+ GPIO_FN(FWE),
+ GPIO_FN(FRB),
/* DMAC */
- PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
- PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+ GPIO_FN(DACK1),
+ GPIO_FN(DREQ1),
+ GPIO_FN(DACK0),
+ GPIO_FN(DREQ0),
/* ADC */
- PINMUX_GPIO(GPIO_FN_AN3, AN3_MARK),
- PINMUX_GPIO(GPIO_FN_AN2, AN2_MARK),
- PINMUX_GPIO(GPIO_FN_AN1, AN1_MARK),
- PINMUX_GPIO(GPIO_FN_AN0, AN0_MARK),
- PINMUX_GPIO(GPIO_FN_ADTRG, ADTRG_MARK),
+ GPIO_FN(AN3),
+ GPIO_FN(AN2),
+ GPIO_FN(AN1),
+ GPIO_FN(AN0),
+ GPIO_FN(ADTRG),
/* CPG */
- PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
- PINMUX_GPIO(GPIO_FN_PDSTATUS, PDSTATUS_MARK),
+ GPIO_FN(STATUS0),
+ GPIO_FN(PDSTATUS),
/* TPU */
- PINMUX_GPIO(GPIO_FN_TPUTO0, TPUTO0_MARK),
- PINMUX_GPIO(GPIO_FN_TPUTO1, TPUTO1_MARK),
- PINMUX_GPIO(GPIO_FN_TPUTO2, TPUTO2_MARK),
- PINMUX_GPIO(GPIO_FN_TPUTO3, TPUTO3_MARK),
+ GPIO_FN(TPUTO0),
+ GPIO_FN(TPUTO1),
+ GPIO_FN(TPUTO2),
+ GPIO_FN(TPUTO3),
/* BSC */
- PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
- PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
- PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
- PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
- PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
- PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
- PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
- PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
- PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
- PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
- PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
- PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
- PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
- PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
- PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
- PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
- PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
- PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
- PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
- PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
- PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
- PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
- PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
- PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK),
- PINMUX_GPIO(GPIO_FN_CS6A_CE2B, CS6A_CE2B_MARK),
- PINMUX_GPIO(GPIO_FN_CS5B_CE1A, CS5B_CE1A_MARK),
- PINMUX_GPIO(GPIO_FN_CS5A_CE2A, CS5A_CE2A_MARK),
- PINMUX_GPIO(GPIO_FN_WE3_ICIOWR, WE3_ICIOWR_MARK),
- PINMUX_GPIO(GPIO_FN_WE2_ICIORD, WE2_ICIORD_MARK),
+ GPIO_FN(D31),
+ GPIO_FN(D30),
+ GPIO_FN(D29),
+ GPIO_FN(D28),
+ GPIO_FN(D27),
+ GPIO_FN(D26),
+ GPIO_FN(D25),
+ GPIO_FN(D24),
+ GPIO_FN(D23),
+ GPIO_FN(D22),
+ GPIO_FN(D21),
+ GPIO_FN(D20),
+ GPIO_FN(D19),
+ GPIO_FN(D18),
+ GPIO_FN(D17),
+ GPIO_FN(D16),
+ GPIO_FN(IOIS16),
+ GPIO_FN(WAIT),
+ GPIO_FN(BS),
+ GPIO_FN(A25),
+ GPIO_FN(A24),
+ GPIO_FN(A23),
+ GPIO_FN(A22),
+ GPIO_FN(CS6B_CE1B),
+ GPIO_FN(CS6A_CE2B),
+ GPIO_FN(CS5B_CE1A),
+ GPIO_FN(CS5A_CE2A),
+ GPIO_FN(WE3_ICIOWR),
+ GPIO_FN(WE2_ICIORD),
/* ATAPI */
- PINMUX_GPIO(GPIO_FN_IDED15, IDED15_MARK),
- PINMUX_GPIO(GPIO_FN_IDED14, IDED14_MARK),
- PINMUX_GPIO(GPIO_FN_IDED13, IDED13_MARK),
- PINMUX_GPIO(GPIO_FN_IDED12, IDED12_MARK),
- PINMUX_GPIO(GPIO_FN_IDED11, IDED11_MARK),
- PINMUX_GPIO(GPIO_FN_IDED10, IDED10_MARK),
- PINMUX_GPIO(GPIO_FN_IDED9, IDED9_MARK),
- PINMUX_GPIO(GPIO_FN_IDED8, IDED8_MARK),
- PINMUX_GPIO(GPIO_FN_IDED7, IDED7_MARK),
- PINMUX_GPIO(GPIO_FN_IDED6, IDED6_MARK),
- PINMUX_GPIO(GPIO_FN_IDED5, IDED5_MARK),
- PINMUX_GPIO(GPIO_FN_IDED4, IDED4_MARK),
- PINMUX_GPIO(GPIO_FN_IDED3, IDED3_MARK),
- PINMUX_GPIO(GPIO_FN_IDED2, IDED2_MARK),
- PINMUX_GPIO(GPIO_FN_IDED1, IDED1_MARK),
- PINMUX_GPIO(GPIO_FN_IDED0, IDED0_MARK),
- PINMUX_GPIO(GPIO_FN_DIRECTION, DIRECTION_MARK),
- PINMUX_GPIO(GPIO_FN_EXBUF_ENB, EXBUF_ENB_MARK),
- PINMUX_GPIO(GPIO_FN_IDERST, IDERST_MARK),
- PINMUX_GPIO(GPIO_FN_IODACK, IODACK_MARK),
- PINMUX_GPIO(GPIO_FN_IODREQ, IODREQ_MARK),
- PINMUX_GPIO(GPIO_FN_IDEIORDY, IDEIORDY_MARK),
- PINMUX_GPIO(GPIO_FN_IDEINT, IDEINT_MARK),
- PINMUX_GPIO(GPIO_FN_IDEIOWR, IDEIOWR_MARK),
- PINMUX_GPIO(GPIO_FN_IDEIORD, IDEIORD_MARK),
- PINMUX_GPIO(GPIO_FN_IDECS1, IDECS1_MARK),
- PINMUX_GPIO(GPIO_FN_IDECS0, IDECS0_MARK),
- PINMUX_GPIO(GPIO_FN_IDEA2, IDEA2_MARK),
- PINMUX_GPIO(GPIO_FN_IDEA1, IDEA1_MARK),
- PINMUX_GPIO(GPIO_FN_IDEA0, IDEA0_MARK),
+ GPIO_FN(IDED15),
+ GPIO_FN(IDED14),
+ GPIO_FN(IDED13),
+ GPIO_FN(IDED12),
+ GPIO_FN(IDED11),
+ GPIO_FN(IDED10),
+ GPIO_FN(IDED9),
+ GPIO_FN(IDED8),
+ GPIO_FN(IDED7),
+ GPIO_FN(IDED6),
+ GPIO_FN(IDED5),
+ GPIO_FN(IDED4),
+ GPIO_FN(IDED3),
+ GPIO_FN(IDED2),
+ GPIO_FN(IDED1),
+ GPIO_FN(IDED0),
+ GPIO_FN(DIRECTION),
+ GPIO_FN(EXBUF_ENB),
+ GPIO_FN(IDERST),
+ GPIO_FN(IODACK),
+ GPIO_FN(IODREQ),
+ GPIO_FN(IDEIORDY),
+ GPIO_FN(IDEINT),
+ GPIO_FN(IDEIOWR),
+ GPIO_FN(IDEIORD),
+ GPIO_FN(IDECS1),
+ GPIO_FN(IDECS0),
+ GPIO_FN(IDEA2),
+ GPIO_FN(IDEA1),
+ GPIO_FN(IDEA0),
};
-static struct pinmux_cfg_reg pinmux_config_regs[] = {
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
PTA7_FN, PTA7_OUT, 0, PTA7_IN,
PTA6_FN, PTA6_OUT, 0, PTA6_IN,
@@ -1785,7 +1789,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
{}
};
-static struct pinmux_data_reg pinmux_data_regs[] = {
+static const struct pinmux_data_reg pinmux_data_regs[] = {
{ PINMUX_DATA_REG("PADR", 0xa4050120, 8) {
PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
@@ -1881,20 +1885,18 @@ static struct pinmux_data_reg pinmux_data_regs[] = {
{ },
};
-struct sh_pfc_soc_info sh7723_pinmux_info = {
+const struct sh_pfc_soc_info sh7723_pinmux_info = {
.name = "sh7723_pfc",
- .reserved_id = PINMUX_RESERVED,
- .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
.input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
- .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
- .first_gpio = GPIO_PTA7,
- .last_gpio = GPIO_FN_IDEA0,
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .func_gpios = pinmux_func_gpios,
+ .nr_func_gpios = ARRAY_SIZE(pinmux_func_gpios),
- .gpios = pinmux_gpios,
.cfg_regs = pinmux_config_regs,
.data_regs = pinmux_data_regs,
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7724.c b/drivers/pinctrl/sh-pfc/pfc-sh7724.c
index 233fbf7..35e5516 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7724.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7724.c
@@ -572,7 +572,7 @@ enum {
PINMUX_MARK_END,
};
-static pinmux_enum_t pinmux_data[] = {
+static const pinmux_enum_t pinmux_data[] = {
/* PTA GPIO */
PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT, PTA7_IN_PU),
PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT, PTA6_IN_PU),
@@ -1192,7 +1192,7 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(SCIF3_I_TXD_MARK, PSB14_1, PTZ3_FN),
};
-static struct pinmux_gpio pinmux_gpios[] = {
+static struct sh_pfc_pin pinmux_pins[] = {
/* PTA */
PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
@@ -1418,372 +1418,376 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA),
+};
+
+#define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins)
+static const struct pinmux_func pinmux_func_gpios[] = {
/* BSC */
- PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
- PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
- PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
- PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
- PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
- PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
- PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
- PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
- PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
- PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
- PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
- PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
- PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
- PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
- PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
- PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
- PINMUX_GPIO(GPIO_FN_D15, D15_MARK),
- PINMUX_GPIO(GPIO_FN_D14, D14_MARK),
- PINMUX_GPIO(GPIO_FN_D13, D13_MARK),
- PINMUX_GPIO(GPIO_FN_D12, D12_MARK),
- PINMUX_GPIO(GPIO_FN_D11, D11_MARK),
- PINMUX_GPIO(GPIO_FN_D10, D10_MARK),
- PINMUX_GPIO(GPIO_FN_D9, D9_MARK),
- PINMUX_GPIO(GPIO_FN_D8, D8_MARK),
- PINMUX_GPIO(GPIO_FN_D7, D7_MARK),
- PINMUX_GPIO(GPIO_FN_D6, D6_MARK),
- PINMUX_GPIO(GPIO_FN_D5, D5_MARK),
- PINMUX_GPIO(GPIO_FN_D4, D4_MARK),
- PINMUX_GPIO(GPIO_FN_D3, D3_MARK),
- PINMUX_GPIO(GPIO_FN_D2, D2_MARK),
- PINMUX_GPIO(GPIO_FN_D1, D1_MARK),
- PINMUX_GPIO(GPIO_FN_D0, D0_MARK),
- PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
- PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
- PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
- PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
- PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK),
- PINMUX_GPIO(GPIO_FN_CS6A_CE2B, CS6A_CE2B_MARK),
- PINMUX_GPIO(GPIO_FN_CS5B_CE1A, CS5B_CE1A_MARK),
- PINMUX_GPIO(GPIO_FN_CS5A_CE2A, CS5A_CE2A_MARK),
- PINMUX_GPIO(GPIO_FN_WE3_ICIOWR, WE3_ICIOWR_MARK),
- PINMUX_GPIO(GPIO_FN_WE2_ICIORD, WE2_ICIORD_MARK),
- PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
- PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
- PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
+ GPIO_FN(D31),
+ GPIO_FN(D30),
+ GPIO_FN(D29),
+ GPIO_FN(D28),
+ GPIO_FN(D27),
+ GPIO_FN(D26),
+ GPIO_FN(D25),
+ GPIO_FN(D24),
+ GPIO_FN(D23),
+ GPIO_FN(D22),
+ GPIO_FN(D21),
+ GPIO_FN(D20),
+ GPIO_FN(D19),
+ GPIO_FN(D18),
+ GPIO_FN(D17),
+ GPIO_FN(D16),
+ GPIO_FN(D15),
+ GPIO_FN(D14),
+ GPIO_FN(D13),
+ GPIO_FN(D12),
+ GPIO_FN(D11),
+ GPIO_FN(D10),
+ GPIO_FN(D9),
+ GPIO_FN(D8),
+ GPIO_FN(D7),
+ GPIO_FN(D6),
+ GPIO_FN(D5),
+ GPIO_FN(D4),
+ GPIO_FN(D3),
+ GPIO_FN(D2),
+ GPIO_FN(D1),
+ GPIO_FN(D0),
+ GPIO_FN(A25),
+ GPIO_FN(A24),
+ GPIO_FN(A23),
+ GPIO_FN(A22),
+ GPIO_FN(CS6B_CE1B),
+ GPIO_FN(CS6A_CE2B),
+ GPIO_FN(CS5B_CE1A),
+ GPIO_FN(CS5A_CE2A),
+ GPIO_FN(WE3_ICIOWR),
+ GPIO_FN(WE2_ICIORD),
+ GPIO_FN(IOIS16),
+ GPIO_FN(WAIT),
+ GPIO_FN(BS),
/* KEYSC */
- PINMUX_GPIO(GPIO_FN_KEYOUT5_IN5, KEYOUT5_IN5_MARK),
- PINMUX_GPIO(GPIO_FN_KEYOUT4_IN6, KEYOUT4_IN6_MARK),
- PINMUX_GPIO(GPIO_FN_KEYIN4, KEYIN4_MARK),
- PINMUX_GPIO(GPIO_FN_KEYIN3, KEYIN3_MARK),
- PINMUX_GPIO(GPIO_FN_KEYIN2, KEYIN2_MARK),
- PINMUX_GPIO(GPIO_FN_KEYIN1, KEYIN1_MARK),
- PINMUX_GPIO(GPIO_FN_KEYIN0, KEYIN0_MARK),
- PINMUX_GPIO(GPIO_FN_KEYOUT3, KEYOUT3_MARK),
- PINMUX_GPIO(GPIO_FN_KEYOUT2, KEYOUT2_MARK),
- PINMUX_GPIO(GPIO_FN_KEYOUT1, KEYOUT1_MARK),
- PINMUX_GPIO(GPIO_FN_KEYOUT0, KEYOUT0_MARK),
+ GPIO_FN(KEYOUT5_IN5),
+ GPIO_FN(KEYOUT4_IN6),
+ GPIO_FN(KEYIN4),
+ GPIO_FN(KEYIN3),
+ GPIO_FN(KEYIN2),
+ GPIO_FN(KEYIN1),
+ GPIO_FN(KEYIN0),
+ GPIO_FN(KEYOUT3),
+ GPIO_FN(KEYOUT2),
+ GPIO_FN(KEYOUT1),
+ GPIO_FN(KEYOUT0),
/* ATAPI */
- PINMUX_GPIO(GPIO_FN_IDED15, IDED15_MARK),
- PINMUX_GPIO(GPIO_FN_IDED14, IDED14_MARK),
- PINMUX_GPIO(GPIO_FN_IDED13, IDED13_MARK),
- PINMUX_GPIO(GPIO_FN_IDED12, IDED12_MARK),
- PINMUX_GPIO(GPIO_FN_IDED11, IDED11_MARK),
- PINMUX_GPIO(GPIO_FN_IDED10, IDED10_MARK),
- PINMUX_GPIO(GPIO_FN_IDED9, IDED9_MARK),
- PINMUX_GPIO(GPIO_FN_IDED8, IDED8_MARK),
- PINMUX_GPIO(GPIO_FN_IDED7, IDED7_MARK),
- PINMUX_GPIO(GPIO_FN_IDED6, IDED6_MARK),
- PINMUX_GPIO(GPIO_FN_IDED5, IDED5_MARK),
- PINMUX_GPIO(GPIO_FN_IDED4, IDED4_MARK),
- PINMUX_GPIO(GPIO_FN_IDED3, IDED3_MARK),
- PINMUX_GPIO(GPIO_FN_IDED2, IDED2_MARK),
- PINMUX_GPIO(GPIO_FN_IDED1, IDED1_MARK),
- PINMUX_GPIO(GPIO_FN_IDED0, IDED0_MARK),
- PINMUX_GPIO(GPIO_FN_IDEA2, IDEA2_MARK),
- PINMUX_GPIO(GPIO_FN_IDEA1, IDEA1_MARK),
- PINMUX_GPIO(GPIO_FN_IDEA0, IDEA0_MARK),
- PINMUX_GPIO(GPIO_FN_IDEIOWR, IDEIOWR_MARK),
- PINMUX_GPIO(GPIO_FN_IODREQ, IODREQ_MARK),
- PINMUX_GPIO(GPIO_FN_IDECS0, IDECS0_MARK),
- PINMUX_GPIO(GPIO_FN_IDECS1, IDECS1_MARK),
- PINMUX_GPIO(GPIO_FN_IDEIORD, IDEIORD_MARK),
- PINMUX_GPIO(GPIO_FN_DIRECTION, DIRECTION_MARK),
- PINMUX_GPIO(GPIO_FN_EXBUF_ENB, EXBUF_ENB_MARK),
- PINMUX_GPIO(GPIO_FN_IDERST, IDERST_MARK),
- PINMUX_GPIO(GPIO_FN_IODACK, IODACK_MARK),
- PINMUX_GPIO(GPIO_FN_IDEINT, IDEINT_MARK),
- PINMUX_GPIO(GPIO_FN_IDEIORDY, IDEIORDY_MARK),
+ GPIO_FN(IDED15),
+ GPIO_FN(IDED14),
+ GPIO_FN(IDED13),
+ GPIO_FN(IDED12),
+ GPIO_FN(IDED11),
+ GPIO_FN(IDED10),
+ GPIO_FN(IDED9),
+ GPIO_FN(IDED8),
+ GPIO_FN(IDED7),
+ GPIO_FN(IDED6),
+ GPIO_FN(IDED5),
+ GPIO_FN(IDED4),
+ GPIO_FN(IDED3),
+ GPIO_FN(IDED2),
+ GPIO_FN(IDED1),
+ GPIO_FN(IDED0),
+ GPIO_FN(IDEA2),
+ GPIO_FN(IDEA1),
+ GPIO_FN(IDEA0),
+ GPIO_FN(IDEIOWR),
+ GPIO_FN(IODREQ),
+ GPIO_FN(IDECS0),
+ GPIO_FN(IDECS1),
+ GPIO_FN(IDEIORD),
+ GPIO_FN(DIRECTION),
+ GPIO_FN(EXBUF_ENB),
+ GPIO_FN(IDERST),
+ GPIO_FN(IODACK),
+ GPIO_FN(IDEINT),
+ GPIO_FN(IDEIORDY),
/* TPU */
- PINMUX_GPIO(GPIO_FN_TPUTO3, TPUTO3_MARK),
- PINMUX_GPIO(GPIO_FN_TPUTO2, TPUTO2_MARK),
- PINMUX_GPIO(GPIO_FN_TPUTO1, TPUTO1_MARK),
- PINMUX_GPIO(GPIO_FN_TPUTO0, TPUTO0_MARK),
- PINMUX_GPIO(GPIO_FN_TPUTI3, TPUTI3_MARK),
- PINMUX_GPIO(GPIO_FN_TPUTI2, TPUTI2_MARK),
+ GPIO_FN(TPUTO3),
+ GPIO_FN(TPUTO2),
+ GPIO_FN(TPUTO1),
+ GPIO_FN(TPUTO0),
+ GPIO_FN(TPUTI3),
+ GPIO_FN(TPUTI2),
/* LCDC */
- PINMUX_GPIO(GPIO_FN_LCDD23, LCDD23_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD22, LCDD22_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD21, LCDD21_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD20, LCDD20_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD19, LCDD19_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD18, LCDD18_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD17, LCDD17_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD16, LCDD16_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD15, LCDD15_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD14, LCDD14_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD13, LCDD13_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD12, LCDD12_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD11, LCDD11_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD10, LCDD10_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD9, LCDD9_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD8, LCDD8_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD7, LCDD7_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD6, LCDD6_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD5, LCDD5_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD4, LCDD4_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD3, LCDD3_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD2, LCDD2_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD1, LCDD1_MARK),
- PINMUX_GPIO(GPIO_FN_LCDD0, LCDD0_MARK),
- PINMUX_GPIO(GPIO_FN_LCDVSYN, LCDVSYN_MARK),
- PINMUX_GPIO(GPIO_FN_LCDDISP, LCDDISP_MARK),
- PINMUX_GPIO(GPIO_FN_LCDRS, LCDRS_MARK),
- PINMUX_GPIO(GPIO_FN_LCDHSYN, LCDHSYN_MARK),
- PINMUX_GPIO(GPIO_FN_LCDCS, LCDCS_MARK),
- PINMUX_GPIO(GPIO_FN_LCDDON, LCDDON_MARK),
- PINMUX_GPIO(GPIO_FN_LCDDCK, LCDDCK_MARK),
- PINMUX_GPIO(GPIO_FN_LCDWR, LCDWR_MARK),
- PINMUX_GPIO(GPIO_FN_LCDVEPWC, LCDVEPWC_MARK),
- PINMUX_GPIO(GPIO_FN_LCDVCPWC, LCDVCPWC_MARK),
- PINMUX_GPIO(GPIO_FN_LCDRD, LCDRD_MARK),
- PINMUX_GPIO(GPIO_FN_LCDLCLK, LCDLCLK_MARK),
+ GPIO_FN(LCDD23),
+ GPIO_FN(LCDD22),
+ GPIO_FN(LCDD21),
+ GPIO_FN(LCDD20),
+ GPIO_FN(LCDD19),
+ GPIO_FN(LCDD18),
+ GPIO_FN(LCDD17),
+ GPIO_FN(LCDD16),
+ GPIO_FN(LCDD15),
+ GPIO_FN(LCDD14),
+ GPIO_FN(LCDD13),
+ GPIO_FN(LCDD12),
+ GPIO_FN(LCDD11),
+ GPIO_FN(LCDD10),
+ GPIO_FN(LCDD9),
+ GPIO_FN(LCDD8),
+ GPIO_FN(LCDD7),
+ GPIO_FN(LCDD6),
+ GPIO_FN(LCDD5),
+ GPIO_FN(LCDD4),
+ GPIO_FN(LCDD3),
+ GPIO_FN(LCDD2),
+ GPIO_FN(LCDD1),
+ GPIO_FN(LCDD0),
+ GPIO_FN(LCDVSYN),
+ GPIO_FN(LCDDISP),
+ GPIO_FN(LCDRS),
+ GPIO_FN(LCDHSYN),
+ GPIO_FN(LCDCS),
+ GPIO_FN(LCDDON),
+ GPIO_FN(LCDDCK),
+ GPIO_FN(LCDWR),
+ GPIO_FN(LCDVEPWC),
+ GPIO_FN(LCDVCPWC),
+ GPIO_FN(LCDRD),
+ GPIO_FN(LCDLCLK),
/* SCIF0 */
- PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK),
+ GPIO_FN(SCIF0_TXD),
+ GPIO_FN(SCIF0_RXD),
+ GPIO_FN(SCIF0_SCK),
/* SCIF1 */
- PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK),
+ GPIO_FN(SCIF1_SCK),
+ GPIO_FN(SCIF1_RXD),
+ GPIO_FN(SCIF1_TXD),
/* SCIF2 */
- PINMUX_GPIO(GPIO_FN_SCIF2_L_TXD, SCIF2_L_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF2_L_SCK, SCIF2_L_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF2_L_RXD, SCIF2_L_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF2_V_TXD, SCIF2_V_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF2_V_SCK, SCIF2_V_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF2_V_RXD, SCIF2_V_RXD_MARK),
+ GPIO_FN(SCIF2_L_TXD),
+ GPIO_FN(SCIF2_L_SCK),
+ GPIO_FN(SCIF2_L_RXD),
+ GPIO_FN(SCIF2_V_TXD),
+ GPIO_FN(SCIF2_V_SCK),
+ GPIO_FN(SCIF2_V_RXD),
/* SCIF3 */
- PINMUX_GPIO(GPIO_FN_SCIF3_V_SCK, SCIF3_V_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF3_V_RXD, SCIF3_V_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF3_V_TXD, SCIF3_V_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF3_V_CTS, SCIF3_V_CTS_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF3_V_RTS, SCIF3_V_RTS_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF3_I_SCK, SCIF3_I_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF3_I_RXD, SCIF3_I_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF3_I_TXD, SCIF3_I_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF3_I_CTS, SCIF3_I_CTS_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF3_I_RTS, SCIF3_I_RTS_MARK),
+ GPIO_FN(SCIF3_V_SCK),
+ GPIO_FN(SCIF3_V_RXD),
+ GPIO_FN(SCIF3_V_TXD),
+ GPIO_FN(SCIF3_V_CTS),
+ GPIO_FN(SCIF3_V_RTS),
+ GPIO_FN(SCIF3_I_SCK),
+ GPIO_FN(SCIF3_I_RXD),
+ GPIO_FN(SCIF3_I_TXD),
+ GPIO_FN(SCIF3_I_CTS),
+ GPIO_FN(SCIF3_I_RTS),
/* SCIF4 */
- PINMUX_GPIO(GPIO_FN_SCIF4_SCK, SCIF4_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF4_RXD, SCIF4_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF4_TXD, SCIF4_TXD_MARK),
+ GPIO_FN(SCIF4_SCK),
+ GPIO_FN(SCIF4_RXD),
+ GPIO_FN(SCIF4_TXD),
/* SCIF5 */
- PINMUX_GPIO(GPIO_FN_SCIF5_SCK, SCIF5_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF5_RXD, SCIF5_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF5_TXD, SCIF5_TXD_MARK),
+ GPIO_FN(SCIF5_SCK),
+ GPIO_FN(SCIF5_RXD),
+ GPIO_FN(SCIF5_TXD),
/* FSI */
- PINMUX_GPIO(GPIO_FN_FSIMCKB, FSIMCKB_MARK),
- PINMUX_GPIO(GPIO_FN_FSIMCKA, FSIMCKA_MARK),
- PINMUX_GPIO(GPIO_FN_FSIOASD, FSIOASD_MARK),
- PINMUX_GPIO(GPIO_FN_FSIIABCK, FSIIABCK_MARK),
- PINMUX_GPIO(GPIO_FN_FSIIALRCK, FSIIALRCK_MARK),
- PINMUX_GPIO(GPIO_FN_FSIOABCK, FSIOABCK_MARK),
- PINMUX_GPIO(GPIO_FN_FSIOALRCK, FSIOALRCK_MARK),
- PINMUX_GPIO(GPIO_FN_CLKAUDIOAO, CLKAUDIOAO_MARK),
- PINMUX_GPIO(GPIO_FN_FSIIBSD, FSIIBSD_MARK),
- PINMUX_GPIO(GPIO_FN_FSIOBSD, FSIOBSD_MARK),
- PINMUX_GPIO(GPIO_FN_FSIIBBCK, FSIIBBCK_MARK),
- PINMUX_GPIO(GPIO_FN_FSIIBLRCK, FSIIBLRCK_MARK),
- PINMUX_GPIO(GPIO_FN_FSIOBBCK, FSIOBBCK_MARK),
- PINMUX_GPIO(GPIO_FN_FSIOBLRCK, FSIOBLRCK_MARK),
- PINMUX_GPIO(GPIO_FN_CLKAUDIOBO, CLKAUDIOBO_MARK),
- PINMUX_GPIO(GPIO_FN_FSIIASD, FSIIASD_MARK),
+ GPIO_FN(FSIMCKB),
+ GPIO_FN(FSIMCKA),
+ GPIO_FN(FSIOASD),
+ GPIO_FN(FSIIABCK),
+ GPIO_FN(FSIIALRCK),
+ GPIO_FN(FSIOABCK),
+ GPIO_FN(FSIOALRCK),
+ GPIO_FN(CLKAUDIOAO),
+ GPIO_FN(FSIIBSD),
+ GPIO_FN(FSIOBSD),
+ GPIO_FN(FSIIBBCK),
+ GPIO_FN(FSIIBLRCK),
+ GPIO_FN(FSIOBBCK),
+ GPIO_FN(FSIOBLRCK),
+ GPIO_FN(CLKAUDIOBO),
+ GPIO_FN(FSIIASD),
/* AUD */
- PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK),
- PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
- PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
- PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
- PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
+ GPIO_FN(AUDCK),
+ GPIO_FN(AUDSYNC),
+ GPIO_FN(AUDATA3),
+ GPIO_FN(AUDATA2),
+ GPIO_FN(AUDATA1),
+ GPIO_FN(AUDATA0),
/* VIO */
- PINMUX_GPIO(GPIO_FN_VIO_CKO, VIO_CKO_MARK),
+ GPIO_FN(VIO_CKO),
/* VIO0 */
- PINMUX_GPIO(GPIO_FN_VIO0_D15, VIO0_D15_MARK),
- PINMUX_GPIO(GPIO_FN_VIO0_D14, VIO0_D14_MARK),
- PINMUX_GPIO(GPIO_FN_VIO0_D13, VIO0_D13_MARK),
- PINMUX_GPIO(GPIO_FN_VIO0_D12, VIO0_D12_MARK),
- PINMUX_GPIO(GPIO_FN_VIO0_D11, VIO0_D11_MARK),
- PINMUX_GPIO(GPIO_FN_VIO0_D10, VIO0_D10_MARK),
- PINMUX_GPIO(GPIO_FN_VIO0_D9, VIO0_D9_MARK),
- PINMUX_GPIO(GPIO_FN_VIO0_D8, VIO0_D8_MARK),
- PINMUX_GPIO(GPIO_FN_VIO0_D7, VIO0_D7_MARK),
- PINMUX_GPIO(GPIO_FN_VIO0_D6, VIO0_D6_MARK),
- PINMUX_GPIO(GPIO_FN_VIO0_D5, VIO0_D5_MARK),
- PINMUX_GPIO(GPIO_FN_VIO0_D4, VIO0_D4_MARK),
- PINMUX_GPIO(GPIO_FN_VIO0_D3, VIO0_D3_MARK),
- PINMUX_GPIO(GPIO_FN_VIO0_D2, VIO0_D2_MARK),
- PINMUX_GPIO(GPIO_FN_VIO0_D1, VIO0_D1_MARK),
- PINMUX_GPIO(GPIO_FN_VIO0_D0, VIO0_D0_MARK),
- PINMUX_GPIO(GPIO_FN_VIO0_VD, VIO0_VD_MARK),
- PINMUX_GPIO(GPIO_FN_VIO0_CLK, VIO0_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_VIO0_FLD, VIO0_FLD_MARK),
- PINMUX_GPIO(GPIO_FN_VIO0_HD, VIO0_HD_MARK),
+ GPIO_FN(VIO0_D15),
+ GPIO_FN(VIO0_D14),
+ GPIO_FN(VIO0_D13),
+ GPIO_FN(VIO0_D12),
+ GPIO_FN(VIO0_D11),
+ GPIO_FN(VIO0_D10),
+ GPIO_FN(VIO0_D9),
+ GPIO_FN(VIO0_D8),
+ GPIO_FN(VIO0_D7),
+ GPIO_FN(VIO0_D6),
+ GPIO_FN(VIO0_D5),
+ GPIO_FN(VIO0_D4),
+ GPIO_FN(VIO0_D3),
+ GPIO_FN(VIO0_D2),
+ GPIO_FN(VIO0_D1),
+ GPIO_FN(VIO0_D0),
+ GPIO_FN(VIO0_VD),
+ GPIO_FN(VIO0_CLK),
+ GPIO_FN(VIO0_FLD),
+ GPIO_FN(VIO0_HD),
/* VIO1 */
- PINMUX_GPIO(GPIO_FN_VIO1_D7, VIO1_D7_MARK),
- PINMUX_GPIO(GPIO_FN_VIO1_D6, VIO1_D6_MARK),
- PINMUX_GPIO(GPIO_FN_VIO1_D5, VIO1_D5_MARK),
- PINMUX_GPIO(GPIO_FN_VIO1_D4, VIO1_D4_MARK),
- PINMUX_GPIO(GPIO_FN_VIO1_D3, VIO1_D3_MARK),
- PINMUX_GPIO(GPIO_FN_VIO1_D2, VIO1_D2_MARK),
- PINMUX_GPIO(GPIO_FN_VIO1_D1, VIO1_D1_MARK),
- PINMUX_GPIO(GPIO_FN_VIO1_D0, VIO1_D0_MARK),
- PINMUX_GPIO(GPIO_FN_VIO1_FLD, VIO1_FLD_MARK),
- PINMUX_GPIO(GPIO_FN_VIO1_HD, VIO1_HD_MARK),
- PINMUX_GPIO(GPIO_FN_VIO1_VD, VIO1_VD_MARK),
- PINMUX_GPIO(GPIO_FN_VIO1_CLK, VIO1_CLK_MARK),
+ GPIO_FN(VIO1_D7),
+ GPIO_FN(VIO1_D6),
+ GPIO_FN(VIO1_D5),
+ GPIO_FN(VIO1_D4),
+ GPIO_FN(VIO1_D3),
+ GPIO_FN(VIO1_D2),
+ GPIO_FN(VIO1_D1),
+ GPIO_FN(VIO1_D0),
+ GPIO_FN(VIO1_FLD),
+ GPIO_FN(VIO1_HD),
+ GPIO_FN(VIO1_VD),
+ GPIO_FN(VIO1_CLK),
/* Eth */
- PINMUX_GPIO(GPIO_FN_RMII_RXD0, RMII_RXD0_MARK),
- PINMUX_GPIO(GPIO_FN_RMII_RXD1, RMII_RXD1_MARK),
- PINMUX_GPIO(GPIO_FN_RMII_TXD0, RMII_TXD0_MARK),
- PINMUX_GPIO(GPIO_FN_RMII_TXD1, RMII_TXD1_MARK),
- PINMUX_GPIO(GPIO_FN_RMII_REF_CLK, RMII_REF_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_RMII_TX_EN, RMII_TX_EN_MARK),
- PINMUX_GPIO(GPIO_FN_RMII_RX_ER, RMII_RX_ER_MARK),
- PINMUX_GPIO(GPIO_FN_RMII_CRS_DV, RMII_CRS_DV_MARK),
- PINMUX_GPIO(GPIO_FN_LNKSTA, LNKSTA_MARK),
- PINMUX_GPIO(GPIO_FN_MDIO, MDIO_MARK),
- PINMUX_GPIO(GPIO_FN_MDC, MDC_MARK),
+ GPIO_FN(RMII_RXD0),
+ GPIO_FN(RMII_RXD1),
+ GPIO_FN(RMII_TXD0),
+ GPIO_FN(RMII_TXD1),
+ GPIO_FN(RMII_REF_CLK),
+ GPIO_FN(RMII_TX_EN),
+ GPIO_FN(RMII_RX_ER),
+ GPIO_FN(RMII_CRS_DV),
+ GPIO_FN(LNKSTA),
+ GPIO_FN(MDIO),
+ GPIO_FN(MDC),
/* System */
- PINMUX_GPIO(GPIO_FN_PDSTATUS, PDSTATUS_MARK),
- PINMUX_GPIO(GPIO_FN_STATUS2, STATUS2_MARK),
- PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+ GPIO_FN(PDSTATUS),
+ GPIO_FN(STATUS2),
+ GPIO_FN(STATUS0),
/* VOU */
- PINMUX_GPIO(GPIO_FN_DV_D15, DV_D15_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D14, DV_D14_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D13, DV_D13_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D12, DV_D12_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D11, DV_D11_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D10, DV_D10_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D9, DV_D9_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D8, DV_D8_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D7, DV_D7_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D6, DV_D6_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D5, DV_D5_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D4, DV_D4_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D3, DV_D3_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D2, DV_D2_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D1, DV_D1_MARK),
- PINMUX_GPIO(GPIO_FN_DV_D0, DV_D0_MARK),
- PINMUX_GPIO(GPIO_FN_DV_CLKI, DV_CLKI_MARK),
- PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK),
+ GPIO_FN(DV_D15),
+ GPIO_FN(DV_D14),
+ GPIO_FN(DV_D13),
+ GPIO_FN(DV_D12),
+ GPIO_FN(DV_D11),
+ GPIO_FN(DV_D10),
+ GPIO_FN(DV_D9),
+ GPIO_FN(DV_D8),
+ GPIO_FN(DV_D7),
+ GPIO_FN(DV_D6),
+ GPIO_FN(DV_D5),
+ GPIO_FN(DV_D4),
+ GPIO_FN(DV_D3),
+ GPIO_FN(DV_D2),
+ GPIO_FN(DV_D1),
+ GPIO_FN(DV_D0),
+ GPIO_FN(DV_CLKI),
+ GPIO_FN(DV_CLK),
+ GPIO_FN(DV_VSYNC),
+ GPIO_FN(DV_HSYNC),
/* MSIOF0 */
- PINMUX_GPIO(GPIO_FN_MSIOF0_RXD, MSIOF0_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF0_TXD, MSIOF0_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF0_MCK, MSIOF0_MCK_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF0_TSCK, MSIOF0_TSCK_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF0_SS1, MSIOF0_SS1_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF0_SS2, MSIOF0_SS2_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF0_TSYNC, MSIOF0_TSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF0_RSCK, MSIOF0_RSCK_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF0_RSYNC, MSIOF0_RSYNC_MARK),
+ GPIO_FN(MSIOF0_RXD),
+ GPIO_FN(MSIOF0_TXD),
+ GPIO_FN(MSIOF0_MCK),
+ GPIO_FN(MSIOF0_TSCK),
+ GPIO_FN(MSIOF0_SS1),
+ GPIO_FN(MSIOF0_SS2),
+ GPIO_FN(MSIOF0_TSYNC),
+ GPIO_FN(MSIOF0_RSCK),
+ GPIO_FN(MSIOF0_RSYNC),
/* MSIOF1 */
- PINMUX_GPIO(GPIO_FN_MSIOF1_RXD, MSIOF1_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF1_TXD, MSIOF1_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF1_MCK, MSIOF1_MCK_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF1_TSCK, MSIOF1_TSCK_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF1_SS1, MSIOF1_SS1_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF1_SS2, MSIOF1_SS2_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF1_TSYNC, MSIOF1_TSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF1_RSCK, MSIOF1_RSCK_MARK),
- PINMUX_GPIO(GPIO_FN_MSIOF1_RSYNC, MSIOF1_RSYNC_MARK),
+ GPIO_FN(MSIOF1_RXD),
+ GPIO_FN(MSIOF1_TXD),
+ GPIO_FN(MSIOF1_MCK),
+ GPIO_FN(MSIOF1_TSCK),
+ GPIO_FN(MSIOF1_SS1),
+ GPIO_FN(MSIOF1_SS2),
+ GPIO_FN(MSIOF1_TSYNC),
+ GPIO_FN(MSIOF1_RSCK),
+ GPIO_FN(MSIOF1_RSYNC),
/* DMAC */
- PINMUX_GPIO(GPIO_FN_DMAC_DACK0, DMAC_DACK0_MARK),
- PINMUX_GPIO(GPIO_FN_DMAC_DREQ0, DMAC_DREQ0_MARK),
- PINMUX_GPIO(GPIO_FN_DMAC_DACK1, DMAC_DACK1_MARK),
- PINMUX_GPIO(GPIO_FN_DMAC_DREQ1, DMAC_DREQ1_MARK),
+ GPIO_FN(DMAC_DACK0),
+ GPIO_FN(DMAC_DREQ0),
+ GPIO_FN(DMAC_DACK1),
+ GPIO_FN(DMAC_DREQ1),
/* SDHI0 */
- PINMUX_GPIO(GPIO_FN_SDHI0CD, SDHI0CD_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI0WP, SDHI0WP_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI0CMD, SDHI0CMD_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI0CLK, SDHI0CLK_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI0D3, SDHI0D3_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI0D2, SDHI0D2_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI0D1, SDHI0D1_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI0D0, SDHI0D0_MARK),
+ GPIO_FN(SDHI0CD),
+ GPIO_FN(SDHI0WP),
+ GPIO_FN(SDHI0CMD),
+ GPIO_FN(SDHI0CLK),
+ GPIO_FN(SDHI0D3),
+ GPIO_FN(SDHI0D2),
+ GPIO_FN(SDHI0D1),
+ GPIO_FN(SDHI0D0),
/* SDHI1 */
- PINMUX_GPIO(GPIO_FN_SDHI1CD, SDHI1CD_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI1WP, SDHI1WP_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI1CMD, SDHI1CMD_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI1CLK, SDHI1CLK_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI1D3, SDHI1D3_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI1D2, SDHI1D2_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI1D1, SDHI1D1_MARK),
- PINMUX_GPIO(GPIO_FN_SDHI1D0, SDHI1D0_MARK),
+ GPIO_FN(SDHI1CD),
+ GPIO_FN(SDHI1WP),
+ GPIO_FN(SDHI1CMD),
+ GPIO_FN(SDHI1CLK),
+ GPIO_FN(SDHI1D3),
+ GPIO_FN(SDHI1D2),
+ GPIO_FN(SDHI1D1),
+ GPIO_FN(SDHI1D0),
/* MMC */
- PINMUX_GPIO(GPIO_FN_MMC_D7, MMC_D7_MARK),
- PINMUX_GPIO(GPIO_FN_MMC_D6, MMC_D6_MARK),
- PINMUX_GPIO(GPIO_FN_MMC_D5, MMC_D5_MARK),
- PINMUX_GPIO(GPIO_FN_MMC_D4, MMC_D4_MARK),
- PINMUX_GPIO(GPIO_FN_MMC_D3, MMC_D3_MARK),
- PINMUX_GPIO(GPIO_FN_MMC_D2, MMC_D2_MARK),
- PINMUX_GPIO(GPIO_FN_MMC_D1, MMC_D1_MARK),
- PINMUX_GPIO(GPIO_FN_MMC_D0, MMC_D0_MARK),
- PINMUX_GPIO(GPIO_FN_MMC_CLK, MMC_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_MMC_CMD, MMC_CMD_MARK),
+ GPIO_FN(MMC_D7),
+ GPIO_FN(MMC_D6),
+ GPIO_FN(MMC_D5),
+ GPIO_FN(MMC_D4),
+ GPIO_FN(MMC_D3),
+ GPIO_FN(MMC_D2),
+ GPIO_FN(MMC_D1),
+ GPIO_FN(MMC_D0),
+ GPIO_FN(MMC_CLK),
+ GPIO_FN(MMC_CMD),
/* IrDA */
- PINMUX_GPIO(GPIO_FN_IRDA_OUT, IRDA_OUT_MARK),
- PINMUX_GPIO(GPIO_FN_IRDA_IN, IRDA_IN_MARK),
+ GPIO_FN(IRDA_OUT),
+ GPIO_FN(IRDA_IN),
/* TSIF */
- PINMUX_GPIO(GPIO_FN_TSIF_TS0_SDAT, TSIF_TS0_SDAT_MARK),
- PINMUX_GPIO(GPIO_FN_TSIF_TS0_SCK, TSIF_TS0_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_TSIF_TS0_SDEN, TSIF_TS0_SDEN_MARK),
- PINMUX_GPIO(GPIO_FN_TSIF_TS0_SPSYNC, TSIF_TS0_SPSYNC_MARK),
+ GPIO_FN(TSIF_TS0_SDAT),
+ GPIO_FN(TSIF_TS0_SCK),
+ GPIO_FN(TSIF_TS0_SDEN),
+ GPIO_FN(TSIF_TS0_SPSYNC),
/* IRQ */
- PINMUX_GPIO(GPIO_FN_INTC_IRQ7, INTC_IRQ7_MARK),
- PINMUX_GPIO(GPIO_FN_INTC_IRQ6, INTC_IRQ6_MARK),
- PINMUX_GPIO(GPIO_FN_INTC_IRQ5, INTC_IRQ5_MARK),
- PINMUX_GPIO(GPIO_FN_INTC_IRQ4, INTC_IRQ4_MARK),
- PINMUX_GPIO(GPIO_FN_INTC_IRQ3, INTC_IRQ3_MARK),
- PINMUX_GPIO(GPIO_FN_INTC_IRQ2, INTC_IRQ2_MARK),
- PINMUX_GPIO(GPIO_FN_INTC_IRQ1, INTC_IRQ1_MARK),
- PINMUX_GPIO(GPIO_FN_INTC_IRQ0, INTC_IRQ0_MARK),
+ GPIO_FN(INTC_IRQ7),
+ GPIO_FN(INTC_IRQ6),
+ GPIO_FN(INTC_IRQ5),
+ GPIO_FN(INTC_IRQ4),
+ GPIO_FN(INTC_IRQ3),
+ GPIO_FN(INTC_IRQ2),
+ GPIO_FN(INTC_IRQ1),
+ GPIO_FN(INTC_IRQ0),
};
-static struct pinmux_cfg_reg pinmux_config_regs[] = {
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
PTA7_FN, PTA7_OUT, PTA7_IN_PU, PTA7_IN,
PTA6_FN, PTA6_OUT, PTA6_IN_PU, PTA6_IN,
@@ -2107,7 +2111,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
{}
};
-static struct pinmux_data_reg pinmux_data_regs[] = {
+static const struct pinmux_data_reg pinmux_data_regs[] = {
{ PINMUX_DATA_REG("PADR", 0xa4050120, 8) {
PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
@@ -2203,20 +2207,18 @@ static struct pinmux_data_reg pinmux_data_regs[] = {
{ },
};
-struct sh_pfc_soc_info sh7724_pinmux_info = {
+const struct sh_pfc_soc_info sh7724_pinmux_info = {
.name = "sh7724_pfc",
- .reserved_id = PINMUX_RESERVED,
- .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
.input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
- .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
- .first_gpio = GPIO_PTA7,
- .last_gpio = GPIO_FN_INTC_IRQ0,
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .func_gpios = pinmux_func_gpios,
+ .nr_func_gpios = ARRAY_SIZE(pinmux_func_gpios),
- .gpios = pinmux_gpios,
.cfg_regs = pinmux_config_regs,
.data_regs = pinmux_data_regs,
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
index 23d76d2..2fd5b7d 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
@@ -14,11 +14,6 @@
#include "sh_pfc.h"
-#define CPU_32_PORT(fn, pfx, sfx) \
- PORT_10(fn, pfx, sfx), PORT_10(fn, pfx##1, sfx), \
- PORT_10(fn, pfx##2, sfx), PORT_1(fn, pfx##30, sfx), \
- PORT_1(fn, pfx##31, sfx)
-
#define CPU_32_PORT5(fn, pfx, sfx) \
PORT_1(fn, pfx##0, sfx), PORT_1(fn, pfx##1, sfx), \
PORT_1(fn, pfx##2, sfx), PORT_1(fn, pfx##3, sfx), \
@@ -29,11 +24,11 @@
/* GPSR0 - GPSR5 */
#define CPU_ALL_PORT(fn, pfx, sfx) \
- CPU_32_PORT(fn, pfx##_0_, sfx), \
- CPU_32_PORT(fn, pfx##_1_, sfx), \
- CPU_32_PORT(fn, pfx##_2_, sfx), \
- CPU_32_PORT(fn, pfx##_3_, sfx), \
- CPU_32_PORT(fn, pfx##_4_, sfx), \
+ PORT_32(fn, pfx##_0_, sfx), \
+ PORT_32(fn, pfx##_1_, sfx), \
+ PORT_32(fn, pfx##_2_, sfx), \
+ PORT_32(fn, pfx##_3_, sfx), \
+ PORT_32(fn, pfx##_4_, sfx), \
CPU_32_PORT5(fn, pfx##_5_, sfx)
#define _GP_GPIO(pfx, sfx) PINMUX_GPIO(GPIO_GP##pfx, GP##pfx##_DATA)
@@ -47,20 +42,8 @@
#define PINMUX_GPIO_GP_ALL() CPU_ALL_PORT(_GP_GPIO, , unused)
#define PINMUX_DATA_GP_ALL() CPU_ALL_PORT(_GP_DATA, , unused)
-#define PORT_10_REV(fn, pfx, sfx) \
- PORT_1(fn, pfx##9, sfx), PORT_1(fn, pfx##8, sfx), \
- PORT_1(fn, pfx##7, sfx), PORT_1(fn, pfx##6, sfx), \
- PORT_1(fn, pfx##5, sfx), PORT_1(fn, pfx##4, sfx), \
- PORT_1(fn, pfx##3, sfx), PORT_1(fn, pfx##2, sfx), \
- PORT_1(fn, pfx##1, sfx), PORT_1(fn, pfx##0, sfx)
-
-#define CPU_32_PORT_REV(fn, pfx, sfx) \
- PORT_1(fn, pfx##31, sfx), PORT_1(fn, pfx##30, sfx), \
- PORT_10_REV(fn, pfx##2, sfx), PORT_10_REV(fn, pfx##1, sfx), \
- PORT_10_REV(fn, pfx, sfx)
-
-#define GP_INOUTSEL(bank) CPU_32_PORT_REV(_GP_INOUTSEL, _##bank##_, unused)
-#define GP_INDT(bank) CPU_32_PORT_REV(_GP_INDT, _##bank##_, unused)
+#define GP_INOUTSEL(bank) PORT_32_REV(_GP_INOUTSEL, _##bank##_, unused)
+#define GP_INDT(bank) PORT_32_REV(_GP_INDT, _##bank##_, unused)
#define PINMUX_IPSR_DATA(ipsr, fn) PINMUX_DATA(fn##_MARK, FN_##ipsr, FN_##fn)
#define PINMUX_IPSR_MODSEL_DATA(ipsr, fn, ms) PINMUX_DATA(fn##_MARK, FN_##ms, \
@@ -609,7 +592,7 @@ enum {
PINMUX_MARK_END,
};
-static pinmux_enum_t pinmux_data[] = {
+static const pinmux_enum_t pinmux_data[] = {
PINMUX_DATA_GP_ALL(), /* PINMUX_DATA(GP_M_N_DATA, GP_M_N_FN...), */
PINMUX_DATA(CLKOUT_MARK, FN_CLKOUT),
@@ -1384,9 +1367,13 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_DATA(IP11_28, ST_CLKOUT),
};
-static struct pinmux_gpio pinmux_gpios[] = {
+static struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
+};
+
+#define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins)
+static const struct pinmux_func pinmux_func_gpios[] = {
GPIO_FN(CLKOUT), GPIO_FN(BS), GPIO_FN(CS0), GPIO_FN(EX_CS0),
GPIO_FN(RD), GPIO_FN(WE0), GPIO_FN(WE1),
GPIO_FN(SCL0), GPIO_FN(PENC0), GPIO_FN(USB_OVC0),
@@ -1665,7 +1652,7 @@ static struct pinmux_gpio pinmux_gpios[] = {
GPIO_FN(SCL1), GPIO_FN(SCIF_CLK_C),
};
-static struct pinmux_cfg_reg pinmux_config_regs[] = {
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("GPSR0", 0xFFFC0004, 32, 1) {
GP_0_31_FN, FN_IP2_2_0,
GP_0_30_FN, FN_IP1_31_29,
@@ -2434,7 +2421,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
{ },
};
-static struct pinmux_data_reg pinmux_data_regs[] = {
+static const struct pinmux_data_reg pinmux_data_regs[] = {
/* GPIO 0 - 5 */
{ PINMUX_DATA_REG("INDT0", 0xFFC4000C, 32) { GP_INDT(0) } },
{ PINMUX_DATA_REG("INDT1", 0xFFC4100C, 32) { GP_INDT(1) } },
@@ -2451,22 +2438,20 @@ static struct pinmux_data_reg pinmux_data_regs[] = {
{ },
};
-struct sh_pfc_soc_info sh7734_pinmux_info = {
+const struct sh_pfc_soc_info sh7734_pinmux_info = {
.name = "sh7734_pfc",
.unlock_reg = 0xFFFC0000,
- .reserved_id = PINMUX_RESERVED,
- .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
- .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
- .first_gpio = GPIO_GP_0_0,
- .last_gpio = GPIO_FN_ST_CLKOUT,
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .func_gpios = pinmux_func_gpios,
+ .nr_func_gpios = ARRAY_SIZE(pinmux_func_gpios),
- .gpios = pinmux_gpios,
.cfg_regs = pinmux_config_regs,
.data_regs = pinmux_data_regs,
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7757.c b/drivers/pinctrl/sh-pfc/pfc-sh7757.c
index 5ed74cd..e074230 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7757.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7757.c
@@ -526,7 +526,7 @@ enum {
PINMUX_MARK_END,
};
-static pinmux_enum_t pinmux_data[] = {
+static const pinmux_enum_t pinmux_data[] = {
/* PTA GPIO */
PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT),
PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT),
@@ -1114,7 +1114,7 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(ON_DQ0_MARK, PS8_8_FN2, PTZ0_FN),
};
-static struct pinmux_gpio pinmux_gpios[] = {
+static struct sh_pfc_pin pinmux_pins[] = {
/* PTA */
PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
@@ -1370,359 +1370,363 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA),
+};
+
+#define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins)
+static const struct pinmux_func pinmux_func_gpios[] = {
/* PTA (module: LBSC, RGMII) */
- PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
- PINMUX_GPIO(GPIO_FN_RDWR, RDWR_MARK),
- PINMUX_GPIO(GPIO_FN_WE1, WE1_MARK),
- PINMUX_GPIO(GPIO_FN_RDY, RDY_MARK),
- PINMUX_GPIO(GPIO_FN_ET0_MDC, ET0_MDC_MARK),
- PINMUX_GPIO(GPIO_FN_ET0_MDIO, ET0_MDIO_MARK),
- PINMUX_GPIO(GPIO_FN_ET1_MDC, ET1_MDC_MARK),
- PINMUX_GPIO(GPIO_FN_ET1_MDIO, ET1_MDIO_MARK),
+ GPIO_FN(BS),
+ GPIO_FN(RDWR),
+ GPIO_FN(WE1),
+ GPIO_FN(RDY),
+ GPIO_FN(ET0_MDC),
+ GPIO_FN(ET0_MDIO),
+ GPIO_FN(ET1_MDC),
+ GPIO_FN(ET1_MDIO),
/* PTB (module: INTC, ONFI, TMU) */
- PINMUX_GPIO(GPIO_FN_IRQ15, IRQ15_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ14, IRQ14_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ13, IRQ13_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ12, IRQ12_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ11, IRQ11_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ10, IRQ10_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ9, IRQ9_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ8, IRQ8_MARK),
- PINMUX_GPIO(GPIO_FN_ON_NRE, ON_NRE_MARK),
- PINMUX_GPIO(GPIO_FN_ON_NWE, ON_NWE_MARK),
- PINMUX_GPIO(GPIO_FN_ON_NWP, ON_NWP_MARK),
- PINMUX_GPIO(GPIO_FN_ON_NCE0, ON_NCE0_MARK),
- PINMUX_GPIO(GPIO_FN_ON_R_B0, ON_R_B0_MARK),
- PINMUX_GPIO(GPIO_FN_ON_ALE, ON_ALE_MARK),
- PINMUX_GPIO(GPIO_FN_ON_CLE, ON_CLE_MARK),
- PINMUX_GPIO(GPIO_FN_TCLK, TCLK_MARK),
+ GPIO_FN(IRQ15),
+ GPIO_FN(IRQ14),
+ GPIO_FN(IRQ13),
+ GPIO_FN(IRQ12),
+ GPIO_FN(IRQ11),
+ GPIO_FN(IRQ10),
+ GPIO_FN(IRQ9),
+ GPIO_FN(IRQ8),
+ GPIO_FN(ON_NRE),
+ GPIO_FN(ON_NWE),
+ GPIO_FN(ON_NWP),
+ GPIO_FN(ON_NCE0),
+ GPIO_FN(ON_R_B0),
+ GPIO_FN(ON_ALE),
+ GPIO_FN(ON_CLE),
+ GPIO_FN(TCLK),
/* PTC (module: IRQ, PWMU) */
- PINMUX_GPIO(GPIO_FN_IRQ7, IRQ7_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ6, IRQ6_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK),
- PINMUX_GPIO(GPIO_FN_PWMU0, PWMU0_MARK),
- PINMUX_GPIO(GPIO_FN_PWMU1, PWMU1_MARK),
- PINMUX_GPIO(GPIO_FN_PWMU2, PWMU2_MARK),
- PINMUX_GPIO(GPIO_FN_PWMU3, PWMU3_MARK),
- PINMUX_GPIO(GPIO_FN_PWMU4, PWMU4_MARK),
- PINMUX_GPIO(GPIO_FN_PWMU5, PWMU5_MARK),
+ GPIO_FN(IRQ7),
+ GPIO_FN(IRQ6),
+ GPIO_FN(IRQ5),
+ GPIO_FN(IRQ4),
+ GPIO_FN(IRQ3),
+ GPIO_FN(IRQ2),
+ GPIO_FN(IRQ1),
+ GPIO_FN(IRQ0),
+ GPIO_FN(PWMU0),
+ GPIO_FN(PWMU1),
+ GPIO_FN(PWMU2),
+ GPIO_FN(PWMU3),
+ GPIO_FN(PWMU4),
+ GPIO_FN(PWMU5),
/* PTD (module: SPI0, DMAC) */
- PINMUX_GPIO(GPIO_FN_SP0_MOSI, SP0_MOSI_MARK),
- PINMUX_GPIO(GPIO_FN_SP0_MISO, SP0_MISO_MARK),
- PINMUX_GPIO(GPIO_FN_SP0_SCK, SP0_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SP0_SCK_FB, SP0_SCK_FB_MARK),
- PINMUX_GPIO(GPIO_FN_SP0_SS0, SP0_SS0_MARK),
- PINMUX_GPIO(GPIO_FN_SP0_SS1, SP0_SS1_MARK),
- PINMUX_GPIO(GPIO_FN_SP0_SS2, SP0_SS2_MARK),
- PINMUX_GPIO(GPIO_FN_SP0_SS3, SP0_SS3_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
- PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
- PINMUX_GPIO(GPIO_FN_TEND0, TEND0_MARK),
+ GPIO_FN(SP0_MOSI),
+ GPIO_FN(SP0_MISO),
+ GPIO_FN(SP0_SCK),
+ GPIO_FN(SP0_SCK_FB),
+ GPIO_FN(SP0_SS0),
+ GPIO_FN(SP0_SS1),
+ GPIO_FN(SP0_SS2),
+ GPIO_FN(SP0_SS3),
+ GPIO_FN(DREQ0),
+ GPIO_FN(DACK0),
+ GPIO_FN(TEND0),
/* PTE (module: RMII) */
- PINMUX_GPIO(GPIO_FN_RMII0_CRS_DV, RMII0_CRS_DV_MARK),
- PINMUX_GPIO(GPIO_FN_RMII0_TXD1, RMII0_TXD1_MARK),
- PINMUX_GPIO(GPIO_FN_RMII0_TXD0, RMII0_TXD0_MARK),
- PINMUX_GPIO(GPIO_FN_RMII0_TXEN, RMII0_TXEN_MARK),
- PINMUX_GPIO(GPIO_FN_RMII0_REFCLK, RMII0_REFCLK_MARK),
- PINMUX_GPIO(GPIO_FN_RMII0_RXD1, RMII0_RXD1_MARK),
- PINMUX_GPIO(GPIO_FN_RMII0_RXD0, RMII0_RXD0_MARK),
- PINMUX_GPIO(GPIO_FN_RMII0_RX_ER, RMII0_RX_ER_MARK),
+ GPIO_FN(RMII0_CRS_DV),
+ GPIO_FN(RMII0_TXD1),
+ GPIO_FN(RMII0_TXD0),
+ GPIO_FN(RMII0_TXEN),
+ GPIO_FN(RMII0_REFCLK),
+ GPIO_FN(RMII0_RXD1),
+ GPIO_FN(RMII0_RXD0),
+ GPIO_FN(RMII0_RX_ER),
/* PTF (module: RMII, SerMux) */
- PINMUX_GPIO(GPIO_FN_RMII1_CRS_DV, RMII1_CRS_DV_MARK),
- PINMUX_GPIO(GPIO_FN_RMII1_TXD1, RMII1_TXD1_MARK),
- PINMUX_GPIO(GPIO_FN_RMII1_TXD0, RMII1_TXD0_MARK),
- PINMUX_GPIO(GPIO_FN_RMII1_TXEN, RMII1_TXEN_MARK),
- PINMUX_GPIO(GPIO_FN_RMII1_REFCLK, RMII1_REFCLK_MARK),
- PINMUX_GPIO(GPIO_FN_RMII1_RXD1, RMII1_RXD1_MARK),
- PINMUX_GPIO(GPIO_FN_RMII1_RXD0, RMII1_RXD0_MARK),
- PINMUX_GPIO(GPIO_FN_RMII1_RX_ER, RMII1_RX_ER_MARK),
- PINMUX_GPIO(GPIO_FN_RAC_RI, RAC_RI_MARK),
+ GPIO_FN(RMII1_CRS_DV),
+ GPIO_FN(RMII1_TXD1),
+ GPIO_FN(RMII1_TXD0),
+ GPIO_FN(RMII1_TXEN),
+ GPIO_FN(RMII1_REFCLK),
+ GPIO_FN(RMII1_RXD1),
+ GPIO_FN(RMII1_RXD0),
+ GPIO_FN(RMII1_RX_ER),
+ GPIO_FN(RAC_RI),
/* PTG (module: system, LBSC, LPC, WDT, LPC, eMMC) */
- PINMUX_GPIO(GPIO_FN_BOOTFMS, BOOTFMS_MARK),
- PINMUX_GPIO(GPIO_FN_BOOTWP, BOOTWP_MARK),
- PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
- PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
- PINMUX_GPIO(GPIO_FN_SERIRQ, SERIRQ_MARK),
- PINMUX_GPIO(GPIO_FN_WDTOVF, WDTOVF_MARK),
- PINMUX_GPIO(GPIO_FN_LPCPD, LPCPD_MARK),
- PINMUX_GPIO(GPIO_FN_LDRQ, LDRQ_MARK),
- PINMUX_GPIO(GPIO_FN_MMCCLK, MMCCLK_MARK),
- PINMUX_GPIO(GPIO_FN_MMCCMD, MMCCMD_MARK),
+ GPIO_FN(BOOTFMS),
+ GPIO_FN(BOOTWP),
+ GPIO_FN(A25),
+ GPIO_FN(A24),
+ GPIO_FN(SERIRQ),
+ GPIO_FN(WDTOVF),
+ GPIO_FN(LPCPD),
+ GPIO_FN(LDRQ),
+ GPIO_FN(MMCCLK),
+ GPIO_FN(MMCCMD),
/* PTH (module: SPI1, LPC, DMAC, ADC) */
- PINMUX_GPIO(GPIO_FN_SP1_MOSI, SP1_MOSI_MARK),
- PINMUX_GPIO(GPIO_FN_SP1_MISO, SP1_MISO_MARK),
- PINMUX_GPIO(GPIO_FN_SP1_SCK, SP1_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SP1_SCK_FB, SP1_SCK_FB_MARK),
- PINMUX_GPIO(GPIO_FN_SP1_SS0, SP1_SS0_MARK),
- PINMUX_GPIO(GPIO_FN_SP1_SS1, SP1_SS1_MARK),
- PINMUX_GPIO(GPIO_FN_WP, WP_MARK),
- PINMUX_GPIO(GPIO_FN_FMS0, FMS0_MARK),
- PINMUX_GPIO(GPIO_FN_TEND1, TEND1_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
- PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
- PINMUX_GPIO(GPIO_FN_ADTRG1, ADTRG1_MARK),
- PINMUX_GPIO(GPIO_FN_ADTRG0, ADTRG0_MARK),
+ GPIO_FN(SP1_MOSI),
+ GPIO_FN(SP1_MISO),
+ GPIO_FN(SP1_SCK),
+ GPIO_FN(SP1_SCK_FB),
+ GPIO_FN(SP1_SS0),
+ GPIO_FN(SP1_SS1),
+ GPIO_FN(WP),
+ GPIO_FN(FMS0),
+ GPIO_FN(TEND1),
+ GPIO_FN(DREQ1),
+ GPIO_FN(DACK1),
+ GPIO_FN(ADTRG1),
+ GPIO_FN(ADTRG0),
/* PTI (module: LBSC, SDHI) */
- PINMUX_GPIO(GPIO_FN_D15, D15_MARK),
- PINMUX_GPIO(GPIO_FN_D14, D14_MARK),
- PINMUX_GPIO(GPIO_FN_D13, D13_MARK),
- PINMUX_GPIO(GPIO_FN_D12, D12_MARK),
- PINMUX_GPIO(GPIO_FN_D11, D11_MARK),
- PINMUX_GPIO(GPIO_FN_D10, D10_MARK),
- PINMUX_GPIO(GPIO_FN_D9, D9_MARK),
- PINMUX_GPIO(GPIO_FN_D8, D8_MARK),
- PINMUX_GPIO(GPIO_FN_SD_WP, SD_WP_MARK),
- PINMUX_GPIO(GPIO_FN_SD_CD, SD_CD_MARK),
- PINMUX_GPIO(GPIO_FN_SD_CLK, SD_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_SD_CMD, SD_CMD_MARK),
- PINMUX_GPIO(GPIO_FN_SD_D3, SD_D3_MARK),
- PINMUX_GPIO(GPIO_FN_SD_D2, SD_D2_MARK),
- PINMUX_GPIO(GPIO_FN_SD_D1, SD_D1_MARK),
- PINMUX_GPIO(GPIO_FN_SD_D0, SD_D0_MARK),
+ GPIO_FN(D15),
+ GPIO_FN(D14),
+ GPIO_FN(D13),
+ GPIO_FN(D12),
+ GPIO_FN(D11),
+ GPIO_FN(D10),
+ GPIO_FN(D9),
+ GPIO_FN(D8),
+ GPIO_FN(SD_WP),
+ GPIO_FN(SD_CD),
+ GPIO_FN(SD_CLK),
+ GPIO_FN(SD_CMD),
+ GPIO_FN(SD_D3),
+ GPIO_FN(SD_D2),
+ GPIO_FN(SD_D1),
+ GPIO_FN(SD_D0),
/* PTJ (module: SCIF234, SERMUX) */
- PINMUX_GPIO(GPIO_FN_RTS3, RTS3_MARK),
- PINMUX_GPIO(GPIO_FN_CTS3, CTS3_MARK),
- PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK),
- PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
- PINMUX_GPIO(GPIO_FN_RTS4, RTS4_MARK),
- PINMUX_GPIO(GPIO_FN_RXD4, RXD4_MARK),
- PINMUX_GPIO(GPIO_FN_TXD4, TXD4_MARK),
+ GPIO_FN(RTS3),
+ GPIO_FN(CTS3),
+ GPIO_FN(TXD3),
+ GPIO_FN(RXD3),
+ GPIO_FN(RTS4),
+ GPIO_FN(RXD4),
+ GPIO_FN(TXD4),
/* PTK (module: SERMUX, LBSC, SCIF) */
- PINMUX_GPIO(GPIO_FN_COM2_TXD, COM2_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_COM2_RXD, COM2_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_COM2_RTS, COM2_RTS_MARK),
- PINMUX_GPIO(GPIO_FN_COM2_CTS, COM2_CTS_MARK),
- PINMUX_GPIO(GPIO_FN_COM2_DTR, COM2_DTR_MARK),
- PINMUX_GPIO(GPIO_FN_COM2_DSR, COM2_DSR_MARK),
- PINMUX_GPIO(GPIO_FN_COM2_DCD, COM2_DCD_MARK),
- PINMUX_GPIO(GPIO_FN_CLKOUT, CLKOUT_MARK),
- PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
- PINMUX_GPIO(GPIO_FN_SCK4, SCK4_MARK),
- PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
+ GPIO_FN(COM2_TXD),
+ GPIO_FN(COM2_RXD),
+ GPIO_FN(COM2_RTS),
+ GPIO_FN(COM2_CTS),
+ GPIO_FN(COM2_DTR),
+ GPIO_FN(COM2_DSR),
+ GPIO_FN(COM2_DCD),
+ GPIO_FN(CLKOUT),
+ GPIO_FN(SCK2),
+ GPIO_FN(SCK4),
+ GPIO_FN(SCK3),
/* PTL (module: SERMUX, SCIF, LBSC, AUD) */
- PINMUX_GPIO(GPIO_FN_RAC_RXD, RAC_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_RAC_RTS, RAC_RTS_MARK),
- PINMUX_GPIO(GPIO_FN_RAC_CTS, RAC_CTS_MARK),
- PINMUX_GPIO(GPIO_FN_RAC_DTR, RAC_DTR_MARK),
- PINMUX_GPIO(GPIO_FN_RAC_DSR, RAC_DSR_MARK),
- PINMUX_GPIO(GPIO_FN_RAC_DCD, RAC_DCD_MARK),
- PINMUX_GPIO(GPIO_FN_RAC_TXD, RAC_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
- PINMUX_GPIO(GPIO_FN_CS5, CS5_MARK),
- PINMUX_GPIO(GPIO_FN_CS6, CS6_MARK),
- PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK),
- PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
+ GPIO_FN(RAC_RXD),
+ GPIO_FN(RAC_RTS),
+ GPIO_FN(RAC_CTS),
+ GPIO_FN(RAC_DTR),
+ GPIO_FN(RAC_DSR),
+ GPIO_FN(RAC_DCD),
+ GPIO_FN(RAC_TXD),
+ GPIO_FN(RXD2),
+ GPIO_FN(CS5),
+ GPIO_FN(CS6),
+ GPIO_FN(AUDSYNC),
+ GPIO_FN(AUDCK),
+ GPIO_FN(TXD2),
/* PTM (module: LBSC, IIC) */
- PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
- PINMUX_GPIO(GPIO_FN_RD, RD_MARK),
- PINMUX_GPIO(GPIO_FN_WE0, WE0_MARK),
- PINMUX_GPIO(GPIO_FN_CS0, CS0_MARK),
- PINMUX_GPIO(GPIO_FN_SDA6, SDA6_MARK),
- PINMUX_GPIO(GPIO_FN_SCL6, SCL6_MARK),
- PINMUX_GPIO(GPIO_FN_SDA7, SDA7_MARK),
- PINMUX_GPIO(GPIO_FN_SCL7, SCL7_MARK),
+ GPIO_FN(CS4),
+ GPIO_FN(RD),
+ GPIO_FN(WE0),
+ GPIO_FN(CS0),
+ GPIO_FN(SDA6),
+ GPIO_FN(SCL6),
+ GPIO_FN(SDA7),
+ GPIO_FN(SCL7),
/* PTN (module: USB, JMC, SGPIO, WDT) */
- PINMUX_GPIO(GPIO_FN_VBUS_EN, VBUS_EN_MARK),
- PINMUX_GPIO(GPIO_FN_VBUS_OC, VBUS_OC_MARK),
- PINMUX_GPIO(GPIO_FN_JMCTCK, JMCTCK_MARK),
- PINMUX_GPIO(GPIO_FN_JMCTMS, JMCTMS_MARK),
- PINMUX_GPIO(GPIO_FN_JMCTDO, JMCTDO_MARK),
- PINMUX_GPIO(GPIO_FN_JMCTDI, JMCTDI_MARK),
- PINMUX_GPIO(GPIO_FN_JMCTRST, JMCTRST_MARK),
- PINMUX_GPIO(GPIO_FN_SGPIO1_CLK, SGPIO1_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_SGPIO1_LOAD, SGPIO1_LOAD_MARK),
- PINMUX_GPIO(GPIO_FN_SGPIO1_DI, SGPIO1_DI_MARK),
- PINMUX_GPIO(GPIO_FN_SGPIO1_DO, SGPIO1_DO_MARK),
- PINMUX_GPIO(GPIO_FN_SUB_CLKIN, SUB_CLKIN_MARK),
+ GPIO_FN(VBUS_EN),
+ GPIO_FN(VBUS_OC),
+ GPIO_FN(JMCTCK),
+ GPIO_FN(JMCTMS),
+ GPIO_FN(JMCTDO),
+ GPIO_FN(JMCTDI),
+ GPIO_FN(JMCTRST),
+ GPIO_FN(SGPIO1_CLK),
+ GPIO_FN(SGPIO1_LOAD),
+ GPIO_FN(SGPIO1_DI),
+ GPIO_FN(SGPIO1_DO),
+ GPIO_FN(SUB_CLKIN),
/* PTO (module: SGPIO, SerMux) */
- PINMUX_GPIO(GPIO_FN_SGPIO0_CLK, SGPIO0_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_SGPIO0_LOAD, SGPIO0_LOAD_MARK),
- PINMUX_GPIO(GPIO_FN_SGPIO0_DI, SGPIO0_DI_MARK),
- PINMUX_GPIO(GPIO_FN_SGPIO0_DO, SGPIO0_DO_MARK),
- PINMUX_GPIO(GPIO_FN_SGPIO2_CLK, SGPIO2_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_SGPIO2_LOAD, SGPIO2_LOAD_MARK),
- PINMUX_GPIO(GPIO_FN_SGPIO2_DI, SGPIO2_DI_MARK),
- PINMUX_GPIO(GPIO_FN_SGPIO2_DO, SGPIO2_DO_MARK),
- PINMUX_GPIO(GPIO_FN_COM1_TXD, COM1_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_COM1_RXD, COM1_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_COM1_RTS, COM1_RTS_MARK),
- PINMUX_GPIO(GPIO_FN_COM1_CTS, COM1_CTS_MARK),
+ GPIO_FN(SGPIO0_CLK),
+ GPIO_FN(SGPIO0_LOAD),
+ GPIO_FN(SGPIO0_DI),
+ GPIO_FN(SGPIO0_DO),
+ GPIO_FN(SGPIO2_CLK),
+ GPIO_FN(SGPIO2_LOAD),
+ GPIO_FN(SGPIO2_DI),
+ GPIO_FN(SGPIO2_DO),
+ GPIO_FN(COM1_TXD),
+ GPIO_FN(COM1_RXD),
+ GPIO_FN(COM1_RTS),
+ GPIO_FN(COM1_CTS),
/* PTP (module: EVC, ADC) */
/* PTQ (module: LPC) */
- PINMUX_GPIO(GPIO_FN_LAD3, LAD3_MARK),
- PINMUX_GPIO(GPIO_FN_LAD2, LAD2_MARK),
- PINMUX_GPIO(GPIO_FN_LAD1, LAD1_MARK),
- PINMUX_GPIO(GPIO_FN_LAD0, LAD0_MARK),
- PINMUX_GPIO(GPIO_FN_LFRAME, LFRAME_MARK),
- PINMUX_GPIO(GPIO_FN_LRESET, LRESET_MARK),
- PINMUX_GPIO(GPIO_FN_LCLK, LCLK_MARK),
+ GPIO_FN(LAD3),
+ GPIO_FN(LAD2),
+ GPIO_FN(LAD1),
+ GPIO_FN(LAD0),
+ GPIO_FN(LFRAME),
+ GPIO_FN(LRESET),
+ GPIO_FN(LCLK),
/* PTR (module: GRA, IIC) */
- PINMUX_GPIO(GPIO_FN_DDC3, DDC3_MARK),
- PINMUX_GPIO(GPIO_FN_DDC2, DDC2_MARK),
- PINMUX_GPIO(GPIO_FN_SDA8, SDA8_MARK),
- PINMUX_GPIO(GPIO_FN_SCL8, SCL8_MARK),
- PINMUX_GPIO(GPIO_FN_SDA2, SDA2_MARK),
- PINMUX_GPIO(GPIO_FN_SCL2, SCL2_MARK),
- PINMUX_GPIO(GPIO_FN_SDA1, SDA1_MARK),
- PINMUX_GPIO(GPIO_FN_SCL1, SCL1_MARK),
- PINMUX_GPIO(GPIO_FN_SDA0, SDA0_MARK),
- PINMUX_GPIO(GPIO_FN_SCL0, SCL0_MARK),
+ GPIO_FN(DDC3),
+ GPIO_FN(DDC2),
+ GPIO_FN(SDA8),
+ GPIO_FN(SCL8),
+ GPIO_FN(SDA2),
+ GPIO_FN(SCL2),
+ GPIO_FN(SDA1),
+ GPIO_FN(SCL1),
+ GPIO_FN(SDA0),
+ GPIO_FN(SCL0),
/* PTS (module: GRA, IIC) */
- PINMUX_GPIO(GPIO_FN_DDC1, DDC1_MARK),
- PINMUX_GPIO(GPIO_FN_DDC0, DDC0_MARK),
- PINMUX_GPIO(GPIO_FN_SDA9, SDA9_MARK),
- PINMUX_GPIO(GPIO_FN_SCL9, SCL9_MARK),
- PINMUX_GPIO(GPIO_FN_SDA5, SDA5_MARK),
- PINMUX_GPIO(GPIO_FN_SCL5, SCL5_MARK),
- PINMUX_GPIO(GPIO_FN_SDA4, SDA4_MARK),
- PINMUX_GPIO(GPIO_FN_SCL4, SCL4_MARK),
- PINMUX_GPIO(GPIO_FN_SDA3, SDA3_MARK),
- PINMUX_GPIO(GPIO_FN_SCL3, SCL3_MARK),
+ GPIO_FN(DDC1),
+ GPIO_FN(DDC0),
+ GPIO_FN(SDA9),
+ GPIO_FN(SCL9),
+ GPIO_FN(SDA5),
+ GPIO_FN(SCL5),
+ GPIO_FN(SDA4),
+ GPIO_FN(SCL4),
+ GPIO_FN(SDA3),
+ GPIO_FN(SCL3),
/* PTT (module: PWMX, AUD) */
- PINMUX_GPIO(GPIO_FN_PWMX7, PWMX7_MARK),
- PINMUX_GPIO(GPIO_FN_PWMX6, PWMX6_MARK),
- PINMUX_GPIO(GPIO_FN_PWMX5, PWMX5_MARK),
- PINMUX_GPIO(GPIO_FN_PWMX4, PWMX4_MARK),
- PINMUX_GPIO(GPIO_FN_PWMX3, PWMX3_MARK),
- PINMUX_GPIO(GPIO_FN_PWMX2, PWMX2_MARK),
- PINMUX_GPIO(GPIO_FN_PWMX1, PWMX1_MARK),
- PINMUX_GPIO(GPIO_FN_PWMX0, PWMX0_MARK),
- PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
- PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
- PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
- PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
- PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK),
- PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+ GPIO_FN(PWMX7),
+ GPIO_FN(PWMX6),
+ GPIO_FN(PWMX5),
+ GPIO_FN(PWMX4),
+ GPIO_FN(PWMX3),
+ GPIO_FN(PWMX2),
+ GPIO_FN(PWMX1),
+ GPIO_FN(PWMX0),
+ GPIO_FN(AUDATA3),
+ GPIO_FN(AUDATA2),
+ GPIO_FN(AUDATA1),
+ GPIO_FN(AUDATA0),
+ GPIO_FN(STATUS1),
+ GPIO_FN(STATUS0),
/* PTU (module: LPC, APM) */
- PINMUX_GPIO(GPIO_FN_LGPIO7, LGPIO7_MARK),
- PINMUX_GPIO(GPIO_FN_LGPIO6, LGPIO6_MARK),
- PINMUX_GPIO(GPIO_FN_LGPIO5, LGPIO5_MARK),
- PINMUX_GPIO(GPIO_FN_LGPIO4, LGPIO4_MARK),
- PINMUX_GPIO(GPIO_FN_LGPIO3, LGPIO3_MARK),
- PINMUX_GPIO(GPIO_FN_LGPIO2, LGPIO2_MARK),
- PINMUX_GPIO(GPIO_FN_LGPIO1, LGPIO1_MARK),
- PINMUX_GPIO(GPIO_FN_LGPIO0, LGPIO0_MARK),
- PINMUX_GPIO(GPIO_FN_APMONCTL_O, APMONCTL_O_MARK),
- PINMUX_GPIO(GPIO_FN_APMPWBTOUT_O, APMPWBTOUT_O_MARK),
- PINMUX_GPIO(GPIO_FN_APMSCI_O, APMSCI_O_MARK),
- PINMUX_GPIO(GPIO_FN_APMVDDON, APMVDDON_MARK),
- PINMUX_GPIO(GPIO_FN_APMSLPBTN, APMSLPBTN_MARK),
- PINMUX_GPIO(GPIO_FN_APMPWRBTN, APMPWRBTN_MARK),
- PINMUX_GPIO(GPIO_FN_APMS5N, APMS5N_MARK),
- PINMUX_GPIO(GPIO_FN_APMS3N, APMS3N_MARK),
+ GPIO_FN(LGPIO7),
+ GPIO_FN(LGPIO6),
+ GPIO_FN(LGPIO5),
+ GPIO_FN(LGPIO4),
+ GPIO_FN(LGPIO3),
+ GPIO_FN(LGPIO2),
+ GPIO_FN(LGPIO1),
+ GPIO_FN(LGPIO0),
+ GPIO_FN(APMONCTL_O),
+ GPIO_FN(APMPWBTOUT_O),
+ GPIO_FN(APMSCI_O),
+ GPIO_FN(APMVDDON),
+ GPIO_FN(APMSLPBTN),
+ GPIO_FN(APMPWRBTN),
+ GPIO_FN(APMS5N),
+ GPIO_FN(APMS3N),
/* PTV (module: LBSC, SerMux, R-SPI, EVC, GRA) */
- PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
- PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
- PINMUX_GPIO(GPIO_FN_A21, A21_MARK),
- PINMUX_GPIO(GPIO_FN_A20, A20_MARK),
- PINMUX_GPIO(GPIO_FN_A19, A19_MARK),
- PINMUX_GPIO(GPIO_FN_A18, A18_MARK),
- PINMUX_GPIO(GPIO_FN_A17, A17_MARK),
- PINMUX_GPIO(GPIO_FN_A16, A16_MARK),
- PINMUX_GPIO(GPIO_FN_COM2_RI, COM2_RI_MARK),
- PINMUX_GPIO(GPIO_FN_R_SPI_MOSI, R_SPI_MOSI_MARK),
- PINMUX_GPIO(GPIO_FN_R_SPI_MISO, R_SPI_MISO_MARK),
- PINMUX_GPIO(GPIO_FN_R_SPI_RSPCK, R_SPI_RSPCK_MARK),
- PINMUX_GPIO(GPIO_FN_R_SPI_SSL0, R_SPI_SSL0_MARK),
- PINMUX_GPIO(GPIO_FN_R_SPI_SSL1, R_SPI_SSL1_MARK),
- PINMUX_GPIO(GPIO_FN_EVENT7, EVENT7_MARK),
- PINMUX_GPIO(GPIO_FN_EVENT6, EVENT6_MARK),
- PINMUX_GPIO(GPIO_FN_VBIOS_DI, VBIOS_DI_MARK),
- PINMUX_GPIO(GPIO_FN_VBIOS_DO, VBIOS_DO_MARK),
- PINMUX_GPIO(GPIO_FN_VBIOS_CLK, VBIOS_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_VBIOS_CS, VBIOS_CS_MARK),
+ GPIO_FN(A23),
+ GPIO_FN(A22),
+ GPIO_FN(A21),
+ GPIO_FN(A20),
+ GPIO_FN(A19),
+ GPIO_FN(A18),
+ GPIO_FN(A17),
+ GPIO_FN(A16),
+ GPIO_FN(COM2_RI),
+ GPIO_FN(R_SPI_MOSI),
+ GPIO_FN(R_SPI_MISO),
+ GPIO_FN(R_SPI_RSPCK),
+ GPIO_FN(R_SPI_SSL0),
+ GPIO_FN(R_SPI_SSL1),
+ GPIO_FN(EVENT7),
+ GPIO_FN(EVENT6),
+ GPIO_FN(VBIOS_DI),
+ GPIO_FN(VBIOS_DO),
+ GPIO_FN(VBIOS_CLK),
+ GPIO_FN(VBIOS_CS),
/* PTW (module: LBSC, EVC, SCIF) */
- PINMUX_GPIO(GPIO_FN_A16, A16_MARK),
- PINMUX_GPIO(GPIO_FN_A15, A15_MARK),
- PINMUX_GPIO(GPIO_FN_A14, A14_MARK),
- PINMUX_GPIO(GPIO_FN_A13, A13_MARK),
- PINMUX_GPIO(GPIO_FN_A12, A12_MARK),
- PINMUX_GPIO(GPIO_FN_A11, A11_MARK),
- PINMUX_GPIO(GPIO_FN_A10, A10_MARK),
- PINMUX_GPIO(GPIO_FN_A9, A9_MARK),
- PINMUX_GPIO(GPIO_FN_A8, A8_MARK),
- PINMUX_GPIO(GPIO_FN_EVENT5, EVENT5_MARK),
- PINMUX_GPIO(GPIO_FN_EVENT4, EVENT4_MARK),
- PINMUX_GPIO(GPIO_FN_EVENT3, EVENT3_MARK),
- PINMUX_GPIO(GPIO_FN_EVENT2, EVENT2_MARK),
- PINMUX_GPIO(GPIO_FN_EVENT1, EVENT1_MARK),
- PINMUX_GPIO(GPIO_FN_EVENT0, EVENT0_MARK),
- PINMUX_GPIO(GPIO_FN_CTS4, CTS4_MARK),
- PINMUX_GPIO(GPIO_FN_CTS2, CTS2_MARK),
+ GPIO_FN(A16),
+ GPIO_FN(A15),
+ GPIO_FN(A14),
+ GPIO_FN(A13),
+ GPIO_FN(A12),
+ GPIO_FN(A11),
+ GPIO_FN(A10),
+ GPIO_FN(A9),
+ GPIO_FN(A8),
+ GPIO_FN(EVENT5),
+ GPIO_FN(EVENT4),
+ GPIO_FN(EVENT3),
+ GPIO_FN(EVENT2),
+ GPIO_FN(EVENT1),
+ GPIO_FN(EVENT0),
+ GPIO_FN(CTS4),
+ GPIO_FN(CTS2),
/* PTX (module: LBSC) */
- PINMUX_GPIO(GPIO_FN_A7, A7_MARK),
- PINMUX_GPIO(GPIO_FN_A6, A6_MARK),
- PINMUX_GPIO(GPIO_FN_A5, A5_MARK),
- PINMUX_GPIO(GPIO_FN_A4, A4_MARK),
- PINMUX_GPIO(GPIO_FN_A3, A3_MARK),
- PINMUX_GPIO(GPIO_FN_A2, A2_MARK),
- PINMUX_GPIO(GPIO_FN_A1, A1_MARK),
- PINMUX_GPIO(GPIO_FN_A0, A0_MARK),
- PINMUX_GPIO(GPIO_FN_RTS2, RTS2_MARK),
- PINMUX_GPIO(GPIO_FN_SIM_D, SIM_D_MARK),
- PINMUX_GPIO(GPIO_FN_SIM_CLK, SIM_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_SIM_RST, SIM_RST_MARK),
+ GPIO_FN(A7),
+ GPIO_FN(A6),
+ GPIO_FN(A5),
+ GPIO_FN(A4),
+ GPIO_FN(A3),
+ GPIO_FN(A2),
+ GPIO_FN(A1),
+ GPIO_FN(A0),
+ GPIO_FN(RTS2),
+ GPIO_FN(SIM_D),
+ GPIO_FN(SIM_CLK),
+ GPIO_FN(SIM_RST),
/* PTY (module: LBSC) */
- PINMUX_GPIO(GPIO_FN_D7, D7_MARK),
- PINMUX_GPIO(GPIO_FN_D6, D6_MARK),
- PINMUX_GPIO(GPIO_FN_D5, D5_MARK),
- PINMUX_GPIO(GPIO_FN_D4, D4_MARK),
- PINMUX_GPIO(GPIO_FN_D3, D3_MARK),
- PINMUX_GPIO(GPIO_FN_D2, D2_MARK),
- PINMUX_GPIO(GPIO_FN_D1, D1_MARK),
- PINMUX_GPIO(GPIO_FN_D0, D0_MARK),
+ GPIO_FN(D7),
+ GPIO_FN(D6),
+ GPIO_FN(D5),
+ GPIO_FN(D4),
+ GPIO_FN(D3),
+ GPIO_FN(D2),
+ GPIO_FN(D1),
+ GPIO_FN(D0),
/* PTZ (module: eMMC, ONFI) */
- PINMUX_GPIO(GPIO_FN_MMCDAT7, MMCDAT7_MARK),
- PINMUX_GPIO(GPIO_FN_MMCDAT6, MMCDAT6_MARK),
- PINMUX_GPIO(GPIO_FN_MMCDAT5, MMCDAT5_MARK),
- PINMUX_GPIO(GPIO_FN_MMCDAT4, MMCDAT4_MARK),
- PINMUX_GPIO(GPIO_FN_MMCDAT3, MMCDAT3_MARK),
- PINMUX_GPIO(GPIO_FN_MMCDAT2, MMCDAT2_MARK),
- PINMUX_GPIO(GPIO_FN_MMCDAT1, MMCDAT1_MARK),
- PINMUX_GPIO(GPIO_FN_MMCDAT0, MMCDAT0_MARK),
- PINMUX_GPIO(GPIO_FN_ON_DQ7, ON_DQ7_MARK),
- PINMUX_GPIO(GPIO_FN_ON_DQ6, ON_DQ6_MARK),
- PINMUX_GPIO(GPIO_FN_ON_DQ5, ON_DQ5_MARK),
- PINMUX_GPIO(GPIO_FN_ON_DQ4, ON_DQ4_MARK),
- PINMUX_GPIO(GPIO_FN_ON_DQ3, ON_DQ3_MARK),
- PINMUX_GPIO(GPIO_FN_ON_DQ2, ON_DQ2_MARK),
- PINMUX_GPIO(GPIO_FN_ON_DQ1, ON_DQ1_MARK),
- PINMUX_GPIO(GPIO_FN_ON_DQ0, ON_DQ0_MARK),
+ GPIO_FN(MMCDAT7),
+ GPIO_FN(MMCDAT6),
+ GPIO_FN(MMCDAT5),
+ GPIO_FN(MMCDAT4),
+ GPIO_FN(MMCDAT3),
+ GPIO_FN(MMCDAT2),
+ GPIO_FN(MMCDAT1),
+ GPIO_FN(MMCDAT0),
+ GPIO_FN(ON_DQ7),
+ GPIO_FN(ON_DQ6),
+ GPIO_FN(ON_DQ5),
+ GPIO_FN(ON_DQ4),
+ GPIO_FN(ON_DQ3),
+ GPIO_FN(ON_DQ2),
+ GPIO_FN(ON_DQ1),
+ GPIO_FN(ON_DQ0),
};
-static struct pinmux_cfg_reg pinmux_config_regs[] = {
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("PACR", 0xffec0000, 16, 2) {
PTA7_FN, PTA7_OUT, PTA7_IN, PTA7_IN_PU,
PTA6_FN, PTA6_OUT, PTA6_IN, PTA6_IN_PU,
@@ -2152,7 +2156,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
{}
};
-static struct pinmux_data_reg pinmux_data_regs[] = {
+static const struct pinmux_data_reg pinmux_data_regs[] = {
{ PINMUX_DATA_REG("PADR", 0xffec0034, 8) {
PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
@@ -2260,20 +2264,18 @@ static struct pinmux_data_reg pinmux_data_regs[] = {
{ },
};
-struct sh_pfc_soc_info sh7757_pinmux_info = {
+const struct sh_pfc_soc_info sh7757_pinmux_info = {
.name = "sh7757_pfc",
- .reserved_id = PINMUX_RESERVED,
- .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
.input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
- .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
- .first_gpio = GPIO_PTA0,
- .last_gpio = GPIO_FN_ON_DQ0,
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .func_gpios = pinmux_func_gpios,
+ .nr_func_gpios = ARRAY_SIZE(pinmux_func_gpios),
- .gpios = pinmux_gpios,
.cfg_regs = pinmux_config_regs,
.data_regs = pinmux_data_regs,
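(The long runs of one-line conversions above replace each explicit
PINMUX_GPIO(GPIO_FN_x, x_MARK) entry with the shorter GPIO_FN(x) helper, now
that real pins and function GPIOs live in two separate tables. The GPIO_FN
macro itself is not part of the hunks shown here; judging from the
PINMUX_GPIO_FN helper added to sh_pfc.h later in this patch, it presumably
expands along these lines:

	/* Assumed definition, not shown in this diff: index the entry
	 * relative to PINMUX_FN_BASE and keep the mark as the enum ID.
	 */
	#define GPIO_FN(str) \
		PINMUX_GPIO_FN(GPIO_FN_##str, PINMUX_FN_BASE, str##_MARK)

so each function entry lands at slot GPIO_FN_x - PINMUX_FN_BASE in
pinmux_func_gpios[], keeping the function GPIO numbers contiguous with the
pins declared in pinmux_pins[].)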
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7785.c b/drivers/pinctrl/sh-pfc/pfc-sh7785.c
index 3b1825d..c176b79 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7785.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7785.c
@@ -355,7 +355,7 @@ enum {
PINMUX_MARK_END,
};
-static pinmux_enum_t pinmux_data[] = {
+static const pinmux_enum_t pinmux_data[] = {
/* PA GPIO */
PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT, PA7_IN_PU),
@@ -702,7 +702,7 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(IRQOUT_MARK, P2MSEL2_1),
};
-static struct pinmux_gpio pinmux_gpios[] = {
+static struct sh_pfc_pin pinmux_pins[] = {
/* PA */
PINMUX_GPIO(GPIO_PA7, PA7_DATA),
PINMUX_GPIO(GPIO_PA6, PA6_DATA),
@@ -845,176 +845,180 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_PR2, PR2_DATA),
PINMUX_GPIO(GPIO_PR1, PR1_DATA),
PINMUX_GPIO(GPIO_PR0, PR0_DATA),
+};
+
+#define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins)
+static const struct pinmux_func pinmux_func_gpios[] = {
/* FN */
- PINMUX_GPIO(GPIO_FN_D63_AD31, D63_AD31_MARK),
- PINMUX_GPIO(GPIO_FN_D62_AD30, D62_AD30_MARK),
- PINMUX_GPIO(GPIO_FN_D61_AD29, D61_AD29_MARK),
- PINMUX_GPIO(GPIO_FN_D60_AD28, D60_AD28_MARK),
- PINMUX_GPIO(GPIO_FN_D59_AD27, D59_AD27_MARK),
- PINMUX_GPIO(GPIO_FN_D58_AD26, D58_AD26_MARK),
- PINMUX_GPIO(GPIO_FN_D57_AD25, D57_AD25_MARK),
- PINMUX_GPIO(GPIO_FN_D56_AD24, D56_AD24_MARK),
- PINMUX_GPIO(GPIO_FN_D55_AD23, D55_AD23_MARK),
- PINMUX_GPIO(GPIO_FN_D54_AD22, D54_AD22_MARK),
- PINMUX_GPIO(GPIO_FN_D53_AD21, D53_AD21_MARK),
- PINMUX_GPIO(GPIO_FN_D52_AD20, D52_AD20_MARK),
- PINMUX_GPIO(GPIO_FN_D51_AD19, D51_AD19_MARK),
- PINMUX_GPIO(GPIO_FN_D50_AD18, D50_AD18_MARK),
- PINMUX_GPIO(GPIO_FN_D49_AD17_DB5, D49_AD17_DB5_MARK),
- PINMUX_GPIO(GPIO_FN_D48_AD16_DB4, D48_AD16_DB4_MARK),
- PINMUX_GPIO(GPIO_FN_D47_AD15_DB3, D47_AD15_DB3_MARK),
- PINMUX_GPIO(GPIO_FN_D46_AD14_DB2, D46_AD14_DB2_MARK),
- PINMUX_GPIO(GPIO_FN_D45_AD13_DB1, D45_AD13_DB1_MARK),
- PINMUX_GPIO(GPIO_FN_D44_AD12_DB0, D44_AD12_DB0_MARK),
- PINMUX_GPIO(GPIO_FN_D43_AD11_DG5, D43_AD11_DG5_MARK),
- PINMUX_GPIO(GPIO_FN_D42_AD10_DG4, D42_AD10_DG4_MARK),
- PINMUX_GPIO(GPIO_FN_D41_AD9_DG3, D41_AD9_DG3_MARK),
- PINMUX_GPIO(GPIO_FN_D40_AD8_DG2, D40_AD8_DG2_MARK),
- PINMUX_GPIO(GPIO_FN_D39_AD7_DG1, D39_AD7_DG1_MARK),
- PINMUX_GPIO(GPIO_FN_D38_AD6_DG0, D38_AD6_DG0_MARK),
- PINMUX_GPIO(GPIO_FN_D37_AD5_DR5, D37_AD5_DR5_MARK),
- PINMUX_GPIO(GPIO_FN_D36_AD4_DR4, D36_AD4_DR4_MARK),
- PINMUX_GPIO(GPIO_FN_D35_AD3_DR3, D35_AD3_DR3_MARK),
- PINMUX_GPIO(GPIO_FN_D34_AD2_DR2, D34_AD2_DR2_MARK),
- PINMUX_GPIO(GPIO_FN_D33_AD1_DR1, D33_AD1_DR1_MARK),
- PINMUX_GPIO(GPIO_FN_D32_AD0_DR0, D32_AD0_DR0_MARK),
- PINMUX_GPIO(GPIO_FN_REQ1, REQ1_MARK),
- PINMUX_GPIO(GPIO_FN_REQ2, REQ2_MARK),
- PINMUX_GPIO(GPIO_FN_REQ3, REQ3_MARK),
- PINMUX_GPIO(GPIO_FN_GNT1, GNT1_MARK),
- PINMUX_GPIO(GPIO_FN_GNT2, GNT2_MARK),
- PINMUX_GPIO(GPIO_FN_GNT3, GNT3_MARK),
- PINMUX_GPIO(GPIO_FN_MMCCLK, MMCCLK_MARK),
- PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
- PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
- PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
- PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
- PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
- PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
- PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
- PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
- PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
- PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
- PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
- PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
- PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
- PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
- PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
- PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_CTS, SCIF0_CTS_MARK),
- PINMUX_GPIO(GPIO_FN_INTD, INTD_MARK),
- PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_RTS, SCIF0_RTS_MARK),
- PINMUX_GPIO(GPIO_FN_HSPI_CS, HSPI_CS_MARK),
- PINMUX_GPIO(GPIO_FN_FSE, FSE_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_HSPI_CLK, HSPI_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_FRE, FRE_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_HSPI_RX, HSPI_RX_MARK),
- PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_HSPI_TX, HSPI_TX_MARK),
- PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF5_TXD, SCIF5_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_HAC1_SYNC, HAC1_SYNC_MARK),
- PINMUX_GPIO(GPIO_FN_SSI1_WS, SSI1_WS_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF_TXD_PJ, SIOF_TXD_PJ_MARK),
- PINMUX_GPIO(GPIO_FN_HAC0_SDOUT, HAC0_SDOUT_MARK),
- PINMUX_GPIO(GPIO_FN_SSI0_SDATA, SSI0_SDATA_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF_RXD_PJ, SIOF_RXD_PJ_MARK),
- PINMUX_GPIO(GPIO_FN_HAC0_SDIN, HAC0_SDIN_MARK),
- PINMUX_GPIO(GPIO_FN_SSI0_SCK, SSI0_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF_SYNC_PJ, SIOF_SYNC_PJ_MARK),
- PINMUX_GPIO(GPIO_FN_HAC0_SYNC, HAC0_SYNC_MARK),
- PINMUX_GPIO(GPIO_FN_SSI0_WS, SSI0_WS_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF_MCLK_PJ, SIOF_MCLK_PJ_MARK),
- PINMUX_GPIO(GPIO_FN_HAC_RES, HAC_RES_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF_SCK_PJ, SIOF_SCK_PJ_MARK),
- PINMUX_GPIO(GPIO_FN_HAC0_BITCLK, HAC0_BITCLK_MARK),
- PINMUX_GPIO(GPIO_FN_SSI0_CLK, SSI0_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_HAC1_BITCLK, HAC1_BITCLK_MARK),
- PINMUX_GPIO(GPIO_FN_SSI1_CLK, SSI1_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_TCLK, TCLK_MARK),
- PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
- PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
- PINMUX_GPIO(GPIO_FN_DRAK0_PK3, DRAK0_PK3_MARK),
- PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK),
- PINMUX_GPIO(GPIO_FN_DRAK1_PK2, DRAK1_PK2_MARK),
- PINMUX_GPIO(GPIO_FN_DACK2, DACK2_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF2_TXD, SCIF2_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_MMCCMD, MMCCMD_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF_TXD_PK, SIOF_TXD_PK_MARK),
- PINMUX_GPIO(GPIO_FN_DACK3, DACK3_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF2_SCK, SCIF2_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_MMCDAT, MMCDAT_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF_SCK_PK, SIOF_SCK_PK_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
- PINMUX_GPIO(GPIO_FN_DRAK0_PK1, DRAK0_PK1_MARK),
- PINMUX_GPIO(GPIO_FN_DRAK1_PK0, DRAK1_PK0_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ2, DREQ2_MARK),
- PINMUX_GPIO(GPIO_FN_INTB, INTB_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ3, DREQ3_MARK),
- PINMUX_GPIO(GPIO_FN_INTC, INTC_MARK),
- PINMUX_GPIO(GPIO_FN_DRAK2, DRAK2_MARK),
- PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
- PINMUX_GPIO(GPIO_FN_IRL4, IRL4_MARK),
- PINMUX_GPIO(GPIO_FN_FD4, FD4_MARK),
- PINMUX_GPIO(GPIO_FN_IRL5, IRL5_MARK),
- PINMUX_GPIO(GPIO_FN_FD5, FD5_MARK),
- PINMUX_GPIO(GPIO_FN_IRL6, IRL6_MARK),
- PINMUX_GPIO(GPIO_FN_FD6, FD6_MARK),
- PINMUX_GPIO(GPIO_FN_IRL7, IRL7_MARK),
- PINMUX_GPIO(GPIO_FN_FD7, FD7_MARK),
- PINMUX_GPIO(GPIO_FN_DRAK3, DRAK3_MARK),
- PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
- PINMUX_GPIO(GPIO_FN_BREQ_BSACK, BREQ_BSACK_MARK),
- PINMUX_GPIO(GPIO_FN_BACK_BSREQ, BACK_BSREQ_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF5_RXD, SCIF5_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_HAC1_SDIN, HAC1_SDIN_MARK),
- PINMUX_GPIO(GPIO_FN_SSI1_SCK, SSI1_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF5_SCK, SCIF5_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_HAC1_SDOUT, HAC1_SDOUT_MARK),
- PINMUX_GPIO(GPIO_FN_SSI1_SDATA, SSI1_SDATA_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF3_TXD, SCIF3_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_FCLE, FCLE_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF3_RXD, SCIF3_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_FALE, FALE_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF3_SCK, SCIF3_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_FD0, FD0_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF4_TXD, SCIF4_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_FD1, FD1_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF4_RXD, SCIF4_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_FD2, FD2_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF4_SCK, SCIF4_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_FD3, FD3_MARK),
- PINMUX_GPIO(GPIO_FN_DEVSEL_DCLKOUT, DEVSEL_DCLKOUT_MARK),
- PINMUX_GPIO(GPIO_FN_STOP_CDE, STOP_CDE_MARK),
- PINMUX_GPIO(GPIO_FN_LOCK_ODDF, LOCK_ODDF_MARK),
- PINMUX_GPIO(GPIO_FN_TRDY_DISPL, TRDY_DISPL_MARK),
- PINMUX_GPIO(GPIO_FN_IRDY_HSYNC, IRDY_HSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_PCIFRAME_VSYNC, PCIFRAME_VSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_INTA, INTA_MARK),
- PINMUX_GPIO(GPIO_FN_GNT0_GNTIN, GNT0_GNTIN_MARK),
- PINMUX_GPIO(GPIO_FN_REQ0_REQOUT, REQ0_REQOUT_MARK),
- PINMUX_GPIO(GPIO_FN_PERR, PERR_MARK),
- PINMUX_GPIO(GPIO_FN_SERR, SERR_MARK),
- PINMUX_GPIO(GPIO_FN_WE7_CBE3, WE7_CBE3_MARK),
- PINMUX_GPIO(GPIO_FN_WE6_CBE2, WE6_CBE2_MARK),
- PINMUX_GPIO(GPIO_FN_WE5_CBE1, WE5_CBE1_MARK),
- PINMUX_GPIO(GPIO_FN_WE4_CBE0, WE4_CBE0_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF2_RXD, SCIF2_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SIOF_RXD, SIOF_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_MRESETOUT, MRESETOUT_MARK),
- PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK),
+ GPIO_FN(D63_AD31),
+ GPIO_FN(D62_AD30),
+ GPIO_FN(D61_AD29),
+ GPIO_FN(D60_AD28),
+ GPIO_FN(D59_AD27),
+ GPIO_FN(D58_AD26),
+ GPIO_FN(D57_AD25),
+ GPIO_FN(D56_AD24),
+ GPIO_FN(D55_AD23),
+ GPIO_FN(D54_AD22),
+ GPIO_FN(D53_AD21),
+ GPIO_FN(D52_AD20),
+ GPIO_FN(D51_AD19),
+ GPIO_FN(D50_AD18),
+ GPIO_FN(D49_AD17_DB5),
+ GPIO_FN(D48_AD16_DB4),
+ GPIO_FN(D47_AD15_DB3),
+ GPIO_FN(D46_AD14_DB2),
+ GPIO_FN(D45_AD13_DB1),
+ GPIO_FN(D44_AD12_DB0),
+ GPIO_FN(D43_AD11_DG5),
+ GPIO_FN(D42_AD10_DG4),
+ GPIO_FN(D41_AD9_DG3),
+ GPIO_FN(D40_AD8_DG2),
+ GPIO_FN(D39_AD7_DG1),
+ GPIO_FN(D38_AD6_DG0),
+ GPIO_FN(D37_AD5_DR5),
+ GPIO_FN(D36_AD4_DR4),
+ GPIO_FN(D35_AD3_DR3),
+ GPIO_FN(D34_AD2_DR2),
+ GPIO_FN(D33_AD1_DR1),
+ GPIO_FN(D32_AD0_DR0),
+ GPIO_FN(REQ1),
+ GPIO_FN(REQ2),
+ GPIO_FN(REQ3),
+ GPIO_FN(GNT1),
+ GPIO_FN(GNT2),
+ GPIO_FN(GNT3),
+ GPIO_FN(MMCCLK),
+ GPIO_FN(D31),
+ GPIO_FN(D30),
+ GPIO_FN(D29),
+ GPIO_FN(D28),
+ GPIO_FN(D27),
+ GPIO_FN(D26),
+ GPIO_FN(D25),
+ GPIO_FN(D24),
+ GPIO_FN(D23),
+ GPIO_FN(D22),
+ GPIO_FN(D21),
+ GPIO_FN(D20),
+ GPIO_FN(D19),
+ GPIO_FN(D18),
+ GPIO_FN(D17),
+ GPIO_FN(D16),
+ GPIO_FN(SCIF1_SCK),
+ GPIO_FN(SCIF1_RXD),
+ GPIO_FN(SCIF1_TXD),
+ GPIO_FN(SCIF0_CTS),
+ GPIO_FN(INTD),
+ GPIO_FN(FCE),
+ GPIO_FN(SCIF0_RTS),
+ GPIO_FN(HSPI_CS),
+ GPIO_FN(FSE),
+ GPIO_FN(SCIF0_SCK),
+ GPIO_FN(HSPI_CLK),
+ GPIO_FN(FRE),
+ GPIO_FN(SCIF0_RXD),
+ GPIO_FN(HSPI_RX),
+ GPIO_FN(FRB),
+ GPIO_FN(SCIF0_TXD),
+ GPIO_FN(HSPI_TX),
+ GPIO_FN(FWE),
+ GPIO_FN(SCIF5_TXD),
+ GPIO_FN(HAC1_SYNC),
+ GPIO_FN(SSI1_WS),
+ GPIO_FN(SIOF_TXD_PJ),
+ GPIO_FN(HAC0_SDOUT),
+ GPIO_FN(SSI0_SDATA),
+ GPIO_FN(SIOF_RXD_PJ),
+ GPIO_FN(HAC0_SDIN),
+ GPIO_FN(SSI0_SCK),
+ GPIO_FN(SIOF_SYNC_PJ),
+ GPIO_FN(HAC0_SYNC),
+ GPIO_FN(SSI0_WS),
+ GPIO_FN(SIOF_MCLK_PJ),
+ GPIO_FN(HAC_RES),
+ GPIO_FN(SIOF_SCK_PJ),
+ GPIO_FN(HAC0_BITCLK),
+ GPIO_FN(SSI0_CLK),
+ GPIO_FN(HAC1_BITCLK),
+ GPIO_FN(SSI1_CLK),
+ GPIO_FN(TCLK),
+ GPIO_FN(IOIS16),
+ GPIO_FN(STATUS0),
+ GPIO_FN(DRAK0_PK3),
+ GPIO_FN(STATUS1),
+ GPIO_FN(DRAK1_PK2),
+ GPIO_FN(DACK2),
+ GPIO_FN(SCIF2_TXD),
+ GPIO_FN(MMCCMD),
+ GPIO_FN(SIOF_TXD_PK),
+ GPIO_FN(DACK3),
+ GPIO_FN(SCIF2_SCK),
+ GPIO_FN(MMCDAT),
+ GPIO_FN(SIOF_SCK_PK),
+ GPIO_FN(DREQ0),
+ GPIO_FN(DREQ1),
+ GPIO_FN(DRAK0_PK1),
+ GPIO_FN(DRAK1_PK0),
+ GPIO_FN(DREQ2),
+ GPIO_FN(INTB),
+ GPIO_FN(DREQ3),
+ GPIO_FN(INTC),
+ GPIO_FN(DRAK2),
+ GPIO_FN(CE2A),
+ GPIO_FN(IRL4),
+ GPIO_FN(FD4),
+ GPIO_FN(IRL5),
+ GPIO_FN(FD5),
+ GPIO_FN(IRL6),
+ GPIO_FN(FD6),
+ GPIO_FN(IRL7),
+ GPIO_FN(FD7),
+ GPIO_FN(DRAK3),
+ GPIO_FN(CE2B),
+ GPIO_FN(BREQ_BSACK),
+ GPIO_FN(BACK_BSREQ),
+ GPIO_FN(SCIF5_RXD),
+ GPIO_FN(HAC1_SDIN),
+ GPIO_FN(SSI1_SCK),
+ GPIO_FN(SCIF5_SCK),
+ GPIO_FN(HAC1_SDOUT),
+ GPIO_FN(SSI1_SDATA),
+ GPIO_FN(SCIF3_TXD),
+ GPIO_FN(FCLE),
+ GPIO_FN(SCIF3_RXD),
+ GPIO_FN(FALE),
+ GPIO_FN(SCIF3_SCK),
+ GPIO_FN(FD0),
+ GPIO_FN(SCIF4_TXD),
+ GPIO_FN(FD1),
+ GPIO_FN(SCIF4_RXD),
+ GPIO_FN(FD2),
+ GPIO_FN(SCIF4_SCK),
+ GPIO_FN(FD3),
+ GPIO_FN(DEVSEL_DCLKOUT),
+ GPIO_FN(STOP_CDE),
+ GPIO_FN(LOCK_ODDF),
+ GPIO_FN(TRDY_DISPL),
+ GPIO_FN(IRDY_HSYNC),
+ GPIO_FN(PCIFRAME_VSYNC),
+ GPIO_FN(INTA),
+ GPIO_FN(GNT0_GNTIN),
+ GPIO_FN(REQ0_REQOUT),
+ GPIO_FN(PERR),
+ GPIO_FN(SERR),
+ GPIO_FN(WE7_CBE3),
+ GPIO_FN(WE6_CBE2),
+ GPIO_FN(WE5_CBE1),
+ GPIO_FN(WE4_CBE0),
+ GPIO_FN(SCIF2_RXD),
+ GPIO_FN(SIOF_RXD),
+ GPIO_FN(MRESETOUT),
+ GPIO_FN(IRQOUT),
};
-static struct pinmux_cfg_reg pinmux_config_regs[] = {
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("PACR", 0xffe70000, 16, 2) {
PA7_FN, PA7_OUT, PA7_IN, PA7_IN_PU,
PA6_FN, PA6_OUT, PA6_IN, PA6_IN_PU,
@@ -1214,7 +1218,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
{}
};
-static struct pinmux_data_reg pinmux_data_regs[] = {
+static const struct pinmux_data_reg pinmux_data_regs[] = {
{ PINMUX_DATA_REG("PADR", 0xffe70020, 8) {
PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA }
@@ -1282,20 +1286,18 @@ static struct pinmux_data_reg pinmux_data_regs[] = {
{ },
};
-struct sh_pfc_soc_info sh7785_pinmux_info = {
+const struct sh_pfc_soc_info sh7785_pinmux_info = {
.name = "sh7785_pfc",
- .reserved_id = PINMUX_RESERVED,
- .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
.input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
- .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
- .first_gpio = GPIO_PA7,
- .last_gpio = GPIO_FN_IRQOUT,
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .func_gpios = pinmux_func_gpios,
+ .nr_func_gpios = ARRAY_SIZE(pinmux_func_gpios),
- .gpios = pinmux_gpios,
.cfg_regs = pinmux_config_regs,
.data_regs = pinmux_data_regs,
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7786.c b/drivers/pinctrl/sh-pfc/pfc-sh7786.c
index 1e18b58..8ae0e32 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7786.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7786.c
@@ -191,7 +191,7 @@ enum {
PINMUX_MARK_END,
};
-static pinmux_enum_t pinmux_data[] = {
+static const pinmux_enum_t pinmux_data[] = {
/* PA GPIO */
PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT, PA7_IN_PU),
@@ -427,7 +427,7 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(SSI3_SCK_MARK, P2MSEL6_1, P2MSEL5_1, PJ1_FN),
};
-static struct pinmux_gpio pinmux_gpios[] = {
+static struct sh_pfc_pin pinmux_pins[] = {
/* PA */
PINMUX_GPIO(GPIO_PA7, PA7_DATA),
PINMUX_GPIO(GPIO_PA6, PA6_DATA),
@@ -505,147 +505,151 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_PJ3, PJ3_DATA),
PINMUX_GPIO(GPIO_PJ2, PJ2_DATA),
PINMUX_GPIO(GPIO_PJ1, PJ1_DATA),
+};
+
+#define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins)
+static const struct pinmux_func pinmux_func_gpios[] = {
/* FN */
- PINMUX_GPIO(GPIO_FN_CDE, CDE_MARK),
- PINMUX_GPIO(GPIO_FN_ETH_MAGIC, ETH_MAGIC_MARK),
- PINMUX_GPIO(GPIO_FN_DISP, DISP_MARK),
- PINMUX_GPIO(GPIO_FN_ETH_LINK, ETH_LINK_MARK),
- PINMUX_GPIO(GPIO_FN_DR5, DR5_MARK),
- PINMUX_GPIO(GPIO_FN_ETH_TX_ER, ETH_TX_ER_MARK),
- PINMUX_GPIO(GPIO_FN_DR4, DR4_MARK),
- PINMUX_GPIO(GPIO_FN_ETH_TX_EN, ETH_TX_EN_MARK),
- PINMUX_GPIO(GPIO_FN_DR3, DR3_MARK),
- PINMUX_GPIO(GPIO_FN_ETH_TXD3, ETH_TXD3_MARK),
- PINMUX_GPIO(GPIO_FN_DR2, DR2_MARK),
- PINMUX_GPIO(GPIO_FN_ETH_TXD2, ETH_TXD2_MARK),
- PINMUX_GPIO(GPIO_FN_DR1, DR1_MARK),
- PINMUX_GPIO(GPIO_FN_ETH_TXD1, ETH_TXD1_MARK),
- PINMUX_GPIO(GPIO_FN_DR0, DR0_MARK),
- PINMUX_GPIO(GPIO_FN_ETH_TXD0, ETH_TXD0_MARK),
- PINMUX_GPIO(GPIO_FN_VSYNC, VSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_HSPI_CLK, HSPI_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_ODDF, ODDF_MARK),
- PINMUX_GPIO(GPIO_FN_HSPI_CS, HSPI_CS_MARK),
- PINMUX_GPIO(GPIO_FN_DG5, DG5_MARK),
- PINMUX_GPIO(GPIO_FN_ETH_MDIO, ETH_MDIO_MARK),
- PINMUX_GPIO(GPIO_FN_DG4, DG4_MARK),
- PINMUX_GPIO(GPIO_FN_ETH_RX_CLK, ETH_RX_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_DG3, DG3_MARK),
- PINMUX_GPIO(GPIO_FN_ETH_MDC, ETH_MDC_MARK),
- PINMUX_GPIO(GPIO_FN_DG2, DG2_MARK),
- PINMUX_GPIO(GPIO_FN_ETH_COL, ETH_COL_MARK),
- PINMUX_GPIO(GPIO_FN_DG1, DG1_MARK),
- PINMUX_GPIO(GPIO_FN_ETH_TX_CLK, ETH_TX_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_DG0, DG0_MARK),
- PINMUX_GPIO(GPIO_FN_ETH_CRS, ETH_CRS_MARK),
- PINMUX_GPIO(GPIO_FN_DCLKIN, DCLKIN_MARK),
- PINMUX_GPIO(GPIO_FN_HSPI_RX, HSPI_RX_MARK),
- PINMUX_GPIO(GPIO_FN_HSYNC, HSYNC_MARK),
- PINMUX_GPIO(GPIO_FN_HSPI_TX, HSPI_TX_MARK),
- PINMUX_GPIO(GPIO_FN_DB5, DB5_MARK),
- PINMUX_GPIO(GPIO_FN_ETH_RXD3, ETH_RXD3_MARK),
- PINMUX_GPIO(GPIO_FN_DB4, DB4_MARK),
- PINMUX_GPIO(GPIO_FN_ETH_RXD2, ETH_RXD2_MARK),
- PINMUX_GPIO(GPIO_FN_DB3, DB3_MARK),
- PINMUX_GPIO(GPIO_FN_ETH_RXD1, ETH_RXD1_MARK),
- PINMUX_GPIO(GPIO_FN_DB2, DB2_MARK),
- PINMUX_GPIO(GPIO_FN_ETH_RXD0, ETH_RXD0_MARK),
- PINMUX_GPIO(GPIO_FN_DB1, DB1_MARK),
- PINMUX_GPIO(GPIO_FN_ETH_RX_DV, ETH_RX_DV_MARK),
- PINMUX_GPIO(GPIO_FN_DB0, DB0_MARK),
- PINMUX_GPIO(GPIO_FN_ETH_RX_ER, ETH_RX_ER_MARK),
- PINMUX_GPIO(GPIO_FN_DCLKOUT, DCLKOUT_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
- PINMUX_GPIO(GPIO_FN_BACK, BACK_MARK),
- PINMUX_GPIO(GPIO_FN_FALE, FALE_MARK),
- PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
- PINMUX_GPIO(GPIO_FN_FCLE, FCLE_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
- PINMUX_GPIO(GPIO_FN_BREQ, BREQ_MARK),
- PINMUX_GPIO(GPIO_FN_USB_OVC1, USB_OVC1_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
- PINMUX_GPIO(GPIO_FN_USB_OVC0, USB_OVC0_MARK),
- PINMUX_GPIO(GPIO_FN_USB_PENC1, USB_PENC1_MARK),
- PINMUX_GPIO(GPIO_FN_USB_PENC0, USB_PENC0_MARK),
- PINMUX_GPIO(GPIO_FN_HAC1_SDOUT, HAC1_SDOUT_MARK),
- PINMUX_GPIO(GPIO_FN_SSI1_SDATA, SSI1_SDATA_MARK),
- PINMUX_GPIO(GPIO_FN_SDIF1CMD, SDIF1CMD_MARK),
- PINMUX_GPIO(GPIO_FN_HAC1_SDIN, HAC1_SDIN_MARK),
- PINMUX_GPIO(GPIO_FN_SSI1_SCK, SSI1_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SDIF1CD, SDIF1CD_MARK),
- PINMUX_GPIO(GPIO_FN_HAC1_SYNC, HAC1_SYNC_MARK),
- PINMUX_GPIO(GPIO_FN_SSI1_WS, SSI1_WS_MARK),
- PINMUX_GPIO(GPIO_FN_SDIF1WP, SDIF1WP_MARK),
- PINMUX_GPIO(GPIO_FN_HAC1_BITCLK, HAC1_BITCLK_MARK),
- PINMUX_GPIO(GPIO_FN_SSI1_CLK, SSI1_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_SDIF1CLK, SDIF1CLK_MARK),
- PINMUX_GPIO(GPIO_FN_HAC0_SDOUT, HAC0_SDOUT_MARK),
- PINMUX_GPIO(GPIO_FN_SSI0_SDATA, SSI0_SDATA_MARK),
- PINMUX_GPIO(GPIO_FN_SDIF1D3, SDIF1D3_MARK),
- PINMUX_GPIO(GPIO_FN_HAC0_SDIN, HAC0_SDIN_MARK),
- PINMUX_GPIO(GPIO_FN_SSI0_SCK, SSI0_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SDIF1D2, SDIF1D2_MARK),
- PINMUX_GPIO(GPIO_FN_HAC0_SYNC, HAC0_SYNC_MARK),
- PINMUX_GPIO(GPIO_FN_SSI0_WS, SSI0_WS_MARK),
- PINMUX_GPIO(GPIO_FN_SDIF1D1, SDIF1D1_MARK),
- PINMUX_GPIO(GPIO_FN_HAC0_BITCLK, HAC0_BITCLK_MARK),
- PINMUX_GPIO(GPIO_FN_SSI0_CLK, SSI0_CLK_MARK),
- PINMUX_GPIO(GPIO_FN_SDIF1D0, SDIF1D0_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF3_SCK, SCIF3_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SSI2_SDATA, SSI2_SDATA_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF3_RXD, SCIF3_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_TCLK, TCLK_MARK),
- PINMUX_GPIO(GPIO_FN_SSI2_SCK, SSI2_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF3_TXD, SCIF3_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_HAC_RES, HAC_RES_MARK),
- PINMUX_GPIO(GPIO_FN_SSI2_WS, SSI2_WS_MARK),
- PINMUX_GPIO(GPIO_FN_DACK3, DACK3_MARK),
- PINMUX_GPIO(GPIO_FN_SDIF0CMD, SDIF0CMD_MARK),
- PINMUX_GPIO(GPIO_FN_DACK2, DACK2_MARK),
- PINMUX_GPIO(GPIO_FN_SDIF0CD, SDIF0CD_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ3, DREQ3_MARK),
- PINMUX_GPIO(GPIO_FN_SDIF0WP, SDIF0WP_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_CTS, SCIF0_CTS_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ2, DREQ2_MARK),
- PINMUX_GPIO(GPIO_FN_SDIF0CLK, SDIF0CLK_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_RTS, SCIF0_RTS_MARK),
- PINMUX_GPIO(GPIO_FN_IRL7, IRL7_MARK),
- PINMUX_GPIO(GPIO_FN_SDIF0D3, SDIF0D3_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_IRL6, IRL6_MARK),
- PINMUX_GPIO(GPIO_FN_SDIF0D2, SDIF0D2_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_IRL5, IRL5_MARK),
- PINMUX_GPIO(GPIO_FN_SDIF0D1, SDIF0D1_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_IRL4, IRL4_MARK),
- PINMUX_GPIO(GPIO_FN_SDIF0D0, SDIF0D0_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF5_SCK, SCIF5_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF5_RXD, SCIF5_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF5_TXD, SCIF5_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
- PINMUX_GPIO(GPIO_FN_DRAK3, DRAK3_MARK),
- PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF4_SCK, SCIF4_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_DRAK2, DRAK2_MARK),
- PINMUX_GPIO(GPIO_FN_SSI3_WS, SSI3_WS_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF4_RXD, SCIF4_RXD_MARK),
- PINMUX_GPIO(GPIO_FN_DRAK1, DRAK1_MARK),
- PINMUX_GPIO(GPIO_FN_SSI3_SDATA, SSI3_SDATA_MARK),
- PINMUX_GPIO(GPIO_FN_FSTATUS, FSTATUS_MARK),
- PINMUX_GPIO(GPIO_FN_SCIF4_TXD, SCIF4_TXD_MARK),
- PINMUX_GPIO(GPIO_FN_DRAK0, DRAK0_MARK),
- PINMUX_GPIO(GPIO_FN_SSI3_SCK, SSI3_SCK_MARK),
- PINMUX_GPIO(GPIO_FN_FSE, FSE_MARK),
+ GPIO_FN(CDE),
+ GPIO_FN(ETH_MAGIC),
+ GPIO_FN(DISP),
+ GPIO_FN(ETH_LINK),
+ GPIO_FN(DR5),
+ GPIO_FN(ETH_TX_ER),
+ GPIO_FN(DR4),
+ GPIO_FN(ETH_TX_EN),
+ GPIO_FN(DR3),
+ GPIO_FN(ETH_TXD3),
+ GPIO_FN(DR2),
+ GPIO_FN(ETH_TXD2),
+ GPIO_FN(DR1),
+ GPIO_FN(ETH_TXD1),
+ GPIO_FN(DR0),
+ GPIO_FN(ETH_TXD0),
+ GPIO_FN(VSYNC),
+ GPIO_FN(HSPI_CLK),
+ GPIO_FN(ODDF),
+ GPIO_FN(HSPI_CS),
+ GPIO_FN(DG5),
+ GPIO_FN(ETH_MDIO),
+ GPIO_FN(DG4),
+ GPIO_FN(ETH_RX_CLK),
+ GPIO_FN(DG3),
+ GPIO_FN(ETH_MDC),
+ GPIO_FN(DG2),
+ GPIO_FN(ETH_COL),
+ GPIO_FN(DG1),
+ GPIO_FN(ETH_TX_CLK),
+ GPIO_FN(DG0),
+ GPIO_FN(ETH_CRS),
+ GPIO_FN(DCLKIN),
+ GPIO_FN(HSPI_RX),
+ GPIO_FN(HSYNC),
+ GPIO_FN(HSPI_TX),
+ GPIO_FN(DB5),
+ GPIO_FN(ETH_RXD3),
+ GPIO_FN(DB4),
+ GPIO_FN(ETH_RXD2),
+ GPIO_FN(DB3),
+ GPIO_FN(ETH_RXD1),
+ GPIO_FN(DB2),
+ GPIO_FN(ETH_RXD0),
+ GPIO_FN(DB1),
+ GPIO_FN(ETH_RX_DV),
+ GPIO_FN(DB0),
+ GPIO_FN(ETH_RX_ER),
+ GPIO_FN(DCLKOUT),
+ GPIO_FN(SCIF1_SCK),
+ GPIO_FN(SCIF1_RXD),
+ GPIO_FN(SCIF1_TXD),
+ GPIO_FN(DACK1),
+ GPIO_FN(BACK),
+ GPIO_FN(FALE),
+ GPIO_FN(DACK0),
+ GPIO_FN(FCLE),
+ GPIO_FN(DREQ1),
+ GPIO_FN(BREQ),
+ GPIO_FN(USB_OVC1),
+ GPIO_FN(DREQ0),
+ GPIO_FN(USB_OVC0),
+ GPIO_FN(USB_PENC1),
+ GPIO_FN(USB_PENC0),
+ GPIO_FN(HAC1_SDOUT),
+ GPIO_FN(SSI1_SDATA),
+ GPIO_FN(SDIF1CMD),
+ GPIO_FN(HAC1_SDIN),
+ GPIO_FN(SSI1_SCK),
+ GPIO_FN(SDIF1CD),
+ GPIO_FN(HAC1_SYNC),
+ GPIO_FN(SSI1_WS),
+ GPIO_FN(SDIF1WP),
+ GPIO_FN(HAC1_BITCLK),
+ GPIO_FN(SSI1_CLK),
+ GPIO_FN(SDIF1CLK),
+ GPIO_FN(HAC0_SDOUT),
+ GPIO_FN(SSI0_SDATA),
+ GPIO_FN(SDIF1D3),
+ GPIO_FN(HAC0_SDIN),
+ GPIO_FN(SSI0_SCK),
+ GPIO_FN(SDIF1D2),
+ GPIO_FN(HAC0_SYNC),
+ GPIO_FN(SSI0_WS),
+ GPIO_FN(SDIF1D1),
+ GPIO_FN(HAC0_BITCLK),
+ GPIO_FN(SSI0_CLK),
+ GPIO_FN(SDIF1D0),
+ GPIO_FN(SCIF3_SCK),
+ GPIO_FN(SSI2_SDATA),
+ GPIO_FN(SCIF3_RXD),
+ GPIO_FN(TCLK),
+ GPIO_FN(SSI2_SCK),
+ GPIO_FN(SCIF3_TXD),
+ GPIO_FN(HAC_RES),
+ GPIO_FN(SSI2_WS),
+ GPIO_FN(DACK3),
+ GPIO_FN(SDIF0CMD),
+ GPIO_FN(DACK2),
+ GPIO_FN(SDIF0CD),
+ GPIO_FN(DREQ3),
+ GPIO_FN(SDIF0WP),
+ GPIO_FN(SCIF0_CTS),
+ GPIO_FN(DREQ2),
+ GPIO_FN(SDIF0CLK),
+ GPIO_FN(SCIF0_RTS),
+ GPIO_FN(IRL7),
+ GPIO_FN(SDIF0D3),
+ GPIO_FN(SCIF0_SCK),
+ GPIO_FN(IRL6),
+ GPIO_FN(SDIF0D2),
+ GPIO_FN(SCIF0_RXD),
+ GPIO_FN(IRL5),
+ GPIO_FN(SDIF0D1),
+ GPIO_FN(SCIF0_TXD),
+ GPIO_FN(IRL4),
+ GPIO_FN(SDIF0D0),
+ GPIO_FN(SCIF5_SCK),
+ GPIO_FN(FRB),
+ GPIO_FN(SCIF5_RXD),
+ GPIO_FN(IOIS16),
+ GPIO_FN(SCIF5_TXD),
+ GPIO_FN(CE2B),
+ GPIO_FN(DRAK3),
+ GPIO_FN(CE2A),
+ GPIO_FN(SCIF4_SCK),
+ GPIO_FN(DRAK2),
+ GPIO_FN(SSI3_WS),
+ GPIO_FN(SCIF4_RXD),
+ GPIO_FN(DRAK1),
+ GPIO_FN(SSI3_SDATA),
+ GPIO_FN(FSTATUS),
+ GPIO_FN(SCIF4_TXD),
+ GPIO_FN(DRAK0),
+ GPIO_FN(SSI3_SCK),
+ GPIO_FN(FSE),
};
-static struct pinmux_cfg_reg pinmux_config_regs[] = {
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("PACR", 0xffcc0000, 16, 2) {
PA7_FN, PA7_OUT, PA7_IN, PA7_IN_PU,
PA6_FN, PA6_OUT, PA6_IN, PA6_IN_PU,
@@ -775,7 +779,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
{}
};
-static struct pinmux_data_reg pinmux_data_regs[] = {
+static const struct pinmux_data_reg pinmux_data_regs[] = {
{ PINMUX_DATA_REG("PADR", 0xffcc0020, 8) {
PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA }
@@ -815,20 +819,18 @@ static struct pinmux_data_reg pinmux_data_regs[] = {
{ },
};
-struct sh_pfc_soc_info sh7786_pinmux_info = {
+const struct sh_pfc_soc_info sh7786_pinmux_info = {
.name = "sh7786_pfc",
- .reserved_id = PINMUX_RESERVED,
- .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
.input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
- .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
- .first_gpio = GPIO_PA7,
- .last_gpio = GPIO_FN_IRL4,
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .func_gpios = pinmux_func_gpios,
+ .nr_func_gpios = ARRAY_SIZE(pinmux_func_gpios),
- .gpios = pinmux_gpios,
.cfg_regs = pinmux_config_regs,
.data_regs = pinmux_data_regs,
diff --git a/drivers/pinctrl/sh-pfc/pfc-shx3.c b/drivers/pinctrl/sh-pfc/pfc-shx3.c
index ccf6918..6594c8c 100644
--- a/drivers/pinctrl/sh-pfc/pfc-shx3.c
+++ b/drivers/pinctrl/sh-pfc/pfc-shx3.c
@@ -147,7 +147,7 @@ enum {
PINMUX_MARK_END,
};
-static pinmux_enum_t shx3_pinmux_data[] = {
+static const pinmux_enum_t shx3_pinmux_data[] = {
/* PA GPIO */
PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT, PA7_IN_PU),
@@ -306,7 +306,7 @@ static pinmux_enum_t shx3_pinmux_data[] = {
PINMUX_DATA(IRQOUT_MARK, PH0_FN),
};
-static struct pinmux_gpio shx3_pinmux_gpios[] = {
+static struct sh_pfc_pin shx3_pinmux_pins[] = {
/* PA */
PINMUX_GPIO(GPIO_PA7, PA7_DATA),
PINMUX_GPIO(GPIO_PA6, PA6_DATA),
@@ -384,73 +384,77 @@ static struct pinmux_gpio shx3_pinmux_gpios[] = {
PINMUX_GPIO(GPIO_PH2, PH2_DATA),
PINMUX_GPIO(GPIO_PH1, PH1_DATA),
PINMUX_GPIO(GPIO_PH0, PH0_DATA),
+};
+
+#define PINMUX_FN_BASE ARRAY_SIZE(shx3_pinmux_pins)
+static const struct pinmux_func shx3_pinmux_func_gpios[] = {
/* FN */
- PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
- PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
- PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
- PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
- PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
- PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
- PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
- PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
- PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
- PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
- PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
- PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
- PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
- PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
- PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
- PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
- PINMUX_GPIO(GPIO_FN_BACK, BACK_MARK),
- PINMUX_GPIO(GPIO_FN_BREQ, BREQ_MARK),
- PINMUX_GPIO(GPIO_FN_WE3, WE3_MARK),
- PINMUX_GPIO(GPIO_FN_WE2, WE2_MARK),
- PINMUX_GPIO(GPIO_FN_CS6, CS6_MARK),
- PINMUX_GPIO(GPIO_FN_CS5, CS5_MARK),
- PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
- PINMUX_GPIO(GPIO_FN_CLKOUTENB, CLKOUTENB_MARK),
- PINMUX_GPIO(GPIO_FN_DACK3, DACK3_MARK),
- PINMUX_GPIO(GPIO_FN_DACK2, DACK2_MARK),
- PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
- PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ3, DREQ3_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ2, DREQ2_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
- PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK),
- PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK),
- PINMUX_GPIO(GPIO_FN_DRAK3, DRAK3_MARK),
- PINMUX_GPIO(GPIO_FN_DRAK2, DRAK2_MARK),
- PINMUX_GPIO(GPIO_FN_DRAK1, DRAK1_MARK),
- PINMUX_GPIO(GPIO_FN_DRAK0, DRAK0_MARK),
- PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
- PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
- PINMUX_GPIO(GPIO_FN_SCK1, SCK1_MARK),
- PINMUX_GPIO(GPIO_FN_SCK0, SCK0_MARK),
- PINMUX_GPIO(GPIO_FN_IRL3, IRL3_MARK),
- PINMUX_GPIO(GPIO_FN_IRL2, IRL2_MARK),
- PINMUX_GPIO(GPIO_FN_IRL1, IRL1_MARK),
- PINMUX_GPIO(GPIO_FN_IRL0, IRL0_MARK),
- PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK),
- PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
- PINMUX_GPIO(GPIO_FN_TXD1, TXD1_MARK),
- PINMUX_GPIO(GPIO_FN_TXD0, TXD0_MARK),
- PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
- PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
- PINMUX_GPIO(GPIO_FN_RXD1, RXD1_MARK),
- PINMUX_GPIO(GPIO_FN_RXD0, RXD0_MARK),
- PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
- PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
- PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
- PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK),
- PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
- PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK),
+ GPIO_FN(D31),
+ GPIO_FN(D30),
+ GPIO_FN(D29),
+ GPIO_FN(D28),
+ GPIO_FN(D27),
+ GPIO_FN(D26),
+ GPIO_FN(D25),
+ GPIO_FN(D24),
+ GPIO_FN(D23),
+ GPIO_FN(D22),
+ GPIO_FN(D21),
+ GPIO_FN(D20),
+ GPIO_FN(D19),
+ GPIO_FN(D18),
+ GPIO_FN(D17),
+ GPIO_FN(D16),
+ GPIO_FN(BACK),
+ GPIO_FN(BREQ),
+ GPIO_FN(WE3),
+ GPIO_FN(WE2),
+ GPIO_FN(CS6),
+ GPIO_FN(CS5),
+ GPIO_FN(CS4),
+ GPIO_FN(CLKOUTENB),
+ GPIO_FN(DACK3),
+ GPIO_FN(DACK2),
+ GPIO_FN(DACK1),
+ GPIO_FN(DACK0),
+ GPIO_FN(DREQ3),
+ GPIO_FN(DREQ2),
+ GPIO_FN(DREQ1),
+ GPIO_FN(DREQ0),
+ GPIO_FN(IRQ3),
+ GPIO_FN(IRQ2),
+ GPIO_FN(IRQ1),
+ GPIO_FN(IRQ0),
+ GPIO_FN(DRAK3),
+ GPIO_FN(DRAK2),
+ GPIO_FN(DRAK1),
+ GPIO_FN(DRAK0),
+ GPIO_FN(SCK3),
+ GPIO_FN(SCK2),
+ GPIO_FN(SCK1),
+ GPIO_FN(SCK0),
+ GPIO_FN(IRL3),
+ GPIO_FN(IRL2),
+ GPIO_FN(IRL1),
+ GPIO_FN(IRL0),
+ GPIO_FN(TXD3),
+ GPIO_FN(TXD2),
+ GPIO_FN(TXD1),
+ GPIO_FN(TXD0),
+ GPIO_FN(RXD3),
+ GPIO_FN(RXD2),
+ GPIO_FN(RXD1),
+ GPIO_FN(RXD0),
+ GPIO_FN(CE2B),
+ GPIO_FN(CE2A),
+ GPIO_FN(IOIS16),
+ GPIO_FN(STATUS1),
+ GPIO_FN(STATUS0),
+ GPIO_FN(IRQOUT),
};
-static struct pinmux_cfg_reg shx3_pinmux_config_regs[] = {
+static const struct pinmux_cfg_reg shx3_pinmux_config_regs[] = {
{ PINMUX_CFG_REG("PABCR", 0xffc70000, 32, 2) {
PA7_FN, PA7_OUT, PA7_IN, PA7_IN_PU,
PA6_FN, PA6_OUT, PA6_IN, PA6_IN_PU,
@@ -526,7 +530,7 @@ static struct pinmux_cfg_reg shx3_pinmux_config_regs[] = {
{ },
};
-static struct pinmux_data_reg shx3_pinmux_data_regs[] = {
+static const struct pinmux_data_reg shx3_pinmux_data_regs[] = {
{ PINMUX_DATA_REG("PABDR", 0xffc70010, 32) {
0, 0, 0, 0, 0, 0, 0, 0,
PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
@@ -562,19 +566,17 @@ static struct pinmux_data_reg shx3_pinmux_data_regs[] = {
{ },
};
-struct sh_pfc_soc_info shx3_pinmux_info = {
+const struct sh_pfc_soc_info shx3_pinmux_info = {
.name = "shx3_pfc",
- .reserved_id = PINMUX_RESERVED,
- .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
.input_pu = { PINMUX_INPUT_PULLUP_BEGIN,
PINMUX_INPUT_PULLUP_END },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
- .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
- .first_gpio = GPIO_PA7,
- .last_gpio = GPIO_FN_STATUS0,
- .gpios = shx3_pinmux_gpios,
+ .pins = shx3_pinmux_pins,
+ .nr_pins = ARRAY_SIZE(shx3_pinmux_pins),
+ .func_gpios = shx3_pinmux_func_gpios,
+ .nr_func_gpios = ARRAY_SIZE(shx3_pinmux_func_gpios),
.gpio_data = shx3_pinmux_data,
.gpio_data_size = ARRAY_SIZE(shx3_pinmux_data),
.cfg_regs = shx3_pinmux_config_regs,
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index 11e0e13..3492ec9 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -9,7 +9,6 @@
*/
#define DRV_NAME "sh-pfc"
-#define pr_fmt(fmt) KBUILD_MODNAME " pinctrl: " fmt
#include <linux/device.h>
#include <linux/err.h>
@@ -24,25 +23,28 @@
#include <linux/spinlock.h>
#include "core.h"
+#include "../core.h"
+#include "../pinconf.h"
+
+struct sh_pfc_pin_config {
+ u32 type;
+};
struct sh_pfc_pinctrl {
struct pinctrl_dev *pctl;
- struct sh_pfc *pfc;
-
- struct pinmux_gpio **functions;
- unsigned int nr_functions;
+ struct pinctrl_desc pctl_desc;
- struct pinctrl_pin_desc *pads;
- unsigned int nr_pads;
+ struct sh_pfc *pfc;
- spinlock_t lock;
+ struct pinctrl_pin_desc *pins;
+ struct sh_pfc_pin_config *configs;
};
static int sh_pfc_get_groups_count(struct pinctrl_dev *pctldev)
{
struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
- return pmx->nr_pads;
+ return pmx->pfc->info->nr_groups;
}
static const char *sh_pfc_get_group_name(struct pinctrl_dev *pctldev,
@@ -50,16 +52,16 @@ static const char *sh_pfc_get_group_name(struct pinctrl_dev *pctldev,
{
struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
- return pmx->pads[selector].name;
+ return pmx->pfc->info->groups[selector].name;
}
-static int sh_pfc_get_group_pins(struct pinctrl_dev *pctldev, unsigned group,
+static int sh_pfc_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
const unsigned **pins, unsigned *num_pins)
{
struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
- *pins = &pmx->pads[group].number;
- *num_pins = 1;
+ *pins = pmx->pfc->info->groups[selector].pins;
+ *num_pins = pmx->pfc->info->groups[selector].nr_pins;
return 0;
}
@@ -70,7 +72,7 @@ static void sh_pfc_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
seq_printf(s, "%s", DRV_NAME);
}
-static struct pinctrl_ops sh_pfc_pinctrl_ops = {
+static const struct pinctrl_ops sh_pfc_pinctrl_ops = {
.get_groups_count = sh_pfc_get_groups_count,
.get_group_name = sh_pfc_get_group_name,
.get_group_pins = sh_pfc_get_group_pins,
@@ -81,7 +83,7 @@ static int sh_pfc_get_functions_count(struct pinctrl_dev *pctldev)
{
struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
- return pmx->nr_functions;
+ return pmx->pfc->info->nr_functions;
}
static const char *sh_pfc_get_function_name(struct pinctrl_dev *pctldev,
@@ -89,136 +91,113 @@ static const char *sh_pfc_get_function_name(struct pinctrl_dev *pctldev,
{
struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
- return pmx->functions[selector]->name;
+ return pmx->pfc->info->functions[selector].name;
}
-static int sh_pfc_get_function_groups(struct pinctrl_dev *pctldev, unsigned func,
+static int sh_pfc_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned selector,
const char * const **groups,
unsigned * const num_groups)
{
struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
- *groups = &pmx->functions[func]->name;
- *num_groups = 1;
+ *groups = pmx->pfc->info->functions[selector].groups;
+ *num_groups = pmx->pfc->info->functions[selector].nr_groups;
return 0;
}
-static int sh_pfc_noop_enable(struct pinctrl_dev *pctldev, unsigned func,
+static int sh_pfc_func_enable(struct pinctrl_dev *pctldev, unsigned selector,
unsigned group)
{
- return 0;
-}
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ struct sh_pfc *pfc = pmx->pfc;
+ const struct sh_pfc_pin_group *grp = &pfc->info->groups[group];
+ unsigned long flags;
+ unsigned int i;
+ int ret = 0;
-static void sh_pfc_noop_disable(struct pinctrl_dev *pctldev, unsigned func,
- unsigned group)
-{
-}
+ spin_lock_irqsave(&pfc->lock, flags);
-static int sh_pfc_config_function(struct sh_pfc *pfc, unsigned offset)
-{
- if (sh_pfc_config_gpio(pfc, offset,
- PINMUX_TYPE_FUNCTION,
- GPIO_CFG_DRYRUN) != 0)
- return -EINVAL;
+ for (i = 0; i < grp->nr_pins; ++i) {
+ int idx = sh_pfc_get_pin_index(pfc, grp->pins[i]);
+ struct sh_pfc_pin_config *cfg = &pmx->configs[idx];
- if (sh_pfc_config_gpio(pfc, offset,
- PINMUX_TYPE_FUNCTION,
- GPIO_CFG_REQ) != 0)
- return -EINVAL;
+ if (cfg->type != PINMUX_TYPE_NONE) {
+ ret = -EBUSY;
+ goto done;
+ }
+ }
- return 0;
+ for (i = 0; i < grp->nr_pins; ++i) {
+ ret = sh_pfc_config_mux(pfc, grp->mux[i], PINMUX_TYPE_FUNCTION);
+ if (ret < 0)
+ break;
+ }
+
+done:
+ spin_unlock_irqrestore(&pfc->lock, flags);
+ return ret;
}
-static int sh_pfc_reconfig_pin(struct sh_pfc *pfc, unsigned offset,
- int new_type)
+static void sh_pfc_func_disable(struct pinctrl_dev *pctldev, unsigned selector,
+ unsigned group)
{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ struct sh_pfc *pfc = pmx->pfc;
+ const struct sh_pfc_pin_group *grp = &pfc->info->groups[group];
unsigned long flags;
- int pinmux_type;
- int ret = -EINVAL;
+ unsigned int i;
spin_lock_irqsave(&pfc->lock, flags);
- pinmux_type = pfc->info->gpios[offset].flags & PINMUX_FLAG_TYPE;
+ for (i = 0; i < grp->nr_pins; ++i) {
+ int idx = sh_pfc_get_pin_index(pfc, grp->pins[i]);
+ struct sh_pfc_pin_config *cfg = &pmx->configs[idx];
- /*
- * See if the present config needs to first be de-configured.
- */
- switch (pinmux_type) {
- case PINMUX_TYPE_GPIO:
- break;
- case PINMUX_TYPE_OUTPUT:
- case PINMUX_TYPE_INPUT:
- case PINMUX_TYPE_INPUT_PULLUP:
- case PINMUX_TYPE_INPUT_PULLDOWN:
- sh_pfc_config_gpio(pfc, offset, pinmux_type, GPIO_CFG_FREE);
- break;
- default:
- goto err;
+ cfg->type = PINMUX_TYPE_NONE;
}
- /*
- * Dry run
- */
- if (sh_pfc_config_gpio(pfc, offset, new_type,
- GPIO_CFG_DRYRUN) != 0)
- goto err;
-
- /*
- * Request
- */
- if (sh_pfc_config_gpio(pfc, offset, new_type,
- GPIO_CFG_REQ) != 0)
- goto err;
-
- pfc->info->gpios[offset].flags &= ~PINMUX_FLAG_TYPE;
- pfc->info->gpios[offset].flags |= new_type;
-
- ret = 0;
-
-err:
spin_unlock_irqrestore(&pfc->lock, flags);
-
- return ret;
}
-
static int sh_pfc_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned offset)
{
struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
struct sh_pfc *pfc = pmx->pfc;
+ int idx = sh_pfc_get_pin_index(pfc, offset);
+ struct sh_pfc_pin_config *cfg = &pmx->configs[idx];
unsigned long flags;
- int ret, pinmux_type;
+ int ret;
spin_lock_irqsave(&pfc->lock, flags);
- pinmux_type = pfc->info->gpios[offset].flags & PINMUX_FLAG_TYPE;
+ if (cfg->type != PINMUX_TYPE_NONE) {
+ dev_err(pfc->dev,
+ "Pin %u is busy, can't configure it as GPIO.\n",
+ offset);
+ ret = -EBUSY;
+ goto done;
+ }
- switch (pinmux_type) {
- case PINMUX_TYPE_FUNCTION:
- pr_notice_once("Use of GPIO API for function requests is "
- "deprecated, convert to pinctrl\n");
- /* handle for now */
- ret = sh_pfc_config_function(pfc, offset);
- if (unlikely(ret < 0))
- goto err;
+ if (!pfc->gpio) {
+ /* If GPIOs are handled externally the pin mux type needs to be
+ * set to GPIO here.
+ */
+ const struct sh_pfc_pin *pin = &pfc->info->pins[idx];
- break;
- case PINMUX_TYPE_GPIO:
- case PINMUX_TYPE_INPUT:
- case PINMUX_TYPE_OUTPUT:
- break;
- default:
- pr_err("Unsupported mux type (%d), bailing...\n", pinmux_type);
- ret = -ENOTSUPP;
- goto err;
+ ret = sh_pfc_config_mux(pfc, pin->enum_id, PINMUX_TYPE_GPIO);
+ if (ret < 0)
+ goto done;
}
+ cfg->type = PINMUX_TYPE_GPIO;
+
ret = 0;
-err:
+done:
spin_unlock_irqrestore(&pfc->lock, flags);
return ret;
@@ -230,15 +209,12 @@ static void sh_pfc_gpio_disable_free(struct pinctrl_dev *pctldev,
{
struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
struct sh_pfc *pfc = pmx->pfc;
+ int idx = sh_pfc_get_pin_index(pfc, offset);
+ struct sh_pfc_pin_config *cfg = &pmx->configs[idx];
unsigned long flags;
- int pinmux_type;
spin_lock_irqsave(&pfc->lock, flags);
-
- pinmux_type = pfc->info->gpios[offset].flags & PINMUX_FLAG_TYPE;
-
- sh_pfc_config_gpio(pfc, offset, pinmux_type, GPIO_CFG_FREE);
-
+ cfg->type = PINMUX_TYPE_NONE;
spin_unlock_irqrestore(&pfc->lock, flags);
}
@@ -247,207 +223,242 @@ static int sh_pfc_gpio_set_direction(struct pinctrl_dev *pctldev,
unsigned offset, bool input)
{
struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
- int type = input ? PINMUX_TYPE_INPUT : PINMUX_TYPE_OUTPUT;
+ struct sh_pfc *pfc = pmx->pfc;
+ int new_type = input ? PINMUX_TYPE_INPUT : PINMUX_TYPE_OUTPUT;
+ int idx = sh_pfc_get_pin_index(pfc, offset);
+ const struct sh_pfc_pin *pin = &pfc->info->pins[idx];
+ struct sh_pfc_pin_config *cfg = &pmx->configs[idx];
+ unsigned long flags;
+ unsigned int dir;
+ int ret;
+
+ /* Check if the requested direction is supported by the pin. Not all SoCs
+ * provide pin config data, so perform the check conditionally.
+ */
+ if (pin->configs) {
+ dir = input ? SH_PFC_PIN_CFG_INPUT : SH_PFC_PIN_CFG_OUTPUT;
+ if (!(pin->configs & dir))
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&pfc->lock, flags);
+
+ ret = sh_pfc_config_mux(pfc, pin->enum_id, new_type);
+ if (ret < 0)
+ goto done;
+
+ cfg->type = new_type;
- return sh_pfc_reconfig_pin(pmx->pfc, offset, type);
+done:
+ spin_unlock_irqrestore(&pfc->lock, flags);
+ return ret;
}
-static struct pinmux_ops sh_pfc_pinmux_ops = {
+static const struct pinmux_ops sh_pfc_pinmux_ops = {
.get_functions_count = sh_pfc_get_functions_count,
.get_function_name = sh_pfc_get_function_name,
.get_function_groups = sh_pfc_get_function_groups,
- .enable = sh_pfc_noop_enable,
- .disable = sh_pfc_noop_disable,
+ .enable = sh_pfc_func_enable,
+ .disable = sh_pfc_func_disable,
.gpio_request_enable = sh_pfc_gpio_request_enable,
.gpio_disable_free = sh_pfc_gpio_disable_free,
.gpio_set_direction = sh_pfc_gpio_set_direction,
};
-static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned pin,
- unsigned long *config)
+/* Check whether the requested parameter is supported for a pin. */
+static bool sh_pfc_pinconf_validate(struct sh_pfc *pfc, unsigned int _pin,
+ enum pin_config_param param)
{
- struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
- struct sh_pfc *pfc = pmx->pfc;
+ int idx = sh_pfc_get_pin_index(pfc, _pin);
+ const struct sh_pfc_pin *pin = &pfc->info->pins[idx];
- *config = pfc->info->gpios[pin].flags & PINMUX_FLAG_TYPE;
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ return true;
- return 0;
-}
+ case PIN_CONFIG_BIAS_PULL_UP:
+ return pin->configs & SH_PFC_PIN_CFG_PULL_UP;
-static int sh_pfc_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin,
- unsigned long config)
-{
- struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
-
- /* Validate the new type */
- if (config >= PINMUX_FLAG_TYPE)
- return -EINVAL;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ return pin->configs & SH_PFC_PIN_CFG_PULL_DOWN;
- return sh_pfc_reconfig_pin(pmx->pfc, pin, config);
+ default:
+ return false;
+ }
}
-static void sh_pfc_pinconf_dbg_show(struct pinctrl_dev *pctldev,
- struct seq_file *s, unsigned pin)
+static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned _pin,
+ unsigned long *config)
{
- const char *pinmux_type_str[] = {
- [PINMUX_TYPE_NONE] = "none",
- [PINMUX_TYPE_FUNCTION] = "function",
- [PINMUX_TYPE_GPIO] = "gpio",
- [PINMUX_TYPE_OUTPUT] = "output",
- [PINMUX_TYPE_INPUT] = "input",
- [PINMUX_TYPE_INPUT_PULLUP] = "input bias pull up",
- [PINMUX_TYPE_INPUT_PULLDOWN] = "input bias pull down",
- };
- unsigned long config;
- int rc;
-
- rc = sh_pfc_pinconf_get(pctldev, pin, &config);
- if (unlikely(rc != 0))
- return;
-
- seq_printf(s, " %s", pinmux_type_str[config]);
-}
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ struct sh_pfc *pfc = pmx->pfc;
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ unsigned long flags;
+ unsigned int bias;
-static struct pinconf_ops sh_pfc_pinconf_ops = {
- .pin_config_get = sh_pfc_pinconf_get,
- .pin_config_set = sh_pfc_pinconf_set,
- .pin_config_dbg_show = sh_pfc_pinconf_dbg_show,
-};
+ if (!sh_pfc_pinconf_validate(pfc, _pin, param))
+ return -ENOTSUPP;
-static struct pinctrl_gpio_range sh_pfc_gpio_range = {
- .name = DRV_NAME,
- .id = 0,
-};
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (!pfc->info->ops || !pfc->info->ops->get_bias)
+ return -ENOTSUPP;
-static struct pinctrl_desc sh_pfc_pinctrl_desc = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
- .pctlops = &sh_pfc_pinctrl_ops,
- .pmxops = &sh_pfc_pinmux_ops,
- .confops = &sh_pfc_pinconf_ops,
-};
+ spin_lock_irqsave(&pfc->lock, flags);
+ bias = pfc->info->ops->get_bias(pfc, _pin);
+ spin_unlock_irqrestore(&pfc->lock, flags);
-static void sh_pfc_map_one_gpio(struct sh_pfc *pfc, struct sh_pfc_pinctrl *pmx,
- struct pinmux_gpio *gpio, unsigned offset)
-{
- struct pinmux_data_reg *dummy;
- unsigned long flags;
- int bit;
-
- gpio->flags &= ~PINMUX_FLAG_TYPE;
+ if (bias != param)
+ return -EINVAL;
- if (sh_pfc_get_data_reg(pfc, offset, &dummy, &bit) == 0)
- gpio->flags |= PINMUX_TYPE_GPIO;
- else {
- gpio->flags |= PINMUX_TYPE_FUNCTION;
+ *config = 0;
+ break;
- spin_lock_irqsave(&pmx->lock, flags);
- pmx->nr_functions++;
- spin_unlock_irqrestore(&pmx->lock, flags);
+ default:
+ return -ENOTSUPP;
}
+
+ return 0;
}
-/* pinmux ranges -> pinctrl pin descs */
-static int sh_pfc_map_gpios(struct sh_pfc *pfc, struct sh_pfc_pinctrl *pmx)
+static int sh_pfc_pinconf_set(struct pinctrl_dev *pctldev, unsigned _pin,
+ unsigned long config)
{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ struct sh_pfc *pfc = pmx->pfc;
+ enum pin_config_param param = pinconf_to_config_param(config);
unsigned long flags;
- int i;
- pmx->nr_pads = pfc->info->last_gpio - pfc->info->first_gpio + 1;
+ if (!sh_pfc_pinconf_validate(pfc, _pin, param))
+ return -ENOTSUPP;
- pmx->pads = devm_kzalloc(pfc->dev, sizeof(*pmx->pads) * pmx->nr_pads,
- GFP_KERNEL);
- if (unlikely(!pmx->pads)) {
- pmx->nr_pads = 0;
- return -ENOMEM;
- }
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (!pfc->info->ops || !pfc->info->ops->set_bias)
+ return -ENOTSUPP;
- spin_lock_irqsave(&pfc->lock, flags);
+ spin_lock_irqsave(&pfc->lock, flags);
+ pfc->info->ops->set_bias(pfc, _pin, param);
+ spin_unlock_irqrestore(&pfc->lock, flags);
- /*
- * We don't necessarily have a 1:1 mapping between pin and linux
- * GPIO number, as the latter maps to the associated enum_id.
- * Care needs to be taken to translate back to pin space when
- * dealing with any pin configurations.
- */
- for (i = 0; i < pmx->nr_pads; i++) {
- struct pinctrl_pin_desc *pin = pmx->pads + i;
- struct pinmux_gpio *gpio = pfc->info->gpios + i;
+ break;
- pin->number = pfc->info->first_gpio + i;
- pin->name = gpio->name;
+ default:
+ return -ENOTSUPP;
+ }
- /* XXX */
- if (unlikely(!gpio->enum_id))
- continue;
+ return 0;
+}
- sh_pfc_map_one_gpio(pfc, pmx, gpio, i);
- }
+static int sh_pfc_pinconf_group_set(struct pinctrl_dev *pctldev, unsigned group,
+ unsigned long config)
+{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ const unsigned int *pins;
+ unsigned int num_pins;
+ unsigned int i;
- spin_unlock_irqrestore(&pfc->lock, flags);
+ pins = pmx->pfc->info->groups[group].pins;
+ num_pins = pmx->pfc->info->groups[group].nr_pins;
- sh_pfc_pinctrl_desc.pins = pmx->pads;
- sh_pfc_pinctrl_desc.npins = pmx->nr_pads;
+ for (i = 0; i < num_pins; ++i)
+ sh_pfc_pinconf_set(pctldev, pins[i], config);
return 0;
}
-static int sh_pfc_map_functions(struct sh_pfc *pfc, struct sh_pfc_pinctrl *pmx)
+static const struct pinconf_ops sh_pfc_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = sh_pfc_pinconf_get,
+ .pin_config_set = sh_pfc_pinconf_set,
+ .pin_config_group_set = sh_pfc_pinconf_group_set,
+ .pin_config_config_dbg_show = pinconf_generic_dump_config,
+};
+
+/* PFC ranges -> pinctrl pin descs */
+static int sh_pfc_map_pins(struct sh_pfc *pfc, struct sh_pfc_pinctrl *pmx)
{
- unsigned long flags;
- int i, fn;
+ const struct pinmux_range *ranges;
+ struct pinmux_range def_range;
+ unsigned int nr_ranges;
+ unsigned int nr_pins;
+ unsigned int i;
+
+ if (pfc->info->ranges == NULL) {
+ def_range.begin = 0;
+ def_range.end = pfc->info->nr_pins - 1;
+ ranges = &def_range;
+ nr_ranges = 1;
+ } else {
+ ranges = pfc->info->ranges;
+ nr_ranges = pfc->info->nr_ranges;
+ }
- pmx->functions = devm_kzalloc(pfc->dev, pmx->nr_functions *
- sizeof(*pmx->functions), GFP_KERNEL);
- if (unlikely(!pmx->functions))
+ pmx->pins = devm_kzalloc(pfc->dev,
+ sizeof(*pmx->pins) * pfc->info->nr_pins,
+ GFP_KERNEL);
+ if (unlikely(!pmx->pins))
return -ENOMEM;
- spin_lock_irqsave(&pmx->lock, flags);
-
- for (i = fn = 0; i < pmx->nr_pads; i++) {
- struct pinmux_gpio *gpio = pfc->info->gpios + i;
+ pmx->configs = devm_kzalloc(pfc->dev,
+ sizeof(*pmx->configs) * pfc->info->nr_pins,
+ GFP_KERNEL);
+ if (unlikely(!pmx->configs))
+ return -ENOMEM;
- if ((gpio->flags & PINMUX_FLAG_TYPE) == PINMUX_TYPE_FUNCTION)
- pmx->functions[fn++] = gpio;
+ for (i = 0, nr_pins = 0; i < nr_ranges; ++i) {
+ const struct pinmux_range *range = &ranges[i];
+ unsigned int number;
+
+ for (number = range->begin; number <= range->end;
+ number++, nr_pins++) {
+ struct sh_pfc_pin_config *cfg = &pmx->configs[nr_pins];
+ struct pinctrl_pin_desc *pin = &pmx->pins[nr_pins];
+ const struct sh_pfc_pin *info =
+ &pfc->info->pins[nr_pins];
+
+ pin->number = number;
+ pin->name = info->name;
+ cfg->type = PINMUX_TYPE_NONE;
+ }
}
- spin_unlock_irqrestore(&pmx->lock, flags);
+ pfc->nr_pins = ranges[nr_ranges-1].end + 1;
- return 0;
+ return nr_ranges;
}
int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
{
struct sh_pfc_pinctrl *pmx;
- int ret;
+ int nr_ranges;
pmx = devm_kzalloc(pfc->dev, sizeof(*pmx), GFP_KERNEL);
if (unlikely(!pmx))
return -ENOMEM;
- spin_lock_init(&pmx->lock);
-
pmx->pfc = pfc;
pfc->pinctrl = pmx;
- ret = sh_pfc_map_gpios(pfc, pmx);
- if (unlikely(ret != 0))
- return ret;
+ nr_ranges = sh_pfc_map_pins(pfc, pmx);
+ if (unlikely(nr_ranges < 0))
+ return nr_ranges;
- ret = sh_pfc_map_functions(pfc, pmx);
- if (unlikely(ret != 0))
- return ret;
+ pmx->pctl_desc.name = DRV_NAME;
+ pmx->pctl_desc.owner = THIS_MODULE;
+ pmx->pctl_desc.pctlops = &sh_pfc_pinctrl_ops;
+ pmx->pctl_desc.pmxops = &sh_pfc_pinmux_ops;
+ pmx->pctl_desc.confops = &sh_pfc_pinconf_ops;
+ pmx->pctl_desc.pins = pmx->pins;
+ pmx->pctl_desc.npins = pfc->info->nr_pins;
- pmx->pctl = pinctrl_register(&sh_pfc_pinctrl_desc, pfc->dev, pmx);
- if (IS_ERR(pmx->pctl))
- return PTR_ERR(pmx->pctl);
-
- sh_pfc_gpio_range.npins = pfc->info->last_gpio
- - pfc->info->first_gpio + 1;
- sh_pfc_gpio_range.base = pfc->info->first_gpio;
- sh_pfc_gpio_range.pin_base = pfc->info->first_gpio;
-
- pinctrl_add_gpio_range(pmx->pctl, &sh_pfc_gpio_range);
+ pmx->pctl = pinctrl_register(&pmx->pctl_desc, pfc->dev, pmx);
+ if (pmx->pctl == NULL)
+ return -EINVAL;
return 0;
}
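(The rewritten pinctrl.c above leans on two helpers from core.c that are not
part of this diff: sh_pfc_config_mux(), which programs a mux setting for an
enum ID, and sh_pfc_get_pin_index(), which turns an absolute pin number into
an index into pfc->info->pins[] and pmx->configs[]. A minimal sketch of the
latter, assuming the pfc->info->ranges[] layout that sh_pfc_map_pins()
iterates over, and using a hypothetical name to avoid confusion with the real
helper:

	/* Illustrative sketch only; the real lookup lives in core.c. */
	static int sh_pfc_get_pin_index_sketch(struct sh_pfc *pfc,
					       unsigned int pin)
	{
		unsigned int offset = 0;
		unsigned int i;

		/* With no ranges table the pin number is the index. */
		if (pfc->info->ranges == NULL)
			return pin < pfc->info->nr_pins ? (int)pin : -EINVAL;

		for (i = 0; i < pfc->info->nr_ranges; ++i) {
			const struct pinmux_range *range = &pfc->info->ranges[i];

			if (pin >= range->begin && pin <= range->end)
				return offset + pin - range->begin;

			/* Skip past the pins covered by this range. */
			offset += range->end - range->begin + 1;
		}

		return -EINVAL;
	}

The per-pin busy tracking then works purely on that index: sh_pfc_func_enable()
refuses a group whose pins already have a non-NONE type recorded in
pmx->configs[], while gpio_request_enable() and gpio_disable_free() flip the
same field between PINMUX_TYPE_GPIO and PINMUX_TYPE_NONE.)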
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 13049c4..3b785fc 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -15,7 +15,8 @@
#include <asm-generic/gpio.h>
typedef unsigned short pinmux_enum_t;
-typedef unsigned short pinmux_flag_t;
+
+#define SH_PFC_MARK_INVALID ((pinmux_enum_t)-1)
enum {
PINMUX_TYPE_NONE,
@@ -30,44 +31,81 @@ enum {
PINMUX_FLAG_TYPE, /* must be last */
};
-#define PINMUX_FLAG_DBIT_SHIFT 5
-#define PINMUX_FLAG_DBIT (0x1f << PINMUX_FLAG_DBIT_SHIFT)
-#define PINMUX_FLAG_DREG_SHIFT 10
-#define PINMUX_FLAG_DREG (0x3f << PINMUX_FLAG_DREG_SHIFT)
+#define SH_PFC_PIN_CFG_INPUT (1 << 0)
+#define SH_PFC_PIN_CFG_OUTPUT (1 << 1)
+#define SH_PFC_PIN_CFG_PULL_UP (1 << 2)
+#define SH_PFC_PIN_CFG_PULL_DOWN (1 << 3)
-struct pinmux_gpio {
- pinmux_enum_t enum_id;
- pinmux_flag_t flags;
+struct sh_pfc_pin {
+ const pinmux_enum_t enum_id;
const char *name;
+ unsigned int configs;
};
-#define PINMUX_GPIO(gpio, data_or_mark) \
- [gpio] = { .name = __stringify(gpio), .enum_id = data_or_mark, .flags = PINMUX_TYPE_NONE }
+#define SH_PFC_PIN_GROUP(n) \
+ { \
+ .name = #n, \
+ .pins = n##_pins, \
+ .mux = n##_mux, \
+ .nr_pins = ARRAY_SIZE(n##_pins), \
+ }
+
+struct sh_pfc_pin_group {
+ const char *name;
+ const unsigned int *pins;
+ const unsigned int *mux;
+ unsigned int nr_pins;
+};
+
+#define SH_PFC_FUNCTION(n) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .nr_groups = ARRAY_SIZE(n##_groups), \
+ }
+
+struct sh_pfc_function {
+ const char *name;
+ const char * const *groups;
+ unsigned int nr_groups;
+};
+
+struct pinmux_func {
+ const pinmux_enum_t enum_id;
+ const char *name;
+};
+
+#define PINMUX_GPIO(gpio, data_or_mark) \
+ [gpio] = { \
+ .name = __stringify(gpio), \
+ .enum_id = data_or_mark, \
+ }
+#define PINMUX_GPIO_FN(gpio, base, data_or_mark) \
+ [gpio - (base)] = { \
+ .name = __stringify(gpio), \
+ .enum_id = data_or_mark, \
+ }
#define PINMUX_DATA(data_or_mark, ids...) data_or_mark, ids, 0
struct pinmux_cfg_reg {
unsigned long reg, reg_width, field_width;
- unsigned long *cnt;
- pinmux_enum_t *enum_ids;
- unsigned long *var_field_width;
+ const pinmux_enum_t *enum_ids;
+ const unsigned long *var_field_width;
};
#define PINMUX_CFG_REG(name, r, r_width, f_width) \
.reg = r, .reg_width = r_width, .field_width = f_width, \
- .cnt = (unsigned long [r_width / f_width]) {}, \
.enum_ids = (pinmux_enum_t [(r_width / f_width) * (1 << f_width)])
#define PINMUX_CFG_REG_VAR(name, r, r_width, var_fw0, var_fwn...) \
.reg = r, .reg_width = r_width, \
- .cnt = (unsigned long [r_width]) {}, \
.var_field_width = (unsigned long [r_width]) { var_fw0, var_fwn, 0 }, \
.enum_ids = (pinmux_enum_t [])
struct pinmux_data_reg {
- unsigned long reg, reg_width, reg_shadow;
- pinmux_enum_t *enum_ids;
- void __iomem *mapped_reg;
+ unsigned long reg, reg_width;
+ const pinmux_enum_t *enum_ids;
};
#define PINMUX_DATA_REG(name, r, r_width) \
@@ -76,11 +114,11 @@ struct pinmux_data_reg {
struct pinmux_irq {
int irq;
- pinmux_enum_t *enum_ids;
+ unsigned short *gpios;
};
#define PINMUX_IRQ(irq_nr, ids...) \
- { .irq = irq_nr, .enum_ids = (pinmux_enum_t []) { ids, 0 } } \
+ { .irq = irq_nr, .gpios = (unsigned short []) { ids, 0 } } \
struct pinmux_range {
pinmux_enum_t begin;
@@ -88,33 +126,49 @@ struct pinmux_range {
pinmux_enum_t force;
};
+struct sh_pfc;
+
+struct sh_pfc_soc_operations {
+ unsigned int (*get_bias)(struct sh_pfc *pfc, unsigned int pin);
+ void (*set_bias)(struct sh_pfc *pfc, unsigned int pin,
+ unsigned int bias);
+};
+
struct sh_pfc_soc_info {
- char *name;
- pinmux_enum_t reserved_id;
- struct pinmux_range data;
+ const char *name;
+ const struct sh_pfc_soc_operations *ops;
+
struct pinmux_range input;
struct pinmux_range input_pd;
struct pinmux_range input_pu;
struct pinmux_range output;
- struct pinmux_range mark;
struct pinmux_range function;
- unsigned first_gpio, last_gpio;
+ const struct sh_pfc_pin *pins;
+ unsigned int nr_pins;
+ const struct pinmux_range *ranges;
+ unsigned int nr_ranges;
+ const struct sh_pfc_pin_group *groups;
+ unsigned int nr_groups;
+ const struct sh_pfc_function *functions;
+ unsigned int nr_functions;
+
+ const struct pinmux_func *func_gpios;
+ unsigned int nr_func_gpios;
- struct pinmux_gpio *gpios;
- struct pinmux_cfg_reg *cfg_regs;
- struct pinmux_data_reg *data_regs;
+ const struct pinmux_cfg_reg *cfg_regs;
+ const struct pinmux_data_reg *data_regs;
- pinmux_enum_t *gpio_data;
+ const pinmux_enum_t *gpio_data;
unsigned int gpio_data_size;
- struct pinmux_irq *gpio_irq;
+ const struct pinmux_irq *gpio_irq;
unsigned int gpio_irq_size;
unsigned long unlock_reg;
};
-enum { GPIO_CFG_DRYRUN, GPIO_CFG_REQ, GPIO_CFG_FREE };
+enum { GPIO_CFG_REQ, GPIO_CFG_FREE };
/* helper macro for port */
#define PORT_1(fn, pfx, sfx) fn(pfx, sfx)
@@ -126,6 +180,23 @@ enum { GPIO_CFG_DRYRUN, GPIO_CFG_REQ, GPIO_CFG_FREE };
PORT_1(fn, pfx##6, sfx), PORT_1(fn, pfx##7, sfx), \
PORT_1(fn, pfx##8, sfx), PORT_1(fn, pfx##9, sfx)
+#define PORT_10_REV(fn, pfx, sfx) \
+ PORT_1(fn, pfx##9, sfx), PORT_1(fn, pfx##8, sfx), \
+ PORT_1(fn, pfx##7, sfx), PORT_1(fn, pfx##6, sfx), \
+ PORT_1(fn, pfx##5, sfx), PORT_1(fn, pfx##4, sfx), \
+ PORT_1(fn, pfx##3, sfx), PORT_1(fn, pfx##2, sfx), \
+ PORT_1(fn, pfx##1, sfx), PORT_1(fn, pfx##0, sfx)
+
+#define PORT_32(fn, pfx, sfx) \
+ PORT_10(fn, pfx, sfx), PORT_10(fn, pfx##1, sfx), \
+ PORT_10(fn, pfx##2, sfx), PORT_1(fn, pfx##30, sfx), \
+ PORT_1(fn, pfx##31, sfx)
+
+#define PORT_32_REV(fn, pfx, sfx) \
+ PORT_1(fn, pfx##31, sfx), PORT_1(fn, pfx##30, sfx), \
+ PORT_10_REV(fn, pfx##2, sfx), PORT_10_REV(fn, pfx##1, sfx), \
+ PORT_10_REV(fn, pfx, sfx)
+
#define PORT_90(fn, pfx, sfx) \
PORT_10(fn, pfx##1, sfx), PORT_10(fn, pfx##2, sfx), \
PORT_10(fn, pfx##3, sfx), PORT_10(fn, pfx##4, sfx), \
@@ -137,7 +208,7 @@ enum { GPIO_CFG_DRYRUN, GPIO_CFG_REQ, GPIO_CFG_FREE };
#define _GPIO_PORT(pfx, sfx) PINMUX_GPIO(GPIO_PORT##pfx, PORT##pfx##_DATA)
#define PORT_ALL(str) CPU_ALL_PORT(_PORT_ALL, PORT, str)
#define GPIO_PORT_ALL() CPU_ALL_PORT(_GPIO_PORT, , unused)
-#define GPIO_FN(str) PINMUX_GPIO(GPIO_FN_##str, str##_MARK)
+#define GPIO_FN(str) PINMUX_GPIO_FN(GPIO_FN_##str, PINMUX_FN_BASE, str##_MARK)
/* helper macro for pinmux_enum_t */
#define PORT_DATA_I(nr) \
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c
index 295b349..3e5a887 100644
--- a/drivers/pinctrl/spear/pinctrl-plgpio.c
+++ b/drivers/pinctrl/spear/pinctrl-plgpio.c
@@ -15,12 +15,12 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/spinlock.h>
-#include <asm/mach/irq.h>
#define MAX_GPIO_PER_REG 32
#define PIN_OFFSET(pin) (pin % MAX_GPIO_PER_REG)
@@ -75,7 +75,7 @@ struct plgpio {
int (*o2p)(int offset); /* offset_to_pin */
u32 p2o_regs;
struct plgpio_regs regs;
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
struct plgpio_regs *csave_regs;
#endif
};
@@ -554,7 +554,7 @@ static int plgpio_probe(struct platform_device *pdev)
if (IS_ERR(plgpio->clk))
dev_warn(&pdev->dev, "clk_get() failed, work without it\n");
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
plgpio->csave_regs = devm_kzalloc(&pdev->dev,
sizeof(*plgpio->csave_regs) *
DIV_ROUND_UP(plgpio->chip.ngpio, MAX_GPIO_PER_REG),
@@ -641,7 +641,7 @@ unprepare_clk:
return ret;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int plgpio_suspend(struct device *dev)
{
struct plgpio *plgpio = dev_get_drvdata(dev);
diff --git a/drivers/pinctrl/vt8500/Kconfig b/drivers/pinctrl/vt8500/Kconfig
new file mode 100644
index 0000000..55724a7
--- /dev/null
+++ b/drivers/pinctrl/vt8500/Kconfig
@@ -0,0 +1,52 @@
+#
+# VIA/Wondermedia PINCTRL drivers
+#
+
+if ARCH_VT8500
+
+config PINCTRL_WMT
+ bool
+ select PINMUX
+ select GENERIC_PINCONF
+
+config PINCTRL_VT8500
+ bool "VIA VT8500 pin controller driver"
+ depends on ARCH_WM8505
+ select PINCTRL_WMT
+ help
+ Say yes here to support the gpio/pin control module on
+ VIA VT8500 SoCs.
+
+config PINCTRL_WM8505
+ bool "Wondermedia WM8505 pin controller driver"
+ depends on ARCH_WM8505
+ select PINCTRL_WMT
+ help
+ Say yes here to support the gpio/pin control module on
+ Wondermedia WM8505 SoCs.
+
+config PINCTRL_WM8650
+ bool "Wondermedia WM8650 pin controller driver"
+ depends on ARCH_WM8505
+ select PINCTRL_WMT
+ help
+ Say yes here to support the gpio/pin control module on
+ Wondermedia WM8650 SoCs.
+
+config PINCTRL_WM8750
+ bool "Wondermedia WM8750 pin controller driver"
+ depends on ARCH_WM8750
+ select PINCTRL_WMT
+ help
+ Say yes here to support the gpio/pin control module on
+ Wondermedia WM8750 SoCs.
+
+config PINCTRL_WM8850
+ bool "Wondermedia WM8850 pin controller driver"
+ depends on ARCH_WM8850
+ select PINCTRL_WMT
+ help
+ Say yes here to support the gpio/pin control module on
+ Wondermedia WM8850 SoCs.
+
+endif
diff --git a/drivers/pinctrl/vt8500/Makefile b/drivers/pinctrl/vt8500/Makefile
new file mode 100644
index 0000000..24ec45d
--- /dev/null
+++ b/drivers/pinctrl/vt8500/Makefile
@@ -0,0 +1,8 @@
+# VIA/Wondermedia pinctrl support
+
+obj-$(CONFIG_PINCTRL_WMT) += pinctrl-wmt.o
+obj-$(CONFIG_PINCTRL_VT8500) += pinctrl-vt8500.o
+obj-$(CONFIG_PINCTRL_WM8505) += pinctrl-wm8505.o
+obj-$(CONFIG_PINCTRL_WM8650) += pinctrl-wm8650.o
+obj-$(CONFIG_PINCTRL_WM8750) += pinctrl-wm8750.o
+obj-$(CONFIG_PINCTRL_WM8850) += pinctrl-wm8850.o
diff --git a/drivers/pinctrl/vt8500/pinctrl-vt8500.c b/drivers/pinctrl/vt8500/pinctrl-vt8500.c
new file mode 100644
index 0000000..f2fe9f8
--- /dev/null
+++ b/drivers/pinctrl/vt8500/pinctrl-vt8500.c
@@ -0,0 +1,501 @@
+/*
+ * Pinctrl data for VIA VT8500 SoC
+ *
+ * Copyright (c) 2013 Tony Prisk <linux@prisktech.co.nz>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "pinctrl-wmt.h"
+
+/*
+ * Describe the register offsets within the GPIO memory space
+ * The dedicated external GPIOs should always be listed in bank 0
+ * so they are exported in the 0..31 range, which is what users
+ * expect.
+ *
+ * Do not reorder these banks as it will change the pin numbering
+ */
+static const struct wmt_pinctrl_bank_registers vt8500_banks[] = {
+ WMT_PINCTRL_BANK(NO_REG, 0x3C, 0x5C, 0x7C, NO_REG, NO_REG), /* 0 */
+ WMT_PINCTRL_BANK(0x00, 0x20, 0x40, 0x60, NO_REG, NO_REG), /* 1 */
+ WMT_PINCTRL_BANK(0x04, 0x24, 0x44, 0x64, NO_REG, NO_REG), /* 2 */
+ WMT_PINCTRL_BANK(0x08, 0x28, 0x48, 0x68, NO_REG, NO_REG), /* 3 */
+ WMT_PINCTRL_BANK(0x0C, 0x2C, 0x4C, 0x6C, NO_REG, NO_REG), /* 4 */
+ WMT_PINCTRL_BANK(0x10, 0x30, 0x50, 0x70, NO_REG, NO_REG), /* 5 */
+ WMT_PINCTRL_BANK(0x14, 0x34, 0x54, 0x74, NO_REG, NO_REG), /* 6 */
+};
+
+/* Please keep sorted by bank/bit */
+#define WMT_PIN_EXTGPIO0 WMT_PIN(0, 0)
+#define WMT_PIN_EXTGPIO1 WMT_PIN(0, 1)
+#define WMT_PIN_EXTGPIO2 WMT_PIN(0, 2)
+#define WMT_PIN_EXTGPIO3 WMT_PIN(0, 3)
+#define WMT_PIN_EXTGPIO4 WMT_PIN(0, 4)
+#define WMT_PIN_EXTGPIO5 WMT_PIN(0, 5)
+#define WMT_PIN_EXTGPIO6 WMT_PIN(0, 6)
+#define WMT_PIN_EXTGPIO7 WMT_PIN(0, 7)
+#define WMT_PIN_EXTGPIO8 WMT_PIN(0, 8)
+#define WMT_PIN_UART0RTS WMT_PIN(1, 0)
+#define WMT_PIN_UART0TXD WMT_PIN(1, 1)
+#define WMT_PIN_UART0CTS WMT_PIN(1, 2)
+#define WMT_PIN_UART0RXD WMT_PIN(1, 3)
+#define WMT_PIN_UART1RTS WMT_PIN(1, 4)
+#define WMT_PIN_UART1TXD WMT_PIN(1, 5)
+#define WMT_PIN_UART1CTS WMT_PIN(1, 6)
+#define WMT_PIN_UART1RXD WMT_PIN(1, 7)
+#define WMT_PIN_SPI0CLK WMT_PIN(1, 8)
+#define WMT_PIN_SPI0SS WMT_PIN(1, 9)
+#define WMT_PIN_SPI0MISO WMT_PIN(1, 10)
+#define WMT_PIN_SPI0MOSI WMT_PIN(1, 11)
+#define WMT_PIN_SPI1CLK WMT_PIN(1, 12)
+#define WMT_PIN_SPI1SS WMT_PIN(1, 13)
+#define WMT_PIN_SPI1MISO WMT_PIN(1, 14)
+#define WMT_PIN_SPI1MOSI WMT_PIN(1, 15)
+#define WMT_PIN_SPI2CLK WMT_PIN(1, 16)
+#define WMT_PIN_SPI2SS WMT_PIN(1, 17)
+#define WMT_PIN_SPI2MISO WMT_PIN(1, 18)
+#define WMT_PIN_SPI2MOSI WMT_PIN(1, 19)
+#define WMT_PIN_SDDATA0 WMT_PIN(2, 0)
+#define WMT_PIN_SDDATA1 WMT_PIN(2, 1)
+#define WMT_PIN_SDDATA2 WMT_PIN(2, 2)
+#define WMT_PIN_SDDATA3 WMT_PIN(2, 3)
+#define WMT_PIN_MMCDATA0 WMT_PIN(2, 4)
+#define WMT_PIN_MMCDATA1 WMT_PIN(2, 5)
+#define WMT_PIN_MMCDATA2 WMT_PIN(2, 6)
+#define WMT_PIN_MMCDATA3 WMT_PIN(2, 7)
+#define WMT_PIN_SDCLK WMT_PIN(2, 8)
+#define WMT_PIN_SDWP WMT_PIN(2, 9)
+#define WMT_PIN_SDCMD WMT_PIN(2, 10)
+#define WMT_PIN_MSDATA0 WMT_PIN(2, 16)
+#define WMT_PIN_MSDATA1 WMT_PIN(2, 17)
+#define WMT_PIN_MSDATA2 WMT_PIN(2, 18)
+#define WMT_PIN_MSDATA3 WMT_PIN(2, 19)
+#define WMT_PIN_MSCLK WMT_PIN(2, 20)
+#define WMT_PIN_MSBS WMT_PIN(2, 21)
+#define WMT_PIN_MSINS WMT_PIN(2, 22)
+#define WMT_PIN_I2C0SCL WMT_PIN(2, 24)
+#define WMT_PIN_I2C0SDA WMT_PIN(2, 25)
+#define WMT_PIN_I2C1SCL WMT_PIN(2, 26)
+#define WMT_PIN_I2C1SDA WMT_PIN(2, 27)
+#define WMT_PIN_MII0RXD0 WMT_PIN(3, 0)
+#define WMT_PIN_MII0RXD1 WMT_PIN(3, 1)
+#define WMT_PIN_MII0RXD2 WMT_PIN(3, 2)
+#define WMT_PIN_MII0RXD3 WMT_PIN(3, 3)
+#define WMT_PIN_MII0RXCLK WMT_PIN(3, 4)
+#define WMT_PIN_MII0RXDV WMT_PIN(3, 5)
+#define WMT_PIN_MII0RXERR WMT_PIN(3, 6)
+#define WMT_PIN_MII0PHYRST WMT_PIN(3, 7)
+#define WMT_PIN_MII0TXD0 WMT_PIN(3, 8)
+#define WMT_PIN_MII0TXD1 WMT_PIN(3, 9)
+#define WMT_PIN_MII0TXD2 WMT_PIN(3, 10)
+#define WMT_PIN_MII0TXD3 WMT_PIN(3, 11)
+#define WMT_PIN_MII0TXCLK WMT_PIN(3, 12)
+#define WMT_PIN_MII0TXEN WMT_PIN(3, 13)
+#define WMT_PIN_MII0TXERR WMT_PIN(3, 14)
+#define WMT_PIN_MII0PHYPD WMT_PIN(3, 15)
+#define WMT_PIN_MII0COL WMT_PIN(3, 16)
+#define WMT_PIN_MII0CRS WMT_PIN(3, 17)
+#define WMT_PIN_MII0MDIO WMT_PIN(3, 18)
+#define WMT_PIN_MII0MDC WMT_PIN(3, 19)
+#define WMT_PIN_SEECS WMT_PIN(3, 20)
+#define WMT_PIN_SEECK WMT_PIN(3, 21)
+#define WMT_PIN_SEEDI WMT_PIN(3, 22)
+#define WMT_PIN_SEEDO WMT_PIN(3, 23)
+#define WMT_PIN_IDEDREQ0 WMT_PIN(3, 24)
+#define WMT_PIN_IDEDREQ1 WMT_PIN(3, 25)
+#define WMT_PIN_IDEIOW WMT_PIN(3, 26)
+#define WMT_PIN_IDEIOR WMT_PIN(3, 27)
+#define WMT_PIN_IDEDACK WMT_PIN(3, 28)
+#define WMT_PIN_IDEIORDY WMT_PIN(3, 29)
+#define WMT_PIN_IDEINTRQ WMT_PIN(3, 30)
+#define WMT_PIN_VDIN0 WMT_PIN(4, 0)
+#define WMT_PIN_VDIN1 WMT_PIN(4, 1)
+#define WMT_PIN_VDIN2 WMT_PIN(4, 2)
+#define WMT_PIN_VDIN3 WMT_PIN(4, 3)
+#define WMT_PIN_VDIN4 WMT_PIN(4, 4)
+#define WMT_PIN_VDIN5 WMT_PIN(4, 5)
+#define WMT_PIN_VDIN6 WMT_PIN(4, 6)
+#define WMT_PIN_VDIN7 WMT_PIN(4, 7)
+#define WMT_PIN_VDOUT0 WMT_PIN(4, 8)
+#define WMT_PIN_VDOUT1 WMT_PIN(4, 9)
+#define WMT_PIN_VDOUT2 WMT_PIN(4, 10)
+#define WMT_PIN_VDOUT3 WMT_PIN(4, 11)
+#define WMT_PIN_VDOUT4 WMT_PIN(4, 12)
+#define WMT_PIN_VDOUT5 WMT_PIN(4, 13)
+#define WMT_PIN_NANDCLE0 WMT_PIN(4, 14)
+#define WMT_PIN_NANDCLE1 WMT_PIN(4, 15)
+#define WMT_PIN_VDOUT6_7 WMT_PIN(4, 16)
+#define WMT_PIN_VHSYNC WMT_PIN(4, 17)
+#define WMT_PIN_VVSYNC WMT_PIN(4, 18)
+#define WMT_PIN_TSDIN0 WMT_PIN(5, 8)
+#define WMT_PIN_TSDIN1 WMT_PIN(5, 9)
+#define WMT_PIN_TSDIN2 WMT_PIN(5, 10)
+#define WMT_PIN_TSDIN3 WMT_PIN(5, 11)
+#define WMT_PIN_TSDIN4 WMT_PIN(5, 12)
+#define WMT_PIN_TSDIN5 WMT_PIN(5, 13)
+#define WMT_PIN_TSDIN6 WMT_PIN(5, 14)
+#define WMT_PIN_TSDIN7 WMT_PIN(5, 15)
+#define WMT_PIN_TSSYNC WMT_PIN(5, 16)
+#define WMT_PIN_TSVALID WMT_PIN(5, 17)
+#define WMT_PIN_TSCLK WMT_PIN(5, 18)
+#define WMT_PIN_LCDD0 WMT_PIN(6, 0)
+#define WMT_PIN_LCDD1 WMT_PIN(6, 1)
+#define WMT_PIN_LCDD2 WMT_PIN(6, 2)
+#define WMT_PIN_LCDD3 WMT_PIN(6, 3)
+#define WMT_PIN_LCDD4 WMT_PIN(6, 4)
+#define WMT_PIN_LCDD5 WMT_PIN(6, 5)
+#define WMT_PIN_LCDD6 WMT_PIN(6, 6)
+#define WMT_PIN_LCDD7 WMT_PIN(6, 7)
+#define WMT_PIN_LCDD8 WMT_PIN(6, 8)
+#define WMT_PIN_LCDD9 WMT_PIN(6, 9)
+#define WMT_PIN_LCDD10 WMT_PIN(6, 10)
+#define WMT_PIN_LCDD11 WMT_PIN(6, 11)
+#define WMT_PIN_LCDD12 WMT_PIN(6, 12)
+#define WMT_PIN_LCDD13 WMT_PIN(6, 13)
+#define WMT_PIN_LCDD14 WMT_PIN(6, 14)
+#define WMT_PIN_LCDD15 WMT_PIN(6, 15)
+#define WMT_PIN_LCDD16 WMT_PIN(6, 16)
+#define WMT_PIN_LCDD17 WMT_PIN(6, 17)
+#define WMT_PIN_LCDCLK WMT_PIN(6, 18)
+#define WMT_PIN_LCDDEN WMT_PIN(6, 19)
+#define WMT_PIN_LCDLINE WMT_PIN(6, 20)
+#define WMT_PIN_LCDFRM WMT_PIN(6, 21)
+#define WMT_PIN_LCDBIAS WMT_PIN(6, 22)
+
+static const struct pinctrl_pin_desc vt8500_pins[] = {
+ PINCTRL_PIN(WMT_PIN_EXTGPIO0, "extgpio0"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO1, "extgpio1"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO2, "extgpio2"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO3, "extgpio3"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO4, "extgpio4"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO5, "extgpio5"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO6, "extgpio6"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO7, "extgpio7"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO8, "extgpio8"),
+ PINCTRL_PIN(WMT_PIN_UART0RTS, "uart0_rts"),
+ PINCTRL_PIN(WMT_PIN_UART0TXD, "uart0_txd"),
+ PINCTRL_PIN(WMT_PIN_UART0CTS, "uart0_cts"),
+ PINCTRL_PIN(WMT_PIN_UART0RXD, "uart0_rxd"),
+ PINCTRL_PIN(WMT_PIN_UART1RTS, "uart1_rts"),
+ PINCTRL_PIN(WMT_PIN_UART1TXD, "uart1_txd"),
+ PINCTRL_PIN(WMT_PIN_UART1CTS, "uart1_cts"),
+ PINCTRL_PIN(WMT_PIN_UART1RXD, "uart1_rxd"),
+ PINCTRL_PIN(WMT_PIN_SPI0CLK, "spi0_clk"),
+ PINCTRL_PIN(WMT_PIN_SPI0SS, "spi0_ss"),
+ PINCTRL_PIN(WMT_PIN_SPI0MISO, "spi0_miso"),
+ PINCTRL_PIN(WMT_PIN_SPI0MOSI, "spi0_mosi"),
+ PINCTRL_PIN(WMT_PIN_SPI1CLK, "spi1_clk"),
+ PINCTRL_PIN(WMT_PIN_SPI1SS, "spi1_ss"),
+ PINCTRL_PIN(WMT_PIN_SPI1MISO, "spi1_miso"),
+ PINCTRL_PIN(WMT_PIN_SPI1MOSI, "spi1_mosi"),
+ PINCTRL_PIN(WMT_PIN_SPI2CLK, "spi2_clk"),
+ PINCTRL_PIN(WMT_PIN_SPI2SS, "spi2_ss"),
+ PINCTRL_PIN(WMT_PIN_SPI2MISO, "spi2_miso"),
+ PINCTRL_PIN(WMT_PIN_SPI2MOSI, "spi2_mosi"),
+ PINCTRL_PIN(WMT_PIN_SDDATA0, "sd_data0"),
+ PINCTRL_PIN(WMT_PIN_SDDATA1, "sd_data1"),
+ PINCTRL_PIN(WMT_PIN_SDDATA2, "sd_data2"),
+ PINCTRL_PIN(WMT_PIN_SDDATA3, "sd_data3"),
+ PINCTRL_PIN(WMT_PIN_MMCDATA0, "mmc_data0"),
+ PINCTRL_PIN(WMT_PIN_MMCDATA1, "mmc_data1"),
+ PINCTRL_PIN(WMT_PIN_MMCDATA2, "mmc_data2"),
+ PINCTRL_PIN(WMT_PIN_MMCDATA3, "mmc_data3"),
+ PINCTRL_PIN(WMT_PIN_SDCLK, "sd_clk"),
+ PINCTRL_PIN(WMT_PIN_SDWP, "sd_wp"),
+ PINCTRL_PIN(WMT_PIN_SDCMD, "sd_cmd"),
+ PINCTRL_PIN(WMT_PIN_MSDATA0, "ms_data0"),
+ PINCTRL_PIN(WMT_PIN_MSDATA1, "ms_data1"),
+ PINCTRL_PIN(WMT_PIN_MSDATA2, "ms_data2"),
+ PINCTRL_PIN(WMT_PIN_MSDATA3, "ms_data3"),
+ PINCTRL_PIN(WMT_PIN_MSCLK, "ms_clk"),
+ PINCTRL_PIN(WMT_PIN_MSBS, "ms_bs"),
+ PINCTRL_PIN(WMT_PIN_MSINS, "ms_ins"),
+ PINCTRL_PIN(WMT_PIN_I2C0SCL, "i2c0_scl"),
+ PINCTRL_PIN(WMT_PIN_I2C0SDA, "i2c0_sda"),
+ PINCTRL_PIN(WMT_PIN_I2C1SCL, "i2c1_scl"),
+ PINCTRL_PIN(WMT_PIN_I2C1SDA, "i2c1_sda"),
+ PINCTRL_PIN(WMT_PIN_MII0RXD0, "mii0_rxd0"),
+ PINCTRL_PIN(WMT_PIN_MII0RXD1, "mii0_rxd1"),
+ PINCTRL_PIN(WMT_PIN_MII0RXD2, "mii0_rxd2"),
+ PINCTRL_PIN(WMT_PIN_MII0RXD3, "mii0_rxd3"),
+ PINCTRL_PIN(WMT_PIN_MII0RXCLK, "mii0_rxclk"),
+ PINCTRL_PIN(WMT_PIN_MII0RXDV, "mii0_rxdv"),
+ PINCTRL_PIN(WMT_PIN_MII0RXERR, "mii0_rxerr"),
+ PINCTRL_PIN(WMT_PIN_MII0PHYRST, "mii0_phyrst"),
+ PINCTRL_PIN(WMT_PIN_MII0TXD0, "mii0_txd0"),
+ PINCTRL_PIN(WMT_PIN_MII0TXD1, "mii0_txd1"),
+ PINCTRL_PIN(WMT_PIN_MII0TXD2, "mii0_txd2"),
+ PINCTRL_PIN(WMT_PIN_MII0TXD3, "mii0_txd3"),
+ PINCTRL_PIN(WMT_PIN_MII0TXCLK, "mii0_txclk"),
+ PINCTRL_PIN(WMT_PIN_MII0TXEN, "mii0_txen"),
+ PINCTRL_PIN(WMT_PIN_MII0TXERR, "mii0_txerr"),
+ PINCTRL_PIN(WMT_PIN_MII0PHYPD, "mii0_phypd"),
+ PINCTRL_PIN(WMT_PIN_MII0COL, "mii0_col"),
+ PINCTRL_PIN(WMT_PIN_MII0CRS, "mii0_crs"),
+ PINCTRL_PIN(WMT_PIN_MII0MDIO, "mii0_mdio"),
+ PINCTRL_PIN(WMT_PIN_MII0MDC, "mii0_mdc"),
+ PINCTRL_PIN(WMT_PIN_SEECS, "see_cs"),
+ PINCTRL_PIN(WMT_PIN_SEECK, "see_ck"),
+ PINCTRL_PIN(WMT_PIN_SEEDI, "see_di"),
+ PINCTRL_PIN(WMT_PIN_SEEDO, "see_do"),
+ PINCTRL_PIN(WMT_PIN_IDEDREQ0, "ide_dreq0"),
+ PINCTRL_PIN(WMT_PIN_IDEDREQ1, "ide_dreq1"),
+ PINCTRL_PIN(WMT_PIN_IDEIOW, "ide_iow"),
+ PINCTRL_PIN(WMT_PIN_IDEIOR, "ide_ior"),
+ PINCTRL_PIN(WMT_PIN_IDEDACK, "ide_dack"),
+ PINCTRL_PIN(WMT_PIN_IDEIORDY, "ide_iordy"),
+ PINCTRL_PIN(WMT_PIN_IDEINTRQ, "ide_intrq"),
+ PINCTRL_PIN(WMT_PIN_VDIN0, "vdin0"),
+ PINCTRL_PIN(WMT_PIN_VDIN1, "vdin1"),
+ PINCTRL_PIN(WMT_PIN_VDIN2, "vdin2"),
+ PINCTRL_PIN(WMT_PIN_VDIN3, "vdin3"),
+ PINCTRL_PIN(WMT_PIN_VDIN4, "vdin4"),
+ PINCTRL_PIN(WMT_PIN_VDIN5, "vdin5"),
+ PINCTRL_PIN(WMT_PIN_VDIN6, "vdin6"),
+ PINCTRL_PIN(WMT_PIN_VDIN7, "vdin7"),
+ PINCTRL_PIN(WMT_PIN_VDOUT0, "vdout0"),
+ PINCTRL_PIN(WMT_PIN_VDOUT1, "vdout1"),
+ PINCTRL_PIN(WMT_PIN_VDOUT2, "vdout2"),
+ PINCTRL_PIN(WMT_PIN_VDOUT3, "vdout3"),
+ PINCTRL_PIN(WMT_PIN_VDOUT4, "vdout4"),
+ PINCTRL_PIN(WMT_PIN_VDOUT5, "vdout5"),
+ PINCTRL_PIN(WMT_PIN_NANDCLE0, "nand_cle0"),
+ PINCTRL_PIN(WMT_PIN_NANDCLE1, "nand_cle1"),
+ PINCTRL_PIN(WMT_PIN_VDOUT6_7, "vdout6_7"),
+ PINCTRL_PIN(WMT_PIN_VHSYNC, "vhsync"),
+ PINCTRL_PIN(WMT_PIN_VVSYNC, "vvsync"),
+ PINCTRL_PIN(WMT_PIN_TSDIN0, "tsdin0"),
+ PINCTRL_PIN(WMT_PIN_TSDIN1, "tsdin1"),
+ PINCTRL_PIN(WMT_PIN_TSDIN2, "tsdin2"),
+ PINCTRL_PIN(WMT_PIN_TSDIN3, "tsdin3"),
+ PINCTRL_PIN(WMT_PIN_TSDIN4, "tsdin4"),
+ PINCTRL_PIN(WMT_PIN_TSDIN5, "tsdin5"),
+ PINCTRL_PIN(WMT_PIN_TSDIN6, "tsdin6"),
+ PINCTRL_PIN(WMT_PIN_TSDIN7, "tsdin7"),
+ PINCTRL_PIN(WMT_PIN_TSSYNC, "tssync"),
+ PINCTRL_PIN(WMT_PIN_TSVALID, "tsvalid"),
+ PINCTRL_PIN(WMT_PIN_TSCLK, "tsclk"),
+ PINCTRL_PIN(WMT_PIN_LCDD0, "lcd_d0"),
+ PINCTRL_PIN(WMT_PIN_LCDD1, "lcd_d1"),
+ PINCTRL_PIN(WMT_PIN_LCDD2, "lcd_d2"),
+ PINCTRL_PIN(WMT_PIN_LCDD3, "lcd_d3"),
+ PINCTRL_PIN(WMT_PIN_LCDD4, "lcd_d4"),
+ PINCTRL_PIN(WMT_PIN_LCDD5, "lcd_d5"),
+ PINCTRL_PIN(WMT_PIN_LCDD6, "lcd_d6"),
+ PINCTRL_PIN(WMT_PIN_LCDD7, "lcd_d7"),
+ PINCTRL_PIN(WMT_PIN_LCDD8, "lcd_d8"),
+ PINCTRL_PIN(WMT_PIN_LCDD9, "lcd_d9"),
+ PINCTRL_PIN(WMT_PIN_LCDD10, "lcd_d10"),
+ PINCTRL_PIN(WMT_PIN_LCDD11, "lcd_d11"),
+ PINCTRL_PIN(WMT_PIN_LCDD12, "lcd_d12"),
+ PINCTRL_PIN(WMT_PIN_LCDD13, "lcd_d13"),
+ PINCTRL_PIN(WMT_PIN_LCDD14, "lcd_d14"),
+ PINCTRL_PIN(WMT_PIN_LCDD15, "lcd_d15"),
+ PINCTRL_PIN(WMT_PIN_LCDD16, "lcd_d16"),
+ PINCTRL_PIN(WMT_PIN_LCDD17, "lcd_d17"),
+ PINCTRL_PIN(WMT_PIN_LCDCLK, "lcd_clk"),
+ PINCTRL_PIN(WMT_PIN_LCDDEN, "lcd_den"),
+ PINCTRL_PIN(WMT_PIN_LCDLINE, "lcd_line"),
+ PINCTRL_PIN(WMT_PIN_LCDFRM, "lcd_frm"),
+ PINCTRL_PIN(WMT_PIN_LCDBIAS, "lcd_bias"),
+};
+
+/* Order of these names must match the above list */
+static const char * const vt8500_groups[] = {
+ "extgpio0",
+ "extgpio1",
+ "extgpio2",
+ "extgpio3",
+ "extgpio4",
+ "extgpio5",
+ "extgpio6",
+ "extgpio7",
+ "extgpio8",
+ "uart0_rts",
+ "uart0_txd",
+ "uart0_cts",
+ "uart0_rxd",
+ "uart1_rts",
+ "uart1_txd",
+ "uart1_cts",
+ "uart1_rxd",
+ "spi0_clk",
+ "spi0_ss",
+ "spi0_miso",
+ "spi0_mosi",
+ "spi1_clk",
+ "spi1_ss",
+ "spi1_miso",
+ "spi1_mosi",
+ "spi2_clk",
+ "spi2_ss",
+ "spi2_miso",
+ "spi2_mosi",
+ "sd_data0",
+ "sd_data1",
+ "sd_data2",
+ "sd_data3",
+ "mmc_data0",
+ "mmc_data1",
+ "mmc_data2",
+ "mmc_data3",
+ "sd_clk",
+ "sd_wp",
+ "sd_cmd",
+ "ms_data0",
+ "ms_data1",
+ "ms_data2",
+ "ms_data3",
+ "ms_clk",
+ "ms_bs",
+ "ms_ins",
+ "i2c0_scl",
+ "i2c0_sda",
+ "i2c1_scl",
+ "i2c1_sda",
+ "mii0_rxd0",
+ "mii0_rxd1",
+ "mii0_rxd2",
+ "mii0_rxd3",
+ "mii0_rxclk",
+ "mii0_rxdv",
+ "mii0_rxerr",
+ "mii0_phyrst",
+ "mii0_txd0",
+ "mii0_txd1",
+ "mii0_txd2",
+ "mii0_txd3",
+ "mii0_txclk",
+ "mii0_txen",
+ "mii0_txerr",
+ "mii0_phypd",
+ "mii0_col",
+ "mii0_crs",
+ "mii0_mdio",
+ "mii0_mdc",
+ "see_cs",
+ "see_ck",
+ "see_di",
+ "see_do",
+ "ide_dreq0",
+ "ide_dreq1",
+ "ide_iow",
+ "ide_ior",
+ "ide_dack",
+ "ide_iordy",
+ "ide_intrq",
+ "vdin0",
+ "vdin1",
+ "vdin2",
+ "vdin3",
+ "vdin4",
+ "vdin5",
+ "vdin6",
+ "vdin7",
+ "vdout0",
+ "vdout1",
+ "vdout2",
+ "vdout3",
+ "vdout4",
+ "vdout5",
+ "nand_cle0",
+ "nand_cle1",
+ "vdout6_7",
+ "vhsync",
+ "vvsync",
+ "tsdin0",
+ "tsdin1",
+ "tsdin2",
+ "tsdin3",
+ "tsdin4",
+ "tsdin5",
+ "tsdin6",
+ "tsdin7",
+ "tssync",
+ "tsvalid",
+ "tsclk",
+ "lcd_d0",
+ "lcd_d1",
+ "lcd_d2",
+ "lcd_d3",
+ "lcd_d4",
+ "lcd_d5",
+ "lcd_d6",
+ "lcd_d7",
+ "lcd_d8",
+ "lcd_d9",
+ "lcd_d10",
+ "lcd_d11",
+ "lcd_d12",
+ "lcd_d13",
+ "lcd_d14",
+ "lcd_d15",
+ "lcd_d16",
+ "lcd_d17",
+ "lcd_clk",
+ "lcd_den",
+ "lcd_line",
+ "lcd_frm",
+ "lcd_bias",
+};
+
+static int vt8500_pinctrl_probe(struct platform_device *pdev)
+{
+ struct wmt_pinctrl_data *data;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&pdev->dev, "failed to allocate data\n");
+ return -ENOMEM;
+ }
+
+ data->banks = vt8500_banks;
+ data->nbanks = ARRAY_SIZE(vt8500_banks);
+ data->pins = vt8500_pins;
+ data->npins = ARRAY_SIZE(vt8500_pins);
+ data->groups = vt8500_groups;
+ data->ngroups = ARRAY_SIZE(vt8500_groups);
+
+ return wmt_pinctrl_probe(pdev, data);
+}
+
+static int vt8500_pinctrl_remove(struct platform_device *pdev)
+{
+ return wmt_pinctrl_remove(pdev);
+}
+
+static struct of_device_id wmt_pinctrl_of_match[] = {
+ { .compatible = "via,vt8500-pinctrl" },
+ { /* sentinel */ },
+};
+
+static struct platform_driver wmt_pinctrl_driver = {
+ .probe = vt8500_pinctrl_probe,
+ .remove = vt8500_pinctrl_remove,
+ .driver = {
+ .name = "pinctrl-vt8500",
+ .owner = THIS_MODULE,
+ .of_match_table = wmt_pinctrl_of_match,
+ },
+};
+
+module_platform_driver(wmt_pinctrl_driver);
+
+MODULE_AUTHOR("Tony Prisk <linux@prisktech.co.nz>");
+MODULE_DESCRIPTION("VIA VT8500 Pincontrol driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, wmt_pinctrl_of_match);
diff --git a/drivers/pinctrl/vt8500/pinctrl-wm8505.c b/drivers/pinctrl/vt8500/pinctrl-wm8505.c
new file mode 100644
index 0000000..483ba73
--- /dev/null
+++ b/drivers/pinctrl/vt8500/pinctrl-wm8505.c
@@ -0,0 +1,532 @@
+/*
+ * Pinctrl data for Wondermedia WM8505 SoC
+ *
+ * Copyright (c) 2013 Tony Prisk <linux@prisktech.co.nz>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "pinctrl-wmt.h"
+
+/*
+ * Describe the register offsets within the GPIO memory space
+ * The dedicated external GPIOs should always be listed in bank 0
+ * so they are exported in the 0..31 range, which is what users
+ * expect.
+ *
+ * Do not reorder these banks as it will change the pin numbering
+ */
+static const struct wmt_pinctrl_bank_registers wm8505_banks[] = {
+ WMT_PINCTRL_BANK(0x64, 0x8C, 0xB4, 0xDC, NO_REG, NO_REG), /* 0 */
+ WMT_PINCTRL_BANK(0x40, 0x68, 0x90, 0xB8, NO_REG, NO_REG), /* 1 */
+ WMT_PINCTRL_BANK(0x44, 0x6C, 0x94, 0xBC, NO_REG, NO_REG), /* 2 */
+ WMT_PINCTRL_BANK(0x48, 0x70, 0x98, 0xC0, NO_REG, NO_REG), /* 3 */
+ WMT_PINCTRL_BANK(0x4C, 0x74, 0x9C, 0xC4, NO_REG, NO_REG), /* 4 */
+ WMT_PINCTRL_BANK(0x50, 0x78, 0xA0, 0xC8, NO_REG, NO_REG), /* 5 */
+ WMT_PINCTRL_BANK(0x54, 0x7C, 0xA4, 0xD0, NO_REG, NO_REG), /* 6 */
+ WMT_PINCTRL_BANK(0x58, 0x80, 0xA8, 0xD4, NO_REG, NO_REG), /* 7 */
+ WMT_PINCTRL_BANK(0x5C, 0x84, 0xAC, 0xD8, NO_REG, NO_REG), /* 8 */
+ WMT_PINCTRL_BANK(0x60, 0x88, 0xB0, 0xDC, NO_REG, NO_REG), /* 9 */
+ WMT_PINCTRL_BANK(0x500, 0x504, 0x508, 0x50C, NO_REG, NO_REG), /* 10 */
+};
+
+/* Please keep sorted by bank/bit */
+#define WMT_PIN_EXTGPIO0 WMT_PIN(0, 0)
+#define WMT_PIN_EXTGPIO1 WMT_PIN(0, 1)
+#define WMT_PIN_EXTGPIO2 WMT_PIN(0, 2)
+#define WMT_PIN_EXTGPIO3 WMT_PIN(0, 3)
+#define WMT_PIN_EXTGPIO4 WMT_PIN(0, 4)
+#define WMT_PIN_EXTGPIO5 WMT_PIN(0, 5)
+#define WMT_PIN_EXTGPIO6 WMT_PIN(0, 6)
+#define WMT_PIN_EXTGPIO7 WMT_PIN(0, 7)
+#define WMT_PIN_WAKEUP0 WMT_PIN(0, 16)
+#define WMT_PIN_WAKEUP1 WMT_PIN(0, 17)
+#define WMT_PIN_WAKEUP2 WMT_PIN(0, 18)
+#define WMT_PIN_WAKEUP3 WMT_PIN(0, 19)
+#define WMT_PIN_SUSGPIO0 WMT_PIN(0, 21)
+#define WMT_PIN_SDDATA0 WMT_PIN(1, 0)
+#define WMT_PIN_SDDATA1 WMT_PIN(1, 1)
+#define WMT_PIN_SDDATA2 WMT_PIN(1, 2)
+#define WMT_PIN_SDDATA3 WMT_PIN(1, 3)
+#define WMT_PIN_MMCDATA0 WMT_PIN(1, 4)
+#define WMT_PIN_MMCDATA1 WMT_PIN(1, 5)
+#define WMT_PIN_MMCDATA2 WMT_PIN(1, 6)
+#define WMT_PIN_MMCDATA3 WMT_PIN(1, 7)
+#define WMT_PIN_VDIN0 WMT_PIN(2, 0)
+#define WMT_PIN_VDIN1 WMT_PIN(2, 1)
+#define WMT_PIN_VDIN2 WMT_PIN(2, 2)
+#define WMT_PIN_VDIN3 WMT_PIN(2, 3)
+#define WMT_PIN_VDIN4 WMT_PIN(2, 4)
+#define WMT_PIN_VDIN5 WMT_PIN(2, 5)
+#define WMT_PIN_VDIN6 WMT_PIN(2, 6)
+#define WMT_PIN_VDIN7 WMT_PIN(2, 7)
+#define WMT_PIN_VDOUT0 WMT_PIN(2, 8)
+#define WMT_PIN_VDOUT1 WMT_PIN(2, 9)
+#define WMT_PIN_VDOUT2 WMT_PIN(2, 10)
+#define WMT_PIN_VDOUT3 WMT_PIN(2, 11)
+#define WMT_PIN_VDOUT4 WMT_PIN(2, 12)
+#define WMT_PIN_VDOUT5 WMT_PIN(2, 13)
+#define WMT_PIN_VDOUT6 WMT_PIN(2, 14)
+#define WMT_PIN_VDOUT7 WMT_PIN(2, 15)
+#define WMT_PIN_VDOUT8 WMT_PIN(2, 16)
+#define WMT_PIN_VDOUT9 WMT_PIN(2, 17)
+#define WMT_PIN_VDOUT10 WMT_PIN(2, 18)
+#define WMT_PIN_VDOUT11 WMT_PIN(2, 19)
+#define WMT_PIN_VDOUT12 WMT_PIN(2, 20)
+#define WMT_PIN_VDOUT13 WMT_PIN(2, 21)
+#define WMT_PIN_VDOUT14 WMT_PIN(2, 22)
+#define WMT_PIN_VDOUT15 WMT_PIN(2, 23)
+#define WMT_PIN_VDOUT16 WMT_PIN(2, 24)
+#define WMT_PIN_VDOUT17 WMT_PIN(2, 25)
+#define WMT_PIN_VDOUT18 WMT_PIN(2, 26)
+#define WMT_PIN_VDOUT19 WMT_PIN(2, 27)
+#define WMT_PIN_VDOUT20 WMT_PIN(2, 28)
+#define WMT_PIN_VDOUT21 WMT_PIN(2, 29)
+#define WMT_PIN_VDOUT22 WMT_PIN(2, 30)
+#define WMT_PIN_VDOUT23 WMT_PIN(2, 31)
+#define WMT_PIN_VHSYNC WMT_PIN(3, 0)
+#define WMT_PIN_VVSYNC WMT_PIN(3, 1)
+#define WMT_PIN_VGAHSYNC WMT_PIN(3, 2)
+#define WMT_PIN_VGAVSYNC WMT_PIN(3, 3)
+#define WMT_PIN_VDHSYNC WMT_PIN(3, 4)
+#define WMT_PIN_VDVSYNC WMT_PIN(3, 5)
+#define WMT_PIN_NORD0 WMT_PIN(4, 0)
+#define WMT_PIN_NORD1 WMT_PIN(4, 1)
+#define WMT_PIN_NORD2 WMT_PIN(4, 2)
+#define WMT_PIN_NORD3 WMT_PIN(4, 3)
+#define WMT_PIN_NORD4 WMT_PIN(4, 4)
+#define WMT_PIN_NORD5 WMT_PIN(4, 5)
+#define WMT_PIN_NORD6 WMT_PIN(4, 6)
+#define WMT_PIN_NORD7 WMT_PIN(4, 7)
+#define WMT_PIN_NORD8 WMT_PIN(4, 8)
+#define WMT_PIN_NORD9 WMT_PIN(4, 9)
+#define WMT_PIN_NORD10 WMT_PIN(4, 10)
+#define WMT_PIN_NORD11 WMT_PIN(4, 11)
+#define WMT_PIN_NORD12 WMT_PIN(4, 12)
+#define WMT_PIN_NORD13 WMT_PIN(4, 13)
+#define WMT_PIN_NORD14 WMT_PIN(4, 14)
+#define WMT_PIN_NORD15 WMT_PIN(4, 15)
+#define WMT_PIN_NORA0 WMT_PIN(5, 0)
+#define WMT_PIN_NORA1 WMT_PIN(5, 1)
+#define WMT_PIN_NORA2 WMT_PIN(5, 2)
+#define WMT_PIN_NORA3 WMT_PIN(5, 3)
+#define WMT_PIN_NORA4 WMT_PIN(5, 4)
+#define WMT_PIN_NORA5 WMT_PIN(5, 5)
+#define WMT_PIN_NORA6 WMT_PIN(5, 6)
+#define WMT_PIN_NORA7 WMT_PIN(5, 7)
+#define WMT_PIN_NORA8 WMT_PIN(5, 8)
+#define WMT_PIN_NORA9 WMT_PIN(5, 9)
+#define WMT_PIN_NORA10 WMT_PIN(5, 10)
+#define WMT_PIN_NORA11 WMT_PIN(5, 11)
+#define WMT_PIN_NORA12 WMT_PIN(5, 12)
+#define WMT_PIN_NORA13 WMT_PIN(5, 13)
+#define WMT_PIN_NORA14 WMT_PIN(5, 14)
+#define WMT_PIN_NORA15 WMT_PIN(5, 15)
+#define WMT_PIN_NORA16 WMT_PIN(5, 16)
+#define WMT_PIN_NORA17 WMT_PIN(5, 17)
+#define WMT_PIN_NORA18 WMT_PIN(5, 18)
+#define WMT_PIN_NORA19 WMT_PIN(5, 19)
+#define WMT_PIN_NORA20 WMT_PIN(5, 20)
+#define WMT_PIN_NORA21 WMT_PIN(5, 21)
+#define WMT_PIN_NORA22 WMT_PIN(5, 22)
+#define WMT_PIN_NORA23 WMT_PIN(5, 23)
+#define WMT_PIN_NORA24 WMT_PIN(5, 24)
+#define WMT_PIN_AC97SDI WMT_PIN(6, 0)
+#define WMT_PIN_AC97SYNC WMT_PIN(6, 1)
+#define WMT_PIN_AC97SDO WMT_PIN(6, 2)
+#define WMT_PIN_AC97BCLK WMT_PIN(6, 3)
+#define WMT_PIN_AC97RST WMT_PIN(6, 4)
+#define WMT_PIN_SFDO WMT_PIN(7, 0)
+#define WMT_PIN_SFCS0 WMT_PIN(7, 1)
+#define WMT_PIN_SFCS1 WMT_PIN(7, 2)
+#define WMT_PIN_SFCLK WMT_PIN(7, 3)
+#define WMT_PIN_SFDI WMT_PIN(7, 4)
+#define WMT_PIN_SPI0CLK WMT_PIN(8, 0)
+#define WMT_PIN_SPI0MISO WMT_PIN(8, 1)
+#define WMT_PIN_SPI0MOSI WMT_PIN(8, 2)
+#define WMT_PIN_SPI0SS WMT_PIN(8, 3)
+#define WMT_PIN_SPI1CLK WMT_PIN(8, 4)
+#define WMT_PIN_SPI1MISO WMT_PIN(8, 5)
+#define WMT_PIN_SPI1MOSI WMT_PIN(8, 6)
+#define WMT_PIN_SPI1SS WMT_PIN(8, 7)
+#define WMT_PIN_SPI2CLK WMT_PIN(8, 8)
+#define WMT_PIN_SPI2MISO WMT_PIN(8, 9)
+#define WMT_PIN_SPI2MOSI WMT_PIN(8, 10)
+#define WMT_PIN_SPI2SS WMT_PIN(8, 11)
+#define WMT_PIN_UART0_RTS WMT_PIN(9, 0)
+#define WMT_PIN_UART0_TXD WMT_PIN(9, 1)
+#define WMT_PIN_UART0_CTS WMT_PIN(9, 2)
+#define WMT_PIN_UART0_RXD WMT_PIN(9, 3)
+#define WMT_PIN_UART1_RTS WMT_PIN(9, 4)
+#define WMT_PIN_UART1_TXD WMT_PIN(9, 5)
+#define WMT_PIN_UART1_CTS WMT_PIN(9, 6)
+#define WMT_PIN_UART1_RXD WMT_PIN(9, 7)
+#define WMT_PIN_UART2_RTS WMT_PIN(9, 8)
+#define WMT_PIN_UART2_TXD WMT_PIN(9, 9)
+#define WMT_PIN_UART2_CTS WMT_PIN(9, 10)
+#define WMT_PIN_UART2_RXD WMT_PIN(9, 11)
+#define WMT_PIN_UART3_RTS WMT_PIN(9, 12)
+#define WMT_PIN_UART3_TXD WMT_PIN(9, 13)
+#define WMT_PIN_UART3_CTS WMT_PIN(9, 14)
+#define WMT_PIN_UART3_RXD WMT_PIN(9, 15)
+#define WMT_PIN_I2C0SCL WMT_PIN(10, 0)
+#define WMT_PIN_I2C0SDA WMT_PIN(10, 1)
+#define WMT_PIN_I2C1SCL WMT_PIN(10, 2)
+#define WMT_PIN_I2C1SDA WMT_PIN(10, 3)
+#define WMT_PIN_I2C2SCL WMT_PIN(10, 4)
+#define WMT_PIN_I2C2SDA WMT_PIN(10, 5)
+
+static const struct pinctrl_pin_desc wm8505_pins[] = {
+ PINCTRL_PIN(WMT_PIN_EXTGPIO0, "extgpio0"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO1, "extgpio1"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO2, "extgpio2"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO3, "extgpio3"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO4, "extgpio4"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO5, "extgpio5"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO6, "extgpio6"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO7, "extgpio7"),
+ PINCTRL_PIN(WMT_PIN_WAKEUP0, "wakeup0"),
+ PINCTRL_PIN(WMT_PIN_WAKEUP1, "wakeup1"),
+ PINCTRL_PIN(WMT_PIN_WAKEUP2, "wakeup2"),
+ PINCTRL_PIN(WMT_PIN_WAKEUP3, "wakeup3"),
+ PINCTRL_PIN(WMT_PIN_SUSGPIO0, "susgpio0"),
+ PINCTRL_PIN(WMT_PIN_SDDATA0, "sd_data0"),
+ PINCTRL_PIN(WMT_PIN_SDDATA1, "sd_data1"),
+ PINCTRL_PIN(WMT_PIN_SDDATA2, "sd_data2"),
+ PINCTRL_PIN(WMT_PIN_SDDATA3, "sd_data3"),
+ PINCTRL_PIN(WMT_PIN_MMCDATA0, "mmc_data0"),
+ PINCTRL_PIN(WMT_PIN_MMCDATA1, "mmc_data1"),
+ PINCTRL_PIN(WMT_PIN_MMCDATA2, "mmc_data2"),
+ PINCTRL_PIN(WMT_PIN_MMCDATA3, "mmc_data3"),
+ PINCTRL_PIN(WMT_PIN_VDIN0, "vdin0"),
+ PINCTRL_PIN(WMT_PIN_VDIN1, "vdin1"),
+ PINCTRL_PIN(WMT_PIN_VDIN2, "vdin2"),
+ PINCTRL_PIN(WMT_PIN_VDIN3, "vdin3"),
+ PINCTRL_PIN(WMT_PIN_VDIN4, "vdin4"),
+ PINCTRL_PIN(WMT_PIN_VDIN5, "vdin5"),
+ PINCTRL_PIN(WMT_PIN_VDIN6, "vdin6"),
+ PINCTRL_PIN(WMT_PIN_VDIN7, "vdin7"),
+ PINCTRL_PIN(WMT_PIN_VDOUT0, "vdout0"),
+ PINCTRL_PIN(WMT_PIN_VDOUT1, "vdout1"),
+ PINCTRL_PIN(WMT_PIN_VDOUT2, "vdout2"),
+ PINCTRL_PIN(WMT_PIN_VDOUT3, "vdout3"),
+ PINCTRL_PIN(WMT_PIN_VDOUT4, "vdout4"),
+ PINCTRL_PIN(WMT_PIN_VDOUT5, "vdout5"),
+ PINCTRL_PIN(WMT_PIN_VDOUT6, "vdout6"),
+ PINCTRL_PIN(WMT_PIN_VDOUT7, "vdout7"),
+ PINCTRL_PIN(WMT_PIN_VDOUT8, "vdout8"),
+ PINCTRL_PIN(WMT_PIN_VDOUT9, "vdout9"),
+ PINCTRL_PIN(WMT_PIN_VDOUT10, "vdout10"),
+ PINCTRL_PIN(WMT_PIN_VDOUT11, "vdout11"),
+ PINCTRL_PIN(WMT_PIN_VDOUT12, "vdout12"),
+ PINCTRL_PIN(WMT_PIN_VDOUT13, "vdout13"),
+ PINCTRL_PIN(WMT_PIN_VDOUT14, "vdout14"),
+ PINCTRL_PIN(WMT_PIN_VDOUT15, "vdout15"),
+ PINCTRL_PIN(WMT_PIN_VDOUT16, "vdout16"),
+ PINCTRL_PIN(WMT_PIN_VDOUT17, "vdout17"),
+ PINCTRL_PIN(WMT_PIN_VDOUT18, "vdout18"),
+ PINCTRL_PIN(WMT_PIN_VDOUT19, "vdout19"),
+ PINCTRL_PIN(WMT_PIN_VDOUT20, "vdout20"),
+ PINCTRL_PIN(WMT_PIN_VDOUT21, "vdout21"),
+ PINCTRL_PIN(WMT_PIN_VDOUT22, "vdout22"),
+ PINCTRL_PIN(WMT_PIN_VDOUT23, "vdout23"),
+ PINCTRL_PIN(WMT_PIN_VHSYNC, "v_hsync"),
+ PINCTRL_PIN(WMT_PIN_VVSYNC, "v_vsync"),
+ PINCTRL_PIN(WMT_PIN_VGAHSYNC, "vga_hsync"),
+ PINCTRL_PIN(WMT_PIN_VGAVSYNC, "vga_vsync"),
+ PINCTRL_PIN(WMT_PIN_VDHSYNC, "vd_hsync"),
+ PINCTRL_PIN(WMT_PIN_VDVSYNC, "vd_vsync"),
+ PINCTRL_PIN(WMT_PIN_NORD0, "nor_d0"),
+ PINCTRL_PIN(WMT_PIN_NORD1, "nor_d1"),
+ PINCTRL_PIN(WMT_PIN_NORD2, "nor_d2"),
+ PINCTRL_PIN(WMT_PIN_NORD3, "nor_d3"),
+ PINCTRL_PIN(WMT_PIN_NORD4, "nor_d4"),
+ PINCTRL_PIN(WMT_PIN_NORD5, "nor_d5"),
+ PINCTRL_PIN(WMT_PIN_NORD6, "nor_d6"),
+ PINCTRL_PIN(WMT_PIN_NORD7, "nor_d7"),
+ PINCTRL_PIN(WMT_PIN_NORD8, "nor_d8"),
+ PINCTRL_PIN(WMT_PIN_NORD9, "nor_d9"),
+ PINCTRL_PIN(WMT_PIN_NORD10, "nor_d10"),
+ PINCTRL_PIN(WMT_PIN_NORD11, "nor_d11"),
+ PINCTRL_PIN(WMT_PIN_NORD12, "nor_d12"),
+ PINCTRL_PIN(WMT_PIN_NORD13, "nor_d13"),
+ PINCTRL_PIN(WMT_PIN_NORD14, "nor_d14"),
+ PINCTRL_PIN(WMT_PIN_NORD15, "nor_d15"),
+ PINCTRL_PIN(WMT_PIN_NORA0, "nor_a0"),
+ PINCTRL_PIN(WMT_PIN_NORA1, "nor_a1"),
+ PINCTRL_PIN(WMT_PIN_NORA2, "nor_a2"),
+ PINCTRL_PIN(WMT_PIN_NORA3, "nor_a3"),
+ PINCTRL_PIN(WMT_PIN_NORA4, "nor_a4"),
+ PINCTRL_PIN(WMT_PIN_NORA5, "nor_a5"),
+ PINCTRL_PIN(WMT_PIN_NORA6, "nor_a6"),
+ PINCTRL_PIN(WMT_PIN_NORA7, "nor_a7"),
+ PINCTRL_PIN(WMT_PIN_NORA8, "nor_a8"),
+ PINCTRL_PIN(WMT_PIN_NORA9, "nor_a9"),
+ PINCTRL_PIN(WMT_PIN_NORA10, "nor_a10"),
+ PINCTRL_PIN(WMT_PIN_NORA11, "nor_a11"),
+ PINCTRL_PIN(WMT_PIN_NORA12, "nor_a12"),
+ PINCTRL_PIN(WMT_PIN_NORA13, "nor_a13"),
+ PINCTRL_PIN(WMT_PIN_NORA14, "nor_a14"),
+ PINCTRL_PIN(WMT_PIN_NORA15, "nor_a15"),
+ PINCTRL_PIN(WMT_PIN_NORA16, "nor_a16"),
+ PINCTRL_PIN(WMT_PIN_NORA17, "nor_a17"),
+ PINCTRL_PIN(WMT_PIN_NORA18, "nor_a18"),
+ PINCTRL_PIN(WMT_PIN_NORA19, "nor_a19"),
+ PINCTRL_PIN(WMT_PIN_NORA20, "nor_a20"),
+ PINCTRL_PIN(WMT_PIN_NORA21, "nor_a21"),
+ PINCTRL_PIN(WMT_PIN_NORA22, "nor_a22"),
+ PINCTRL_PIN(WMT_PIN_NORA23, "nor_a23"),
+ PINCTRL_PIN(WMT_PIN_NORA24, "nor_a24"),
+ PINCTRL_PIN(WMT_PIN_AC97SDI, "ac97_sdi"),
+ PINCTRL_PIN(WMT_PIN_AC97SYNC, "ac97_sync"),
+ PINCTRL_PIN(WMT_PIN_AC97SDO, "ac97_sdo"),
+ PINCTRL_PIN(WMT_PIN_AC97BCLK, "ac97_bclk"),
+ PINCTRL_PIN(WMT_PIN_AC97RST, "ac97_rst"),
+ PINCTRL_PIN(WMT_PIN_SFDO, "sf_do"),
+ PINCTRL_PIN(WMT_PIN_SFCS0, "sf_cs0"),
+ PINCTRL_PIN(WMT_PIN_SFCS1, "sf_cs1"),
+ PINCTRL_PIN(WMT_PIN_SFCLK, "sf_clk"),
+ PINCTRL_PIN(WMT_PIN_SFDI, "sf_di"),
+ PINCTRL_PIN(WMT_PIN_SPI0CLK, "spi0_clk"),
+ PINCTRL_PIN(WMT_PIN_SPI0MISO, "spi0_miso"),
+ PINCTRL_PIN(WMT_PIN_SPI0MOSI, "spi0_mosi"),
+ PINCTRL_PIN(WMT_PIN_SPI0SS, "spi0_ss"),
+ PINCTRL_PIN(WMT_PIN_SPI1CLK, "spi1_clk"),
+ PINCTRL_PIN(WMT_PIN_SPI1MISO, "spi1_miso"),
+ PINCTRL_PIN(WMT_PIN_SPI1MOSI, "spi1_mosi"),
+ PINCTRL_PIN(WMT_PIN_SPI1SS, "spi1_ss"),
+ PINCTRL_PIN(WMT_PIN_SPI2CLK, "spi2_clk"),
+ PINCTRL_PIN(WMT_PIN_SPI2MISO, "spi2_miso"),
+ PINCTRL_PIN(WMT_PIN_SPI2MOSI, "spi2_mosi"),
+ PINCTRL_PIN(WMT_PIN_SPI2SS, "spi2_ss"),
+ PINCTRL_PIN(WMT_PIN_UART0_RTS, "uart0_rts"),
+ PINCTRL_PIN(WMT_PIN_UART0_TXD, "uart0_txd"),
+ PINCTRL_PIN(WMT_PIN_UART0_CTS, "uart0_cts"),
+ PINCTRL_PIN(WMT_PIN_UART0_RXD, "uart0_rxd"),
+ PINCTRL_PIN(WMT_PIN_UART1_RTS, "uart1_rts"),
+ PINCTRL_PIN(WMT_PIN_UART1_TXD, "uart1_txd"),
+ PINCTRL_PIN(WMT_PIN_UART1_CTS, "uart1_cts"),
+ PINCTRL_PIN(WMT_PIN_UART1_RXD, "uart1_rxd"),
+ PINCTRL_PIN(WMT_PIN_UART2_RTS, "uart2_rts"),
+ PINCTRL_PIN(WMT_PIN_UART2_TXD, "uart2_txd"),
+ PINCTRL_PIN(WMT_PIN_UART2_CTS, "uart2_cts"),
+ PINCTRL_PIN(WMT_PIN_UART2_RXD, "uart2_rxd"),
+ PINCTRL_PIN(WMT_PIN_UART3_RTS, "uart3_rts"),
+ PINCTRL_PIN(WMT_PIN_UART3_TXD, "uart3_txd"),
+ PINCTRL_PIN(WMT_PIN_UART3_CTS, "uart3_cts"),
+ PINCTRL_PIN(WMT_PIN_UART3_RXD, "uart3_rxd"),
+ PINCTRL_PIN(WMT_PIN_I2C0SCL, "i2c0_scl"),
+ PINCTRL_PIN(WMT_PIN_I2C0SDA, "i2c0_sda"),
+ PINCTRL_PIN(WMT_PIN_I2C1SCL, "i2c1_scl"),
+ PINCTRL_PIN(WMT_PIN_I2C1SDA, "i2c1_sda"),
+ PINCTRL_PIN(WMT_PIN_I2C2SCL, "i2c2_scl"),
+ PINCTRL_PIN(WMT_PIN_I2C2SDA, "i2c2_sda"),
+};
+
+/* Order of these names must match the above list */
+static const char * const wm8505_groups[] = {
+ "extgpio0",
+ "extgpio1",
+ "extgpio2",
+ "extgpio3",
+ "extgpio4",
+ "extgpio5",
+ "extgpio6",
+ "extgpio7",
+ "wakeup0",
+ "wakeup1",
+ "wakeup2",
+ "wakeup3",
+ "susgpio0",
+ "sd_data0",
+ "sd_data1",
+ "sd_data2",
+ "sd_data3",
+ "mmc_data0",
+ "mmc_data1",
+ "mmc_data2",
+ "mmc_data3",
+ "vdin0",
+ "vdin1",
+ "vdin2",
+ "vdin3",
+ "vdin4",
+ "vdin5",
+ "vdin6",
+ "vdin7",
+ "vdout0",
+ "vdout1",
+ "vdout2",
+ "vdout3",
+ "vdout4",
+ "vdout5",
+ "vdout6",
+ "vdout7",
+ "vdout8",
+ "vdout9",
+ "vdout10",
+ "vdout11",
+ "vdout12",
+ "vdout13",
+ "vdout14",
+ "vdout15",
+ "vdout16",
+ "vdout17",
+ "vdout18",
+ "vdout19",
+ "vdout20",
+ "vdout21",
+ "vdout22",
+ "vdout23",
+ "v_hsync",
+ "v_vsync",
+ "vga_hsync",
+ "vga_vsync",
+ "vd_hsync",
+ "vd_vsync",
+ "nor_d0",
+ "nor_d1",
+ "nor_d2",
+ "nor_d3",
+ "nor_d4",
+ "nor_d5",
+ "nor_d6",
+ "nor_d7",
+ "nor_d8",
+ "nor_d9",
+ "nor_d10",
+ "nor_d11",
+ "nor_d12",
+ "nor_d13",
+ "nor_d14",
+ "nor_d15",
+ "nor_a0",
+ "nor_a1",
+ "nor_a2",
+ "nor_a3",
+ "nor_a4",
+ "nor_a5",
+ "nor_a6",
+ "nor_a7",
+ "nor_a8",
+ "nor_a9",
+ "nor_a10",
+ "nor_a11",
+ "nor_a12",
+ "nor_a13",
+ "nor_a14",
+ "nor_a15",
+ "nor_a16",
+ "nor_a17",
+ "nor_a18",
+ "nor_a19",
+ "nor_a20",
+ "nor_a21",
+ "nor_a22",
+ "nor_a23",
+ "nor_a24",
+ "ac97_sdi",
+ "ac97_sync",
+ "ac97_sdo",
+ "ac97_bclk",
+ "ac97_rst",
+ "sf_do",
+ "sf_cs0",
+ "sf_cs1",
+ "sf_clk",
+ "sf_di",
+ "spi0_clk",
+ "spi0_miso",
+ "spi0_mosi",
+ "spi0_ss",
+ "spi1_clk",
+ "spi1_miso",
+ "spi1_mosi",
+ "spi1_ss",
+ "spi2_clk",
+ "spi2_miso",
+ "spi2_mosi",
+ "spi2_ss",
+ "uart0_rts",
+ "uart0_txd",
+ "uart0_cts",
+ "uart0_rxd",
+ "uart1_rts",
+ "uart1_txd",
+ "uart1_cts",
+ "uart1_rxd",
+ "uart2_rts",
+ "uart2_txd",
+ "uart2_cts",
+ "uart2_rxd",
+ "uart3_rts",
+ "uart3_txd",
+ "uart3_cts",
+ "uart3_rxd",
+ "i2c0_scl",
+ "i2c0_sda",
+ "i2c1_scl",
+ "i2c1_sda",
+ "i2c2_scl",
+ "i2c2_sda",
+};
+
+static int wm8505_pinctrl_probe(struct platform_device *pdev)
+{
+ struct wmt_pinctrl_data *data;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&pdev->dev, "failed to allocate data\n");
+ return -ENOMEM;
+ }
+
+ data->banks = wm8505_banks;
+ data->nbanks = ARRAY_SIZE(wm8505_banks);
+ data->pins = wm8505_pins;
+ data->npins = ARRAY_SIZE(wm8505_pins);
+ data->groups = wm8505_groups;
+ data->ngroups = ARRAY_SIZE(wm8505_groups);
+
+ return wmt_pinctrl_probe(pdev, data);
+}
+
+static int wm8505_pinctrl_remove(struct platform_device *pdev)
+{
+ return wmt_pinctrl_remove(pdev);
+}
+
+static struct of_device_id wmt_pinctrl_of_match[] = {
+ { .compatible = "wm,wm8505-pinctrl" },
+ { /* sentinel */ },
+};
+
+static struct platform_driver wmt_pinctrl_driver = {
+ .probe = wm8505_pinctrl_probe,
+ .remove = wm8505_pinctrl_remove,
+ .driver = {
+ .name = "pinctrl-wm8505",
+ .owner = THIS_MODULE,
+ .of_match_table = wmt_pinctrl_of_match,
+ },
+};
+
+module_platform_driver(wmt_pinctrl_driver);
+
+MODULE_AUTHOR("Tony Prisk <linux@prisktech.co.nz>");
+MODULE_DESCRIPTION("Wondermedia WM8505 Pincontrol driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, wmt_pinctrl_of_match);
diff --git a/drivers/pinctrl/vt8500/pinctrl-wm8650.c b/drivers/pinctrl/vt8500/pinctrl-wm8650.c
new file mode 100644
index 0000000..7de57f0
--- /dev/null
+++ b/drivers/pinctrl/vt8500/pinctrl-wm8650.c
@@ -0,0 +1,370 @@
+/*
+ * Pinctrl data for Wondermedia WM8650 SoC
+ *
+ * Copyright (c) 2013 Tony Prisk <linux@prisktech.co.nz>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "pinctrl-wmt.h"
+
+/*
+ * Describe the register offsets within the GPIO memory space
+ * The dedicated external GPIOs should always be listed in bank 0
+ * so they are exported in the 0..31 range, which is what users
+ * expect.
+ *
+ * Do not reorder these banks as it will change the pin numbering
+ */
+static const struct wmt_pinctrl_bank_registers wm8650_banks[] = {
+ WMT_PINCTRL_BANK(0x40, 0x80, 0xC0, 0x00, 0x480, 0x4C0), /* 0 */
+ WMT_PINCTRL_BANK(0x44, 0x84, 0xC4, 0x04, 0x484, 0x4C4), /* 1 */
+ WMT_PINCTRL_BANK(0x48, 0x88, 0xC8, 0x08, 0x488, 0x4C8), /* 2 */
+ WMT_PINCTRL_BANK(0x4C, 0x8C, 0xCC, 0x0C, 0x48C, 0x4CC), /* 3 */
+ WMT_PINCTRL_BANK(0x50, 0x90, 0xD0, 0x10, 0x490, 0x4D0), /* 4 */
+ WMT_PINCTRL_BANK(0x54, 0x94, 0xD4, 0x14, 0x494, 0x4D4), /* 5 */
+ WMT_PINCTRL_BANK(0x58, 0x98, 0xD8, 0x18, 0x498, 0x4D8), /* 6 */
+ WMT_PINCTRL_BANK(0x5C, 0x9C, 0xDC, 0x1C, 0x49C, 0x4DC), /* 7 */
+};
+
+/* Please keep sorted by bank/bit */
+#define WMT_PIN_EXTGPIO0 WMT_PIN(0, 0)
+#define WMT_PIN_EXTGPIO1 WMT_PIN(0, 1)
+#define WMT_PIN_EXTGPIO2 WMT_PIN(0, 2)
+#define WMT_PIN_EXTGPIO3 WMT_PIN(0, 3)
+#define WMT_PIN_EXTGPIO4 WMT_PIN(0, 4)
+#define WMT_PIN_EXTGPIO5 WMT_PIN(0, 5)
+#define WMT_PIN_EXTGPIO6 WMT_PIN(0, 6)
+#define WMT_PIN_EXTGPIO7 WMT_PIN(0, 7)
+#define WMT_PIN_WAKEUP0 WMT_PIN(0, 16)
+#define WMT_PIN_WAKEUP1 WMT_PIN(0, 17)
+#define WMT_PIN_SUSGPIO0 WMT_PIN(0, 21)
+#define WMT_PIN_SD0CD WMT_PIN(0, 28)
+#define WMT_PIN_SD1CD WMT_PIN(0, 29)
+#define WMT_PIN_VDOUT0 WMT_PIN(1, 0)
+#define WMT_PIN_VDOUT1 WMT_PIN(1, 1)
+#define WMT_PIN_VDOUT2 WMT_PIN(1, 2)
+#define WMT_PIN_VDOUT3 WMT_PIN(1, 3)
+#define WMT_PIN_VDOUT4 WMT_PIN(1, 4)
+#define WMT_PIN_VDOUT5 WMT_PIN(1, 5)
+#define WMT_PIN_VDOUT6 WMT_PIN(1, 6)
+#define WMT_PIN_VDOUT7 WMT_PIN(1, 7)
+#define WMT_PIN_VDOUT8 WMT_PIN(1, 8)
+#define WMT_PIN_VDOUT9 WMT_PIN(1, 9)
+#define WMT_PIN_VDOUT10 WMT_PIN(1, 10)
+#define WMT_PIN_VDOUT11 WMT_PIN(1, 11)
+#define WMT_PIN_VDOUT12 WMT_PIN(1, 12)
+#define WMT_PIN_VDOUT13 WMT_PIN(1, 13)
+#define WMT_PIN_VDOUT14 WMT_PIN(1, 14)
+#define WMT_PIN_VDOUT15 WMT_PIN(1, 15)
+#define WMT_PIN_VDOUT16 WMT_PIN(1, 16)
+#define WMT_PIN_VDOUT17 WMT_PIN(1, 17)
+#define WMT_PIN_VDOUT18 WMT_PIN(1, 18)
+#define WMT_PIN_VDOUT19 WMT_PIN(1, 19)
+#define WMT_PIN_VDOUT20 WMT_PIN(1, 20)
+#define WMT_PIN_VDOUT21 WMT_PIN(1, 21)
+#define WMT_PIN_VDOUT22 WMT_PIN(1, 22)
+#define WMT_PIN_VDOUT23 WMT_PIN(1, 23)
+#define WMT_PIN_VDIN0 WMT_PIN(2, 0)
+#define WMT_PIN_VDIN1 WMT_PIN(2, 1)
+#define WMT_PIN_VDIN2 WMT_PIN(2, 2)
+#define WMT_PIN_VDIN3 WMT_PIN(2, 3)
+#define WMT_PIN_VDIN4 WMT_PIN(2, 4)
+#define WMT_PIN_VDIN5 WMT_PIN(2, 5)
+#define WMT_PIN_VDIN6 WMT_PIN(2, 6)
+#define WMT_PIN_VDIN7 WMT_PIN(2, 7)
+#define WMT_PIN_I2C1SCL WMT_PIN(2, 12)
+#define WMT_PIN_I2C1SDA WMT_PIN(2, 13)
+#define WMT_PIN_SPI0MOSI WMT_PIN(2, 24)
+#define WMT_PIN_SPI0MISO WMT_PIN(2, 25)
+#define WMT_PIN_SPI0SS0 WMT_PIN(2, 26)
+#define WMT_PIN_SPI0CLK WMT_PIN(2, 27)
+#define WMT_PIN_SD0DATA0 WMT_PIN(3, 8)
+#define WMT_PIN_SD0DATA1 WMT_PIN(3, 9)
+#define WMT_PIN_SD0DATA2 WMT_PIN(3, 10)
+#define WMT_PIN_SD0DATA3 WMT_PIN(3, 11)
+#define WMT_PIN_SD0CLK WMT_PIN(3, 12)
+#define WMT_PIN_SD0WP WMT_PIN(3, 13)
+#define WMT_PIN_SD0CMD WMT_PIN(3, 14)
+#define WMT_PIN_SD1DATA0 WMT_PIN(3, 24)
+#define WMT_PIN_SD1DATA1 WMT_PIN(3, 25)
+#define WMT_PIN_SD1DATA2 WMT_PIN(3, 26)
+#define WMT_PIN_SD1DATA3 WMT_PIN(3, 27)
+#define WMT_PIN_SD1DATA4 WMT_PIN(3, 28)
+#define WMT_PIN_SD1DATA5 WMT_PIN(3, 29)
+#define WMT_PIN_SD1DATA6 WMT_PIN(3, 30)
+#define WMT_PIN_SD1DATA7 WMT_PIN(3, 31)
+#define WMT_PIN_I2C0SCL WMT_PIN(5, 8)
+#define WMT_PIN_I2C0SDA WMT_PIN(5, 9)
+#define WMT_PIN_UART0RTS WMT_PIN(5, 16)
+#define WMT_PIN_UART0TXD WMT_PIN(5, 17)
+#define WMT_PIN_UART0CTS WMT_PIN(5, 18)
+#define WMT_PIN_UART0RXD WMT_PIN(5, 19)
+#define WMT_PIN_UART1RTS WMT_PIN(5, 20)
+#define WMT_PIN_UART1TXD WMT_PIN(5, 21)
+#define WMT_PIN_UART1CTS WMT_PIN(5, 22)
+#define WMT_PIN_UART1RXD WMT_PIN(5, 23)
+#define WMT_PIN_UART2RTS WMT_PIN(5, 24)
+#define WMT_PIN_UART2TXD WMT_PIN(5, 25)
+#define WMT_PIN_UART2CTS WMT_PIN(5, 26)
+#define WMT_PIN_UART2RXD WMT_PIN(5, 27)
+#define WMT_PIN_UART3RTS WMT_PIN(5, 28)
+#define WMT_PIN_UART3TXD WMT_PIN(5, 29)
+#define WMT_PIN_UART3CTS WMT_PIN(5, 30)
+#define WMT_PIN_UART3RXD WMT_PIN(5, 31)
+#define WMT_PIN_KPADROW0 WMT_PIN(6, 16)
+#define WMT_PIN_KPADROW1 WMT_PIN(6, 17)
+#define WMT_PIN_KPADCOL0 WMT_PIN(6, 18)
+#define WMT_PIN_KPADCOL1 WMT_PIN(6, 19)
+#define WMT_PIN_SD1CLK WMT_PIN(7, 0)
+#define WMT_PIN_SD1CMD WMT_PIN(7, 1)
+#define WMT_PIN_SD1WP WMT_PIN(7, 13)
+
+static const struct pinctrl_pin_desc wm8650_pins[] = {
+ PINCTRL_PIN(WMT_PIN_EXTGPIO0, "extgpio0"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO1, "extgpio1"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO2, "extgpio2"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO3, "extgpio3"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO4, "extgpio4"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO5, "extgpio5"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO6, "extgpio6"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO7, "extgpio7"),
+ PINCTRL_PIN(WMT_PIN_WAKEUP0, "wakeup0"),
+ PINCTRL_PIN(WMT_PIN_WAKEUP1, "wakeup1"),
+ PINCTRL_PIN(WMT_PIN_SUSGPIO0, "susgpio0"),
+ PINCTRL_PIN(WMT_PIN_SD0CD, "sd0_cd"),
+ PINCTRL_PIN(WMT_PIN_SD1CD, "sd1_cd"),
+ PINCTRL_PIN(WMT_PIN_VDOUT0, "vdout0"),
+ PINCTRL_PIN(WMT_PIN_VDOUT1, "vdout1"),
+ PINCTRL_PIN(WMT_PIN_VDOUT2, "vdout2"),
+ PINCTRL_PIN(WMT_PIN_VDOUT3, "vdout3"),
+ PINCTRL_PIN(WMT_PIN_VDOUT4, "vdout4"),
+ PINCTRL_PIN(WMT_PIN_VDOUT5, "vdout5"),
+ PINCTRL_PIN(WMT_PIN_VDOUT6, "vdout6"),
+ PINCTRL_PIN(WMT_PIN_VDOUT7, "vdout7"),
+ PINCTRL_PIN(WMT_PIN_VDOUT8, "vdout8"),
+ PINCTRL_PIN(WMT_PIN_VDOUT9, "vdout9"),
+ PINCTRL_PIN(WMT_PIN_VDOUT10, "vdout10"),
+ PINCTRL_PIN(WMT_PIN_VDOUT11, "vdout11"),
+ PINCTRL_PIN(WMT_PIN_VDOUT12, "vdout12"),
+ PINCTRL_PIN(WMT_PIN_VDOUT13, "vdout13"),
+ PINCTRL_PIN(WMT_PIN_VDOUT14, "vdout14"),
+ PINCTRL_PIN(WMT_PIN_VDOUT15, "vdout15"),
+ PINCTRL_PIN(WMT_PIN_VDOUT16, "vdout16"),
+ PINCTRL_PIN(WMT_PIN_VDOUT17, "vdout17"),
+ PINCTRL_PIN(WMT_PIN_VDOUT18, "vdout18"),
+ PINCTRL_PIN(WMT_PIN_VDOUT19, "vdout19"),
+ PINCTRL_PIN(WMT_PIN_VDOUT20, "vdout20"),
+ PINCTRL_PIN(WMT_PIN_VDOUT21, "vdout21"),
+ PINCTRL_PIN(WMT_PIN_VDOUT22, "vdout22"),
+ PINCTRL_PIN(WMT_PIN_VDOUT23, "vdout23"),
+ PINCTRL_PIN(WMT_PIN_VDIN0, "vdin0"),
+ PINCTRL_PIN(WMT_PIN_VDIN1, "vdin1"),
+ PINCTRL_PIN(WMT_PIN_VDIN2, "vdin2"),
+ PINCTRL_PIN(WMT_PIN_VDIN3, "vdin3"),
+ PINCTRL_PIN(WMT_PIN_VDIN4, "vdin4"),
+ PINCTRL_PIN(WMT_PIN_VDIN5, "vdin5"),
+ PINCTRL_PIN(WMT_PIN_VDIN6, "vdin6"),
+ PINCTRL_PIN(WMT_PIN_VDIN7, "vdin7"),
+ PINCTRL_PIN(WMT_PIN_I2C1SCL, "i2c1_scl"),
+ PINCTRL_PIN(WMT_PIN_I2C1SDA, "i2c1_sda"),
+ PINCTRL_PIN(WMT_PIN_SPI0MOSI, "spi0_mosi"),
+ PINCTRL_PIN(WMT_PIN_SPI0MISO, "spi0_miso"),
+ PINCTRL_PIN(WMT_PIN_SPI0SS0, "spi0_ss0"),
+ PINCTRL_PIN(WMT_PIN_SPI0CLK, "spi0_clk"),
+ PINCTRL_PIN(WMT_PIN_SD0DATA0, "sd0_data0"),
+ PINCTRL_PIN(WMT_PIN_SD0DATA1, "sd0_data1"),
+ PINCTRL_PIN(WMT_PIN_SD0DATA2, "sd0_data2"),
+ PINCTRL_PIN(WMT_PIN_SD0DATA3, "sd0_data3"),
+ PINCTRL_PIN(WMT_PIN_SD0CLK, "sd0_clk"),
+ PINCTRL_PIN(WMT_PIN_SD0WP, "sd0_wp"),
+ PINCTRL_PIN(WMT_PIN_SD0CMD, "sd0_cmd"),
+ PINCTRL_PIN(WMT_PIN_SD1DATA0, "sd1_data0"),
+ PINCTRL_PIN(WMT_PIN_SD1DATA1, "sd1_data1"),
+ PINCTRL_PIN(WMT_PIN_SD1DATA2, "sd1_data2"),
+ PINCTRL_PIN(WMT_PIN_SD1DATA3, "sd1_data3"),
+ PINCTRL_PIN(WMT_PIN_SD1DATA4, "sd1_data4"),
+ PINCTRL_PIN(WMT_PIN_SD1DATA5, "sd1_data5"),
+ PINCTRL_PIN(WMT_PIN_SD1DATA6, "sd1_data6"),
+ PINCTRL_PIN(WMT_PIN_SD1DATA7, "sd1_data7"),
+ PINCTRL_PIN(WMT_PIN_I2C0SCL, "i2c0_scl"),
+ PINCTRL_PIN(WMT_PIN_I2C0SDA, "i2c0_sda"),
+ PINCTRL_PIN(WMT_PIN_UART0RTS, "uart0_rts"),
+ PINCTRL_PIN(WMT_PIN_UART0TXD, "uart0_txd"),
+ PINCTRL_PIN(WMT_PIN_UART0CTS, "uart0_cts"),
+ PINCTRL_PIN(WMT_PIN_UART0RXD, "uart0_rxd"),
+ PINCTRL_PIN(WMT_PIN_UART1RTS, "uart1_rts"),
+ PINCTRL_PIN(WMT_PIN_UART1TXD, "uart1_txd"),
+ PINCTRL_PIN(WMT_PIN_UART1CTS, "uart1_cts"),
+ PINCTRL_PIN(WMT_PIN_UART1RXD, "uart1_rxd"),
+ PINCTRL_PIN(WMT_PIN_UART2RTS, "uart2_rts"),
+ PINCTRL_PIN(WMT_PIN_UART2TXD, "uart2_txd"),
+ PINCTRL_PIN(WMT_PIN_UART2CTS, "uart2_cts"),
+ PINCTRL_PIN(WMT_PIN_UART2RXD, "uart2_rxd"),
+ PINCTRL_PIN(WMT_PIN_UART3RTS, "uart3_rts"),
+ PINCTRL_PIN(WMT_PIN_UART3TXD, "uart3_txd"),
+ PINCTRL_PIN(WMT_PIN_UART3CTS, "uart3_cts"),
+ PINCTRL_PIN(WMT_PIN_UART3RXD, "uart3_rxd"),
+ PINCTRL_PIN(WMT_PIN_KPADROW0, "kpadrow0"),
+ PINCTRL_PIN(WMT_PIN_KPADROW1, "kpadrow1"),
+ PINCTRL_PIN(WMT_PIN_KPADCOL0, "kpadcol0"),
+ PINCTRL_PIN(WMT_PIN_KPADCOL1, "kpadcol1"),
+ PINCTRL_PIN(WMT_PIN_SD1CLK, "sd1_clk"),
+ PINCTRL_PIN(WMT_PIN_SD1CMD, "sd1_cmd"),
+ PINCTRL_PIN(WMT_PIN_SD1WP, "sd1_wp"),
+};
+
+/* Order of these names must match the above list */
+static const char * const wm8650_groups[] = {
+ "extgpio0",
+ "extgpio1",
+ "extgpio2",
+ "extgpio3",
+ "extgpio4",
+ "extgpio5",
+ "extgpio6",
+ "extgpio7",
+ "wakeup0",
+ "wakeup1",
+ "susgpio0",
+ "sd0_cd",
+ "sd1_cd",
+ "vdout0",
+ "vdout1",
+ "vdout2",
+ "vdout3",
+ "vdout4",
+ "vdout5",
+ "vdout6",
+ "vdout7",
+ "vdout8",
+ "vdout9",
+ "vdout10",
+ "vdout11",
+ "vdout12",
+ "vdout13",
+ "vdout14",
+ "vdout15",
+ "vdout16",
+ "vdout17",
+ "vdout18",
+ "vdout19",
+ "vdout20",
+ "vdout21",
+ "vdout22",
+ "vdout23",
+ "vdin0",
+ "vdin1",
+ "vdin2",
+ "vdin3",
+ "vdin4",
+ "vdin5",
+ "vdin6",
+ "vdin7",
+ "i2c1_scl",
+ "i2c1_sda",
+ "spi0_mosi",
+ "spi0_miso",
+ "spi0_ss0",
+ "spi0_clk",
+ "sd0_data0",
+ "sd0_data1",
+ "sd0_data2",
+ "sd0_data3",
+ "sd0_clk",
+ "sd0_wp",
+ "sd0_cmd",
+ "sd1_data0",
+ "sd1_data1",
+ "sd1_data2",
+ "sd1_data3",
+ "sd1_data4",
+ "sd1_data5",
+ "sd1_data6",
+ "sd1_data7",
+ "i2c0_scl",
+ "i2c0_sda",
+ "uart0_rts",
+ "uart0_txd",
+ "uart0_cts",
+ "uart0_rxd",
+ "uart1_rts",
+ "uart1_txd",
+ "uart1_cts",
+ "uart1_rxd",
+ "uart2_rts",
+ "uart2_txd",
+ "uart2_cts",
+ "uart2_rxd",
+ "uart3_rts",
+ "uart3_txd",
+ "uart3_cts",
+ "uart3_rxd",
+ "kpadrow0",
+ "kpadrow1",
+ "kpadcol0",
+ "kpadcol1",
+ "sd1_clk",
+ "sd1_cmd",
+ "sd1_wp",
+};
+
+static int wm8650_pinctrl_probe(struct platform_device *pdev)
+{
+ struct wmt_pinctrl_data *data;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&pdev->dev, "failed to allocate data\n");
+ return -ENOMEM;
+ }
+
+ data->banks = wm8650_banks;
+ data->nbanks = ARRAY_SIZE(wm8650_banks);
+ data->pins = wm8650_pins;
+ data->npins = ARRAY_SIZE(wm8650_pins);
+ data->groups = wm8650_groups;
+ data->ngroups = ARRAY_SIZE(wm8650_groups);
+
+ return wmt_pinctrl_probe(pdev, data);
+}
+
+static int wm8650_pinctrl_remove(struct platform_device *pdev)
+{
+ return wmt_pinctrl_remove(pdev);
+}
+
+static struct of_device_id wmt_pinctrl_of_match[] = {
+ { .compatible = "wm,wm8650-pinctrl" },
+ { /* sentinel */ },
+};
+
+static struct platform_driver wmt_pinctrl_driver = {
+ .probe = wm8650_pinctrl_probe,
+ .remove = wm8650_pinctrl_remove,
+ .driver = {
+ .name = "pinctrl-wm8650",
+ .owner = THIS_MODULE,
+ .of_match_table = wmt_pinctrl_of_match,
+ },
+};
+
+module_platform_driver(wmt_pinctrl_driver);
+
+MODULE_AUTHOR("Tony Prisk <linux@prisktech.co.nz>");
+MODULE_DESCRIPTION("Wondermedia WM8650 Pincontrol driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, wmt_pinctrl_of_match);
diff --git a/drivers/pinctrl/vt8500/pinctrl-wm8750.c b/drivers/pinctrl/vt8500/pinctrl-wm8750.c
new file mode 100644
index 0000000..b964cc5
--- /dev/null
+++ b/drivers/pinctrl/vt8500/pinctrl-wm8750.c
@@ -0,0 +1,409 @@
+/*
+ * Pinctrl data for Wondermedia WM8750 SoC
+ *
+ * Copyright (c) 2013 Tony Prisk <linux@prisktech.co.nz>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "pinctrl-wmt.h"
+
+/*
+ * Describe the register offsets within the GPIO memory space
+ * The dedicated external GPIOs should always be listed in bank 0
+ * so they are exported in the 0..31 range, which is what users
+ * expect.
+ *
+ * Do not reorder these banks as it will change the pin numbering
+ */
+static const struct wmt_pinctrl_bank_registers wm8750_banks[] = {
+ WMT_PINCTRL_BANK(0x40, 0x80, 0xC0, 0x00, 0x480, 0x4C0), /* 0 */
+ WMT_PINCTRL_BANK(0x44, 0x84, 0xC4, 0x04, 0x484, 0x4C4), /* 1 */
+ WMT_PINCTRL_BANK(0x48, 0x88, 0xC8, 0x08, 0x488, 0x4C8), /* 2 */
+ WMT_PINCTRL_BANK(0x4C, 0x8C, 0xCC, 0x0C, 0x48C, 0x4CC), /* 3 */
+ WMT_PINCTRL_BANK(0x50, 0x90, 0xD0, 0x10, 0x490, 0x4D0), /* 4 */
+ WMT_PINCTRL_BANK(0x54, 0x94, 0xD4, 0x14, 0x494, 0x4D4), /* 5 */
+ WMT_PINCTRL_BANK(0x58, 0x98, 0xD8, 0x18, 0x498, 0x4D8), /* 6 */
+ WMT_PINCTRL_BANK(0x5C, 0x9C, 0xDC, 0x1C, 0x49C, 0x4DC), /* 7 */
+ WMT_PINCTRL_BANK(0x60, 0xA0, 0xE0, 0x20, 0x4A0, 0x4E0), /* 8 */
+ WMT_PINCTRL_BANK(0x70, 0xB0, 0xF0, 0x30, 0x4B0, 0x4F0), /* 9 */
+ WMT_PINCTRL_BANK(0x7C, 0xBC, 0xDC, 0x3C, 0x4BC, 0x4FC), /* 10 */
+};
+
+/* Please keep sorted by bank/bit */
+#define WMT_PIN_EXTGPIO0 WMT_PIN(0, 0)
+#define WMT_PIN_EXTGPIO1 WMT_PIN(0, 1)
+#define WMT_PIN_EXTGPIO2 WMT_PIN(0, 2)
+#define WMT_PIN_EXTGPIO3 WMT_PIN(0, 3)
+#define WMT_PIN_EXTGPIO4 WMT_PIN(0, 4)
+#define WMT_PIN_EXTGPIO5 WMT_PIN(0, 5)
+#define WMT_PIN_EXTGPIO6 WMT_PIN(0, 6)
+#define WMT_PIN_EXTGPIO7 WMT_PIN(0, 7)
+#define WMT_PIN_WAKEUP0 WMT_PIN(0, 16)
+#define WMT_PIN_WAKEUP1 WMT_PIN(0, 17)
+#define WMT_PIN_SD0CD WMT_PIN(0, 28)
+#define WMT_PIN_VDOUT0 WMT_PIN(1, 0)
+#define WMT_PIN_VDOUT1 WMT_PIN(1, 1)
+#define WMT_PIN_VDOUT2 WMT_PIN(1, 2)
+#define WMT_PIN_VDOUT3 WMT_PIN(1, 3)
+#define WMT_PIN_VDOUT4 WMT_PIN(1, 4)
+#define WMT_PIN_VDOUT5 WMT_PIN(1, 5)
+#define WMT_PIN_VDOUT6 WMT_PIN(1, 6)
+#define WMT_PIN_VDOUT7 WMT_PIN(1, 7)
+#define WMT_PIN_VDOUT8 WMT_PIN(1, 8)
+#define WMT_PIN_VDOUT9 WMT_PIN(1, 9)
+#define WMT_PIN_VDOUT10 WMT_PIN(1, 10)
+#define WMT_PIN_VDOUT11 WMT_PIN(1, 11)
+#define WMT_PIN_VDOUT12 WMT_PIN(1, 12)
+#define WMT_PIN_VDOUT13 WMT_PIN(1, 13)
+#define WMT_PIN_VDOUT14 WMT_PIN(1, 14)
+#define WMT_PIN_VDOUT15 WMT_PIN(1, 15)
+#define WMT_PIN_VDOUT16 WMT_PIN(1, 16)
+#define WMT_PIN_VDOUT17 WMT_PIN(1, 17)
+#define WMT_PIN_VDOUT18 WMT_PIN(1, 18)
+#define WMT_PIN_VDOUT19 WMT_PIN(1, 19)
+#define WMT_PIN_VDOUT20 WMT_PIN(1, 20)
+#define WMT_PIN_VDOUT21 WMT_PIN(1, 21)
+#define WMT_PIN_VDOUT22 WMT_PIN(1, 22)
+#define WMT_PIN_VDOUT23 WMT_PIN(1, 23)
+#define WMT_PIN_VDIN0 WMT_PIN(2, 0)
+#define WMT_PIN_VDIN1 WMT_PIN(2, 1)
+#define WMT_PIN_VDIN2 WMT_PIN(2, 2)
+#define WMT_PIN_VDIN3 WMT_PIN(2, 3)
+#define WMT_PIN_VDIN4 WMT_PIN(2, 4)
+#define WMT_PIN_VDIN5 WMT_PIN(2, 5)
+#define WMT_PIN_VDIN6 WMT_PIN(2, 6)
+#define WMT_PIN_VDIN7 WMT_PIN(2, 7)
+#define WMT_PIN_SPI0_MOSI WMT_PIN(2, 24)
+#define WMT_PIN_SPI0_MISO WMT_PIN(2, 25)
+#define WMT_PIN_SPI0_SS WMT_PIN(2, 26)
+#define WMT_PIN_SPI0_CLK WMT_PIN(2, 27)
+#define WMT_PIN_SPI0_SSB WMT_PIN(2, 28)
+#define WMT_PIN_SD0CLK WMT_PIN(3, 17)
+#define WMT_PIN_SD0CMD WMT_PIN(3, 18)
+#define WMT_PIN_SD0WP WMT_PIN(3, 19)
+#define WMT_PIN_SD0DATA0 WMT_PIN(3, 20)
+#define WMT_PIN_SD0DATA1 WMT_PIN(3, 21)
+#define WMT_PIN_SD0DATA2 WMT_PIN(3, 22)
+#define WMT_PIN_SD0DATA3 WMT_PIN(3, 23)
+#define WMT_PIN_SD1DATA0 WMT_PIN(3, 24)
+#define WMT_PIN_SD1DATA1 WMT_PIN(3, 25)
+#define WMT_PIN_SD1DATA2 WMT_PIN(3, 26)
+#define WMT_PIN_SD1DATA3 WMT_PIN(3, 27)
+#define WMT_PIN_SD1DATA4 WMT_PIN(3, 28)
+#define WMT_PIN_SD1DATA5 WMT_PIN(3, 29)
+#define WMT_PIN_SD1DATA6 WMT_PIN(3, 30)
+#define WMT_PIN_SD1DATA7 WMT_PIN(3, 31)
+#define WMT_PIN_I2C0_SCL WMT_PIN(5, 8)
+#define WMT_PIN_I2C0_SDA WMT_PIN(5, 9)
+#define WMT_PIN_I2C1_SCL WMT_PIN(5, 10)
+#define WMT_PIN_I2C1_SDA WMT_PIN(5, 11)
+#define WMT_PIN_I2C2_SCL WMT_PIN(5, 12)
+#define WMT_PIN_I2C2_SDA WMT_PIN(5, 13)
+#define WMT_PIN_UART0_RTS WMT_PIN(5, 16)
+#define WMT_PIN_UART0_TXD WMT_PIN(5, 17)
+#define WMT_PIN_UART0_CTS WMT_PIN(5, 18)
+#define WMT_PIN_UART0_RXD WMT_PIN(5, 19)
+#define WMT_PIN_UART1_RTS WMT_PIN(5, 20)
+#define WMT_PIN_UART1_TXD WMT_PIN(5, 21)
+#define WMT_PIN_UART1_CTS WMT_PIN(5, 22)
+#define WMT_PIN_UART1_RXD WMT_PIN(5, 23)
+#define WMT_PIN_UART2_RTS WMT_PIN(5, 24)
+#define WMT_PIN_UART2_TXD WMT_PIN(5, 25)
+#define WMT_PIN_UART2_CTS WMT_PIN(5, 26)
+#define WMT_PIN_UART2_RXD WMT_PIN(5, 27)
+#define WMT_PIN_UART3_RTS WMT_PIN(5, 28)
+#define WMT_PIN_UART3_TXD WMT_PIN(5, 29)
+#define WMT_PIN_UART3_CTS WMT_PIN(5, 30)
+#define WMT_PIN_UART3_RXD WMT_PIN(5, 31)
+#define WMT_PIN_SD2CD WMT_PIN(6, 0)
+#define WMT_PIN_SD2DATA3 WMT_PIN(6, 1)
+#define WMT_PIN_SD2DATA0 WMT_PIN(6, 2)
+#define WMT_PIN_SD2WP WMT_PIN(6, 3)
+#define WMT_PIN_SD2DATA1 WMT_PIN(6, 4)
+#define WMT_PIN_SD2DATA2 WMT_PIN(6, 5)
+#define WMT_PIN_SD2CMD WMT_PIN(6, 6)
+#define WMT_PIN_SD2CLK WMT_PIN(6, 7)
+#define WMT_PIN_SD2PWR WMT_PIN(6, 9)
+#define WMT_PIN_SD1CLK WMT_PIN(7, 0)
+#define WMT_PIN_SD1CMD WMT_PIN(7, 1)
+#define WMT_PIN_SD1PWR WMT_PIN(7, 10)
+#define WMT_PIN_SD1WP WMT_PIN(7, 11)
+#define WMT_PIN_SD1CD WMT_PIN(7, 12)
+#define WMT_PIN_SPI0SS3 WMT_PIN(7, 24)
+#define WMT_PIN_SPI0SS2 WMT_PIN(7, 25)
+#define WMT_PIN_PWMOUT1 WMT_PIN(7, 26)
+#define WMT_PIN_PWMOUT0 WMT_PIN(7, 27)
+
+static const struct pinctrl_pin_desc wm8750_pins[] = {
+ PINCTRL_PIN(WMT_PIN_EXTGPIO0, "extgpio0"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO1, "extgpio1"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO2, "extgpio2"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO3, "extgpio3"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO4, "extgpio4"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO5, "extgpio5"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO6, "extgpio6"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO7, "extgpio7"),
+ PINCTRL_PIN(WMT_PIN_WAKEUP0, "wakeup0"),
+ PINCTRL_PIN(WMT_PIN_WAKEUP1, "wakeup1"),
+ PINCTRL_PIN(WMT_PIN_SD0CD, "sd0_cd"),
+ PINCTRL_PIN(WMT_PIN_VDOUT0, "vdout0"),
+ PINCTRL_PIN(WMT_PIN_VDOUT1, "vdout1"),
+ PINCTRL_PIN(WMT_PIN_VDOUT2, "vdout2"),
+ PINCTRL_PIN(WMT_PIN_VDOUT3, "vdout3"),
+ PINCTRL_PIN(WMT_PIN_VDOUT4, "vdout4"),
+ PINCTRL_PIN(WMT_PIN_VDOUT5, "vdout5"),
+ PINCTRL_PIN(WMT_PIN_VDOUT6, "vdout6"),
+ PINCTRL_PIN(WMT_PIN_VDOUT7, "vdout7"),
+ PINCTRL_PIN(WMT_PIN_VDOUT8, "vdout8"),
+ PINCTRL_PIN(WMT_PIN_VDOUT9, "vdout9"),
+ PINCTRL_PIN(WMT_PIN_VDOUT10, "vdout10"),
+ PINCTRL_PIN(WMT_PIN_VDOUT11, "vdout11"),
+ PINCTRL_PIN(WMT_PIN_VDOUT12, "vdout12"),
+ PINCTRL_PIN(WMT_PIN_VDOUT13, "vdout13"),
+ PINCTRL_PIN(WMT_PIN_VDOUT14, "vdout14"),
+ PINCTRL_PIN(WMT_PIN_VDOUT15, "vdout15"),
+ PINCTRL_PIN(WMT_PIN_VDOUT16, "vdout16"),
+ PINCTRL_PIN(WMT_PIN_VDOUT17, "vdout17"),
+ PINCTRL_PIN(WMT_PIN_VDOUT18, "vdout18"),
+ PINCTRL_PIN(WMT_PIN_VDOUT19, "vdout19"),
+ PINCTRL_PIN(WMT_PIN_VDOUT20, "vdout20"),
+ PINCTRL_PIN(WMT_PIN_VDOUT21, "vdout21"),
+ PINCTRL_PIN(WMT_PIN_VDOUT22, "vdout22"),
+ PINCTRL_PIN(WMT_PIN_VDOUT23, "vdout23"),
+ PINCTRL_PIN(WMT_PIN_VDIN0, "vdin0"),
+ PINCTRL_PIN(WMT_PIN_VDIN1, "vdin1"),
+ PINCTRL_PIN(WMT_PIN_VDIN2, "vdin2"),
+ PINCTRL_PIN(WMT_PIN_VDIN3, "vdin3"),
+ PINCTRL_PIN(WMT_PIN_VDIN4, "vdin4"),
+ PINCTRL_PIN(WMT_PIN_VDIN5, "vdin5"),
+ PINCTRL_PIN(WMT_PIN_VDIN6, "vdin6"),
+ PINCTRL_PIN(WMT_PIN_VDIN7, "vdin7"),
+ PINCTRL_PIN(WMT_PIN_SPI0_MOSI, "spi0_mosi"),
+ PINCTRL_PIN(WMT_PIN_SPI0_MISO, "spi0_miso"),
+ PINCTRL_PIN(WMT_PIN_SPI0_SS, "spi0_ss"),
+ PINCTRL_PIN(WMT_PIN_SPI0_CLK, "spi0_clk"),
+ PINCTRL_PIN(WMT_PIN_SPI0_SSB, "spi0_ssb"),
+ PINCTRL_PIN(WMT_PIN_SD0CLK, "sd0_clk"),
+ PINCTRL_PIN(WMT_PIN_SD0CMD, "sd0_cmd"),
+ PINCTRL_PIN(WMT_PIN_SD0WP, "sd0_wp"),
+ PINCTRL_PIN(WMT_PIN_SD0DATA0, "sd0_data0"),
+ PINCTRL_PIN(WMT_PIN_SD0DATA1, "sd0_data1"),
+ PINCTRL_PIN(WMT_PIN_SD0DATA2, "sd0_data2"),
+ PINCTRL_PIN(WMT_PIN_SD0DATA3, "sd0_data3"),
+ PINCTRL_PIN(WMT_PIN_SD1DATA0, "sd1_data0"),
+ PINCTRL_PIN(WMT_PIN_SD1DATA1, "sd1_data1"),
+ PINCTRL_PIN(WMT_PIN_SD1DATA2, "sd1_data2"),
+ PINCTRL_PIN(WMT_PIN_SD1DATA3, "sd1_data3"),
+ PINCTRL_PIN(WMT_PIN_SD1DATA4, "sd1_data4"),
+ PINCTRL_PIN(WMT_PIN_SD1DATA5, "sd1_data5"),
+ PINCTRL_PIN(WMT_PIN_SD1DATA6, "sd1_data6"),
+ PINCTRL_PIN(WMT_PIN_SD1DATA7, "sd1_data7"),
+ PINCTRL_PIN(WMT_PIN_I2C0_SCL, "i2c0_scl"),
+ PINCTRL_PIN(WMT_PIN_I2C0_SDA, "i2c0_sda"),
+ PINCTRL_PIN(WMT_PIN_I2C1_SCL, "i2c1_scl"),
+ PINCTRL_PIN(WMT_PIN_I2C1_SDA, "i2c1_sda"),
+ PINCTRL_PIN(WMT_PIN_I2C2_SCL, "i2c2_scl"),
+ PINCTRL_PIN(WMT_PIN_I2C2_SDA, "i2c2_sda"),
+ PINCTRL_PIN(WMT_PIN_UART0_RTS, "uart0_rts"),
+ PINCTRL_PIN(WMT_PIN_UART0_TXD, "uart0_txd"),
+ PINCTRL_PIN(WMT_PIN_UART0_CTS, "uart0_cts"),
+ PINCTRL_PIN(WMT_PIN_UART0_RXD, "uart0_rxd"),
+ PINCTRL_PIN(WMT_PIN_UART1_RTS, "uart1_rts"),
+ PINCTRL_PIN(WMT_PIN_UART1_TXD, "uart1_txd"),
+ PINCTRL_PIN(WMT_PIN_UART1_CTS, "uart1_cts"),
+ PINCTRL_PIN(WMT_PIN_UART1_RXD, "uart1_rxd"),
+ PINCTRL_PIN(WMT_PIN_UART2_RTS, "uart2_rts"),
+ PINCTRL_PIN(WMT_PIN_UART2_TXD, "uart2_txd"),
+ PINCTRL_PIN(WMT_PIN_UART2_CTS, "uart2_cts"),
+ PINCTRL_PIN(WMT_PIN_UART2_RXD, "uart2_rxd"),
+ PINCTRL_PIN(WMT_PIN_UART3_RTS, "uart3_rts"),
+ PINCTRL_PIN(WMT_PIN_UART3_TXD, "uart3_txd"),
+ PINCTRL_PIN(WMT_PIN_UART3_CTS, "uart3_cts"),
+ PINCTRL_PIN(WMT_PIN_UART3_RXD, "uart3_rxd"),
+ PINCTRL_PIN(WMT_PIN_SD2CD, "sd2_cd"),
+ PINCTRL_PIN(WMT_PIN_SD2DATA3, "sd2_data3"),
+ PINCTRL_PIN(WMT_PIN_SD2DATA0, "sd2_data0"),
+ PINCTRL_PIN(WMT_PIN_SD2WP, "sd2_wp"),
+ PINCTRL_PIN(WMT_PIN_SD2DATA1, "sd2_data1"),
+ PINCTRL_PIN(WMT_PIN_SD2DATA2, "sd2_data2"),
+ PINCTRL_PIN(WMT_PIN_SD2CMD, "sd2_cmd"),
+ PINCTRL_PIN(WMT_PIN_SD2CLK, "sd2_clk"),
+ PINCTRL_PIN(WMT_PIN_SD2PWR, "sd2_pwr"),
+ PINCTRL_PIN(WMT_PIN_SD1CLK, "sd1_clk"),
+ PINCTRL_PIN(WMT_PIN_SD1CMD, "sd1_cmd"),
+ PINCTRL_PIN(WMT_PIN_SD1PWR, "sd1_pwr"),
+ PINCTRL_PIN(WMT_PIN_SD1WP, "sd1_wp"),
+ PINCTRL_PIN(WMT_PIN_SD1CD, "sd1_cd"),
+ PINCTRL_PIN(WMT_PIN_SPI0SS3, "spi0_ss3"),
+ PINCTRL_PIN(WMT_PIN_SPI0SS2, "spi0_ss2"),
+ PINCTRL_PIN(WMT_PIN_PWMOUT1, "pwmout1"),
+ PINCTRL_PIN(WMT_PIN_PWMOUT0, "pwmout0"),
+};
+
+/* Order of these names must match the above list */
+static const char * const wm8750_groups[] = {
+ "extgpio0",
+ "extgpio1",
+ "extgpio2",
+ "extgpio3",
+ "extgpio4",
+ "extgpio5",
+ "extgpio6",
+ "extgpio7",
+ "wakeup0",
+ "wakeup1",
+ "sd0_cd",
+ "vdout0",
+ "vdout1",
+ "vdout2",
+ "vdout3",
+ "vdout4",
+ "vdout5",
+ "vdout6",
+ "vdout7",
+ "vdout8",
+ "vdout9",
+ "vdout10",
+ "vdout11",
+ "vdout12",
+ "vdout13",
+ "vdout14",
+ "vdout15",
+ "vdout16",
+ "vdout17",
+ "vdout18",
+ "vdout19",
+ "vdout20",
+ "vdout21",
+ "vdout22",
+ "vdout23",
+ "vdin0",
+ "vdin1",
+ "vdin2",
+ "vdin3",
+ "vdin4",
+ "vdin5",
+ "vdin6",
+ "vdin7",
+ "spi0_mosi",
+ "spi0_miso",
+ "spi0_ss",
+ "spi0_clk",
+ "spi0_ssb",
+ "sd0_clk",
+ "sd0_cmd",
+ "sd0_wp",
+ "sd0_data0",
+ "sd0_data1",
+ "sd0_data2",
+ "sd0_data3",
+ "sd1_data0",
+ "sd1_data1",
+ "sd1_data2",
+ "sd1_data3",
+ "sd1_data4",
+ "sd1_data5",
+ "sd1_data6",
+ "sd1_data7",
+ "i2c0_scl",
+ "i2c0_sda",
+ "i2c1_scl",
+ "i2c1_sda",
+ "i2c2_scl",
+ "i2c2_sda",
+ "uart0_rts",
+ "uart0_txd",
+ "uart0_cts",
+ "uart0_rxd",
+ "uart1_rts",
+ "uart1_txd",
+ "uart1_cts",
+ "uart1_rxd",
+ "uart2_rts",
+ "uart2_txd",
+ "uart2_cts",
+ "uart2_rxd",
+ "uart3_rts",
+ "uart3_txd",
+ "uart3_cts",
+ "uart3_rxd",
+ "sd2_cd",
+ "sd2_data3",
+ "sd2_data0",
+ "sd2_wp",
+ "sd2_data1",
+ "sd2_data2",
+ "sd2_cmd",
+ "sd2_clk",
+ "sd2_pwr",
+ "sd1_clk",
+ "sd1_cmd",
+ "sd1_pwr",
+ "sd1_wp",
+ "sd1_cd",
+ "spi0_ss3",
+ "spi0_ss2",
+ "pwmout1",
+ "pwmout0",
+};
+
+static int wm8750_pinctrl_probe(struct platform_device *pdev)
+{
+ struct wmt_pinctrl_data *data;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&pdev->dev, "failed to allocate data\n");
+ return -ENOMEM;
+ }
+
+ data->banks = wm8750_banks;
+ data->nbanks = ARRAY_SIZE(wm8750_banks);
+ data->pins = wm8750_pins;
+ data->npins = ARRAY_SIZE(wm8750_pins);
+ data->groups = wm8750_groups;
+ data->ngroups = ARRAY_SIZE(wm8750_groups);
+
+ return wmt_pinctrl_probe(pdev, data);
+}
+
+static int wm8750_pinctrl_remove(struct platform_device *pdev)
+{
+ return wmt_pinctrl_remove(pdev);
+}
+
+static const struct of_device_id wmt_pinctrl_of_match[] = {
+ { .compatible = "wm,wm8750-pinctrl" },
+ { /* sentinel */ },
+};
+
+static struct platform_driver wmt_pinctrl_driver = {
+ .probe = wm8750_pinctrl_probe,
+ .remove = wm8750_pinctrl_remove,
+ .driver = {
+ .name = "pinctrl-wm8750",
+ .owner = THIS_MODULE,
+ .of_match_table = wmt_pinctrl_of_match,
+ },
+};
+
+module_platform_driver(wmt_pinctrl_driver);
+
+MODULE_AUTHOR("Tony Prisk <linux@prisktech.co.nz>");
+MODULE_DESCRIPTION("Wondermedia WM8750 Pincontrol driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, wmt_pinctrl_of_match);
diff --git a/drivers/pinctrl/vt8500/pinctrl-wm8850.c b/drivers/pinctrl/vt8500/pinctrl-wm8850.c
new file mode 100644
index 0000000..ecadce9c
--- /dev/null
+++ b/drivers/pinctrl/vt8500/pinctrl-wm8850.c
@@ -0,0 +1,388 @@
+/*
+ * Pinctrl data for Wondermedia WM8850 SoC
+ *
+ * Copyright (c) 2013 Tony Prisk <linux@prisktech.co.nz>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "pinctrl-wmt.h"
+
+/*
+ * Describe the register offsets within the GPIO memory space.
+ * The dedicated external GPIOs should always be listed in bank 0
+ * so they are exported in the 0..31 range, which is what users
+ * expect.
+ *
+ * Do not reorder these banks, as doing so changes the pin numbering.
+ */
+static const struct wmt_pinctrl_bank_registers wm8850_banks[] = {
+ WMT_PINCTRL_BANK(0x40, 0x80, 0xC0, 0x00, 0x480, 0x4C0), /* 0 */
+ WMT_PINCTRL_BANK(0x44, 0x84, 0xC4, 0x04, 0x484, 0x4C4), /* 1 */
+ WMT_PINCTRL_BANK(0x48, 0x88, 0xC8, 0x08, 0x488, 0x4C8), /* 2 */
+ WMT_PINCTRL_BANK(0x4C, 0x8C, 0xCC, 0x0C, 0x48C, 0x4CC), /* 3 */
+ WMT_PINCTRL_BANK(0x50, 0x90, 0xD0, 0x10, 0x490, 0x4D0), /* 4 */
+ WMT_PINCTRL_BANK(0x54, 0x94, 0xD4, 0x14, 0x494, 0x4D4), /* 5 */
+ WMT_PINCTRL_BANK(0x58, 0x98, 0xD8, 0x18, 0x498, 0x4D8), /* 6 */
+ WMT_PINCTRL_BANK(0x5C, 0x9C, 0xDC, 0x1C, 0x49C, 0x4DC), /* 7 */
+ WMT_PINCTRL_BANK(0x60, 0xA0, 0xE0, 0x20, 0x4A0, 0x4E0), /* 8 */
+ WMT_PINCTRL_BANK(0x70, 0xB0, 0xF0, 0x30, 0x4B0, 0x4F0), /* 9 */
+ WMT_PINCTRL_BANK(0x7C, 0xBC, 0xDC, 0x3C, 0x4BC, 0x4FC), /* 10 */
+};
+
+/* Please keep sorted by bank/bit */
+#define WMT_PIN_EXTGPIO0 WMT_PIN(0, 0)
+#define WMT_PIN_EXTGPIO1 WMT_PIN(0, 1)
+#define WMT_PIN_EXTGPIO2 WMT_PIN(0, 2)
+#define WMT_PIN_EXTGPIO3 WMT_PIN(0, 3)
+#define WMT_PIN_EXTGPIO4 WMT_PIN(0, 4)
+#define WMT_PIN_EXTGPIO5 WMT_PIN(0, 5)
+#define WMT_PIN_EXTGPIO6 WMT_PIN(0, 6)
+#define WMT_PIN_EXTGPIO7 WMT_PIN(0, 7)
+#define WMT_PIN_WAKEUP0 WMT_PIN(0, 16)
+#define WMT_PIN_WAKEUP1 WMT_PIN(0, 17)
+#define WMT_PIN_WAKEUP2 WMT_PIN(0, 18)
+#define WMT_PIN_WAKEUP3 WMT_PIN(0, 19)
+#define WMT_PIN_SUSGPIO0 WMT_PIN(0, 21)
+#define WMT_PIN_SUSGPIO1 WMT_PIN(0, 22)
+#define WMT_PIN_SD0CD WMT_PIN(0, 28)
+#define WMT_PIN_VDOUT0 WMT_PIN(1, 0)
+#define WMT_PIN_VDOUT1 WMT_PIN(1, 1)
+#define WMT_PIN_VDOUT2 WMT_PIN(1, 2)
+#define WMT_PIN_VDOUT3 WMT_PIN(1, 3)
+#define WMT_PIN_VDOUT4 WMT_PIN(1, 4)
+#define WMT_PIN_VDOUT5 WMT_PIN(1, 5)
+#define WMT_PIN_VDOUT6 WMT_PIN(1, 6)
+#define WMT_PIN_VDOUT7 WMT_PIN(1, 7)
+#define WMT_PIN_VDOUT8 WMT_PIN(1, 8)
+#define WMT_PIN_VDOUT9 WMT_PIN(1, 9)
+#define WMT_PIN_VDOUT10 WMT_PIN(1, 10)
+#define WMT_PIN_VDOUT11 WMT_PIN(1, 11)
+#define WMT_PIN_VDOUT12 WMT_PIN(1, 12)
+#define WMT_PIN_VDOUT13 WMT_PIN(1, 13)
+#define WMT_PIN_VDOUT14 WMT_PIN(1, 14)
+#define WMT_PIN_VDOUT15 WMT_PIN(1, 15)
+#define WMT_PIN_VDOUT16 WMT_PIN(1, 16)
+#define WMT_PIN_VDOUT17 WMT_PIN(1, 17)
+#define WMT_PIN_VDOUT18 WMT_PIN(1, 18)
+#define WMT_PIN_VDOUT19 WMT_PIN(1, 19)
+#define WMT_PIN_VDOUT20 WMT_PIN(1, 20)
+#define WMT_PIN_VDOUT21 WMT_PIN(1, 21)
+#define WMT_PIN_VDOUT22 WMT_PIN(1, 22)
+#define WMT_PIN_VDOUT23 WMT_PIN(1, 23)
+#define WMT_PIN_VDIN0 WMT_PIN(2, 0)
+#define WMT_PIN_VDIN1 WMT_PIN(2, 1)
+#define WMT_PIN_VDIN2 WMT_PIN(2, 2)
+#define WMT_PIN_VDIN3 WMT_PIN(2, 3)
+#define WMT_PIN_VDIN4 WMT_PIN(2, 4)
+#define WMT_PIN_VDIN5 WMT_PIN(2, 5)
+#define WMT_PIN_VDIN6 WMT_PIN(2, 6)
+#define WMT_PIN_VDIN7 WMT_PIN(2, 7)
+#define WMT_PIN_SPI0_MOSI WMT_PIN(2, 24)
+#define WMT_PIN_SPI0_MISO WMT_PIN(2, 25)
+#define WMT_PIN_SPI0_SS WMT_PIN(2, 26)
+#define WMT_PIN_SPI0_CLK WMT_PIN(2, 27)
+#define WMT_PIN_SPI0_SSB WMT_PIN(2, 28)
+#define WMT_PIN_SD0CLK WMT_PIN(3, 17)
+#define WMT_PIN_SD0CMD WMT_PIN(3, 18)
+#define WMT_PIN_SD0WP WMT_PIN(3, 19)
+#define WMT_PIN_SD0DATA0 WMT_PIN(3, 20)
+#define WMT_PIN_SD0DATA1 WMT_PIN(3, 21)
+#define WMT_PIN_SD0DATA2 WMT_PIN(3, 22)
+#define WMT_PIN_SD0DATA3 WMT_PIN(3, 23)
+#define WMT_PIN_SD1DATA0 WMT_PIN(3, 24)
+#define WMT_PIN_SD1DATA1 WMT_PIN(3, 25)
+#define WMT_PIN_SD1DATA2 WMT_PIN(3, 26)
+#define WMT_PIN_SD1DATA3 WMT_PIN(3, 27)
+#define WMT_PIN_SD1DATA4 WMT_PIN(3, 28)
+#define WMT_PIN_SD1DATA5 WMT_PIN(3, 29)
+#define WMT_PIN_SD1DATA6 WMT_PIN(3, 30)
+#define WMT_PIN_SD1DATA7 WMT_PIN(3, 31)
+#define WMT_PIN_I2C0_SCL WMT_PIN(5, 8)
+#define WMT_PIN_I2C0_SDA WMT_PIN(5, 9)
+#define WMT_PIN_I2C1_SCL WMT_PIN(5, 10)
+#define WMT_PIN_I2C1_SDA WMT_PIN(5, 11)
+#define WMT_PIN_I2C2_SCL WMT_PIN(5, 12)
+#define WMT_PIN_I2C2_SDA WMT_PIN(5, 13)
+#define WMT_PIN_UART0_RTS WMT_PIN(5, 16)
+#define WMT_PIN_UART0_TXD WMT_PIN(5, 17)
+#define WMT_PIN_UART0_CTS WMT_PIN(5, 18)
+#define WMT_PIN_UART0_RXD WMT_PIN(5, 19)
+#define WMT_PIN_UART1_RTS WMT_PIN(5, 20)
+#define WMT_PIN_UART1_TXD WMT_PIN(5, 21)
+#define WMT_PIN_UART1_CTS WMT_PIN(5, 22)
+#define WMT_PIN_UART1_RXD WMT_PIN(5, 23)
+#define WMT_PIN_UART2_RTS WMT_PIN(5, 24)
+#define WMT_PIN_UART2_TXD WMT_PIN(5, 25)
+#define WMT_PIN_UART2_CTS WMT_PIN(5, 26)
+#define WMT_PIN_UART2_RXD WMT_PIN(5, 27)
+#define WMT_PIN_SD2WP WMT_PIN(6, 3)
+#define WMT_PIN_SD2CMD WMT_PIN(6, 6)
+#define WMT_PIN_SD2CLK WMT_PIN(6, 7)
+#define WMT_PIN_SD2PWR WMT_PIN(6, 9)
+#define WMT_PIN_SD1CLK WMT_PIN(7, 0)
+#define WMT_PIN_SD1CMD WMT_PIN(7, 1)
+#define WMT_PIN_SD1PWR WMT_PIN(7, 10)
+#define WMT_PIN_SD1WP WMT_PIN(7, 11)
+#define WMT_PIN_SD1CD WMT_PIN(7, 12)
+#define WMT_PIN_PWMOUT1 WMT_PIN(7, 26)
+#define WMT_PIN_PWMOUT0 WMT_PIN(7, 27)
+
+static const struct pinctrl_pin_desc wm8850_pins[] = {
+ PINCTRL_PIN(WMT_PIN_EXTGPIO0, "extgpio0"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO1, "extgpio1"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO2, "extgpio2"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO3, "extgpio3"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO4, "extgpio4"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO5, "extgpio5"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO6, "extgpio6"),
+ PINCTRL_PIN(WMT_PIN_EXTGPIO7, "extgpio7"),
+ PINCTRL_PIN(WMT_PIN_WAKEUP0, "wakeup0"),
+ PINCTRL_PIN(WMT_PIN_WAKEUP1, "wakeup1"),
+ PINCTRL_PIN(WMT_PIN_WAKEUP2, "wakeup2"),
+ PINCTRL_PIN(WMT_PIN_WAKEUP3, "wakeup3"),
+ PINCTRL_PIN(WMT_PIN_SUSGPIO0, "susgpio0"),
+ PINCTRL_PIN(WMT_PIN_SUSGPIO1, "susgpio1"),
+ PINCTRL_PIN(WMT_PIN_SD0CD, "sd0_cd"),
+ PINCTRL_PIN(WMT_PIN_VDOUT0, "vdout0"),
+ PINCTRL_PIN(WMT_PIN_VDOUT1, "vdout1"),
+ PINCTRL_PIN(WMT_PIN_VDOUT2, "vdout2"),
+ PINCTRL_PIN(WMT_PIN_VDOUT3, "vdout3"),
+ PINCTRL_PIN(WMT_PIN_VDOUT4, "vdout4"),
+ PINCTRL_PIN(WMT_PIN_VDOUT5, "vdout5"),
+ PINCTRL_PIN(WMT_PIN_VDOUT6, "vdout6"),
+ PINCTRL_PIN(WMT_PIN_VDOUT7, "vdout7"),
+ PINCTRL_PIN(WMT_PIN_VDOUT8, "vdout8"),
+ PINCTRL_PIN(WMT_PIN_VDOUT9, "vdout9"),
+ PINCTRL_PIN(WMT_PIN_VDOUT10, "vdout10"),
+ PINCTRL_PIN(WMT_PIN_VDOUT11, "vdout11"),
+ PINCTRL_PIN(WMT_PIN_VDOUT12, "vdout12"),
+ PINCTRL_PIN(WMT_PIN_VDOUT13, "vdout13"),
+ PINCTRL_PIN(WMT_PIN_VDOUT14, "vdout14"),
+ PINCTRL_PIN(WMT_PIN_VDOUT15, "vdout15"),
+ PINCTRL_PIN(WMT_PIN_VDOUT16, "vdout16"),
+ PINCTRL_PIN(WMT_PIN_VDOUT17, "vdout17"),
+ PINCTRL_PIN(WMT_PIN_VDOUT18, "vdout18"),
+ PINCTRL_PIN(WMT_PIN_VDOUT19, "vdout19"),
+ PINCTRL_PIN(WMT_PIN_VDOUT20, "vdout20"),
+ PINCTRL_PIN(WMT_PIN_VDOUT21, "vdout21"),
+ PINCTRL_PIN(WMT_PIN_VDOUT22, "vdout22"),
+ PINCTRL_PIN(WMT_PIN_VDOUT23, "vdout23"),
+ PINCTRL_PIN(WMT_PIN_VDIN0, "vdin0"),
+ PINCTRL_PIN(WMT_PIN_VDIN1, "vdin1"),
+ PINCTRL_PIN(WMT_PIN_VDIN2, "vdin2"),
+ PINCTRL_PIN(WMT_PIN_VDIN3, "vdin3"),
+ PINCTRL_PIN(WMT_PIN_VDIN4, "vdin4"),
+ PINCTRL_PIN(WMT_PIN_VDIN5, "vdin5"),
+ PINCTRL_PIN(WMT_PIN_VDIN6, "vdin6"),
+ PINCTRL_PIN(WMT_PIN_VDIN7, "vdin7"),
+ PINCTRL_PIN(WMT_PIN_SPI0_MOSI, "spi0_mosi"),
+ PINCTRL_PIN(WMT_PIN_SPI0_MISO, "spi0_miso"),
+ PINCTRL_PIN(WMT_PIN_SPI0_SS, "spi0_ss"),
+ PINCTRL_PIN(WMT_PIN_SPI0_CLK, "spi0_clk"),
+ PINCTRL_PIN(WMT_PIN_SPI0_SSB, "spi0_ssb"),
+ PINCTRL_PIN(WMT_PIN_SD0CLK, "sd0_clk"),
+ PINCTRL_PIN(WMT_PIN_SD0CMD, "sd0_cmd"),
+ PINCTRL_PIN(WMT_PIN_SD0WP, "sd0_wp"),
+ PINCTRL_PIN(WMT_PIN_SD0DATA0, "sd0_data0"),
+ PINCTRL_PIN(WMT_PIN_SD0DATA1, "sd0_data1"),
+ PINCTRL_PIN(WMT_PIN_SD0DATA2, "sd0_data2"),
+ PINCTRL_PIN(WMT_PIN_SD0DATA3, "sd0_data3"),
+ PINCTRL_PIN(WMT_PIN_SD1DATA0, "sd1_data0"),
+ PINCTRL_PIN(WMT_PIN_SD1DATA1, "sd1_data1"),
+ PINCTRL_PIN(WMT_PIN_SD1DATA2, "sd1_data2"),
+ PINCTRL_PIN(WMT_PIN_SD1DATA3, "sd1_data3"),
+ PINCTRL_PIN(WMT_PIN_SD1DATA4, "sd1_data4"),
+ PINCTRL_PIN(WMT_PIN_SD1DATA5, "sd1_data5"),
+ PINCTRL_PIN(WMT_PIN_SD1DATA6, "sd1_data6"),
+ PINCTRL_PIN(WMT_PIN_SD1DATA7, "sd1_data7"),
+ PINCTRL_PIN(WMT_PIN_I2C0_SCL, "i2c0_scl"),
+ PINCTRL_PIN(WMT_PIN_I2C0_SDA, "i2c0_sda"),
+ PINCTRL_PIN(WMT_PIN_I2C1_SCL, "i2c1_scl"),
+ PINCTRL_PIN(WMT_PIN_I2C1_SDA, "i2c1_sda"),
+ PINCTRL_PIN(WMT_PIN_I2C2_SCL, "i2c2_scl"),
+ PINCTRL_PIN(WMT_PIN_I2C2_SDA, "i2c2_sda"),
+ PINCTRL_PIN(WMT_PIN_UART0_RTS, "uart0_rts"),
+ PINCTRL_PIN(WMT_PIN_UART0_TXD, "uart0_txd"),
+ PINCTRL_PIN(WMT_PIN_UART0_CTS, "uart0_cts"),
+ PINCTRL_PIN(WMT_PIN_UART0_RXD, "uart0_rxd"),
+ PINCTRL_PIN(WMT_PIN_UART1_RTS, "uart1_rts"),
+ PINCTRL_PIN(WMT_PIN_UART1_TXD, "uart1_txd"),
+ PINCTRL_PIN(WMT_PIN_UART1_CTS, "uart1_cts"),
+ PINCTRL_PIN(WMT_PIN_UART1_RXD, "uart1_rxd"),
+ PINCTRL_PIN(WMT_PIN_UART2_RTS, "uart2_rts"),
+ PINCTRL_PIN(WMT_PIN_UART2_TXD, "uart2_txd"),
+ PINCTRL_PIN(WMT_PIN_UART2_CTS, "uart2_cts"),
+ PINCTRL_PIN(WMT_PIN_UART2_RXD, "uart2_rxd"),
+ PINCTRL_PIN(WMT_PIN_SD2WP, "sd2_wp"),
+ PINCTRL_PIN(WMT_PIN_SD2CMD, "sd2_cmd"),
+ PINCTRL_PIN(WMT_PIN_SD2CLK, "sd2_clk"),
+ PINCTRL_PIN(WMT_PIN_SD2PWR, "sd2_pwr"),
+ PINCTRL_PIN(WMT_PIN_SD1CLK, "sd1_clk"),
+ PINCTRL_PIN(WMT_PIN_SD1CMD, "sd1_cmd"),
+ PINCTRL_PIN(WMT_PIN_SD1PWR, "sd1_pwr"),
+ PINCTRL_PIN(WMT_PIN_SD1WP, "sd1_wp"),
+ PINCTRL_PIN(WMT_PIN_SD1CD, "sd1_cd"),
+ PINCTRL_PIN(WMT_PIN_PWMOUT1, "pwmout1"),
+ PINCTRL_PIN(WMT_PIN_PWMOUT0, "pwmout0"),
+};
+
+/* Order of these names must match the above list */
+static const char * const wm8850_groups[] = {
+ "extgpio0",
+ "extgpio1",
+ "extgpio2",
+ "extgpio3",
+ "extgpio4",
+ "extgpio5",
+ "extgpio6",
+ "extgpio7",
+ "wakeup0",
+ "wakeup1",
+ "wakeup2",
+ "wakeup3",
+ "susgpio0",
+ "susgpio1",
+ "sd0_cd",
+ "vdout0",
+ "vdout1",
+ "vdout2",
+ "vdout3",
+ "vdout4",
+ "vdout5",
+ "vdout6",
+ "vdout7",
+ "vdout8",
+ "vdout9",
+ "vdout10",
+ "vdout11",
+ "vdout12",
+ "vdout13",
+ "vdout14",
+ "vdout15",
+ "vdout16",
+ "vdout17",
+ "vdout18",
+ "vdout19",
+ "vdout20",
+ "vdout21",
+ "vdout22",
+ "vdout23",
+ "vdin0",
+ "vdin1",
+ "vdin2",
+ "vdin3",
+ "vdin4",
+ "vdin5",
+ "vdin6",
+ "vdin7",
+ "spi0_mosi",
+ "spi0_miso",
+ "spi0_ss",
+ "spi0_clk",
+ "spi0_ssb",
+ "sd0_clk",
+ "sd0_cmd",
+ "sd0_wp",
+ "sd0_data0",
+ "sd0_data1",
+ "sd0_data2",
+ "sd0_data3",
+ "sd1_data0",
+ "sd1_data1",
+ "sd1_data2",
+ "sd1_data3",
+ "sd1_data4",
+ "sd1_data5",
+ "sd1_data6",
+ "sd1_data7",
+ "i2c0_scl",
+ "i2c0_sda",
+ "i2c1_scl",
+ "i2c1_sda",
+ "i2c2_scl",
+ "i2c2_sda",
+ "uart0_rts",
+ "uart0_txd",
+ "uart0_cts",
+ "uart0_rxd",
+ "uart1_rts",
+ "uart1_txd",
+ "uart1_cts",
+ "uart1_rxd",
+ "uart2_rts",
+ "uart2_txd",
+ "uart2_cts",
+ "uart2_rxd",
+ "sd2_wp",
+ "sd2_cmd",
+ "sd2_clk",
+ "sd2_pwr",
+ "sd1_clk",
+ "sd1_cmd",
+ "sd1_pwr",
+ "sd1_wp",
+ "sd1_cd",
+ "pwmout1",
+ "pwmout0",
+};
+
+static int wm8850_pinctrl_probe(struct platform_device *pdev)
+{
+ struct wmt_pinctrl_data *data;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&pdev->dev, "failed to allocate data\n");
+ return -ENOMEM;
+ }
+
+ data->banks = wm8850_banks;
+ data->nbanks = ARRAY_SIZE(wm8850_banks);
+ data->pins = wm8850_pins;
+ data->npins = ARRAY_SIZE(wm8850_pins);
+ data->groups = wm8850_groups;
+ data->ngroups = ARRAY_SIZE(wm8850_groups);
+
+ return wmt_pinctrl_probe(pdev, data);
+}
+
+static int wm8850_pinctrl_remove(struct platform_device *pdev)
+{
+ return wmt_pinctrl_remove(pdev);
+}
+
+static const struct of_device_id wmt_pinctrl_of_match[] = {
+ { .compatible = "wm,wm8850-pinctrl" },
+ { /* sentinel */ },
+};
+
+static struct platform_driver wmt_pinctrl_driver = {
+ .probe = wm8850_pinctrl_probe,
+ .remove = wm8850_pinctrl_remove,
+ .driver = {
+ .name = "pinctrl-wm8850",
+ .owner = THIS_MODULE,
+ .of_match_table = wmt_pinctrl_of_match,
+ },
+};
+
+module_platform_driver(wmt_pinctrl_driver);
+
+MODULE_AUTHOR("Tony Prisk <linux@prisktech.co.nz>");
+MODULE_DESCRIPTION("Wondermedia WM8850 Pincontrol driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, wmt_pinctrl_of_match);
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
new file mode 100644
index 0000000..ab63104
--- /dev/null
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -0,0 +1,632 @@
+/*
+ * Pinctrl driver for the Wondermedia SoCs
+ *
+ * Copyright (c) 2013 Tony Prisk <linux@prisktech.co.nz>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "pinctrl-wmt.h"
+
+static inline void wmt_setbits(struct wmt_pinctrl_data *data, u32 reg,
+ u32 mask)
+{
+ u32 val;
+
+ val = readl_relaxed(data->base + reg);
+ val |= mask;
+ writel_relaxed(val, data->base + reg);
+}
+
+static inline void wmt_clearbits(struct wmt_pinctrl_data *data, u32 reg,
+ u32 mask)
+{
+ u32 val;
+
+ val = readl_relaxed(data->base + reg);
+ val &= ~mask;
+ writel_relaxed(val, data->base + reg);
+}
+
+enum wmt_func_sel {
+ WMT_FSEL_GPIO_IN = 0,
+ WMT_FSEL_GPIO_OUT = 1,
+ WMT_FSEL_ALT = 2,
+ WMT_FSEL_COUNT = 3,
+};
+
+static const char * const wmt_functions[WMT_FSEL_COUNT] = {
+ [WMT_FSEL_GPIO_IN] = "gpio_in",
+ [WMT_FSEL_GPIO_OUT] = "gpio_out",
+ [WMT_FSEL_ALT] = "alt",
+};
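+/* these selectors are also the values accepted by the "wm,function" DT property (0..2) */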
+
+static int wmt_pmx_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ return WMT_FSEL_COUNT;
+}
+
+static const char *wmt_pmx_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ return wmt_functions[selector];
+}
+
+static int wmt_pmx_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct wmt_pinctrl_data *data = pinctrl_dev_get_drvdata(pctldev);
+
+ /* every pin does every function */
+ *groups = data->groups;
+ *num_groups = data->ngroups;
+
+ return 0;
+}
+
+static int wmt_set_pinmux(struct wmt_pinctrl_data *data, unsigned func,
+ unsigned pin)
+{
+ u32 bank = WMT_BANK_FROM_PIN(pin);
+ u32 bit = WMT_BIT_FROM_PIN(pin);
+ u32 reg_en = data->banks[bank].reg_en;
+ u32 reg_dir = data->banks[bank].reg_dir;
+
+ if (reg_dir == NO_REG) {
+ dev_err(data->dev, "pin:%d no direction register defined\n",
+ pin);
+ return -EINVAL;
+ }
+
+ /*
+ * If reg_en == NO_REG, we assume it is a dedicated GPIO and cannot be
+ * disabled (as on VT8500) and that no alternate function is available.
+ */
+ switch (func) {
+ case WMT_FSEL_GPIO_IN:
+ if (reg_en != NO_REG)
+ wmt_setbits(data, reg_en, BIT(bit));
+ wmt_clearbits(data, reg_dir, BIT(bit));
+ break;
+ case WMT_FSEL_GPIO_OUT:
+ if (reg_en != NO_REG)
+ wmt_setbits(data, reg_en, BIT(bit));
+ wmt_setbits(data, reg_dir, BIT(bit));
+ break;
+ case WMT_FSEL_ALT:
+ if (reg_en == NO_REG) {
+ dev_err(data->dev, "pin:%d no alt function available\n",
+ pin);
+ return -EINVAL;
+ }
+ wmt_clearbits(data, reg_en, BIT(bit));
+ }
+
+ return 0;
+}
+
+static int wmt_pmx_enable(struct pinctrl_dev *pctldev,
+ unsigned func_selector,
+ unsigned group_selector)
+{
+ struct wmt_pinctrl_data *data = pinctrl_dev_get_drvdata(pctldev);
+ u32 pinnum = data->pins[group_selector].number;
+
+ return wmt_set_pinmux(data, func_selector, pinnum);
+}
+
+static void wmt_pmx_disable(struct pinctrl_dev *pctldev,
+ unsigned func_selector,
+ unsigned group_selector)
+{
+ struct wmt_pinctrl_data *data = pinctrl_dev_get_drvdata(pctldev);
+ u32 pinnum = data->pins[group_selector].number;
+
+ /* disable by setting GPIO_IN */
+ wmt_set_pinmux(data, WMT_FSEL_GPIO_IN, pinnum);
+}
+
+static void wmt_pmx_gpio_disable_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct wmt_pinctrl_data *data = pinctrl_dev_get_drvdata(pctldev);
+
+ /* disable by setting GPIO_IN */
+ wmt_set_pinmux(data, WMT_FSEL_GPIO_IN, offset);
+}
+
+static int wmt_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset,
+ bool input)
+{
+ struct wmt_pinctrl_data *data = pinctrl_dev_get_drvdata(pctldev);
+
+ wmt_set_pinmux(data, (input ? WMT_FSEL_GPIO_IN : WMT_FSEL_GPIO_OUT),
+ offset);
+
+ return 0;
+}
+
+static struct pinmux_ops wmt_pinmux_ops = {
+ .get_functions_count = wmt_pmx_get_functions_count,
+ .get_function_name = wmt_pmx_get_function_name,
+ .get_function_groups = wmt_pmx_get_function_groups,
+ .enable = wmt_pmx_enable,
+ .disable = wmt_pmx_disable,
+ .gpio_disable_free = wmt_pmx_gpio_disable_free,
+ .gpio_set_direction = wmt_pmx_gpio_set_direction,
+};
+
+static int wmt_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct wmt_pinctrl_data *data = pinctrl_dev_get_drvdata(pctldev);
+
+ return data->ngroups;
+}
+
+static const char *wmt_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ struct wmt_pinctrl_data *data = pinctrl_dev_get_drvdata(pctldev);
+
+ return data->groups[selector];
+}
+
+static int wmt_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const unsigned **pins,
+ unsigned *num_pins)
+{
+ struct wmt_pinctrl_data *data = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = &data->pins[selector].number;
+ *num_pins = 1;
+
+ return 0;
+}
+
+static int wmt_pctl_find_group_by_pin(struct wmt_pinctrl_data *data, u32 pin)
+{
+ int i;
+
+ for (i = 0; i < data->npins; i++) {
+ if (data->pins[i].number == pin)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int wmt_pctl_dt_node_to_map_func(struct wmt_pinctrl_data *data,
+ struct device_node *np,
+ u32 pin, u32 fnum,
+ struct pinctrl_map **maps)
+{
+ int group;
+ struct pinctrl_map *map = *maps;
+
+ if (fnum >= ARRAY_SIZE(wmt_functions)) {
+ dev_err(data->dev, "invalid wm,function %d\n", fnum);
+ return -EINVAL;
+ }
+
+ group = wmt_pctl_find_group_by_pin(data, pin);
+ if (group < 0) {
+ dev_err(data->dev, "unable to match pin %d to group\n", pin);
+ return group;
+ }
+
+ map->type = PIN_MAP_TYPE_MUX_GROUP;
+ map->data.mux.group = data->groups[group];
+ map->data.mux.function = wmt_functions[fnum];
+ (*maps)++;
+
+ return 0;
+}
+
+static int wmt_pctl_dt_node_to_map_pull(struct wmt_pinctrl_data *data,
+ struct device_node *np,
+ u32 pin, u32 pull,
+ struct pinctrl_map **maps)
+{
+ int group;
+ unsigned long *configs;
+ struct pinctrl_map *map = *maps;
+
+ if (pull > 2) {
+ dev_err(data->dev, "invalid wm,pull %d\n", pull);
+ return -EINVAL;
+ }
+
+ group = wmt_pctl_find_group_by_pin(data, pin);
+ if (group < 0) {
+ dev_err(data->dev, "unable to match pin %d to group\n", pin);
+ return group;
+ }
+
+ configs = kzalloc(sizeof(*configs), GFP_KERNEL);
+ if (!configs)
+ return -ENOMEM;
+
+ configs[0] = pull;
+
+ map->type = PIN_MAP_TYPE_CONFIGS_PIN;
+ map->data.configs.group_or_pin = data->groups[group];
+ map->data.configs.configs = configs;
+ map->data.configs.num_configs = 1;
+ (*maps)++;
+
+ return 0;
+}
+
+static void wmt_pctl_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *maps,
+ unsigned num_maps)
+{
+ int i;
+
+ for (i = 0; i < num_maps; i++)
+ if (maps[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
+ kfree(maps[i].data.configs.configs);
+
+ kfree(maps);
+}
+
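+/*
+ * Parse one pin configuration node into pinctrl maps. A purely illustrative
+ * example node muxing the two I2C0 pins to their alternate function (pin
+ * numbers come from the SoC data files: i2c0_scl = WMT_PIN(5, 8) = 168,
+ * i2c0_sda = 169):
+ *
+ *   i2c0_pins: i2c0 {
+ *           wm,pins = <168 169>;
+ *           wm,function = <2>;      (2 = "alt")
+ *   };
+ *
+ * "wm,function" and "wm,pull" may each hold a single value applied to every
+ * pin, or one value per entry in "wm,pins".
+ */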
+static int wmt_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned *num_maps)
+{
+ struct pinctrl_map *maps, *cur_map;
+ struct property *pins, *funcs, *pulls;
+ u32 pin, func, pull;
+ int num_pins, num_funcs, num_pulls, maps_per_pin;
+ int i, err;
+ struct wmt_pinctrl_data *data = pinctrl_dev_get_drvdata(pctldev);
+
+ pins = of_find_property(np, "wm,pins", NULL);
+ if (!pins) {
+ dev_err(data->dev, "missing wmt,pins property\n");
+ return -EINVAL;
+ }
+
+ funcs = of_find_property(np, "wm,function", NULL);
+ pulls = of_find_property(np, "wm,pull", NULL);
+
+ if (!funcs && !pulls) {
+ dev_err(data->dev, "neither wm,function nor wm,pull specified\n");
+ return -EINVAL;
+ }
+
+ /*
+ * The following lines calculate how many values are defined for each
+ * of the properties.
+ */
+ num_pins = pins->length / sizeof(u32);
+ num_funcs = funcs ? (funcs->length / sizeof(u32)) : 0;
+ num_pulls = pulls ? (pulls->length / sizeof(u32)) : 0;
+
+ if (num_funcs > 1 && num_funcs != num_pins) {
+ dev_err(data->dev, "wm,function must have 1 or %d entries\n",
+ num_pins);
+ return -EINVAL;
+ }
+
+ if (num_pulls > 1 && num_pulls != num_pins) {
+ dev_err(data->dev, "wm,pull must have 1 or %d entries\n",
+ num_pins);
+ return -EINVAL;
+ }
+
+ maps_per_pin = 0;
+ if (num_funcs)
+ maps_per_pin++;
+ if (num_pulls)
+ maps_per_pin++;
+
+ cur_map = maps = kzalloc(num_pins * maps_per_pin * sizeof(*maps),
+ GFP_KERNEL);
+ if (!maps)
+ return -ENOMEM;
+
+ for (i = 0; i < num_pins; i++) {
+ err = of_property_read_u32_index(np, "wm,pins", i, &pin);
+ if (err)
+ goto fail;
+
+ if (pin >= (data->nbanks * 32)) {
+ dev_err(data->dev, "invalid wm,pins value\n");
+ err = -EINVAL;
+ goto fail;
+ }
+
+ if (num_funcs) {
+ err = of_property_read_u32_index(np, "wm,function",
+ (num_funcs > 1 ? i : 0), &func);
+ if (err)
+ goto fail;
+
+ err = wmt_pctl_dt_node_to_map_func(data, np, pin, func,
+ &cur_map);
+ if (err)
+ goto fail;
+ }
+
+ if (num_pulls) {
+ err = of_property_read_u32_index(np, "wm,pull",
+ (num_pulls > 1 ? i : 0), &pull);
+ if (err)
+ goto fail;
+
+ err = wmt_pctl_dt_node_to_map_pull(data, np, pin, pull,
+ &cur_map);
+ if (err)
+ goto fail;
+ }
+ }
+ *map = maps;
+ *num_maps = num_pins * maps_per_pin;
+ return 0;
+
+/*
+ * The fail path removes any maps that have been allocated. The fail path is
+ * only called from code after maps has been kzalloc'd. It is also safe to
+ * pass 'num_pins * maps_per_pin' as the map count even though we probably
+ * failed before all the mappings were read as all maps are allocated at once,
+ * and configs are only allocated for .type = PIN_MAP_TYPE_CONFIGS_PIN - there
+ * is no failpath where a config can be allocated without .type being set.
+ */
+fail:
+ wmt_pctl_dt_free_map(pctldev, maps, num_pins * maps_per_pin);
+ return err;
+}
+
+static struct pinctrl_ops wmt_pctl_ops = {
+ .get_groups_count = wmt_get_groups_count,
+ .get_group_name = wmt_get_group_name,
+ .get_group_pins = wmt_get_group_pins,
+ .dt_node_to_map = wmt_pctl_dt_node_to_map,
+ .dt_free_map = wmt_pctl_dt_free_map,
+};
+
+static int wmt_pinconf_get(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *config)
+{
+ return -ENOTSUPP;
+}
+
+static int wmt_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long config)
+{
+ struct wmt_pinctrl_data *data = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(config);
+ u16 arg = pinconf_to_config_argument(config);
+ u32 bank = WMT_BANK_FROM_PIN(pin);
+ u32 bit = WMT_BIT_FROM_PIN(pin);
+ u32 reg_pull_en = data->banks[bank].reg_pull_en;
+ u32 reg_pull_cfg = data->banks[bank].reg_pull_cfg;
+
+ if ((reg_pull_en == NO_REG) || (reg_pull_cfg == NO_REG)) {
+ dev_err(data->dev, "bias functions not supported on pin %d\n",
+ pin);
+ return -EINVAL;
+ }
+
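+ /* a pull-up/pull-down request with argument 0 means "disable the bias" */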
+ if ((param == PIN_CONFIG_BIAS_PULL_DOWN) ||
+ (param == PIN_CONFIG_BIAS_PULL_UP)) {
+ if (arg == 0)
+ param = PIN_CONFIG_BIAS_DISABLE;
+ }
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ wmt_clearbits(data, reg_pull_en, BIT(bit));
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ wmt_clearbits(data, reg_pull_cfg, BIT(bit));
+ wmt_setbits(data, reg_pull_en, BIT(bit));
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ wmt_setbits(data, reg_pull_cfg, BIT(bit));
+ wmt_setbits(data, reg_pull_en, BIT(bit));
+ break;
+ default:
+ dev_err(data->dev, "unknown pinconf param\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct pinconf_ops wmt_pinconf_ops = {
+ .pin_config_get = wmt_pinconf_get,
+ .pin_config_set = wmt_pinconf_set,
+};
+
+static struct pinctrl_desc wmt_desc = {
+ .owner = THIS_MODULE,
+ .name = "pinctrl-wmt",
+ .pctlops = &wmt_pctl_ops,
+ .pmxops = &wmt_pinmux_ops,
+ .confops = &wmt_pinconf_ops,
+};
+
+static int wmt_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ return pinctrl_request_gpio(chip->base + offset);
+}
+
+static void wmt_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ pinctrl_free_gpio(chip->base + offset);
+}
+
+static int wmt_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ struct wmt_pinctrl_data *data = dev_get_drvdata(chip->dev);
+ u32 bank = WMT_BANK_FROM_PIN(offset);
+ u32 bit = WMT_BIT_FROM_PIN(offset);
+ u32 reg_dir = data->banks[bank].reg_dir;
+ u32 val;
+
+ val = readl_relaxed(data->base + reg_dir);
+ if (val & BIT(bit))
+ return GPIOF_DIR_OUT;
+ else
+ return GPIOF_DIR_IN;
+}
+
+static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
+static int wmt_gpio_get_value(struct gpio_chip *chip, unsigned offset)
+{
+ struct wmt_pinctrl_data *data = dev_get_drvdata(chip->dev);
+ u32 bank = WMT_BANK_FROM_PIN(offset);
+ u32 bit = WMT_BIT_FROM_PIN(offset);
+ u32 reg_data_in = data->banks[bank].reg_data_in;
+
+ if (reg_data_in == NO_REG) {
+ dev_err(data->dev, "no data in register defined\n");
+ return -EINVAL;
+ }
+
+ return !!(readl_relaxed(data->base + reg_data_in) & BIT(bit));
+}
+
+static void wmt_gpio_set_value(struct gpio_chip *chip, unsigned offset,
+ int val)
+{
+ struct wmt_pinctrl_data *data = dev_get_drvdata(chip->dev);
+ u32 bank = WMT_BANK_FROM_PIN(offset);
+ u32 bit = WMT_BIT_FROM_PIN(offset);
+ u32 reg_data_out = data->banks[bank].reg_data_out;
+
+ if (reg_data_out == NO_REG) {
+ dev_err(data->dev, "no data out register defined\n");
+ return;
+ }
+
+ if (val)
+ wmt_setbits(data, reg_data_out, BIT(bit));
+ else
+ wmt_clearbits(data, reg_data_out, BIT(bit));
+}
+
+static struct gpio_chip wmt_gpio_chip = {
+ .label = "gpio-wmt",
+ .owner = THIS_MODULE,
+ .request = wmt_gpio_request,
+ .free = wmt_gpio_free,
+ .get_direction = wmt_gpio_get_direction,
+ .direction_input = wmt_gpio_direction_input,
+ .direction_output = wmt_gpio_direction_output,
+ .get = wmt_gpio_get_value,
+ .set = wmt_gpio_set_value,
+ .can_sleep = 0,
+};
+
+int wmt_pinctrl_probe(struct platform_device *pdev,
+ struct wmt_pinctrl_data *data)
+{
+ int err;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!data->base) {
+ dev_err(&pdev->dev, "failed to map memory resource\n");
+ return -EBUSY;
+ }
+
+ wmt_desc.pins = data->pins;
+ wmt_desc.npins = data->npins;
+
+ data->gpio_chip = wmt_gpio_chip;
+ data->gpio_chip.dev = &pdev->dev;
+ data->gpio_chip.of_node = pdev->dev.of_node;
+ data->gpio_chip.ngpio = data->nbanks * 32;
+
+ platform_set_drvdata(pdev, data);
+
+ data->dev = &pdev->dev;
+
+ data->pctl_dev = pinctrl_register(&wmt_desc, &pdev->dev, data);
+ if (!data->pctl_dev) {
+ dev_err(&pdev->dev, "Failed to register pinctrl\n");
+ return -EINVAL;
+ }
+
+ err = gpiochip_add(&data->gpio_chip);
+ if (err) {
+ dev_err(&pdev->dev, "could not add GPIO chip\n");
+ goto fail_gpio;
+ }
+
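+ /* map GPIOs 1:1 onto the pin controller's pin space: GPIO n is pin n */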
+ err = gpiochip_add_pin_range(&data->gpio_chip, dev_name(data->dev),
+ 0, 0, data->nbanks * 32);
+ if (err)
+ goto fail_range;
+
+ dev_info(&pdev->dev, "Pin controller initialized\n");
+
+ return 0;
+
+fail_range:
+ /* don't clobber err: report the original failure, not gpiochip_remove()'s result */
+ if (gpiochip_remove(&data->gpio_chip))
+ dev_err(&pdev->dev, "failed to remove gpio chip\n");
+fail_gpio:
+ pinctrl_unregister(data->pctl_dev);
+ return err;
+}
+
+int wmt_pinctrl_remove(struct platform_device *pdev)
+{
+ struct wmt_pinctrl_data *data = platform_get_drvdata(pdev);
+ int err;
+
+ err = gpiochip_remove(&data->gpio_chip);
+ if (err)
+ dev_err(&pdev->dev, "failed to remove gpio chip\n");
+
+ pinctrl_unregister(data->pctl_dev);
+
+ return 0;
+}
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.h b/drivers/pinctrl/vt8500/pinctrl-wmt.h
new file mode 100644
index 0000000..41f5f2d
--- /dev/null
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.h
@@ -0,0 +1,79 @@
+/*
+ * Pinctrl driver for the Wondermedia SoCs
+ *
+ * Copyright (c) 2013 Tony Prisk <linux@prisktech.co.nz>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/gpio.h>
+
+/* VT8500 has no enable register in the extgpio bank. */
+#define NO_REG 0xFFFF
+
+#define WMT_PINCTRL_BANK(__en, __dir, __dout, __din, __pen, __pcfg) \
+{ \
+ .reg_en = __en, \
+ .reg_dir = __dir, \
+ .reg_data_out = __dout, \
+ .reg_data_in = __din, \
+ .reg_pull_en = __pen, \
+ .reg_pull_cfg = __pcfg, \
+}
+
+/* Encode/decode the bank/bit pairs into a pin value */
+#define WMT_PIN(__bank, __offset) ((__bank << 5) | __offset)
+#define WMT_BANK_FROM_PIN(__pin) (__pin >> 5)
+#define WMT_BIT_FROM_PIN(__pin) (__pin & 0x1f)
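+/* e.g. WMT_PIN(3, 17) == 113: bank 3, bit 17 (sd0_clk on WM8750/WM8850) */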
+
+#define WMT_GROUP(__name, __data) \
+{ \
+ .name = __name, \
+ .pins = __data, \
+ .npins = ARRAY_SIZE(__data), \
+}
+
+struct wmt_pinctrl_bank_registers {
+ u32 reg_en;
+ u32 reg_dir;
+ u32 reg_data_out;
+ u32 reg_data_in;
+
+ u32 reg_pull_en;
+ u32 reg_pull_cfg;
+};
+
+struct wmt_pinctrl_group {
+ const char *name;
+ const unsigned int *pins;
+ const unsigned npins;
+};
+
+struct wmt_pinctrl_data {
+ struct device *dev;
+ struct pinctrl_dev *pctl_dev;
+
+ void __iomem *base;	/* filled in by wmt_pinctrl_probe */
+
+ /* must be initialized by the SoC driver before calling wmt_pinctrl_probe */
+ const struct wmt_pinctrl_bank_registers *banks;
+ const struct pinctrl_pin_desc *pins;
+ const char * const *groups;
+
+ u32 nbanks;
+ u32 npins;
+ u32 ngroups;
+
+ struct gpio_chip gpio_chip;
+ struct pinctrl_gpio_range gpio_range;
+};
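+/*
+ * A SoC data file fills in banks/pins/groups (and their counts) and then
+ * chains to wmt_pinctrl_probe() from its own platform driver probe; see
+ * pinctrl-wm8750.c for an example.
+ */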
+
+int wmt_pinctrl_probe(struct platform_device *pdev,
+ struct wmt_pinctrl_data *data);
+int wmt_pinctrl_remove(struct platform_device *pdev);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index edec135..54d31c0 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -844,14 +844,14 @@ static int dispatch_proc_show(struct seq_file *m, void *v)
static int dispatch_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, dispatch_proc_show, PDE(inode)->data);
+ return single_open(file, dispatch_proc_show, PDE_DATA(inode));
}
static ssize_t dispatch_proc_write(struct file *file,
const char __user *userbuf,
size_t count, loff_t *pos)
{
- struct ibm_struct *ibm = PDE(file_inode(file))->data;
+ struct ibm_struct *ibm = PDE_DATA(file_inode(file));
char *kernbuf;
int ret;
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 242abac..eb3467e 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -553,7 +553,7 @@ static int lcd_proc_show(struct seq_file *m, void *v)
static int lcd_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, lcd_proc_show, PDE(inode)->data);
+ return single_open(file, lcd_proc_show, PDE_DATA(inode));
}
static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
@@ -583,7 +583,7 @@ static int set_lcd_status(struct backlight_device *bd)
static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- struct toshiba_acpi_dev *dev = PDE(file_inode(file))->data;
+ struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
char cmd[42];
size_t len;
int value;
@@ -644,13 +644,13 @@ static int video_proc_show(struct seq_file *m, void *v)
static int video_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, video_proc_show, PDE(inode)->data);
+ return single_open(file, video_proc_show, PDE_DATA(inode));
}
static ssize_t video_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- struct toshiba_acpi_dev *dev = PDE(file_inode(file))->data;
+ struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
char *cmd, *buffer;
int ret;
int value;
@@ -744,13 +744,13 @@ static int fan_proc_show(struct seq_file *m, void *v)
static int fan_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, fan_proc_show, PDE(inode)->data);
+ return single_open(file, fan_proc_show, PDE_DATA(inode));
}
static ssize_t fan_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- struct toshiba_acpi_dev *dev = PDE(file_inode(file))->data;
+ struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
char cmd[42];
size_t len;
int value;
@@ -816,13 +816,13 @@ static int keys_proc_show(struct seq_file *m, void *v)
static int keys_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, keys_proc_show, PDE(inode)->data);
+ return single_open(file, keys_proc_show, PDE_DATA(inode));
}
static ssize_t keys_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- struct toshiba_acpi_dev *dev = PDE(file_inode(file))->data;
+ struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
char cmd[42];
size_t len;
int value;
@@ -859,7 +859,7 @@ static int version_proc_show(struct seq_file *m, void *v)
static int version_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, version_proc_show, PDE(inode)->data);
+ return single_open(file, version_proc_show, PDE_DATA(inode));
}
static const struct file_operations version_proc_fops = {
diff --git a/drivers/pnp/isapnp/proc.c b/drivers/pnp/isapnp/proc.c
index 65f735a..2365ef3 100644
--- a/drivers/pnp/isapnp/proc.c
+++ b/drivers/pnp/isapnp/proc.c
@@ -55,9 +55,7 @@ static loff_t isapnp_proc_bus_lseek(struct file *file, loff_t off, int whence)
static ssize_t isapnp_proc_bus_read(struct file *file, char __user * buf,
size_t nbytes, loff_t * ppos)
{
- struct inode *ino = file_inode(file);
- struct proc_dir_entry *dp = PDE(ino);
- struct pnp_dev *dev = dp->data;
+ struct pnp_dev *dev = PDE_DATA(file_inode(file));
int pos = *ppos;
int cnt, size = 256;
@@ -107,7 +105,7 @@ static int isapnp_proc_attach_device(struct pnp_dev *dev)
&isapnp_proc_bus_file_operations, dev);
if (!e)
return -ENOMEM;
- e->size = 256;
+ proc_set_size(e, 256);
return 0;
}
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 5d66e55..9b86a01 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -513,10 +513,6 @@ static int __init pnpbios_init(void)
{
int ret;
-#if defined(CONFIG_PPC)
- if (check_legacy_ioport(PNPBIOS_BASE))
- return -ENODEV;
-#endif
if (pnpbios_disabled || dmi_check_system(pnpbios_dmi_table) ||
paravirt_enabled()) {
printk(KERN_INFO "PnPBIOS: Disabled\n");
@@ -570,10 +566,7 @@ fs_initcall(pnpbios_init);
static int __init pnpbios_thread_init(void)
{
struct task_struct *task;
-#if defined(CONFIG_PPC)
- if (check_legacy_ioport(PNPBIOS_BASE))
- return 0;
-#endif
+
if (pnpbios_disabled)
return 0;
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c
index 1c03ee8..c212db0 100644
--- a/drivers/pnp/pnpbios/proc.c
+++ b/drivers/pnp/pnpbios/proc.c
@@ -237,13 +237,13 @@ static int pnpbios_proc_show(struct seq_file *m, void *v)
static int pnpbios_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, pnpbios_proc_show, PDE(inode)->data);
+ return single_open(file, pnpbios_proc_show, PDE_DATA(inode));
}
static ssize_t pnpbios_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- void *data = PDE(file_inode(file))->data;
+ void *data = PDE_DATA(file_inode(file));
struct pnp_bios_node *node;
int boot = (long)data >> 8;
u8 nodenum = (long)data;
diff --git a/drivers/power/rx51_battery.c b/drivers/power/rx51_battery.c
index 1a1dcb8..cbde1d6 100644
--- a/drivers/power/rx51_battery.c
+++ b/drivers/power/rx51_battery.c
@@ -42,6 +42,7 @@ static int rx51_battery_read_adc(int channel)
req.method = TWL4030_MADC_SW1;
req.func_cb = NULL;
req.type = TWL4030_MADC_WAIT;
+ req.raw = true;
if (twl4030_madc_conversion(&req) <= 0)
return -ENODATA;
diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c
index e1b4705..38a8bbe 100644
--- a/drivers/pps/clients/pps_parport.c
+++ b/drivers/pps/clients/pps_parport.c
@@ -32,6 +32,7 @@
#include <linux/init.h>
#include <linux/irqnr.h>
#include <linux/time.h>
+#include <linux/slab.h>
#include <linux/parport.h>
#include <linux/pps_kernel.h>
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 0e0bfa0..115b644 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -147,8 +147,7 @@ config PWM_TEGRA
config PWM_TIECAP
tristate "ECAP PWM support"
- depends on SOC_AM33XX
- select PWM_TIPWMSS
+ depends on SOC_AM33XX || ARCH_DAVINCI_DA8XX
help
PWM driver support for the ECAP APWM controller found on AM33XX
TI SOC
@@ -158,8 +157,7 @@ config PWM_TIECAP
config PWM_TIEHRPWM
tristate "EHRPWM PWM support"
- depends on SOC_AM33XX
- select PWM_TIPWMSS
+ depends on SOC_AM33XX || ARCH_DAVINCI_DA8XX
help
PWM driver support for the EHRPWM controller found on AM33XX
TI SOC
@@ -169,7 +167,7 @@ config PWM_TIEHRPWM
config PWM_TIPWMSS
bool
- depends on SOC_AM33XX && (PWM_TIEHRPWM || PWM_TIECAP)
+ default y if SOC_AM33XX && (PWM_TIECAP || PWM_TIEHRPWM)
help
PWM Subsystem driver support for AM33xx SOC.
diff --git a/drivers/pwm/pwm-ab8500.c b/drivers/pwm/pwm-ab8500.c
index 4248d04..1d07a6f 100644
--- a/drivers/pwm/pwm-ab8500.c
+++ b/drivers/pwm/pwm-ab8500.c
@@ -66,7 +66,7 @@ static int ab8500_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
1 << (chip->base - 1), ENABLE_PWM);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to disable PWM, Error %d\n",
+ dev_err(chip->dev, "%s: Failed to enable PWM, Error %d\n",
pwm->label, ret);
return ret;
}
@@ -88,6 +88,7 @@ static const struct pwm_ops ab8500_pwm_ops = {
.config = ab8500_pwm_config,
.enable = ab8500_pwm_enable,
.disable = ab8500_pwm_disable,
+ .owner = THIS_MODULE,
};
static int ab8500_pwm_probe(struct platform_device *pdev)
@@ -99,7 +100,7 @@ static int ab8500_pwm_probe(struct platform_device *pdev)
* Nothing to be done in probe, this is required to get the
* device which is required for ab8500 read and write
*/
- ab8500 = kzalloc(sizeof(*ab8500), GFP_KERNEL);
+ ab8500 = devm_kzalloc(&pdev->dev, sizeof(*ab8500), GFP_KERNEL);
if (ab8500 == NULL) {
dev_err(&pdev->dev, "failed to allocate memory\n");
return -ENOMEM;
@@ -111,10 +112,8 @@ static int ab8500_pwm_probe(struct platform_device *pdev)
ab8500->chip.npwm = 1;
err = pwmchip_add(&ab8500->chip);
- if (err < 0) {
- kfree(ab8500);
+ if (err < 0)
return err;
- }
dev_dbg(&pdev->dev, "pwm probe successful\n");
platform_set_drvdata(pdev, ab8500);
@@ -132,7 +131,6 @@ static int ab8500_pwm_remove(struct platform_device *pdev)
return err;
dev_dbg(&pdev->dev, "pwm driver removed\n");
- kfree(ab8500);
return 0;
}
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index 16cb530..0a7b658 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -358,6 +358,7 @@ static const struct pwm_ops atmel_tcb_pwm_ops = {
.set_polarity = atmel_tcb_pwm_set_polarity,
.enable = atmel_tcb_pwm_enable,
.disable = atmel_tcb_pwm_disable,
+ .owner = THIS_MODULE,
};
static int atmel_tcb_pwm_probe(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c
index 3f5677b..ec28798 100644
--- a/drivers/pwm/pwm-imx.c
+++ b/drivers/pwm/pwm-imx.c
@@ -43,7 +43,6 @@ struct imx_chip {
struct clk *clk_per;
struct clk *clk_ipg;
- int enabled;
void __iomem *mmio_base;
struct pwm_chip chip;
@@ -135,7 +134,7 @@ static int imx_pwm_config_v2(struct pwm_chip *chip,
MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
MX3_PWMCR_DBGEN | MX3_PWMCR_CLKSRC_IPG_HIGH;
- if (imx->enabled)
+ if (test_bit(PWMF_ENABLED, &pwm->flags))
cr |= MX3_PWMCR_EN;
writel(cr, imx->mmio_base + MX3_PWMCR);
@@ -186,8 +185,6 @@ static int imx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
imx->set_enable(chip, true);
- imx->enabled = 1;
-
return 0;
}
@@ -198,7 +195,6 @@ static void imx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
imx->set_enable(chip, false);
clk_disable_unprepare(imx->clk_per);
- imx->enabled = 0;
}
static struct pwm_ops imx_pwm_ops = {
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index b3f0d0d..8272883 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -37,6 +37,7 @@ static int lpc32xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
struct lpc32xx_pwm_chip *lpc32xx = to_lpc32xx_pwm_chip(chip);
unsigned long long c;
int period_cycles, duty_cycles;
+ u32 val;
c = clk_get_rate(lpc32xx->clk) / 256;
c = c * period_ns;
@@ -68,8 +69,10 @@ static int lpc32xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
c = 255;
duty_cycles = 256 - c;
- writel(PWM_ENABLE | PWM_RELOADV(period_cycles) | PWM_DUTY(duty_cycles),
- lpc32xx->base + (pwm->hwpwm << 2));
+ val = readl(lpc32xx->base + (pwm->hwpwm << 2));
+ val &= ~0xFFFF;
+ val |= PWM_RELOADV(period_cycles) | PWM_DUTY(duty_cycles);
+ writel(val, lpc32xx->base + (pwm->hwpwm << 2));
return 0;
}
@@ -77,15 +80,29 @@ static int lpc32xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
static int lpc32xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct lpc32xx_pwm_chip *lpc32xx = to_lpc32xx_pwm_chip(chip);
+ u32 val;
+ int ret;
+
+ ret = clk_enable(lpc32xx->clk);
+ if (ret)
+ return ret;
- return clk_enable(lpc32xx->clk);
+ val = readl(lpc32xx->base + (pwm->hwpwm << 2));
+ val |= PWM_ENABLE;
+ writel(val, lpc32xx->base + (pwm->hwpwm << 2));
+
+ return 0;
}
static void lpc32xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct lpc32xx_pwm_chip *lpc32xx = to_lpc32xx_pwm_chip(chip);
+ u32 val;
+
+ val = readl(lpc32xx->base + (pwm->hwpwm << 2));
+ val &= ~PWM_ENABLE;
+ writel(val, lpc32xx->base + (pwm->hwpwm << 2));
- writel(0, lpc32xx->base + (pwm->hwpwm << 2));
clk_disable(lpc32xx->clk);
}
@@ -145,7 +162,7 @@ static int lpc32xx_pwm_remove(struct platform_device *pdev)
return pwmchip_remove(&lpc32xx->chip);
}
-static struct of_device_id lpc32xx_pwm_dt_ids[] = {
+static const struct of_device_id lpc32xx_pwm_dt_ids[] = {
{ .compatible = "nxp,lpc3220-pwm", },
{ /* sentinel */ }
};
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index a53d309..3febddd 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -38,7 +38,6 @@
struct mxs_pwm_chip {
struct pwm_chip chip;
- struct device *dev;
struct clk *clk;
void __iomem *base;
};
@@ -166,7 +165,6 @@ static int mxs_pwm_probe(struct platform_device *pdev)
return ret;
}
- mxs->dev = &pdev->dev;
platform_set_drvdata(pdev, mxs);
stmp_reset_block(mxs->base);
@@ -181,7 +179,7 @@ static int mxs_pwm_remove(struct platform_device *pdev)
return pwmchip_remove(&mxs->chip);
}
-static struct of_device_id mxs_pwm_dt_ids[] = {
+static const struct of_device_id mxs_pwm_dt_ids[] = {
{ .compatible = "fsl,imx23-pwm", },
{ /* sentinel */ }
};
diff --git a/drivers/pwm/pwm-puv3.c b/drivers/pwm/pwm-puv3.c
index db964e6..d1eb499 100644
--- a/drivers/pwm/pwm-puv3.c
+++ b/drivers/pwm/pwm-puv3.c
@@ -27,7 +27,6 @@ struct puv3_pwm_chip {
struct pwm_chip chip;
void __iomem *base;
struct clk *clk;
- bool enabled;
};
static inline struct puv3_pwm_chip *to_puv3(struct pwm_chip *chip)
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index 20370e6..dee6ab55 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -23,14 +23,13 @@
#include <asm/div64.h>
#define HAS_SECONDARY_PWM 0x10
-#define PWM_ID_BASE(d) ((d) & 0xf)
static const struct platform_device_id pwm_id_table[] = {
/* PWM has_secondary_pwm? */
{ "pxa25x-pwm", 0 },
- { "pxa27x-pwm", 0 | HAS_SECONDARY_PWM },
- { "pxa168-pwm", 1 },
- { "pxa910-pwm", 1 },
+ { "pxa27x-pwm", HAS_SECONDARY_PWM },
+ { "pxa168-pwm", 0 },
+ { "pxa910-pwm", 0 },
{ },
};
MODULE_DEVICE_TABLE(platform, pwm_id_table);
@@ -48,7 +47,6 @@ struct pxa_pwm_chip {
struct device *dev;
struct clk *clk;
- int clk_enabled;
void __iomem *mmio_base;
};
@@ -108,24 +106,15 @@ static int pxa_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
static int pxa_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct pxa_pwm_chip *pc = to_pxa_pwm_chip(chip);
- int rc = 0;
- if (!pc->clk_enabled) {
- rc = clk_prepare_enable(pc->clk);
- if (!rc)
- pc->clk_enabled++;
- }
- return rc;
+ return clk_prepare_enable(pc->clk);
}
static void pxa_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct pxa_pwm_chip *pc = to_pxa_pwm_chip(chip);
- if (pc->clk_enabled) {
- clk_disable_unprepare(pc->clk);
- pc->clk_enabled--;
- }
+ clk_disable_unprepare(pc->clk);
}
static struct pwm_ops pxa_pwm_ops = {
@@ -152,8 +141,6 @@ static int pwm_probe(struct platform_device *pdev)
if (IS_ERR(pwm->clk))
return PTR_ERR(pwm->clk);
- pwm->clk_enabled = 0;
-
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &pxa_pwm_ops;
pwm->chip.base = -1;
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index 5207e6c..a0ece50 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -289,10 +289,10 @@ static int s3c_pwm_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int s3c_pwm_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int s3c_pwm_suspend(struct device *dev)
{
- struct s3c_chip *s3c = platform_get_drvdata(pdev);
+ struct s3c_chip *s3c = dev_get_drvdata(dev);
/* No one preserves these values during suspend so reset them
* Otherwise driver leaves PWM unconfigured if same values
@@ -304,9 +304,9 @@ static int s3c_pwm_suspend(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int s3c_pwm_resume(struct platform_device *pdev)
+static int s3c_pwm_resume(struct device *dev)
{
- struct s3c_chip *s3c = platform_get_drvdata(pdev);
+ struct s3c_chip *s3c = dev_get_drvdata(dev);
unsigned long tcon;
/* Restore inversion */
@@ -316,21 +316,19 @@ static int s3c_pwm_resume(struct platform_device *pdev)
return 0;
}
-
-#else
-#define s3c_pwm_suspend NULL
-#define s3c_pwm_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(s3c_pwm_pm_ops, s3c_pwm_suspend,
+ s3c_pwm_resume);
+
static struct platform_driver s3c_pwm_driver = {
.driver = {
.name = "s3c24xx-pwm",
.owner = THIS_MODULE,
+ .pm = &s3c_pwm_pm_ops,
},
.probe = s3c_pwm_probe,
.remove = s3c_pwm_remove,
- .suspend = s3c_pwm_suspend,
- .resume = s3c_pwm_resume,
};
static int __init pwm_init(void)
diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c
index 69a2d9e..6d99e2c 100644
--- a/drivers/pwm/pwm-spear.c
+++ b/drivers/pwm/pwm-spear.c
@@ -49,13 +49,11 @@
* @mmio_base: base address of pwm chip
* @clk: pointer to clk structure of pwm chip
* @chip: linux pwm chip representation
- * @dev: pointer to device structure of pwm chip
*/
struct spear_pwm_chip {
void __iomem *mmio_base;
struct clk *clk;
struct pwm_chip chip;
- struct device *dev;
};
static inline struct spear_pwm_chip *to_spear_pwm_chip(struct pwm_chip *chip)
@@ -143,7 +141,7 @@ static int spear_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
u32 val;
rc = clk_enable(pc->clk);
- if (!rc)
+ if (rc)
return rc;
val = spear_pwm_readl(pc, pwm->hwpwm, PWMCR);
@@ -200,7 +198,6 @@ static int spear_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pc->clk))
return PTR_ERR(pc->clk);
- pc->dev = &pdev->dev;
platform_set_drvdata(pdev, pc);
pc->chip.dev = &pdev->dev;
@@ -209,12 +206,12 @@ static int spear_pwm_probe(struct platform_device *pdev)
pc->chip.npwm = NUM_PWM;
ret = clk_prepare(pc->clk);
- if (!ret)
+ if (ret)
return ret;
if (of_device_is_compatible(np, "st,spear1340-pwm")) {
ret = clk_enable(pc->clk);
- if (!ret) {
+ if (ret) {
clk_unprepare(pc->clk);
return ret;
}
@@ -251,7 +248,7 @@ static int spear_pwm_remove(struct platform_device *pdev)
return pwmchip_remove(&pc->chip);
}
-static struct of_device_id spear_pwm_of_match[] = {
+static const struct of_device_id spear_pwm_of_match[] = {
{ .compatible = "st,spear320-pwm" },
{ .compatible = "st,spear1340-pwm" },
{ }
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index af3ab48..3d75f4a 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -233,7 +233,7 @@ static int tegra_pwm_remove(struct platform_device *pdev)
return pwmchip_remove(&pc->chip);
}
-static struct of_device_id tegra_pwm_of_match[] = {
+static const struct of_device_id tegra_pwm_of_match[] = {
{ .compatible = "nvidia,tegra20-pwm" },
{ .compatible = "nvidia,tegra30-pwm" },
{ }
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 22e96e2..0d65fb2 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -295,7 +295,7 @@ static int ecap_pwm_remove(struct platform_device *pdev)
return pwmchip_remove(&pc->chip);
}
-void ecap_pwm_save_context(struct ecap_pwm_chip *pc)
+static void ecap_pwm_save_context(struct ecap_pwm_chip *pc)
{
pm_runtime_get_sync(pc->chip.dev);
pc->ctx.ecctl2 = readw(pc->mmio_base + ECCTL2);
@@ -304,13 +304,14 @@ void ecap_pwm_save_context(struct ecap_pwm_chip *pc)
pm_runtime_put_sync(pc->chip.dev);
}
-void ecap_pwm_restore_context(struct ecap_pwm_chip *pc)
+static void ecap_pwm_restore_context(struct ecap_pwm_chip *pc)
{
writel(pc->ctx.cap3, pc->mmio_base + CAP3);
writel(pc->ctx.cap4, pc->mmio_base + CAP4);
writew(pc->ctx.ecctl2, pc->mmio_base + ECCTL2);
}
+#ifdef CONFIG_PM_SLEEP
static int ecap_pwm_suspend(struct device *dev)
{
struct ecap_pwm_chip *pc = dev_get_drvdata(dev);
@@ -337,6 +338,7 @@ static int ecap_pwm_resume(struct device *dev)
ecap_pwm_restore_context(pc);
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(ecap_pwm_pm_ops, ecap_pwm_suspend, ecap_pwm_resume);
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 8b4c86f..6a21759 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -533,7 +533,7 @@ static int ehrpwm_pwm_remove(struct platform_device *pdev)
return pwmchip_remove(&pc->chip);
}
-void ehrpwm_pwm_save_context(struct ehrpwm_pwm_chip *pc)
+static void ehrpwm_pwm_save_context(struct ehrpwm_pwm_chip *pc)
{
pm_runtime_get_sync(pc->chip.dev);
pc->ctx.tbctl = ehrpwm_read(pc->mmio_base, TBCTL);
@@ -547,7 +547,7 @@ void ehrpwm_pwm_save_context(struct ehrpwm_pwm_chip *pc)
pm_runtime_put_sync(pc->chip.dev);
}
-void ehrpwm_pwm_restore_context(struct ehrpwm_pwm_chip *pc)
+static void ehrpwm_pwm_restore_context(struct ehrpwm_pwm_chip *pc)
{
ehrpwm_write(pc->mmio_base, TBPRD, pc->ctx.tbprd);
ehrpwm_write(pc->mmio_base, CMPA, pc->ctx.cmpa);
@@ -559,6 +559,7 @@ void ehrpwm_pwm_restore_context(struct ehrpwm_pwm_chip *pc)
ehrpwm_write(pc->mmio_base, TBCTL, pc->ctx.tbctl);
}
+#ifdef CONFIG_PM_SLEEP
static int ehrpwm_pwm_suspend(struct device *dev)
{
struct ehrpwm_pwm_chip *pc = dev_get_drvdata(dev);
@@ -594,6 +595,7 @@ static int ehrpwm_pwm_resume(struct device *dev)
ehrpwm_pwm_restore_context(pc);
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(ehrpwm_pwm_pm_ops, ehrpwm_pwm_suspend,
ehrpwm_pwm_resume);
diff --git a/drivers/pwm/pwm-tipwmss.c b/drivers/pwm/pwm-tipwmss.c
index 17cbc59..c9c3d3a 100644
--- a/drivers/pwm/pwm-tipwmss.c
+++ b/drivers/pwm/pwm-tipwmss.c
@@ -101,6 +101,7 @@ static int pwmss_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int pwmss_suspend(struct device *dev)
{
struct pwmss_info *info = dev_get_drvdata(dev);
@@ -118,6 +119,7 @@ static int pwmss_resume(struct device *dev)
writew(info->pwmss_clkconfig, info->mmio_base + PWMSS_CLKCONFIG);
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(pwmss_pm_ops, pwmss_suspend, pwmss_resume);
diff --git a/drivers/pwm/pwm-tipwmss.h b/drivers/pwm/pwm-tipwmss.h
index 11f76a1..10ad804 100644
--- a/drivers/pwm/pwm-tipwmss.h
+++ b/drivers/pwm/pwm-tipwmss.h
@@ -18,7 +18,6 @@
#ifndef __TIPWMSS_H
#define __TIPWMSS_H
-#ifdef CONFIG_PWM_TIPWMSS
/* PWM subsystem clock gating */
#define PWMSS_ECAPCLK_EN BIT(0)
#define PWMSS_ECAPCLK_STOP_REQ BIT(1)
@@ -28,6 +27,7 @@
#define PWMSS_ECAPCLK_EN_ACK BIT(0)
#define PWMSS_EPWMCLK_EN_ACK BIT(8)
+#ifdef CONFIG_PWM_TIPWMSS
extern u16 pwmss_submodule_state_change(struct device *dev, int set);
#else
static inline u16 pwmss_submodule_state_change(struct device *dev, int set)
diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c
index 83e25d4..29d1bba 100644
--- a/drivers/pwm/pwm-twl-led.c
+++ b/drivers/pwm/pwm-twl-led.c
@@ -271,6 +271,7 @@ static const struct pwm_ops twl4030_pwmled_ops = {
.enable = twl4030_pwmled_enable,
.disable = twl4030_pwmled_disable,
.config = twl4030_pwmled_config,
+ .owner = THIS_MODULE,
};
static const struct pwm_ops twl6030_pwmled_ops = {
@@ -279,6 +280,7 @@ static const struct pwm_ops twl6030_pwmled_ops = {
.config = twl6030_pwmled_config,
.request = twl6030_pwmled_request,
.free = twl6030_pwmled_free,
+ .owner = THIS_MODULE,
};
static int twl_pwmled_probe(struct platform_device *pdev)
@@ -321,7 +323,7 @@ static int twl_pwmled_remove(struct platform_device *pdev)
}
#ifdef CONFIG_OF
-static struct of_device_id twl_pwmled_of_match[] = {
+static const struct of_device_id twl_pwmled_of_match[] = {
{ .compatible = "ti,twl4030-pwmled" },
{ .compatible = "ti,twl6030-pwmled" },
{ },
diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c
index bf3fda2..eef9105 100644
--- a/drivers/pwm/pwm-twl.c
+++ b/drivers/pwm/pwm-twl.c
@@ -248,7 +248,7 @@ static int twl6030_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
twl->twl6030_toggle3 = val;
out:
mutex_unlock(&twl->mutex);
- return 0;
+ return ret;
}
static void twl6030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -287,12 +287,14 @@ static const struct pwm_ops twl4030_pwm_ops = {
.disable = twl4030_pwm_disable,
.request = twl4030_pwm_request,
.free = twl4030_pwm_free,
+ .owner = THIS_MODULE,
};
static const struct pwm_ops twl6030_pwm_ops = {
.config = twl_pwm_config,
.enable = twl6030_pwm_enable,
.disable = twl6030_pwm_disable,
+ .owner = THIS_MODULE,
};
static int twl_pwm_probe(struct platform_device *pdev)
@@ -333,7 +335,7 @@ static int twl_pwm_remove(struct platform_device *pdev)
}
#ifdef CONFIG_OF
-static struct of_device_id twl_pwm_of_match[] = {
+static const struct of_device_id twl_pwm_of_match[] = {
{ .compatible = "ti,twl4030-pwm" },
{ .compatible = "ti,twl6030-pwm" },
{ },
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index c6d77e2..d4d377c 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -4,13 +4,15 @@ menu "Remoteproc drivers"
config REMOTEPROC
tristate
depends on HAS_DMA
+ select CRC32
select FW_LOADER
select VIRTIO
+ select VIRTUALIZATION
config OMAP_REMOTEPROC
tristate "OMAP remoteproc support"
depends on HAS_DMA
- depends on ARCH_OMAP4
+ depends on ARCH_OMAP4 || SOC_OMAP5
depends on OMAP_IOMMU
depends on OMAP_MBOX_FWK
select REMOTEPROC
@@ -38,4 +40,27 @@ config STE_MODEM_RPROC
This can be either built-in or a loadable module.
If unsure say N.
+config DA8XX_REMOTEPROC
+ tristate "DA8xx/OMAP-L13x remoteproc support"
+ depends on ARCH_DAVINCI_DA8XX
+ select CMA
+ select REMOTEPROC
+ select RPMSG
+ help
+ Say y here to support DA8xx/OMAP-L13x remote processors via the
+ remote processor framework.
+
+ You want to say y here in order to enable AMP
+ use-cases to run on your platform (multimedia codecs are
+ offloaded to remote DSP processors using this framework).
+
+	  The name of the firmware file loaded on the DSP is controlled by
+	  the module parameter da8xx_fw_name=<filename>; if it is not
+	  specified, it defaults to "rproc-dsp-fw". The file must reside
+	  in the /lib/firmware directory.
+
+ It's safe to say n here if you're not interested in multimedia
+ offloading.
+
endmenu
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 391b651..ac2ff75 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -9,3 +9,4 @@ remoteproc-y += remoteproc_virtio.o
remoteproc-y += remoteproc_elf_loader.o
obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o
obj-$(CONFIG_STE_MODEM_RPROC) += ste_modem_rproc.o
+obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o
diff --git a/drivers/remoteproc/da8xx_remoteproc.c b/drivers/remoteproc/da8xx_remoteproc.c
new file mode 100644
index 0000000..9b2e60a
--- /dev/null
+++ b/drivers/remoteproc/da8xx_remoteproc.c
@@ -0,0 +1,324 @@
+/*
+ * Remote processor machine-specific module for DA8XX
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+
+#include <mach/clock.h> /* for davinci_clk_reset_assert/deassert() */
+
+#include "remoteproc_internal.h"
+
+static char *da8xx_fw_name;
+module_param(da8xx_fw_name, charp, S_IRUGO);
+MODULE_PARM_DESC(da8xx_fw_name,
+ "\n\t\tName of DSP firmware file in /lib/firmware"
+ " (if not specified defaults to 'rproc-dsp-fw')");
+
+/*
+ * OMAP-L138 Technical References:
+ * http://www.ti.com/product/omap-l138
+ */
+#define SYSCFG_CHIPSIG0 BIT(0)
+#define SYSCFG_CHIPSIG1 BIT(1)
+#define SYSCFG_CHIPSIG2 BIT(2)
+#define SYSCFG_CHIPSIG3 BIT(3)
+#define SYSCFG_CHIPSIG4 BIT(4)
+
+/**
+ * struct da8xx_rproc - da8xx remote processor instance state
+ * @rproc: rproc handle
+ * @dsp_clk: placeholder for platform's DSP clk
+ * @ack_fxn: chip-specific ack function for ack'ing irq
+ * @irq_data: ack_fxn function parameter
+ * @chipsig: virt ptr to DSP interrupt registers (CHIPSIG & CHIPSIG_CLR)
+ * @bootreg: virt ptr to DSP boot address register (HOST1CFG)
+ * @irq: irq # used by this instance
+ */
+struct da8xx_rproc {
+ struct rproc *rproc;
+ struct clk *dsp_clk;
+ void (*ack_fxn)(struct irq_data *data);
+ struct irq_data *irq_data;
+ void __iomem *chipsig;
+ void __iomem *bootreg;
+ int irq;
+};
+
+/**
+ * handle_event() - inbound virtqueue message handler (threaded half)
+ *
+ * This function runs as the threaded part of the IRQ handler registered
+ * below; it is woken whenever da8xx_rproc_callback() returns IRQ_WAKE_THREAD.
+ */
+static irqreturn_t handle_event(int irq, void *p)
+{
+ struct rproc *rproc = (struct rproc *)p;
+
+ /* Process incoming buffers on all our vrings */
+ rproc_vq_interrupt(rproc, 0);
+ rproc_vq_interrupt(rproc, 1);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * da8xx_rproc_callback() - inbound virtqueue message handler
+ *
+ * This handler is invoked directly by the kernel whenever the remote
+ * core (DSP) has modified the state of a virtqueue. There is no
+ * "payload" message indicating the virtqueue index as is the case with
+ * mailbox-based implementations on OMAP4. As such, this handler "polls"
+ * each known virtqueue index for every invocation.
+ */
+static irqreturn_t da8xx_rproc_callback(int irq, void *p)
+{
+ struct rproc *rproc = (struct rproc *)p;
+ struct da8xx_rproc *drproc = (struct da8xx_rproc *)rproc->priv;
+ u32 chipsig;
+
+ chipsig = readl(drproc->chipsig);
+ if (chipsig & SYSCFG_CHIPSIG0) {
+ /* Clear interrupt level source */
+ writel(SYSCFG_CHIPSIG0, drproc->chipsig + 4);
+
+ /*
+ * ACK intr to AINTC.
+ *
+ * It has already been ack'ed by the kernel before calling
+ * this function, but since the ARM<->DSP interrupts in the
+ * CHIPSIG register are "level" instead of "pulse" variety,
+ * we need to ack it after taking down the level else we'll
+ * be called again immediately after returning.
+ */
+ drproc->ack_fxn(drproc->irq_data);
+
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int da8xx_rproc_start(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev.parent;
+ struct da8xx_rproc *drproc = (struct da8xx_rproc *)rproc->priv;
+ struct clk *dsp_clk = drproc->dsp_clk;
+
+ /* hw requires the start (boot) address to be on a 1KB boundary */
+ if (rproc->bootaddr & 0x3ff) {
+ dev_err(dev, "invalid boot address: must be aligned to 1KB\n");
+
+ return -EINVAL;
+ }
+
+ writel(rproc->bootaddr, drproc->bootreg);
+
+ clk_enable(dsp_clk);
+ davinci_clk_reset_deassert(dsp_clk);
+
+ return 0;
+}
+
+static int da8xx_rproc_stop(struct rproc *rproc)
+{
+ struct da8xx_rproc *drproc = rproc->priv;
+
+ clk_disable(drproc->dsp_clk);
+
+ return 0;
+}
+
+/* kick a virtqueue */
+static void da8xx_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct da8xx_rproc *drproc = (struct da8xx_rproc *)rproc->priv;
+
+ /* Interrupt the remote processor */
+ writel(SYSCFG_CHIPSIG2, drproc->chipsig);
+}
+
+static struct rproc_ops da8xx_rproc_ops = {
+ .start = da8xx_rproc_start,
+ .stop = da8xx_rproc_stop,
+ .kick = da8xx_rproc_kick,
+};
+
+static int reset_assert(struct device *dev)
+{
+ struct clk *dsp_clk;
+
+ dsp_clk = clk_get(dev, NULL);
+ if (IS_ERR(dsp_clk)) {
+ dev_err(dev, "clk_get error: %ld\n", PTR_ERR(dsp_clk));
+ return PTR_RET(dsp_clk);
+ }
+
+ davinci_clk_reset_assert(dsp_clk);
+ clk_put(dsp_clk);
+
+ return 0;
+}
+
+static int da8xx_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct da8xx_rproc *drproc;
+ struct rproc *rproc;
+ struct irq_data *irq_data;
+ struct resource *bootreg_res;
+ struct resource *chipsig_res;
+ struct clk *dsp_clk;
+ void __iomem *chipsig;
+ void __iomem *bootreg;
+ int irq;
+ int ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "platform_get_irq(pdev, 0) error: %d\n", irq);
+ return irq;
+ }
+
+ irq_data = irq_get_irq_data(irq);
+ if (!irq_data) {
+ dev_err(dev, "irq_get_irq_data(%d): NULL\n", irq);
+ return -EINVAL;
+ }
+
+ bootreg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!bootreg_res) {
+ dev_err(dev,
+ "platform_get_resource(IORESOURCE_MEM, 0): NULL\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ chipsig_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!chipsig_res) {
+ dev_err(dev,
+ "platform_get_resource(IORESOURCE_MEM, 1): NULL\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ bootreg = devm_ioremap_resource(dev, bootreg_res);
+ if (IS_ERR(bootreg))
+ return PTR_ERR(bootreg);
+
+ chipsig = devm_ioremap_resource(dev, chipsig_res);
+ if (IS_ERR(chipsig))
+ return PTR_ERR(chipsig);
+
+ dsp_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(dsp_clk)) {
+ dev_err(dev, "clk_get error: %ld\n", PTR_ERR(dsp_clk));
+
+ return PTR_ERR(dsp_clk);
+ }
+
+ rproc = rproc_alloc(dev, "dsp", &da8xx_rproc_ops, da8xx_fw_name,
+ sizeof(*drproc));
+ if (!rproc)
+ return -ENOMEM;
+
+ drproc = rproc->priv;
+ drproc->rproc = rproc;
+
+ platform_set_drvdata(pdev, rproc);
+
+ /* everything the ISR needs is now set up, so hook it up */
+ ret = devm_request_threaded_irq(dev, irq, da8xx_rproc_callback,
+ handle_event, 0, "da8xx-remoteproc",
+ rproc);
+ if (ret) {
+ dev_err(dev, "devm_request_threaded_irq error: %d\n", ret);
+ goto free_rproc;
+ }
+
+ /*
+ * rproc_add() can end up enabling the DSP's clk with the DSP
+ * *not* in reset, but da8xx_rproc_start() needs the DSP to be
+ * held in reset at the time it is called.
+ */
+ ret = reset_assert(dev);
+ if (ret)
+ goto free_rproc;
+
+ drproc->chipsig = chipsig;
+ drproc->bootreg = bootreg;
+ drproc->ack_fxn = irq_data->chip->irq_ack;
+ drproc->irq_data = irq_data;
+ drproc->irq = irq;
+ drproc->dsp_clk = dsp_clk;
+
+ ret = rproc_add(rproc);
+ if (ret) {
+ dev_err(dev, "rproc_add failed: %d\n", ret);
+ goto free_rproc;
+ }
+
+ return 0;
+
+free_rproc:
+ rproc_put(rproc);
+
+ return ret;
+}
+
+static int da8xx_rproc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ struct da8xx_rproc *drproc = (struct da8xx_rproc *)rproc->priv;
+
+ /*
+ * It's important to place the DSP in reset before going away,
+ * since a subsequent insmod of this module may enable the DSP's
+ * clock before its program/boot-address has been loaded and
+ * before this module's probe has had a chance to reset the DSP.
+ * Without the reset, the DSP can lock up permanently when it
+ * begins executing garbage.
+ */
+ reset_assert(dev);
+
+ /*
+ * The devm subsystem might end up releasing things before
+ * freeing the irq, thus allowing an interrupt to sneak in while
+ * the device is being removed. This should prevent that.
+ */
+ disable_irq(drproc->irq);
+
+ devm_clk_put(dev, drproc->dsp_clk);
+
+ rproc_del(rproc);
+ rproc_put(rproc);
+
+ return 0;
+}
+
+static struct platform_driver da8xx_rproc_driver = {
+ .probe = da8xx_rproc_probe,
+ .remove = da8xx_rproc_remove,
+ .driver = {
+ .name = "davinci-rproc",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(da8xx_rproc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DA8XX Remote Processor control driver");
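For context, a minimal board-file sketch of the platform device this probe expects: mem resource 0 is the DSP boot-address register (HOST1CFG), mem resource 1 the CHIPSIG/CHIPSIG_CLR pair, plus one interrupt. The addresses, the IRQ number and every board_* name below are placeholders for illustration, not values taken from the patch.

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

#define BOARD_DSP_BOOTREG_BASE	0x0	/* placeholder: HOST1CFG address */
#define BOARD_DSP_CHIPSIG_BASE	0x0	/* placeholder: CHIPSIG address */
#define BOARD_DSP_IRQ		0	/* placeholder IRQ number */

static struct resource board_dsp_resources[] = {
	{
		/* mem 0: DSP boot-address register, mapped as bootreg above */
		.start	= BOARD_DSP_BOOTREG_BASE,
		.end	= BOARD_DSP_BOOTREG_BASE + 0x3,
		.flags	= IORESOURCE_MEM,
	}, {
		/* mem 1: CHIPSIG and CHIPSIG_CLR, mapped as chipsig above */
		.start	= BOARD_DSP_CHIPSIG_BASE,
		.end	= BOARD_DSP_CHIPSIG_BASE + 0x7,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= BOARD_DSP_IRQ,
		.end	= BOARD_DSP_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device board_dsp_device = {
	.name		= "davinci-rproc",	/* matches the driver name above */
	.id		= 0,
	.resource	= board_dsp_resources,
	.num_resources	= ARRAY_SIZE(board_dsp_resources),
};

static int __init board_dsp_init(void)
{
	return platform_device_register(&board_dsp_device);
}
device_initcall(board_dsp_init);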
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 814af5a..022dc63 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -37,6 +37,7 @@
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/elf.h>
+#include <linux/crc32.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
#include <asm/byteorder.h>
@@ -45,7 +46,8 @@
typedef int (*rproc_handle_resources_t)(struct rproc *rproc,
struct resource_table *table, int len);
-typedef int (*rproc_handle_resource_t)(struct rproc *rproc, void *, int avail);
+typedef int (*rproc_handle_resource_t)(struct rproc *rproc,
+ void *, int offset, int avail);
/* Unique indices for remoteproc devices */
static DEFINE_IDA(rproc_dev_index);
@@ -192,6 +194,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
struct rproc *rproc = rvdev->rproc;
struct device *dev = &rproc->dev;
struct rproc_vring *rvring = &rvdev->vring[i];
+ struct fw_rsc_vdev *rsc;
dma_addr_t dma;
void *va;
int ret, size, notifyid;
@@ -202,7 +205,6 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
/*
* Allocate non-cacheable memory for the vring. In the future
* this call will also configure the IOMMU for us
- * TODO: let the rproc know the da of this vring
*/
va = dma_alloc_coherent(dev->parent, size, &dma, GFP_KERNEL);
if (!va) {
@@ -213,7 +215,6 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
/*
* Assign an rproc-wide unique index for this vring
* TODO: assign a notifyid for rvdev updates as well
- * TODO: let the rproc know the notifyid of this vring
* TODO: support predefined notifyids (via resource table)
*/
ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL);
@@ -224,9 +225,6 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
}
notifyid = ret;
- /* Store largest notifyid */
- rproc->max_notifyid = max(rproc->max_notifyid, notifyid);
-
dev_dbg(dev, "vring%d: va %p dma %llx size %x idr %d\n", i, va,
(unsigned long long)dma, size, notifyid);
@@ -234,6 +232,15 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
rvring->dma = dma;
rvring->notifyid = notifyid;
+ /*
+ * Let the rproc know the notifyid and da of this vring.
+ * Not all platforms use dma_alloc_coherent to automatically
+ * set up the iommu. In this case the device address (da) will
+ * hold the physical address and not the device address.
+ */
+ rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
+ rsc->vring[i].da = dma;
+ rsc->vring[i].notifyid = notifyid;
return 0;
}
@@ -268,25 +275,20 @@ rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
return 0;
}
-static int rproc_max_notifyid(int id, void *p, void *data)
-{
- int *maxid = data;
- *maxid = max(*maxid, id);
- return 0;
-}
-
void rproc_free_vring(struct rproc_vring *rvring)
{
int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
struct rproc *rproc = rvring->rvdev->rproc;
- int maxid = 0;
+ int idx = rvring->rvdev->vring - rvring;
+ struct fw_rsc_vdev *rsc;
dma_free_coherent(rproc->dev.parent, size, rvring->va, rvring->dma);
idr_remove(&rproc->notifyids, rvring->notifyid);
- /* Find the largest remaining notifyid */
- idr_for_each(&rproc->notifyids, rproc_max_notifyid, &maxid);
- rproc->max_notifyid = maxid;
+ /* reset resource entry info */
+ rsc = (void *)rproc->table_ptr + rvring->rvdev->rsc_offset;
+ rsc->vring[idx].da = 0;
+ rsc->vring[idx].notifyid = -1;
}
/**
@@ -317,7 +319,7 @@ void rproc_free_vring(struct rproc_vring *rvring)
* Returns 0 on success, or an appropriate error code otherwise
*/
static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
- int avail)
+ int offset, int avail)
{
struct device *dev = &rproc->dev;
struct rproc_vdev *rvdev;
@@ -358,8 +360,8 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
goto free_rvdev;
}
- /* remember the device features */
- rvdev->dfeatures = rsc->dfeatures;
+ /* remember the resource offset */
+ rvdev->rsc_offset = offset;
list_add_tail(&rvdev->node, &rproc->rvdevs);
@@ -394,7 +396,7 @@ free_rvdev:
* Returns 0 on success, or an appropriate error code otherwise
*/
static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
- int avail)
+ int offset, int avail)
{
struct rproc_mem_entry *trace;
struct device *dev = &rproc->dev;
@@ -476,7 +478,7 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
* are outside those ranges.
*/
static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
- int avail)
+ int offset, int avail)
{
struct rproc_mem_entry *mapping;
struct device *dev = &rproc->dev;
@@ -549,7 +551,9 @@ out:
* pressure is important; it may have a substantial impact on performance.
*/
static int rproc_handle_carveout(struct rproc *rproc,
- struct fw_rsc_carveout *rsc, int avail)
+ struct fw_rsc_carveout *rsc,
+ int offset, int avail)
{
struct rproc_mem_entry *carveout, *mapping;
struct device *dev = &rproc->dev;
@@ -671,28 +675,45 @@ free_carv:
return ret;
}
+static int rproc_count_vrings(struct rproc *rproc, struct fw_rsc_vdev *rsc,
+ int offset, int avail)
+{
+ /* Summarize the number of notification IDs */
+ rproc->max_notifyid += rsc->num_of_vrings;
+
+ return 0;
+}
+
/*
* A lookup table for resource handlers. The indices are defined in
* enum fw_resource_type.
*/
-static rproc_handle_resource_t rproc_handle_rsc[] = {
+static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = {
[RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout,
[RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem,
[RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace,
[RSC_VDEV] = NULL, /* VDEVs were handled upon registration */
};
+static rproc_handle_resource_t rproc_vdev_handler[RSC_LAST] = {
+ [RSC_VDEV] = (rproc_handle_resource_t)rproc_handle_vdev,
+};
+
+static rproc_handle_resource_t rproc_count_vrings_handler[RSC_LAST] = {
+ [RSC_VDEV] = (rproc_handle_resource_t)rproc_count_vrings,
+};
+
/* handle firmware resource entries before booting the remote processor */
-static int
-rproc_handle_boot_rsc(struct rproc *rproc, struct resource_table *table, int len)
+static int rproc_handle_resources(struct rproc *rproc, int len,
+ rproc_handle_resource_t handlers[RSC_LAST])
{
struct device *dev = &rproc->dev;
rproc_handle_resource_t handler;
int ret = 0, i;
- for (i = 0; i < table->num; i++) {
- int offset = table->offset[i];
- struct fw_rsc_hdr *hdr = (void *)table + offset;
+ for (i = 0; i < rproc->table_ptr->num; i++) {
+ int offset = rproc->table_ptr->offset[i];
+ struct fw_rsc_hdr *hdr = (void *)rproc->table_ptr + offset;
int avail = len - offset - sizeof(*hdr);
void *rsc = (void *)hdr + sizeof(*hdr);
@@ -709,45 +730,11 @@ rproc_handle_boot_rsc(struct rproc *rproc, struct resource_table *table, int len
continue;
}
- handler = rproc_handle_rsc[hdr->type];
+ handler = handlers[hdr->type];
if (!handler)
continue;
- ret = handler(rproc, rsc, avail);
- if (ret)
- break;
- }
-
- return ret;
-}
-
-/* handle firmware resource entries while registering the remote processor */
-static int
-rproc_handle_virtio_rsc(struct rproc *rproc, struct resource_table *table, int len)
-{
- struct device *dev = &rproc->dev;
- int ret = 0, i;
-
- for (i = 0; i < table->num; i++) {
- int offset = table->offset[i];
- struct fw_rsc_hdr *hdr = (void *)table + offset;
- int avail = len - offset - sizeof(*hdr);
- struct fw_rsc_vdev *vrsc;
-
- /* make sure table isn't truncated */
- if (avail < 0) {
- dev_err(dev, "rsc table is truncated\n");
- return -EINVAL;
- }
-
- dev_dbg(dev, "%s: rsc type %d\n", __func__, hdr->type);
-
- if (hdr->type != RSC_VDEV)
- continue;
-
- vrsc = (struct fw_rsc_vdev *)hdr->data;
-
- ret = rproc_handle_vdev(rproc, vrsc, avail);
+ ret = handler(rproc, rsc, offset + sizeof(*hdr), avail);
if (ret)
break;
}
@@ -805,9 +792,12 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
{
struct device *dev = &rproc->dev;
const char *name = rproc->firmware;
- struct resource_table *table;
+ struct resource_table *table, *loaded_table;
int ret, tablesz;
+ if (!rproc->table_ptr)
+ return -ENOMEM;
+
ret = rproc_fw_sanity_check(rproc, fw);
if (ret)
return ret;
@@ -833,8 +823,15 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
goto clean_up;
}
+ /* Verify that resource table in loaded fw is unchanged */
+ if (rproc->table_csum != crc32(0, table, tablesz)) {
+ dev_err(dev, "resource checksum failed, fw changed?\n");
+ ret = -EINVAL;
+ goto clean_up;
+ }
+
/* handle fw resources which are required to boot rproc */
- ret = rproc_handle_boot_rsc(rproc, table, tablesz);
+ ret = rproc_handle_resources(rproc, tablesz, rproc_loading_handlers);
if (ret) {
dev_err(dev, "Failed to process resources: %d\n", ret);
goto clean_up;
@@ -847,6 +844,19 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
goto clean_up;
}
+ /*
+ * The starting device has been given the rproc->cached_table as the
+ * resource table. The address of the vring along with the other
+ * allocated resources (carveouts etc) is stored in cached_table.
+ * In order to pass this information to the remote device we must
+ * copy this information to device memory.
+ */
+ loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
+ if (!loaded_table) {
+ ret = -EINVAL;
+ goto clean_up;
+ }
+
+ memcpy(loaded_table, rproc->cached_table, tablesz);
+
/* power up the remote processor */
ret = rproc->ops->start(rproc);
if (ret) {
@@ -854,6 +864,13 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
goto clean_up;
}
+ /*
+ * Update table_ptr so that all subsequent vring allocations and
+ * virtio field manipulations update the actual loaded resource table
+ * in device memory.
+ */
+ rproc->table_ptr = loaded_table;
+
rproc->state = RPROC_RUNNING;
dev_info(dev, "remote processor %s is now up\n", rproc->name);
@@ -888,11 +905,30 @@ static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
if (!table)
goto out;
- /* look for virtio devices and register them */
- ret = rproc_handle_virtio_rsc(rproc, table, tablesz);
+ rproc->table_csum = crc32(0, table, tablesz);
+
+ /*
+ * Create a copy of the resource table. When a virtio device starts
+ * and calls vring_new_virtqueue() the address of the allocated vring
+ * will be stored in the cached_table. Before the device is started,
+ * cached_table will be copied into device memory.
+ */
+ rproc->cached_table = kmalloc(tablesz, GFP_KERNEL);
+ if (!rproc->cached_table)
+ goto out;
+
+ memcpy(rproc->cached_table, table, tablesz);
+ rproc->table_ptr = rproc->cached_table;
+
+ /* count the number of notify-ids */
+ rproc->max_notifyid = -1;
+ ret = rproc_handle_resources(rproc, tablesz, rproc_count_vrings_handler);
if (ret)
goto out;
+ /* look for virtio devices and register them */
+ ret = rproc_handle_resources(rproc, tablesz, rproc_vdev_handler);
+
out:
release_firmware(fw);
/* allow rproc_del() contexts, if any, to proceed */
@@ -950,6 +986,9 @@ int rproc_trigger_recovery(struct rproc *rproc)
/* wait until there is no more rproc users */
wait_for_completion(&rproc->crash_comp);
+ /* Free the copy of the resource table */
+ kfree(rproc->cached_table);
+
return rproc_add_virtio_devices(rproc);
}
@@ -1105,6 +1144,9 @@ void rproc_shutdown(struct rproc *rproc)
rproc_disable_iommu(rproc);
+ /* Give the next start a clean resource table */
+ rproc->table_ptr = rproc->cached_table;
+
/* if in crash state, unlock crash handler */
if (rproc->state == RPROC_CRASHED)
complete_all(&rproc->crash_comp);
@@ -1196,11 +1238,11 @@ static struct device_type rproc_type = {
* @dev: the underlying device
* @name: name of this remote processor
* @ops: platform-specific handlers (mainly start/stop)
- * @firmware: name of firmware file to load
+ * @firmware: name of firmware file to load, can be NULL
* @len: length of private data needed by the rproc driver (in bytes)
*
* Allocates a new remote processor handle, but does not register
- * it yet.
+ * it yet. If @firmware is NULL, a default name is used.
*
* This function should be used by rproc implementations during initialization
* of the remote processor.
@@ -1219,19 +1261,39 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
const char *firmware, int len)
{
struct rproc *rproc;
+ char *p, *template = "rproc-%s-fw";
+ int name_len = 0;
if (!dev || !name || !ops)
return NULL;
- rproc = kzalloc(sizeof(struct rproc) + len, GFP_KERNEL);
+ if (!firmware)
+ /*
+ * Make room for default firmware name (minus %s plus '\0').
+ * If the caller didn't pass in a firmware name then
+ * construct a default name. We're already glomming 'len'
+ * bytes onto the end of the struct rproc allocation, so do
+ * a few more for the default firmware name (but only if
+ * the caller doesn't pass one).
+ */
+ name_len = strlen(name) + strlen(template) - 2 + 1;
+
+ rproc = kzalloc(sizeof(struct rproc) + len + name_len, GFP_KERNEL);
if (!rproc) {
dev_err(dev, "%s: kzalloc failed\n", __func__);
return NULL;
}
+ if (!firmware) {
+ p = (char *)rproc + sizeof(struct rproc) + len;
+ snprintf(p, name_len, template, name);
+ } else {
+ p = (char *)firmware;
+ }
+
+ rproc->firmware = p;
rproc->name = name;
rproc->ops = ops;
- rproc->firmware = firmware;
rproc->priv = &rproc[1];
device_initialize(&rproc->dev);
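A hedged caller sketch for the NULL-firmware path handled above: "my_rproc_ops" and "my_register_dsp" are hypothetical names; with a NULL firmware argument, rproc_alloc() builds the name from the "rproc-%s-fw" template, here "rproc-dsp-fw".

#include <linux/remoteproc.h>

/* sketch only: ops is a driver-provided rproc_ops, not part of the patch */
static int my_register_dsp(struct device *dev, struct rproc_ops *ops)
{
	struct rproc *rproc;
	int ret;

	/* NULL firmware: rproc_alloc() generates "rproc-dsp-fw" internally */
	rproc = rproc_alloc(dev, "dsp", ops, NULL, 0);
	if (!rproc)
		return -ENOMEM;

	ret = rproc_add(rproc);
	if (ret)
		rproc_put(rproc);

	return ret;
}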
@@ -1315,6 +1377,9 @@ int rproc_del(struct rproc *rproc)
list_for_each_entry_safe(rvdev, tmp, &rproc->rvdevs, node)
rproc_remove_virtio_dev(rvdev);
+ /* Free the copy of the resource table */
+ kfree(rproc->cached_table);
+
device_del(&rproc->dev);
return 0;
diff --git a/drivers/remoteproc/remoteproc_elf_loader.c b/drivers/remoteproc/remoteproc_elf_loader.c
index 0d36f94..ce283a5 100644
--- a/drivers/remoteproc/remoteproc_elf_loader.c
+++ b/drivers/remoteproc/remoteproc_elf_loader.c
@@ -208,41 +208,22 @@ rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
return ret;
}
-/**
- * rproc_elf_find_rsc_table() - find the resource table
- * @rproc: the rproc handle
- * @fw: the ELF firmware image
- * @tablesz: place holder for providing back the table size
- *
- * This function finds the resource table inside the remote processor's
- * firmware. It is used both upon the registration of @rproc (in order
- * to look for and register the supported virtio devices), and when the
- * @rproc is booted.
- *
- * Returns the pointer to the resource table if it is found, and writes its
- * size into @tablesz. If a valid table isn't found, NULL is returned
- * (and @tablesz isn't set).
- */
-static struct resource_table *
-rproc_elf_find_rsc_table(struct rproc *rproc, const struct firmware *fw,
- int *tablesz)
+static struct elf32_shdr *
+find_table(struct device *dev, struct elf32_hdr *ehdr, size_t fw_size)
{
- struct elf32_hdr *ehdr;
struct elf32_shdr *shdr;
+ int i;
const char *name_table;
- struct device *dev = &rproc->dev;
struct resource_table *table = NULL;
- int i;
- const u8 *elf_data = fw->data;
+ const u8 *elf_data = (void *)ehdr;
- ehdr = (struct elf32_hdr *)elf_data;
+ /* look for the resource table and handle it */
shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
name_table = elf_data + shdr[ehdr->e_shstrndx].sh_offset;
- /* look for the resource table and handle it */
for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
- int size = shdr->sh_size;
- int offset = shdr->sh_offset;
+ u32 size = shdr->sh_size;
+ u32 offset = shdr->sh_offset;
if (strcmp(name_table + shdr->sh_name, ".resource_table"))
continue;
@@ -250,7 +231,7 @@ rproc_elf_find_rsc_table(struct rproc *rproc, const struct firmware *fw,
table = (struct resource_table *)(elf_data + offset);
/* make sure we have the entire table */
- if (offset + size > fw->size) {
+ if (offset + size > fw_size || offset + size < size) {
dev_err(dev, "resource table truncated\n");
return NULL;
}
@@ -280,16 +261,77 @@ rproc_elf_find_rsc_table(struct rproc *rproc, const struct firmware *fw,
return NULL;
}
- *tablesz = shdr->sh_size;
- break;
+ return shdr;
}
+ return NULL;
+}
+
+/**
+ * rproc_elf_find_rsc_table() - find the resource table
+ * @rproc: the rproc handle
+ * @fw: the ELF firmware image
+ * @tablesz: place holder for providing back the table size
+ *
+ * This function finds the resource table inside the remote processor's
+ * firmware. It is used both upon the registration of @rproc (in order
+ * to look for and register the supported virtio devices), and when the
+ * @rproc is booted.
+ *
+ * Returns the pointer to the resource table if it is found, and writes its
+ * size into @tablesz. If a valid table isn't found, NULL is returned
+ * (and @tablesz isn't set).
+ */
+static struct resource_table *
+rproc_elf_find_rsc_table(struct rproc *rproc, const struct firmware *fw,
+ int *tablesz)
+{
+ struct elf32_hdr *ehdr;
+ struct elf32_shdr *shdr;
+ struct device *dev = &rproc->dev;
+ struct resource_table *table = NULL;
+ const u8 *elf_data = fw->data;
+
+ ehdr = (struct elf32_hdr *)elf_data;
+
+ shdr = find_table(dev, ehdr, fw->size);
+ if (!shdr)
+ return NULL;
+
+ table = (struct resource_table *)(elf_data + shdr->sh_offset);
+ *tablesz = shdr->sh_size;
+
return table;
}
+/**
+ * rproc_elf_find_loaded_rsc_table() - find the loaded resource table
+ * @rproc: the rproc handle
+ * @fw: the ELF firmware image
+ *
+ * This function finds the location of the loaded resource table. Don't
+ * call this function if the table wasn't loaded yet - it's a bug if you do.
+ *
+ * Returns the pointer to the resource table if it is found or NULL otherwise.
+ * If the table wasn't loaded yet the result is unspecified.
+ */
+static struct resource_table *
+rproc_elf_find_loaded_rsc_table(struct rproc *rproc, const struct firmware *fw)
+{
+ struct elf32_hdr *ehdr = (struct elf32_hdr *)fw->data;
+ struct elf32_shdr *shdr;
+
+ shdr = find_table(&rproc->dev, ehdr, fw->size);
+ if (!shdr)
+ return NULL;
+
+ return rproc_da_to_va(rproc, shdr->sh_addr, shdr->sh_size);
+}
+
const struct rproc_fw_ops rproc_elf_fw_ops = {
.load = rproc_elf_load_segments,
.find_rsc_table = rproc_elf_find_rsc_table,
+ .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
.sanity_check = rproc_elf_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr
};
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index 7bb6648..157e762 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -27,7 +27,8 @@ struct rproc;
/**
* struct rproc_fw_ops - firmware format specific operations.
- * @find_rsc_table: finds the resource table inside the firmware image
+ * @find_rsc_table: find the resource table inside the firmware image
+ * @find_loaded_rsc_table: find the loaded resource table
 * @load: load firmware to memory, where the remote processor
* expects to find it
* @sanity_check: sanity check the fw image
@@ -37,6 +38,8 @@ struct rproc_fw_ops {
struct resource_table *(*find_rsc_table) (struct rproc *rproc,
const struct firmware *fw,
int *tablesz);
+ struct resource_table *(*find_loaded_rsc_table)(struct rproc *rproc,
+ const struct firmware *fw);
int (*load)(struct rproc *rproc, const struct firmware *fw);
int (*sanity_check)(struct rproc *rproc, const struct firmware *fw);
u32 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw);
@@ -102,6 +105,16 @@ struct resource_table *rproc_find_rsc_table(struct rproc *rproc,
return NULL;
}
+static inline
+struct resource_table *rproc_find_loaded_rsc_table(struct rproc *rproc,
+ const struct firmware *fw)
+{
+ if (rproc->fw_ops->find_loaded_rsc_table)
+ return rproc->fw_ops->find_loaded_rsc_table(rproc, fw);
+
+ return NULL;
+}
+
extern const struct rproc_fw_ops rproc_elf_fw_ops;
#endif /* REMOTEPROC_INTERNAL_H */
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index afed9b7..b09c75c 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -173,25 +173,35 @@ error:
return ret;
}
-/*
- * We don't support yet real virtio status semantics.
- *
- * The plan is to provide this via the VDEV resource entry
- * which is part of the firmware: this way the remote processor
- * will be able to access the status values as set by us.
- */
static u8 rproc_virtio_get_status(struct virtio_device *vdev)
{
- return 0;
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+ struct fw_rsc_vdev *rsc;
+
+ rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
+
+ return rsc->status;
}
static void rproc_virtio_set_status(struct virtio_device *vdev, u8 status)
{
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+ struct fw_rsc_vdev *rsc;
+
+ rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
+
+ rsc->status = status;
dev_dbg(&vdev->dev, "status: %d\n", status);
}
static void rproc_virtio_reset(struct virtio_device *vdev)
{
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+ struct fw_rsc_vdev *rsc;
+
+ rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
+
+ rsc->status = 0;
dev_dbg(&vdev->dev, "reset !\n");
}
@@ -199,13 +209,19 @@ static void rproc_virtio_reset(struct virtio_device *vdev)
static u32 rproc_virtio_get_features(struct virtio_device *vdev)
{
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+ struct fw_rsc_vdev *rsc;
+
+ rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
- return rvdev->dfeatures;
+ return rsc->dfeatures;
}
static void rproc_virtio_finalize_features(struct virtio_device *vdev)
{
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+ struct fw_rsc_vdev *rsc;
+
+ rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
/* Give virtio_ring a chance to accept features */
vring_transport_features(vdev);
@@ -213,13 +229,44 @@ static void rproc_virtio_finalize_features(struct virtio_device *vdev)
/*
* Remember the finalized features of our vdev, and provide it
* to the remote processor once it is powered on.
- *
- * Similarly to the status field, we don't expose yet the negotiated
- * features to the remote processors at this point. This will be
- * fixed as part of a small resource table overhaul and then an
- * extension of the virtio resource entries.
*/
- rvdev->gfeatures = vdev->features[0];
+ rsc->gfeatures = vdev->features[0];
+}
+
+static void rproc_virtio_get(struct virtio_device *vdev, unsigned offset,
+ void *buf, unsigned len)
+{
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+ struct fw_rsc_vdev *rsc;
+ void *cfg;
+
+ rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
+ cfg = &rsc->vring[rsc->num_of_vrings];
+
+ if (offset + len > rsc->config_len || offset + len < len) {
+ dev_err(&vdev->dev, "rproc_virtio_get: access out of bounds\n");
+ return;
+ }
+
+ memcpy(buf, cfg + offset, len);
+}
+
+static void rproc_virtio_set(struct virtio_device *vdev, unsigned offset,
+ const void *buf, unsigned len)
+{
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+ struct fw_rsc_vdev *rsc;
+ void *cfg;
+
+ rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
+ cfg = &rsc->vring[rsc->num_of_vrings];
+
+ if (offset + len > rsc->config_len || offset + len < len) {
+ dev_err(&vdev->dev, "rproc_virtio_set: access out of bounds\n");
+ return;
+ }
+
+ memcpy(cfg + offset, buf, len);
}
static const struct virtio_config_ops rproc_virtio_config_ops = {
@@ -230,6 +277,8 @@ static const struct virtio_config_ops rproc_virtio_config_ops = {
.reset = rproc_virtio_reset,
.set_status = rproc_virtio_set_status,
.get_status = rproc_virtio_get_status,
+ .get = rproc_virtio_get,
+ .set = rproc_virtio_set,
};
/*
diff --git a/drivers/remoteproc/ste_modem_rproc.c b/drivers/remoteproc/ste_modem_rproc.c
index fb95c42..1ec39a4 100644
--- a/drivers/remoteproc/ste_modem_rproc.c
+++ b/drivers/remoteproc/ste_modem_rproc.c
@@ -64,26 +64,18 @@ static int sproc_load_segments(struct rproc *rproc, const struct firmware *fw)
}
/* Find the entry for resource table in the Table of Content */
-static struct ste_toc_entry *sproc_find_rsc_entry(const struct firmware *fw)
+static const struct ste_toc_entry *sproc_find_rsc_entry(const void *data)
{
int i;
- struct ste_toc *toc;
-
- if (!fw)
- return NULL;
-
- toc = (void *)fw->data;
+ const struct ste_toc *toc;
+ toc = data;
/* Search the table for the resource table */
for (i = 0; i < SPROC_MAX_TOC_ENTRIES &&
toc->table[i].start != 0xffffffff; i++) {
-
if (!strncmp(toc->table[i].name, SPROC_RESOURCE_NAME,
- sizeof(toc->table[i].name))) {
- if (toc->table[i].start > fw->size)
- return NULL;
+ sizeof(toc->table[i].name)))
return &toc->table[i];
- }
}
return NULL;
@@ -96,9 +88,12 @@ sproc_find_rsc_table(struct rproc *rproc, const struct firmware *fw,
{
struct sproc *sproc = rproc->priv;
struct resource_table *table;
- struct ste_toc_entry *entry;
+ const struct ste_toc_entry *entry;
- entry = sproc_find_rsc_entry(fw);
+ if (!fw)
+ return NULL;
+
+ entry = sproc_find_rsc_entry(fw->data);
if (!entry) {
sproc_err(sproc, "resource table not found in fw\n");
return NULL;
@@ -149,10 +144,30 @@ sproc_find_rsc_table(struct rproc *rproc, const struct firmware *fw,
return table;
}
+/* Find the loaded resource table in the modem's firmware memory. */
+static struct resource_table *
+sproc_find_loaded_rsc_table(struct rproc *rproc, const struct firmware *fw)
+{
+ struct sproc *sproc = rproc->priv;
+ const struct ste_toc_entry *entry;
+
+ if (!fw || !sproc->fw_addr)
+ return NULL;
+
+ entry = sproc_find_rsc_entry(sproc->fw_addr);
+ if (!entry) {
+ sproc_err(sproc, "resource table not found in fw\n");
+ return NULL;
+ }
+
+ return sproc->fw_addr + entry->start;
+}
+
/* STE modem firmware handler operations */
const struct rproc_fw_ops sproc_fw_ops = {
.load = sproc_load_segments,
.find_rsc_table = sproc_find_rsc_table,
+ .find_loaded_rsc_table = sproc_find_loaded_rsc_table,
};
/* Kick the modem with specified notification id */
@@ -198,7 +213,7 @@ static int sproc_start(struct rproc *rproc)
}
/* Subscribe to notifications */
- for (i = 0; i < rproc->max_notifyid; i++) {
+ for (i = 0; i <= rproc->max_notifyid; i++) {
err = sproc->mdev->ops.kick_subscribe(sproc->mdev, i);
if (err) {
sproc_err(sproc,
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
new file mode 100644
index 0000000..c9d04f7
--- /dev/null
+++ b/drivers/reset/Kconfig
@@ -0,0 +1,13 @@
+config ARCH_HAS_RESET_CONTROLLER
+ bool
+
+menuconfig RESET_CONTROLLER
+ bool "Reset Controller Support"
+ default y if ARCH_HAS_RESET_CONTROLLER
+ help
+ Generic Reset Controller support.
+
+ This framework is designed to abstract reset handling of devices
+ via GPIOs or SoC-internal reset controller modules.
+
+ If unsure, say no.
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
new file mode 100644
index 0000000..1e2d83f
--- /dev/null
+++ b/drivers/reset/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_RESET_CONTROLLER) += core.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
new file mode 100644
index 0000000..d1b6089
--- /dev/null
+++ b/drivers/reset/core.c
@@ -0,0 +1,297 @@
+/*
+ * Reset Controller framework
+ *
+ * Copyright 2013 Philipp Zabel, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/reset.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+
+static DEFINE_MUTEX(reset_controller_list_mutex);
+static LIST_HEAD(reset_controller_list);
+
+/**
+ * struct reset_control - a reset control
+ * @rcdev: a pointer to the reset controller device
+ * this reset control belongs to
+ * @id: ID of the reset controller in the reset
+ * controller device
+ */
+struct reset_control {
+ struct reset_controller_dev *rcdev;
+ struct device *dev;
+ unsigned int id;
+};
+
+/**
+ * of_reset_simple_xlate - translate reset_spec to the reset line number
+ * @rcdev: a pointer to the reset controller device
+ * @reset_spec: reset line specifier as found in the device tree
+ *
+ * This simple translation function should be used for reset controllers
+ * with 1:1 mapping, where reset lines can be indexed by number without gaps.
+ */
+int of_reset_simple_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ if (WARN_ON(reset_spec->args_count != rcdev->of_reset_n_cells))
+ return -EINVAL;
+
+ if (reset_spec->args[0] >= rcdev->nr_resets)
+ return -EINVAL;
+
+ return reset_spec->args[0];
+}
+EXPORT_SYMBOL_GPL(of_reset_simple_xlate);
+
+/**
+ * reset_controller_register - register a reset controller device
+ * @rcdev: a pointer to the initialized reset controller device
+ */
+int reset_controller_register(struct reset_controller_dev *rcdev)
+{
+ if (!rcdev->of_xlate) {
+ rcdev->of_reset_n_cells = 1;
+ rcdev->of_xlate = of_reset_simple_xlate;
+ }
+
+ mutex_lock(&reset_controller_list_mutex);
+ list_add(&rcdev->list, &reset_controller_list);
+ mutex_unlock(&reset_controller_list_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(reset_controller_register);
+
+/**
+ * reset_controller_unregister - unregister a reset controller device
+ * @rcdev: a pointer to the reset controller device
+ */
+void reset_controller_unregister(struct reset_controller_dev *rcdev)
+{
+ mutex_lock(&reset_controller_list_mutex);
+ list_del(&rcdev->list);
+ mutex_unlock(&reset_controller_list_mutex);
+}
+EXPORT_SYMBOL_GPL(reset_controller_unregister);
+
+/**
+ * reset_control_reset - reset the controlled device
+ * @rstc: reset controller
+ */
+int reset_control_reset(struct reset_control *rstc)
+{
+ if (rstc->rcdev->ops->reset)
+ return rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
+
+ return -ENOSYS;
+}
+EXPORT_SYMBOL_GPL(reset_control_reset);
+
+/**
+ * reset_control_assert - asserts the reset line
+ * @rstc: reset controller
+ */
+int reset_control_assert(struct reset_control *rstc)
+{
+ if (rstc->rcdev->ops->assert)
+ return rstc->rcdev->ops->assert(rstc->rcdev, rstc->id);
+
+ return -ENOSYS;
+}
+EXPORT_SYMBOL_GPL(reset_control_assert);
+
+/**
+ * reset_control_deassert - deasserts the reset line
+ * @rstc: reset controller
+ */
+int reset_control_deassert(struct reset_control *rstc)
+{
+ if (rstc->rcdev->ops->deassert)
+ return rstc->rcdev->ops->deassert(rstc->rcdev, rstc->id);
+
+ return -ENOSYS;
+}
+EXPORT_SYMBOL_GPL(reset_control_deassert);
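A hedged provider-side sketch of how an SoC driver might sit on top of the API above: it implements only .reset and relies on the default of_reset_simple_xlate() translation. The register layout (one self-clearing bit per reset line at the start of the mapped region) and all myreset_* names are assumptions for illustration, not part of the patch.

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>

struct myreset_priv {
	void __iomem *base;
	struct reset_controller_dev rcdev;
};

/* pulse a (hypothetical) self-clearing per-line reset bit */
static int myreset_reset(struct reset_controller_dev *rcdev, unsigned long id)
{
	struct myreset_priv *priv = container_of(rcdev, struct myreset_priv, rcdev);

	writel(BIT(id), priv->base);
	return 0;
}

static struct reset_control_ops myreset_ops = {
	.reset = myreset_reset,
};

static int myreset_probe(struct platform_device *pdev)
{
	struct myreset_priv *priv;
	struct resource *res;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->rcdev.owner = THIS_MODULE;
	priv->rcdev.ops = &myreset_ops;
	priv->rcdev.nr_resets = 32;	/* assumed number of reset lines */
	priv->rcdev.of_node = pdev->dev.of_node;
	/* of_xlate/of_reset_n_cells default to the simple 1:1 translation */

	platform_set_drvdata(pdev, priv);

	return reset_controller_register(&priv->rcdev);
}

static int myreset_remove(struct platform_device *pdev)
{
	struct myreset_priv *priv = platform_get_drvdata(pdev);

	reset_controller_unregister(&priv->rcdev);
	return 0;
}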
+
+/**
+ * reset_control_get - Lookup and obtain a reference to a reset controller.
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ *
+ * Use of id names is optional.
+ */
+struct reset_control *reset_control_get(struct device *dev, const char *id)
+{
+ struct reset_control *rstc = ERR_PTR(-EPROBE_DEFER);
+ struct reset_controller_dev *r, *rcdev;
+ struct of_phandle_args args;
+ int index = 0;
+ int rstc_id;
+ int ret;
+
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+
+ if (id)
+ index = of_property_match_string(dev->of_node,
+ "reset-names", id);
+ ret = of_parse_phandle_with_args(dev->of_node, "resets", "#reset-cells",
+ index, &args);
+ if (ret)
+ return ERR_PTR(ret);
+
+ mutex_lock(&reset_controller_list_mutex);
+ rcdev = NULL;
+ list_for_each_entry(r, &reset_controller_list, list) {
+ if (args.np == r->of_node) {
+ rcdev = r;
+ break;
+ }
+ }
+ of_node_put(args.np);
+
+ if (!rcdev) {
+ mutex_unlock(&reset_controller_list_mutex);
+ return ERR_PTR(-ENODEV);
+ }
+
+ rstc_id = rcdev->of_xlate(rcdev, &args);
+ if (rstc_id < 0) {
+ mutex_unlock(&reset_controller_list_mutex);
+ return ERR_PTR(rstc_id);
+ }
+
+ try_module_get(rcdev->owner);
+ mutex_unlock(&reset_controller_list_mutex);
+
+ rstc = kzalloc(sizeof(*rstc), GFP_KERNEL);
+ if (!rstc) {
+ module_put(rcdev->owner);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ rstc->dev = dev;
+ rstc->rcdev = rcdev;
+ rstc->id = rstc_id;
+
+ return rstc;
+}
+EXPORT_SYMBOL_GPL(reset_control_get);
+
+/**
+ * reset_control_put - free the reset controller
+ * @rstc: reset controller
+ */
+
+void reset_control_put(struct reset_control *rstc)
+{
+ if (IS_ERR(rstc))
+ return;
+
+ module_put(rstc->rcdev->owner);
+ kfree(rstc);
+}
+EXPORT_SYMBOL_GPL(reset_control_put);
+
+static void devm_reset_control_release(struct device *dev, void *res)
+{
+ reset_control_put(*(struct reset_control **)res);
+}
+
+/**
+ * devm_reset_control_get - resource managed reset_control_get()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get(). For reset controllers returned from this
+ * function, reset_control_put() is called automatically on driver detach.
+ * See reset_control_get() for more information.
+ */
+struct reset_control *devm_reset_control_get(struct device *dev, const char *id)
+{
+ struct reset_control **ptr, *rstc;
+
+ ptr = devres_alloc(devm_reset_control_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ rstc = reset_control_get(dev, id);
+ if (!IS_ERR(rstc)) {
+ *ptr = rstc;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return rstc;
+}
+EXPORT_SYMBOL_GPL(devm_reset_control_get);
+
+static int devm_reset_control_match(struct device *dev, void *res, void *data)
+{
+ struct reset_control **rstc = res;
+ if (WARN_ON(!rstc || !*rstc))
+ return 0;
+ return *rstc == data;
+}
+
+/**
+ * devm_reset_control_put - resource managed reset_control_put()
+ * @rstc: reset controller to free
+ *
+ * Deallocate a reset control allocated with devm_reset_control_get().
+ * This function will not need to be called normally, as devres will take
+ * care of freeing the resource.
+ */
+void devm_reset_control_put(struct reset_control *rstc)
+{
+ int ret;
+
+ ret = devres_release(rstc->dev, devm_reset_control_release,
+ devm_reset_control_match, rstc);
+ WARN_ON(ret);
+}
+EXPORT_SYMBOL_GPL(devm_reset_control_put);
+
+/**
+ * device_reset - find reset controller associated with the device
+ * and perform reset
+ * @dev: device to be reset by the controller
+ *
+ * Convenience wrapper for reset_control_get() and reset_control_reset().
+ * This is useful for the common case of devices with single, dedicated reset
+ * lines.
+ */
+int device_reset(struct device *dev)
+{
+ struct reset_control *rstc;
+ int ret;
+
+ rstc = reset_control_get(dev, NULL);
+ if (IS_ERR(rstc))
+ return PTR_ERR(rstc);
+
+ ret = reset_control_reset(rstc);
+
+ reset_control_put(rstc);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(device_reset);
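And a hedged consumer-side sketch: a peripheral driver grabbing its reset line through the managed helper above and deasserting it before touching registers. The "mydev" names are placeholders; the "resets" phandle is expected in the consumer's device-tree node, and device_reset() remains available for the one-shot get/reset/put case.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

static int mydev_probe(struct platform_device *pdev)
{
	struct reset_control *rstc;
	int ret;

	/* managed variant: reset_control_put() happens on driver detach */
	rstc = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	/* take the block out of reset before touching its registers */
	ret = reset_control_deassert(rstc);
	if (ret)
		return ret;

	return 0;
}

static struct platform_driver mydev_driver = {
	.probe	= mydev_probe,
	.driver	= {
		.name = "mydev",	/* placeholder name */
	},
};
module_platform_driver(mydev_driver);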
diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig
index f6e0ea6..69a2193 100644
--- a/drivers/rpmsg/Kconfig
+++ b/drivers/rpmsg/Kconfig
@@ -4,5 +4,6 @@ menu "Rpmsg drivers"
config RPMSG
tristate
select VIRTIO
+ select VIRTUALIZATION
endmenu
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 7861f11..b6135d4 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -757,14 +757,14 @@ int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,
mutex_lock(&vrp->tx_lock);
/* add message to the remote processor's virtqueue */
- err = virtqueue_add_buf(vrp->svq, &sg, 1, 0, msg, GFP_KERNEL);
+ err = virtqueue_add_outbuf(vrp->svq, &sg, 1, msg, GFP_KERNEL);
if (err) {
/*
* need to reclaim the buffer here, otherwise it's lost
* (memory won't leak, but rpmsg won't use it again for TX).
* this will wait for a buffer management overhaul.
*/
- dev_err(dev, "virtqueue_add_buf failed: %d\n", err);
+ dev_err(dev, "virtqueue_add_outbuf failed: %d\n", err);
goto out;
}
@@ -776,23 +776,13 @@ out:
}
EXPORT_SYMBOL(rpmsg_send_offchannel_raw);
-/* called when an rx buffer is used, and it's time to digest a message */
-static void rpmsg_recv_done(struct virtqueue *rvq)
+static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
+ struct rpmsg_hdr *msg, unsigned int len)
{
- struct rpmsg_hdr *msg;
- unsigned int len;
struct rpmsg_endpoint *ept;
struct scatterlist sg;
- struct virtproc_info *vrp = rvq->vdev->priv;
- struct device *dev = &rvq->vdev->dev;
int err;
- msg = virtqueue_get_buf(rvq, &len);
- if (!msg) {
- dev_err(dev, "uhm, incoming signal, but no used buffer ?\n");
- return;
- }
-
dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n",
msg->src, msg->dst, msg->len,
msg->flags, msg->reserved);
@@ -806,7 +796,7 @@ static void rpmsg_recv_done(struct virtqueue *rvq)
if (len > RPMSG_BUF_SIZE ||
msg->len > (len - sizeof(struct rpmsg_hdr))) {
dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg->len);
- return;
+ return -EINVAL;
}
/* use the dst addr to fetch the callback of the appropriate user */
@@ -839,14 +829,45 @@ static void rpmsg_recv_done(struct virtqueue *rvq)
sg_init_one(&sg, msg, RPMSG_BUF_SIZE);
/* add the buffer back to the remote processor's virtqueue */
- err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, msg, GFP_KERNEL);
+ err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, msg, GFP_KERNEL);
if (err < 0) {
dev_err(dev, "failed to add a virtqueue buffer: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+/* called when an rx buffer is used, and it's time to digest a message */
+static void rpmsg_recv_done(struct virtqueue *rvq)
+{
+ struct virtproc_info *vrp = rvq->vdev->priv;
+ struct device *dev = &rvq->vdev->dev;
+ struct rpmsg_hdr *msg;
+ unsigned int len, msgs_received = 0;
+ int err;
+
+ msg = virtqueue_get_buf(rvq, &len);
+ if (!msg) {
+ dev_err(dev, "uhm, incoming signal, but no used buffer ?\n");
return;
}
+ while (msg) {
+ err = rpmsg_recv_single(vrp, dev, msg, len);
+ if (err)
+ break;
+
+ msgs_received++;
+
+ msg = virtqueue_get_buf(rvq, &len);
+ }
+
+ dev_dbg(dev, "Received %u messages\n", msgs_received);
+
/* tell the remote processor we added another available rx buffer */
- virtqueue_kick(vrp->rvq);
+ if (msgs_received)
+ virtqueue_kick(vrp->rvq);
}
/*
@@ -972,7 +993,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
sg_init_one(&sg, cpu_addr, RPMSG_BUF_SIZE);
- err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, cpu_addr,
+ err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, cpu_addr,
GFP_KERNEL);
WARN_ON(err); /* sanity check; this can't really happen */
}
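For reference, the direction-specific helpers used in the conversion above replace virtqueue_add_buf()'s separate out/in counts with a single count for one direction; a minimal sketch, with vq, buf and len assumed to be set up elsewhere:

	struct scatterlist sg;
	int err;

	sg_init_one(&sg, buf, len);

	/* Buffer the device reads from (driver -> device). */
	err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_KERNEL);

	/* Buffer the device writes into (device -> driver). */
	err = virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL);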
diff --git a/drivers/rtc/rtc-proc.c b/drivers/rtc/rtc-proc.c
index e96236a..ffa69e1 100644
--- a/drivers/rtc/rtc-proc.c
+++ b/drivers/rtc/rtc-proc.c
@@ -110,7 +110,7 @@ static int rtc_proc_show(struct seq_file *seq, void *offset)
static int rtc_proc_open(struct inode *inode, struct file *file)
{
int ret;
- struct rtc_device *rtc = PDE(inode)->data;
+ struct rtc_device *rtc = PDE_DATA(inode);
if (!try_module_get(THIS_MODULE))
return -ENODEV;
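The PDE(inode)->data to PDE_DATA(inode) conversion above recurs across this series; a minimal sketch of the pattern in a hypothetical proc open handler (foo_proc_show() assumed to exist):

static int foo_proc_open(struct inode *inode, struct file *file)
{
	/* PDE_DATA() returns the data pointer passed to proc_create_data(). */
	return single_open(file, foo_proc_show, PDE_DATA(inode));
}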
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index 224d634..ccf54f0 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -68,6 +68,7 @@
enum rtc_type {
rtc_undef = 0,
rtc_r2025sd,
+ rtc_r2221tl,
rtc_rs5c372a,
rtc_rs5c372b,
rtc_rv5c386,
@@ -76,6 +77,7 @@ enum rtc_type {
static const struct i2c_device_id rs5c372_id[] = {
{ "r2025sd", rtc_r2025sd },
+ { "r2221tl", rtc_r2221tl },
{ "rs5c372a", rtc_rs5c372a },
{ "rs5c372b", rtc_rs5c372b },
{ "rv5c386", rtc_rv5c386 },
@@ -529,6 +531,7 @@ static int rs5c_oscillator_setup(struct rs5c372 *rs5c372)
rs5c372->time24 = 1;
break;
case rtc_r2025sd:
+ case rtc_r2221tl:
case rtc_rv5c386:
case rtc_rv5c387a:
buf[0] |= RV5C387_CTRL1_24;
@@ -609,6 +612,7 @@ static int rs5c372_probe(struct i2c_client *client,
rs5c372->time24 = 1;
break;
case rtc_r2025sd:
+ case rtc_r2221tl:
case rtc_rv5c386:
case rtc_rv5c387a:
if (rs5c372->regs[RS5C_REG_CTRL1] & RV5C387_CTRL1_24)
@@ -640,6 +644,7 @@ static int rs5c372_probe(struct i2c_client *client,
dev_info(&client->dev, "%s found, %s, driver version " DRV_VERSION "\n",
({ char *s; switch (rs5c372->type) {
case rtc_r2025sd: s = "r2025sd"; break;
+ case rtc_r2221tl: s = "r2221tl"; break;
case rtc_rs5c372a: s = "rs5c372a"; break;
case rtc_rs5c372b: s = "rs5c372b"; break;
case rtc_rv5c386: s = "rv5c386"; break;
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 8e96c00..14040b2 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -29,9 +29,8 @@
#include <linux/uaccess.h>
#include <linux/io.h>
-#include <mach/hardware.h>
#include <asm/irq.h>
-#include <plat/regs-rtc.h>
+#include "rtc-s3c.h"
enum s3c_cpu_type {
TYPE_S3C2410,
diff --git a/drivers/rtc/rtc-s3c.h b/drivers/rtc/rtc-s3c.h
new file mode 100644
index 0000000..004b61a
--- /dev/null
+++ b/drivers/rtc/rtc-s3c.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2003 Simtec Electronics <linux@simtec.co.uk>
+ * http://www.simtec.co.uk/products/SWLINUX/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * S3C2410 Internal RTC register definition
+*/
+
+#ifndef __ASM_ARCH_REGS_RTC_H
+#define __ASM_ARCH_REGS_RTC_H __FILE__
+
+#define S3C2410_RTCREG(x) (x)
+#define S3C2410_INTP S3C2410_RTCREG(0x30)
+#define S3C2410_INTP_ALM (1 << 1)
+#define S3C2410_INTP_TIC (1 << 0)
+
+#define S3C2410_RTCCON S3C2410_RTCREG(0x40)
+#define S3C2410_RTCCON_RTCEN (1 << 0)
+#define S3C2410_RTCCON_CNTSEL (1 << 2)
+#define S3C2410_RTCCON_CLKRST (1 << 3)
+#define S3C2443_RTCCON_TICSEL (1 << 4)
+#define S3C64XX_RTCCON_TICEN (1 << 8)
+
+#define S3C2410_TICNT S3C2410_RTCREG(0x44)
+#define S3C2410_TICNT_ENABLE (1 << 7)
+
+/* S3C2443: tick count is 15 bit wide
+ * TICNT[6:0] contains upper 7 bits
+ * TICNT1[7:0] contains lower 8 bits
+ */
+#define S3C2443_TICNT_PART(x) ((x & 0x7f00) >> 8)
+#define S3C2443_TICNT1 S3C2410_RTCREG(0x4C)
+#define S3C2443_TICNT1_PART(x) (x & 0xff)
+
+/* S3C2416: tick count is 32 bit wide
+ * TICNT[6:0] contains bits [14:8]
+ * TICNT1[7:0] contains lower 8 bits
+ * TICNT2[16:0] contains upper 17 bits
+ */
+#define S3C2416_TICNT2 S3C2410_RTCREG(0x48)
+#define S3C2416_TICNT2_PART(x) ((x & 0xffff8000) >> 15)
+
+#define S3C2410_RTCALM S3C2410_RTCREG(0x50)
+#define S3C2410_RTCALM_ALMEN (1 << 6)
+#define S3C2410_RTCALM_YEAREN (1 << 5)
+#define S3C2410_RTCALM_MONEN (1 << 4)
+#define S3C2410_RTCALM_DAYEN (1 << 3)
+#define S3C2410_RTCALM_HOUREN (1 << 2)
+#define S3C2410_RTCALM_MINEN (1 << 1)
+#define S3C2410_RTCALM_SECEN (1 << 0)
+
+#define S3C2410_ALMSEC S3C2410_RTCREG(0x54)
+#define S3C2410_ALMMIN S3C2410_RTCREG(0x58)
+#define S3C2410_ALMHOUR S3C2410_RTCREG(0x5c)
+
+#define S3C2410_ALMDATE S3C2410_RTCREG(0x60)
+#define S3C2410_ALMMON S3C2410_RTCREG(0x64)
+#define S3C2410_ALMYEAR S3C2410_RTCREG(0x68)
+
+#define S3C2410_RTCSEC S3C2410_RTCREG(0x70)
+#define S3C2410_RTCMIN S3C2410_RTCREG(0x74)
+#define S3C2410_RTCHOUR S3C2410_RTCREG(0x78)
+#define S3C2410_RTCDATE S3C2410_RTCREG(0x7c)
+#define S3C2410_RTCMON S3C2410_RTCREG(0x84)
+#define S3C2410_RTCYEAR S3C2410_RTCREG(0x88)
+
+#endif /* __ASM_ARCH_REGS_RTC_H */
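A minimal sketch (hypothetical helper; register base mapping and clock handling assumed) of how the relocated register definitions are typically used by the driver:

static void s3c_rtc_enable_sketch(void __iomem *base)
{
	unsigned int con;

	/* Set the RTC enable bit in the control register. */
	con = readw(base + S3C2410_RTCCON);
	writew(con | S3C2410_RTCCON_RTCEN, base + S3C2410_RTCCON);
}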
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index a9cd26a..483ce08 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -30,8 +30,6 @@
#include <linux/stmp_device.h>
#include <linux/stmp3xxx_rtc_wdt.h>
-#include <mach/common.h>
-
#define STMP3XXX_RTC_CTRL 0x0
#define STMP3XXX_RTC_CTRL_SET 0x4
#define STMP3XXX_RTC_CTRL_CLR 0x8
@@ -264,7 +262,7 @@ static int stmp3xxx_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc_data);
- mxs_reset_block(rtc_data->io);
+ stmp_reset_block(rtc_data->io);
writel(STMP3XXX_RTC_PERSISTENT0_ALARM_EN |
STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE_EN |
STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE,
@@ -307,7 +305,7 @@ static int stmp3xxx_rtc_resume(struct device *dev)
{
struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
- mxs_reset_block(rtc_data->io);
+ stmp_reset_block(rtc_data->io);
writel(STMP3XXX_RTC_PERSISTENT0_ALARM_EN |
STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE_EN |
STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE,
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 82758cb..4361d97 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2997,18 +2997,14 @@ unlock:
return rc;
}
-static int dasd_release(struct gendisk *disk, fmode_t mode)
+static void dasd_release(struct gendisk *disk, fmode_t mode)
{
- struct dasd_device *base;
-
- base = dasd_device_from_gendisk(disk);
- if (!base)
- return -ENODEV;
-
- atomic_dec(&base->block->open_count);
- module_put(base->discipline->owner);
- dasd_put_device(base);
- return 0;
+ struct dasd_device *base = dasd_device_from_gendisk(disk);
+ if (base) {
+ atomic_dec(&base->block->open_count);
+ module_put(base->discipline->owner);
+ dasd_put_device(base);
+ }
}
/*
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index b6ad0de..6eca019 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -26,7 +26,7 @@
#define DCSS_BUS_ID_SIZE 20
static int dcssblk_open(struct block_device *bdev, fmode_t mode);
-static int dcssblk_release(struct gendisk *disk, fmode_t mode);
+static void dcssblk_release(struct gendisk *disk, fmode_t mode);
static void dcssblk_make_request(struct request_queue *q, struct bio *bio);
static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
void **kaddr, unsigned long *pfn);
@@ -781,16 +781,15 @@ out:
return rc;
}
-static int
+static void
dcssblk_release(struct gendisk *disk, fmode_t mode)
{
struct dcssblk_dev_info *dev_info = disk->private_data;
struct segment_info *entry;
- int rc;
if (!dev_info) {
- rc = -ENODEV;
- goto out;
+ WARN_ON(1);
+ return;
}
down_write(&dcssblk_devices_sem);
if (atomic_dec_and_test(&dev_info->use_count)
@@ -803,9 +802,6 @@ dcssblk_release(struct gendisk *disk, fmode_t mode)
dev_info->save_pending = 0;
}
up_write(&dcssblk_devices_sem);
- rc = 0;
-out:
- return rc;
}
static void
@@ -826,8 +822,7 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
/* Request is not page-aligned. */
goto fail;
- if (((bio->bi_size >> 9) + bio->bi_sector)
- > get_capacity(bio->bi_bdev->bd_disk)) {
+ if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) {
/* Request beyond end of DCSS segment. */
goto fail;
}
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index b303cab..5d73e6e 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -123,10 +123,9 @@ static int scm_open(struct block_device *blkdev, fmode_t mode)
return scm_get_ref();
}
-static int scm_release(struct gendisk *gendisk, fmode_t mode)
+static void scm_release(struct gendisk *gendisk, fmode_t mode)
{
scm_put_ref();
- return 0;
}
static const struct block_device_operations scm_blk_devops = {
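The dasd, dcssblk and scm_blk changes above follow the tree-wide switch of the block_device_operations ->release() method from int to void; a minimal sketch of the new signature (hypothetical foo driver, fields other than ->release omitted):

static void foo_release(struct gendisk *disk, fmode_t mode)
{
	/* ->release() can no longer fail; just drop driver references here. */
}

static const struct block_device_operations foo_ops = {
	.owner		= THIS_MODULE,
	.release	= foo_release,
};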
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 178836e..bf07c3a 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -345,7 +345,6 @@ struct memory_increment {
struct list_head list;
u16 rn;
int standby;
- int usecount;
};
struct assign_storage_sccb {
@@ -463,21 +462,10 @@ static int sclp_mem_change_state(unsigned long start, unsigned long size,
break;
if (start > istart + rzm - 1)
continue;
- if (online) {
- if (incr->usecount++)
- continue;
- /*
- * Don't break the loop if one assign fails. Loop may
- * be walked again on CANCEL and we can't save
- * information if state changed before or not.
- * So continue and increase usecount for all increments.
- */
+ if (online)
rc |= sclp_assign_storage(incr->rn);
- } else {
- if (--incr->usecount)
- continue;
+ else
sclp_unassign_storage(incr->rn);
- }
}
return rc ? -EIO : 0;
}
@@ -561,8 +549,6 @@ static void __init sclp_add_standby_memory(void)
add_memory_merged(0);
}
-#define MEM_SCT_SIZE (1UL << SECTION_SIZE_BITS)
-
static void __init insert_increment(u16 rn, int standby, int assigned)
{
struct memory_increment *incr, *new_incr;
@@ -574,8 +560,6 @@ static void __init insert_increment(u16 rn, int standby, int assigned)
return;
new_incr->rn = rn;
new_incr->standby = standby;
- if (!standby)
- new_incr->usecount = rzm > MEM_SCT_SIZE ? rzm/MEM_SCT_SIZE : 1;
last_rn = 0;
prev = &sclp_mem_list;
list_for_each_entry(incr, &sclp_mem_list, list) {
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 2282061..9e5e146 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -426,7 +426,7 @@ static int zcore_memmap_open(struct inode *inode, struct file *filp)
GFP_KERNEL);
if (!chunk_array)
return -ENOMEM;
- detect_memory_layout(chunk_array);
+ detect_memory_layout(chunk_array, 0);
buf = kzalloc(MEMORY_CHUNKS * CHUNK_INFO_SIZE, GFP_KERNEL);
if (!buf) {
kfree(chunk_array);
@@ -557,7 +557,7 @@ static void __init set_lc_mask(struct save_area *map)
/*
* Initialize dump globals for a given architecture
*/
-static int __init sys_info_init(enum arch_id arch)
+static int __init sys_info_init(enum arch_id arch, unsigned long mem_end)
{
int rc;
@@ -579,7 +579,7 @@ static int __init sys_info_init(enum arch_id arch)
rc = init_cpu_info(arch);
if (rc)
return rc;
- sys_info.mem_size = real_memory_size;
+ sys_info.mem_size = mem_end;
return 0;
}
@@ -601,7 +601,7 @@ static int __init check_sdias(void)
return 0;
}
-static int __init get_mem_size(unsigned long *mem)
+static int __init get_mem_info(unsigned long *mem, unsigned long *end)
{
int i;
struct mem_chunk *chunk_array;
@@ -610,33 +610,31 @@ static int __init get_mem_size(unsigned long *mem)
GFP_KERNEL);
if (!chunk_array)
return -ENOMEM;
- detect_memory_layout(chunk_array);
+ detect_memory_layout(chunk_array, 0);
for (i = 0; i < MEMORY_CHUNKS; i++) {
if (chunk_array[i].size == 0)
break;
*mem += chunk_array[i].size;
+ *end = max(*end, chunk_array[i].addr + chunk_array[i].size);
}
kfree(chunk_array);
return 0;
}
-static int __init zcore_header_init(int arch, struct zcore_header *hdr)
+static void __init zcore_header_init(int arch, struct zcore_header *hdr,
+ unsigned long mem_size)
{
- int rc, i;
- unsigned long memory = 0;
u32 prefix;
+ int i;
if (arch == ARCH_S390X)
hdr->arch_id = DUMP_ARCH_S390X;
else
hdr->arch_id = DUMP_ARCH_S390;
- rc = get_mem_size(&memory);
- if (rc)
- return rc;
- hdr->mem_size = memory;
- hdr->rmem_size = memory;
+ hdr->mem_size = mem_size;
+ hdr->rmem_size = mem_size;
hdr->mem_end = sys_info.mem_size;
- hdr->num_pages = memory / PAGE_SIZE;
+ hdr->num_pages = mem_size / PAGE_SIZE;
hdr->tod = get_tod_clock();
get_cpu_id(&hdr->cpu_id);
for (i = 0; zfcpdump_save_areas[i]; i++) {
@@ -647,7 +645,6 @@ static int __init zcore_header_init(int arch, struct zcore_header *hdr)
hdr->lc_vec[hdr->cpu_cnt] = prefix;
hdr->cpu_cnt++;
}
- return 0;
}
/*
@@ -682,9 +679,11 @@ static int __init zcore_reipl_init(void)
static int __init zcore_init(void)
{
+ unsigned long mem_size, mem_end;
unsigned char arch;
int rc;
+ mem_size = mem_end = 0;
if (ipl_info.type != IPL_TYPE_FCP_DUMP)
return -ENODATA;
if (OLDMEM_BASE)
@@ -727,13 +726,14 @@ static int __init zcore_init(void)
}
#endif /* CONFIG_64BIT */
- rc = sys_info_init(arch);
+ rc = get_mem_info(&mem_size, &mem_end);
if (rc)
goto fail;
- rc = zcore_header_init(arch, &zcore_header);
+ rc = sys_info_init(arch, mem_end);
if (rc)
goto fail;
+ zcore_header_init(arch, &zcore_header, mem_size);
rc = zcore_reipl_init();
if (rc)
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 2d2a966..a9fe3de 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -1,7 +1,7 @@
/*
* S/390 common I/O routines -- blacklisting of specific devices
*
- * Copyright IBM Corp. 1999, 2002
+ * Copyright IBM Corp. 1999, 2013
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
@@ -17,8 +17,9 @@
#include <linux/ctype.h>
#include <linux/device.h>
-#include <asm/cio.h>
#include <asm/uaccess.h>
+#include <asm/cio.h>
+#include <asm/ipl.h>
#include "blacklist.h"
#include "cio.h"
@@ -172,6 +173,29 @@ static int blacklist_parse_parameters(char *str, range_action action,
to_cssid = __MAX_CSSID;
to_ssid = __MAX_SSID;
to = __MAX_SUBCHANNEL;
+ } else if (strcmp(parm, "ipldev") == 0) {
+ if (ipl_info.type == IPL_TYPE_CCW) {
+ from_cssid = 0;
+ from_ssid = ipl_info.data.ccw.dev_id.ssid;
+ from = ipl_info.data.ccw.dev_id.devno;
+ } else if (ipl_info.type == IPL_TYPE_FCP ||
+ ipl_info.type == IPL_TYPE_FCP_DUMP) {
+ from_cssid = 0;
+ from_ssid = ipl_info.data.fcp.dev_id.ssid;
+ from = ipl_info.data.fcp.dev_id.devno;
+ } else {
+ continue;
+ }
+ to_cssid = from_cssid;
+ to_ssid = from_ssid;
+ to = from;
+ } else if (strcmp(parm, "condev") == 0) {
+ if (console_devno == -1)
+ continue;
+
+ from_cssid = to_cssid = 0;
+ from_ssid = to_ssid = 0;
+ from = to = console_devno;
} else {
rc = parse_busid(strsep(&parm, "-"), &from_cssid,
&from_ssid, &from, msgtrigger);
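Usage inferred from the parser above (not spelled out in this hunk): the new keywords combine with existing cio_ignore busid ranges on the kernel command line, e.g. to blacklist everything except the IPL and console devices:

	cio_ignore=all,!ipldev,!condev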
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index ccaae9d..4221b02 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -224,7 +224,7 @@ static int qperf_seq_open(struct inode *inode, struct file *filp)
file_inode(filp)->i_private);
}
-static struct file_operations debugfs_perf_fops = {
+static const struct file_operations debugfs_perf_fops = {
.owner = THIS_MODULE,
.open = qperf_seq_open,
.read = seq_read,
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index b8b340a..9de41aa 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -954,15 +954,11 @@ EXPORT_SYMBOL(ap_driver_unregister);
void ap_bus_force_rescan(void)
{
- /* Delete the AP bus rescan timer. */
- del_timer(&ap_config_timer);
-
- /* processing a synchonuous bus rescan */
- ap_scan_bus(NULL);
-
- /* Setup the AP bus rescan timer again. */
- ap_config_timer.expires = jiffies + ap_config_time * HZ;
- add_timer(&ap_config_timer);
+ /* reconfigure the AP bus rescan timer. */
+ mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
+ /* processing an asynchronous bus rescan */
+ queue_work(ap_work_queue, &ap_config_work);
+ flush_work(&ap_config_work);
}
EXPORT_SYMBOL(ap_bus_force_rescan);
@@ -1305,8 +1301,9 @@ static void ap_scan_bus(struct work_struct *unused)
int rc, i;
ap_query_configuration();
- if (ap_select_domain() != 0)
+ if (ap_select_domain() != 0) {
return;
+ }
for (i = 0; i < AP_DEVICES; i++) {
qid = AP_MKQID(i, ap_domain_index);
dev = bus_find_device(&ap_bus_type, NULL,
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 6711e65..2ea6165 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -443,29 +443,30 @@ static int __init test_devices_support(unsigned long addr)
}
/*
* Init function for virtio
- * devices are in a single page above top of "normal" mem
+ * devices are in a single page above top of "normal" + standby mem
*/
static int __init kvm_devices_init(void)
{
int rc;
+ unsigned long total_memory_size = sclp_get_rzm() * sclp_get_rnmax();
if (!MACHINE_IS_KVM)
return -ENODEV;
- if (test_devices_support(real_memory_size) < 0)
+ if (test_devices_support(total_memory_size) < 0)
return -ENODEV;
- rc = vmem_add_mapping(real_memory_size, PAGE_SIZE);
+ rc = vmem_add_mapping(total_memory_size, PAGE_SIZE);
if (rc)
return rc;
- kvm_devices = (void *) real_memory_size;
+ kvm_devices = (void *) total_memory_size;
kvm_root = root_device_register("kvm_s390");
if (IS_ERR(kvm_root)) {
rc = PTR_ERR(kvm_root);
printk(KERN_ERR "Could not register kvm_s390 root device");
- vmem_remove_mapping(real_memory_size, PAGE_SIZE);
+ vmem_remove_mapping(total_memory_size, PAGE_SIZE);
return rc;
}
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index fb877b59..779dc51 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -31,6 +31,7 @@
#include <asm/irq.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
+#include <asm/virtio-ccw.h>
/*
* virtio related functions
@@ -77,12 +78,9 @@ struct virtio_ccw_vq_info {
void *queue;
struct vq_info_block *info_block;
struct list_head node;
+ long cookie;
};
-#define KVM_VIRTIO_CCW_RING_ALIGN 4096
-
-#define KVM_S390_VIRTIO_CCW_NOTIFY 3
-
#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
#define CCW_CMD_SET_IND 0x43
@@ -135,8 +133,11 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev,
do {
spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
- if (!ret)
+ if (!ret) {
+ if (!vcdev->curr_io)
+ vcdev->err = 0;
vcdev->curr_io |= flag;
+ }
spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
cpu_relax();
} while (ret == -EBUSY);
@@ -145,15 +146,18 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev,
}
static inline long do_kvm_notify(struct subchannel_id schid,
- unsigned long queue_index)
+ unsigned long queue_index,
+ long cookie)
{
register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
register struct subchannel_id __schid asm("2") = schid;
register unsigned long __index asm("3") = queue_index;
register long __rc asm("2");
+ register long __cookie asm("4") = cookie;
asm volatile ("diag 2,4,0x500\n"
- : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index)
+ : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index),
+ "d"(__cookie)
: "memory", "cc");
return __rc;
}
@@ -166,7 +170,7 @@ static void virtio_ccw_kvm_notify(struct virtqueue *vq)
vcdev = to_vc_device(info->vq->vdev);
ccw_device_get_schid(vcdev->cdev, &schid);
- do_kvm_notify(schid, vq->index);
+ info->cookie = do_kvm_notify(schid, vq->index, info->cookie);
}
static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index d7ca247..344d875 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -3201,26 +3201,30 @@ static int BusLogic_BIOSDiskParameters(struct scsi_device *sdev, struct block_de
BugLogic_ProcDirectoryInfo implements /proc/scsi/BusLogic/<N>.
*/
-static int BusLogic_ProcDirectoryInfo(struct Scsi_Host *shost, char *ProcBuffer, char **StartPointer, off_t Offset, int BytesAvailable, int WriteFlag)
+static int BusLogic_write_info(struct Scsi_Host *shost, char *ProcBuffer, int BytesAvailable)
{
struct BusLogic_HostAdapter *HostAdapter = (struct BusLogic_HostAdapter *) shost->hostdata;
struct BusLogic_TargetStatistics *TargetStatistics;
- int TargetID, Length;
- char *Buffer;
TargetStatistics = HostAdapter->TargetStatistics;
- if (WriteFlag) {
- HostAdapter->ExternalHostAdapterResets = 0;
- HostAdapter->HostAdapterInternalErrors = 0;
- memset(TargetStatistics, 0, BusLogic_MaxTargetDevices * sizeof(struct BusLogic_TargetStatistics));
- return 0;
- }
- Buffer = HostAdapter->MessageBuffer;
- Length = HostAdapter->MessageBufferLength;
- Length += sprintf(&Buffer[Length], "\n\
+ HostAdapter->ExternalHostAdapterResets = 0;
+ HostAdapter->HostAdapterInternalErrors = 0;
+ memset(TargetStatistics, 0, BusLogic_MaxTargetDevices * sizeof(struct BusLogic_TargetStatistics));
+ return 0;
+}
+
+static int BusLogic_show_info(struct seq_file *m, struct Scsi_Host *shost)
+{
+ struct BusLogic_HostAdapter *HostAdapter = (struct BusLogic_HostAdapter *) shost->hostdata;
+ struct BusLogic_TargetStatistics *TargetStatistics;
+ int TargetID;
+
+ TargetStatistics = HostAdapter->TargetStatistics;
+ seq_write(m, HostAdapter->MessageBuffer, HostAdapter->MessageBufferLength);
+ seq_printf(m, "\n\
Current Driver Queue Depth: %d\n\
Currently Allocated CCBs: %d\n", HostAdapter->DriverQueueDepth, HostAdapter->AllocatedCCBs);
- Length += sprintf(&Buffer[Length], "\n\n\
+ seq_printf(m, "\n\n\
DATA TRANSFER STATISTICS\n\
\n\
Target Tagged Queuing Queue Depth Active Attempted Completed\n\
@@ -3229,66 +3233,62 @@ Target Tagged Queuing Queue Depth Active Attempted Completed\n\
struct BusLogic_TargetFlags *TargetFlags = &HostAdapter->TargetFlags[TargetID];
if (!TargetFlags->TargetExists)
continue;
- Length += sprintf(&Buffer[Length], " %2d %s", TargetID, (TargetFlags->TaggedQueuingSupported ? (TargetFlags->TaggedQueuingActive ? " Active" : (HostAdapter->TaggedQueuingPermitted & (1 << TargetID)
+ seq_printf(m, " %2d %s", TargetID, (TargetFlags->TaggedQueuingSupported ? (TargetFlags->TaggedQueuingActive ? " Active" : (HostAdapter->TaggedQueuingPermitted & (1 << TargetID)
? " Permitted" : " Disabled"))
: "Not Supported"));
- Length += sprintf(&Buffer[Length],
+ seq_printf(m,
" %3d %3u %9u %9u\n", HostAdapter->QueueDepth[TargetID], HostAdapter->ActiveCommands[TargetID], TargetStatistics[TargetID].CommandsAttempted, TargetStatistics[TargetID].CommandsCompleted);
}
- Length += sprintf(&Buffer[Length], "\n\
+ seq_printf(m, "\n\
Target Read Commands Write Commands Total Bytes Read Total Bytes Written\n\
====== ============= ============== =================== ===================\n");
for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++) {
struct BusLogic_TargetFlags *TargetFlags = &HostAdapter->TargetFlags[TargetID];
if (!TargetFlags->TargetExists)
continue;
- Length += sprintf(&Buffer[Length], " %2d %9u %9u", TargetID, TargetStatistics[TargetID].ReadCommands, TargetStatistics[TargetID].WriteCommands);
+ seq_printf(m, " %2d %9u %9u", TargetID, TargetStatistics[TargetID].ReadCommands, TargetStatistics[TargetID].WriteCommands);
if (TargetStatistics[TargetID].TotalBytesRead.Billions > 0)
- Length += sprintf(&Buffer[Length], " %9u%09u", TargetStatistics[TargetID].TotalBytesRead.Billions, TargetStatistics[TargetID].TotalBytesRead.Units);
+ seq_printf(m, " %9u%09u", TargetStatistics[TargetID].TotalBytesRead.Billions, TargetStatistics[TargetID].TotalBytesRead.Units);
else
- Length += sprintf(&Buffer[Length], " %9u", TargetStatistics[TargetID].TotalBytesRead.Units);
+ seq_printf(m, " %9u", TargetStatistics[TargetID].TotalBytesRead.Units);
if (TargetStatistics[TargetID].TotalBytesWritten.Billions > 0)
- Length += sprintf(&Buffer[Length], " %9u%09u\n", TargetStatistics[TargetID].TotalBytesWritten.Billions, TargetStatistics[TargetID].TotalBytesWritten.Units);
+ seq_printf(m, " %9u%09u\n", TargetStatistics[TargetID].TotalBytesWritten.Billions, TargetStatistics[TargetID].TotalBytesWritten.Units);
else
- Length += sprintf(&Buffer[Length], " %9u\n", TargetStatistics[TargetID].TotalBytesWritten.Units);
+ seq_printf(m, " %9u\n", TargetStatistics[TargetID].TotalBytesWritten.Units);
}
- Length += sprintf(&Buffer[Length], "\n\
+ seq_printf(m, "\n\
Target Command 0-1KB 1-2KB 2-4KB 4-8KB 8-16KB\n\
====== ======= ========= ========= ========= ========= =========\n");
for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++) {
struct BusLogic_TargetFlags *TargetFlags = &HostAdapter->TargetFlags[TargetID];
if (!TargetFlags->TargetExists)
continue;
- Length +=
- sprintf(&Buffer[Length],
+ seq_printf(m,
" %2d Read %9u %9u %9u %9u %9u\n", TargetID,
TargetStatistics[TargetID].ReadCommandSizeBuckets[0],
TargetStatistics[TargetID].ReadCommandSizeBuckets[1], TargetStatistics[TargetID].ReadCommandSizeBuckets[2], TargetStatistics[TargetID].ReadCommandSizeBuckets[3], TargetStatistics[TargetID].ReadCommandSizeBuckets[4]);
- Length +=
- sprintf(&Buffer[Length],
+ seq_printf(m,
" %2d Write %9u %9u %9u %9u %9u\n", TargetID,
TargetStatistics[TargetID].WriteCommandSizeBuckets[0],
TargetStatistics[TargetID].WriteCommandSizeBuckets[1], TargetStatistics[TargetID].WriteCommandSizeBuckets[2], TargetStatistics[TargetID].WriteCommandSizeBuckets[3], TargetStatistics[TargetID].WriteCommandSizeBuckets[4]);
}
- Length += sprintf(&Buffer[Length], "\n\
+ seq_printf(m, "\n\
Target Command 16-32KB 32-64KB 64-128KB 128-256KB 256KB+\n\
====== ======= ========= ========= ========= ========= =========\n");
for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++) {
struct BusLogic_TargetFlags *TargetFlags = &HostAdapter->TargetFlags[TargetID];
if (!TargetFlags->TargetExists)
continue;
- Length +=
- sprintf(&Buffer[Length],
+ seq_printf(m,
" %2d Read %9u %9u %9u %9u %9u\n", TargetID,
TargetStatistics[TargetID].ReadCommandSizeBuckets[5],
TargetStatistics[TargetID].ReadCommandSizeBuckets[6], TargetStatistics[TargetID].ReadCommandSizeBuckets[7], TargetStatistics[TargetID].ReadCommandSizeBuckets[8], TargetStatistics[TargetID].ReadCommandSizeBuckets[9]);
- Length +=
- sprintf(&Buffer[Length],
+ seq_printf(m,
" %2d Write %9u %9u %9u %9u %9u\n", TargetID,
TargetStatistics[TargetID].WriteCommandSizeBuckets[5],
TargetStatistics[TargetID].WriteCommandSizeBuckets[6], TargetStatistics[TargetID].WriteCommandSizeBuckets[7], TargetStatistics[TargetID].WriteCommandSizeBuckets[8], TargetStatistics[TargetID].WriteCommandSizeBuckets[9]);
}
- Length += sprintf(&Buffer[Length], "\n\n\
+ seq_printf(m, "\n\n\
ERROR RECOVERY STATISTICS\n\
\n\
Command Aborts Bus Device Resets Host Adapter Resets\n\
@@ -3299,20 +3299,12 @@ Target Requested Completed Requested Completed Requested Completed\n\
struct BusLogic_TargetFlags *TargetFlags = &HostAdapter->TargetFlags[TargetID];
if (!TargetFlags->TargetExists)
continue;
- Length += sprintf(&Buffer[Length], "\
+ seq_printf(m, "\
%2d %5d %5d %5d %5d %5d %5d %5d %5d %5d\n", TargetID, TargetStatistics[TargetID].CommandAbortsRequested, TargetStatistics[TargetID].CommandAbortsAttempted, TargetStatistics[TargetID].CommandAbortsCompleted, TargetStatistics[TargetID].BusDeviceResetsRequested, TargetStatistics[TargetID].BusDeviceResetsAttempted, TargetStatistics[TargetID].BusDeviceResetsCompleted, TargetStatistics[TargetID].HostAdapterResetsRequested, TargetStatistics[TargetID].HostAdapterResetsAttempted, TargetStatistics[TargetID].HostAdapterResetsCompleted);
}
- Length += sprintf(&Buffer[Length], "\nExternal Host Adapter Resets: %d\n", HostAdapter->ExternalHostAdapterResets);
- Length += sprintf(&Buffer[Length], "Host Adapter Internal Errors: %d\n", HostAdapter->HostAdapterInternalErrors);
- if (Length >= BusLogic_MessageBufferSize)
- BusLogic_Error("Message Buffer length %d exceeds size %d\n", HostAdapter, Length, BusLogic_MessageBufferSize);
- if ((Length -= Offset) <= 0)
- return 0;
- if (Length >= BytesAvailable)
- Length = BytesAvailable;
- memcpy(ProcBuffer, HostAdapter->MessageBuffer + Offset, Length);
- *StartPointer = ProcBuffer;
- return Length;
+ seq_printf(m, "\nExternal Host Adapter Resets: %d\n", HostAdapter->ExternalHostAdapterResets);
+ seq_printf(m, "Host Adapter Internal Errors: %d\n", HostAdapter->HostAdapterInternalErrors);
+ return 0;
}
@@ -3566,7 +3558,8 @@ static int __init BusLogic_ParseDriverOptions(char *OptionsString)
static struct scsi_host_template Bus_Logic_template = {
.module = THIS_MODULE,
.proc_name = "BusLogic",
- .proc_info = BusLogic_ProcDirectoryInfo,
+ .write_info = BusLogic_write_info,
+ .show_info = BusLogic_show_info,
.name = "BusLogic",
.info = BusLogic_DriverInfo,
.queuecommand = BusLogic_QueueCommand,
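The proc_info to show_info/write_info split above is repeated for the remaining SCSI drivers in this series; a minimal sketch of the new hooks (hypothetical foo host template):

static int foo_show_info(struct seq_file *m, struct Scsi_Host *shost)
{
	seq_printf(m, "foo: host %d\n", shost->host_no);
	return 0;
}

static int foo_write_info(struct Scsi_Host *shost, char *buffer, int length)
{
	/* A write to /proc/scsi/foo/<N> could reset statistics here. */
	return length;
}

static struct scsi_host_template foo_template = {
	.proc_name	= "foo",
	.show_info	= foo_show_info,
	.write_info	= foo_write_info,
};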
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
index 649fcb3..6c6c13c 100644
--- a/drivers/scsi/BusLogic.h
+++ b/drivers/scsi/BusLogic.h
@@ -1321,7 +1321,6 @@ static inline void BusLogic_IncrementSizeBucket(BusLogic_CommandSizeBuckets_T Co
static const char *BusLogic_DriverInfo(struct Scsi_Host *);
static int BusLogic_QueueCommand(struct Scsi_Host *h, struct scsi_cmnd *);
static int BusLogic_BIOSDiskParameters(struct scsi_device *, struct block_device *, sector_t, int *);
-static int BusLogic_ProcDirectoryInfo(struct Scsi_Host *, char *, char **, off_t, int, int);
static int BusLogic_SlaveConfigure(struct scsi_device *);
static void BusLogic_QueueCompletedCCB(struct BusLogic_CCB *);
static irqreturn_t BusLogic_InterruptHandler(int, void *);
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 450353e..1e9d6ad 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -695,33 +695,35 @@ static void NCR5380_print_status(struct Scsi_Host *instance)
* Return the number of bytes read from or written
*/
+static int __maybe_unused NCR5380_write_info(struct Scsi_Host *instance,
+ char *buffer, int length)
+{
+#ifdef DTC_PUBLIC_RELEASE
+ dtc_wmaxi = dtc_maxi = 0;
+#endif
+#ifdef PAS16_PUBLIC_RELEASE
+ pas_wmaxi = pas_maxi = 0;
+#endif
+ return (-ENOSYS); /* Currently this is a no-op */
+}
+
#undef SPRINTF
-#define SPRINTF(args...) do { if(pos < buffer + length-80) pos += sprintf(pos, ## args); } while(0)
+#define SPRINTF(args...) seq_printf(m, ## args)
static
-char *lprint_Scsi_Cmnd(Scsi_Cmnd * cmd, char *pos, char *buffer, int length);
+void lprint_Scsi_Cmnd(Scsi_Cmnd * cmd, struct seq_file *m);
static
-char *lprint_command(unsigned char *cmd, char *pos, char *buffer, int len);
+void lprint_command(unsigned char *cmd, struct seq_file *m);
static
-char *lprint_opcode(int opcode, char *pos, char *buffer, int length);
+void lprint_opcode(int opcode, struct seq_file *m);
-static int __maybe_unused NCR5380_proc_info(struct Scsi_Host *instance,
- char *buffer, char **start, off_t offset, int length, int inout)
+static int __maybe_unused NCR5380_show_info(struct seq_file *m,
+ struct Scsi_Host *instance)
{
- char *pos = buffer;
struct NCR5380_hostdata *hostdata;
Scsi_Cmnd *ptr;
hostdata = (struct NCR5380_hostdata *) instance->hostdata;
- if (inout) { /* Has data been written to the file ? */
-#ifdef DTC_PUBLIC_RELEASE
- dtc_wmaxi = dtc_maxi = 0;
-#endif
-#ifdef PAS16_PUBLIC_RELEASE
- pas_wmaxi = pas_maxi = 0;
-#endif
- return (-ENOSYS); /* Currently this is a no-op */
- }
SPRINTF("NCR5380 core release=%d. ", NCR5380_PUBLIC_RELEASE);
if (((struct NCR5380_hostdata *) instance->hostdata)->flags & FLAG_NCR53C400)
SPRINTF("ncr53c400 release=%d. ", NCR53C400_PUBLIC_RELEASE);
@@ -755,46 +757,37 @@ static int __maybe_unused NCR5380_proc_info(struct Scsi_Host *instance,
if (!hostdata->connected)
SPRINTF("scsi%d: no currently connected command\n", instance->host_no);
else
- pos = lprint_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected, pos, buffer, length);
+ lprint_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected, m);
SPRINTF("scsi%d: issue_queue\n", instance->host_no);
for (ptr = (Scsi_Cmnd *) hostdata->issue_queue; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
- pos = lprint_Scsi_Cmnd(ptr, pos, buffer, length);
+ lprint_Scsi_Cmnd(ptr, m);
SPRINTF("scsi%d: disconnected_queue\n", instance->host_no);
for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
- pos = lprint_Scsi_Cmnd(ptr, pos, buffer, length);
+ lprint_Scsi_Cmnd(ptr, m);
spin_unlock_irq(instance->host_lock);
-
- *start = buffer;
- if (pos - buffer < offset)
- return 0;
- else if (pos - buffer - offset < length)
- return pos - buffer - offset;
- return length;
+ return 0;
}
-static char *lprint_Scsi_Cmnd(Scsi_Cmnd * cmd, char *pos, char *buffer, int length)
+static void lprint_Scsi_Cmnd(Scsi_Cmnd * cmd, struct seq_file *m)
{
SPRINTF("scsi%d : destination target %d, lun %d\n", cmd->device->host->host_no, cmd->device->id, cmd->device->lun);
SPRINTF(" command = ");
- pos = lprint_command(cmd->cmnd, pos, buffer, length);
- return (pos);
+ lprint_command(cmd->cmnd, m);
}
-static char *lprint_command(unsigned char *command, char *pos, char *buffer, int length)
+static void lprint_command(unsigned char *command, struct seq_file *m)
{
int i, s;
- pos = lprint_opcode(command[0], pos, buffer, length);
+ lprint_opcode(command[0], m);
for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
SPRINTF("%02x ", command[i]);
SPRINTF("\n");
- return (pos);
}
-static char *lprint_opcode(int opcode, char *pos, char *buffer, int length)
+static void lprint_opcode(int opcode, struct seq_file *m)
{
SPRINTF("%2d (0x%02x)", opcode, opcode);
- return (pos);
}
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index fd40a32..14964d0 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -314,8 +314,10 @@ static void NCR5380_print(struct Scsi_Host *instance);
static int NCR5380_abort(Scsi_Cmnd * cmd);
static int NCR5380_bus_reset(Scsi_Cmnd * cmd);
static int NCR5380_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
-static int __maybe_unused NCR5380_proc_info(struct Scsi_Host *instance,
- char *buffer, char **start, off_t offset, int length, int inout);
+static int __maybe_unused NCR5380_show_info(struct seq_file *,
+ struct Scsi_Host *);
+static int __maybe_unused NCR5380_write_info(struct Scsi_Host *instance,
+ char *buffer, int length);
static void NCR5380_reselect(struct Scsi_Host *instance);
static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag);
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 3e09aa2..30fa38a 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -166,7 +166,8 @@ static int a2091_bus_reset(struct scsi_cmnd *cmd)
static struct scsi_host_template a2091_scsi_template = {
.module = THIS_MODULE,
.name = "Commodore A2091/A590 SCSI",
- .proc_info = wd33c93_proc_info,
+ .show_info = wd33c93_show_info,
+ .write_info = wd33c93_write_info,
.proc_name = "A2901",
.queuecommand = wd33c93_queuecommand,
.eh_abort_handler = wd33c93_abort,
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index e29fe0e..c487916 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -181,7 +181,8 @@ static int a3000_bus_reset(struct scsi_cmnd *cmd)
static struct scsi_host_template amiga_a3000_scsi_template = {
.module = THIS_MODULE,
.name = "Amiga 3000 built-in SCSI",
- .proc_info = wd33c93_proc_info,
+ .show_info = wd33c93_show_info,
+ .write_info = wd33c93_write_info,
.proc_name = "A3000",
.queuecommand = wd33c93_queuecommand,
.eh_abort_handler = wd33c93_abort,
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index dcfaee6..c67e401 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2178,22 +2178,6 @@ do { \
#define ASC_INFO_SIZE 128 /* advansys_info() line size */
-#ifdef CONFIG_PROC_FS
-/* /proc/scsi/advansys/[0...] related definitions */
-#define ASC_PRTBUF_SIZE 2048
-#define ASC_PRTLINE_SIZE 160
-
-#define ASC_PRT_NEXT() \
- if (cp) { \
- totlen += len; \
- leftlen -= len; \
- if (leftlen == 0) { \
- return totlen; \
- } \
- cp += len; \
- }
-#endif /* CONFIG_PROC_FS */
-
/* Asc Library return codes */
#define ASC_TRUE 1
#define ASC_FALSE 0
@@ -2384,7 +2368,6 @@ struct asc_board {
} eep_config;
ulong last_reset; /* Saved last reset time */
/* /proc/scsi/advansys/[0...] */
- char *prtbuf; /* /proc print buffer */
#ifdef ADVANSYS_STATS
struct asc_stats asc_stats; /* Board statistics */
#endif /* ADVANSYS_STATS */
@@ -2875,64 +2858,21 @@ static const char *advansys_info(struct Scsi_Host *shost)
}
#ifdef CONFIG_PROC_FS
-/*
- * asc_prt_line()
- *
- * If 'cp' is NULL print to the console, otherwise print to a buffer.
- *
- * Return 0 if printing to the console, otherwise return the number of
- * bytes written to the buffer.
- *
- * Note: If any single line is greater than ASC_PRTLINE_SIZE bytes the stack
- * will be corrupted. 's[]' is defined to be ASC_PRTLINE_SIZE bytes.
- */
-static int asc_prt_line(char *buf, int buflen, char *fmt, ...)
-{
- va_list args;
- int ret;
- char s[ASC_PRTLINE_SIZE];
-
- va_start(args, fmt);
- ret = vsprintf(s, fmt, args);
- BUG_ON(ret >= ASC_PRTLINE_SIZE);
- if (buf == NULL) {
- (void)printk(s);
- ret = 0;
- } else {
- ret = min(buflen, ret);
- memcpy(buf, s, ret);
- }
- va_end(args);
- return ret;
-}
/*
* asc_prt_board_devices()
*
* Print driver information for devices attached to the board.
- *
- * Note: no single line should be greater than ASC_PRTLINE_SIZE,
- * cf. asc_prt_line().
- *
- * Return the number of characters copied into 'cp'. No more than
- * 'cplen' characters will be copied to 'cp'.
*/
-static int asc_prt_board_devices(struct Scsi_Host *shost, char *cp, int cplen)
+static void asc_prt_board_devices(struct seq_file *m, struct Scsi_Host *shost)
{
struct asc_board *boardp = shost_priv(shost);
- int leftlen;
- int totlen;
- int len;
int chip_scsi_id;
int i;
- leftlen = cplen;
- totlen = len = 0;
-
- len = asc_prt_line(cp, leftlen,
- "\nDevice Information for AdvanSys SCSI Host %d:\n",
- shost->host_no);
- ASC_PRT_NEXT();
+ seq_printf(m,
+ "\nDevice Information for AdvanSys SCSI Host %d:\n",
+ shost->host_no);
if (ASC_NARROW_BOARD(boardp)) {
chip_scsi_id = boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id;
@@ -2940,60 +2880,42 @@ static int asc_prt_board_devices(struct Scsi_Host *shost, char *cp, int cplen)
chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id;
}
- len = asc_prt_line(cp, leftlen, "Target IDs Detected:");
- ASC_PRT_NEXT();
+ seq_printf(m, "Target IDs Detected:");
for (i = 0; i <= ADV_MAX_TID; i++) {
- if (boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) {
- len = asc_prt_line(cp, leftlen, " %X,", i);
- ASC_PRT_NEXT();
- }
+ if (boardp->init_tidmask & ADV_TID_TO_TIDMASK(i))
+ seq_printf(m, " %X,", i);
}
- len = asc_prt_line(cp, leftlen, " (%X=Host Adapter)\n", chip_scsi_id);
- ASC_PRT_NEXT();
-
- return totlen;
+ seq_printf(m, " (%X=Host Adapter)\n", chip_scsi_id);
}
/*
* Display Wide Board BIOS Information.
*/
-static int asc_prt_adv_bios(struct Scsi_Host *shost, char *cp, int cplen)
+static void asc_prt_adv_bios(struct seq_file *m, struct Scsi_Host *shost)
{
struct asc_board *boardp = shost_priv(shost);
- int leftlen;
- int totlen;
- int len;
ushort major, minor, letter;
- leftlen = cplen;
- totlen = len = 0;
-
- len = asc_prt_line(cp, leftlen, "\nROM BIOS Version: ");
- ASC_PRT_NEXT();
+ seq_printf(m, "\nROM BIOS Version: ");
/*
* If the BIOS saved a valid signature, then fill in
* the BIOS code segment base address.
*/
if (boardp->bios_signature != 0x55AA) {
- len = asc_prt_line(cp, leftlen, "Disabled or Pre-3.1\n");
- ASC_PRT_NEXT();
- len = asc_prt_line(cp, leftlen,
- "BIOS either disabled or Pre-3.1. If it is pre-3.1, then a newer version\n");
- ASC_PRT_NEXT();
- len = asc_prt_line(cp, leftlen,
- "can be found at the ConnectCom FTP site: ftp://ftp.connectcom.net/pub\n");
- ASC_PRT_NEXT();
+ seq_printf(m, "Disabled or Pre-3.1\n");
+ seq_printf(m,
+ "BIOS either disabled or Pre-3.1. If it is pre-3.1, then a newer version\n");
+ seq_printf(m,
+ "can be found at the ConnectCom FTP site: ftp://ftp.connectcom.net/pub\n");
} else {
major = (boardp->bios_version >> 12) & 0xF;
minor = (boardp->bios_version >> 8) & 0xF;
letter = (boardp->bios_version & 0xFF);
- len = asc_prt_line(cp, leftlen, "%d.%d%c\n",
+ seq_printf(m, "%d.%d%c\n",
major, minor,
letter >= 26 ? '?' : letter + 'A');
- ASC_PRT_NEXT();
-
/*
* Current available ROM BIOS release is 3.1I for UW
* and 3.2I for U2W. This code doesn't differentiate
@@ -3001,16 +2923,12 @@ static int asc_prt_adv_bios(struct Scsi_Host *shost, char *cp, int cplen)
*/
if (major < 3 || (major <= 3 && minor < 1) ||
(major <= 3 && minor <= 1 && letter < ('I' - 'A'))) {
- len = asc_prt_line(cp, leftlen,
- "Newer version of ROM BIOS is available at the ConnectCom FTP site:\n");
- ASC_PRT_NEXT();
- len = asc_prt_line(cp, leftlen,
- "ftp://ftp.connectcom.net/pub\n");
- ASC_PRT_NEXT();
+ seq_printf(m,
+ "Newer version of ROM BIOS is available at the ConnectCom FTP site:\n");
+ seq_printf(m,
+ "ftp://ftp.connectcom.net/pub\n");
}
}
-
- return totlen;
}
/*
@@ -3115,20 +3033,11 @@ static int asc_get_eeprom_string(ushort *serialnum, uchar *cp)
* asc_prt_asc_board_eeprom()
*
* Print board EEPROM configuration.
- *
- * Note: no single line should be greater than ASC_PRTLINE_SIZE,
- * cf. asc_prt_line().
- *
- * Return the number of characters copied into 'cp'. No more than
- * 'cplen' characters will be copied to 'cp'.
*/
-static int asc_prt_asc_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen)
+static void asc_prt_asc_board_eeprom(struct seq_file *m, struct Scsi_Host *shost)
{
struct asc_board *boardp = shost_priv(shost);
ASC_DVC_VAR *asc_dvc_varp;
- int leftlen;
- int totlen;
- int len;
ASCEEP_CONFIG *ep;
int i;
#ifdef CONFIG_ISA
@@ -3139,129 +3048,75 @@ static int asc_prt_asc_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen
asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
ep = &boardp->eep_config.asc_eep;
- leftlen = cplen;
- totlen = len = 0;
-
- len = asc_prt_line(cp, leftlen,
- "\nEEPROM Settings for AdvanSys SCSI Host %d:\n",
- shost->host_no);
- ASC_PRT_NEXT();
+ seq_printf(m,
+ "\nEEPROM Settings for AdvanSys SCSI Host %d:\n",
+ shost->host_no);
if (asc_get_eeprom_string((ushort *)&ep->adapter_info[0], serialstr)
- == ASC_TRUE) {
- len =
- asc_prt_line(cp, leftlen, " Serial Number: %s\n",
- serialstr);
- ASC_PRT_NEXT();
- } else {
- if (ep->adapter_info[5] == 0xBB) {
- len = asc_prt_line(cp, leftlen,
- " Default Settings Used for EEPROM-less Adapter.\n");
- ASC_PRT_NEXT();
- } else {
- len = asc_prt_line(cp, leftlen,
- " Serial Number Signature Not Present.\n");
- ASC_PRT_NEXT();
- }
- }
-
- len = asc_prt_line(cp, leftlen,
- " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
- ASC_EEP_GET_CHIP_ID(ep), ep->max_total_qng,
- ep->max_tag_qng);
- ASC_PRT_NEXT();
-
- len = asc_prt_line(cp, leftlen,
- " cntl 0x%x, no_scam 0x%x\n", ep->cntl, ep->no_scam);
- ASC_PRT_NEXT();
-
- len = asc_prt_line(cp, leftlen, " Target ID: ");
- ASC_PRT_NEXT();
- for (i = 0; i <= ASC_MAX_TID; i++) {
- len = asc_prt_line(cp, leftlen, " %d", i);
- ASC_PRT_NEXT();
- }
- len = asc_prt_line(cp, leftlen, "\n");
- ASC_PRT_NEXT();
-
- len = asc_prt_line(cp, leftlen, " Disconnects: ");
- ASC_PRT_NEXT();
- for (i = 0; i <= ASC_MAX_TID; i++) {
- len = asc_prt_line(cp, leftlen, " %c",
- (ep->
- disc_enable & ADV_TID_TO_TIDMASK(i)) ? 'Y' :
- 'N');
- ASC_PRT_NEXT();
- }
- len = asc_prt_line(cp, leftlen, "\n");
- ASC_PRT_NEXT();
-
- len = asc_prt_line(cp, leftlen, " Command Queuing: ");
- ASC_PRT_NEXT();
- for (i = 0; i <= ASC_MAX_TID; i++) {
- len = asc_prt_line(cp, leftlen, " %c",
- (ep->
- use_cmd_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' :
- 'N');
- ASC_PRT_NEXT();
- }
- len = asc_prt_line(cp, leftlen, "\n");
- ASC_PRT_NEXT();
-
- len = asc_prt_line(cp, leftlen, " Start Motor: ");
- ASC_PRT_NEXT();
- for (i = 0; i <= ASC_MAX_TID; i++) {
- len = asc_prt_line(cp, leftlen, " %c",
- (ep->
- start_motor & ADV_TID_TO_TIDMASK(i)) ? 'Y' :
- 'N');
- ASC_PRT_NEXT();
- }
- len = asc_prt_line(cp, leftlen, "\n");
- ASC_PRT_NEXT();
-
- len = asc_prt_line(cp, leftlen, " Synchronous Transfer:");
- ASC_PRT_NEXT();
- for (i = 0; i <= ASC_MAX_TID; i++) {
- len = asc_prt_line(cp, leftlen, " %c",
- (ep->
- init_sdtr & ADV_TID_TO_TIDMASK(i)) ? 'Y' :
- 'N');
- ASC_PRT_NEXT();
- }
- len = asc_prt_line(cp, leftlen, "\n");
- ASC_PRT_NEXT();
+ == ASC_TRUE)
+ seq_printf(m, " Serial Number: %s\n", serialstr);
+ else if (ep->adapter_info[5] == 0xBB)
+ seq_printf(m,
+ " Default Settings Used for EEPROM-less Adapter.\n");
+ else
+ seq_printf(m,
+ " Serial Number Signature Not Present.\n");
+
+ seq_printf(m,
+ " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
+ ASC_EEP_GET_CHIP_ID(ep), ep->max_total_qng,
+ ep->max_tag_qng);
+
+ seq_printf(m,
+ " cntl 0x%x, no_scam 0x%x\n", ep->cntl, ep->no_scam);
+
+ seq_printf(m, " Target ID: ");
+ for (i = 0; i <= ASC_MAX_TID; i++)
+ seq_printf(m, " %d", i);
+ seq_printf(m, "\n");
+
+ seq_printf(m, " Disconnects: ");
+ for (i = 0; i <= ASC_MAX_TID; i++)
+ seq_printf(m, " %c",
+ (ep->disc_enable & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ seq_printf(m, "\n");
+
+ seq_printf(m, " Command Queuing: ");
+ for (i = 0; i <= ASC_MAX_TID; i++)
+ seq_printf(m, " %c",
+ (ep->use_cmd_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ seq_printf(m, "\n");
+
+ seq_printf(m, " Start Motor: ");
+ for (i = 0; i <= ASC_MAX_TID; i++)
+ seq_printf(m, " %c",
+ (ep->start_motor & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ seq_printf(m, "\n");
+
+ seq_printf(m, " Synchronous Transfer:");
+ for (i = 0; i <= ASC_MAX_TID; i++)
+ seq_printf(m, " %c",
+ (ep->init_sdtr & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ seq_printf(m, "\n");
#ifdef CONFIG_ISA
if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
- len = asc_prt_line(cp, leftlen,
- " Host ISA DMA speed: %d MB/S\n",
- isa_dma_speed[ASC_EEP_GET_DMA_SPD(ep)]);
- ASC_PRT_NEXT();
+ seq_printf(m,
+ " Host ISA DMA speed: %d MB/S\n",
+ isa_dma_speed[ASC_EEP_GET_DMA_SPD(ep)]);
}
#endif /* CONFIG_ISA */
-
- return totlen;
}
/*
* asc_prt_adv_board_eeprom()
*
* Print board EEPROM configuration.
- *
- * Note: no single line should be greater than ASC_PRTLINE_SIZE,
- * cf. asc_prt_line().
- *
- * Return the number of characters copied into 'cp'. No more than
- * 'cplen' characters will be copied to 'cp'.
*/
-static int asc_prt_adv_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen)
+static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost)
{
struct asc_board *boardp = shost_priv(shost);
ADV_DVC_VAR *adv_dvc_varp;
- int leftlen;
- int totlen;
- int len;
int i;
char *termstr;
uchar serialstr[13];
@@ -3281,13 +3136,9 @@ static int asc_prt_adv_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen
ep_38C1600 = &boardp->eep_config.adv_38C1600_eep;
}
- leftlen = cplen;
- totlen = len = 0;
-
- len = asc_prt_line(cp, leftlen,
- "\nEEPROM Settings for AdvanSys SCSI Host %d:\n",
- shost->host_no);
- ASC_PRT_NEXT();
+ seq_printf(m,
+ "\nEEPROM Settings for AdvanSys SCSI Host %d:\n",
+ shost->host_no);
if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
wordp = &ep_3550->serial_number_word1;
@@ -3297,38 +3148,28 @@ static int asc_prt_adv_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen
wordp = &ep_38C1600->serial_number_word1;
}
- if (asc_get_eeprom_string(wordp, serialstr) == ASC_TRUE) {
- len =
- asc_prt_line(cp, leftlen, " Serial Number: %s\n",
- serialstr);
- ASC_PRT_NEXT();
- } else {
- len = asc_prt_line(cp, leftlen,
- " Serial Number Signature Not Present.\n");
- ASC_PRT_NEXT();
- }
+ if (asc_get_eeprom_string(wordp, serialstr) == ASC_TRUE)
+ seq_printf(m, " Serial Number: %s\n", serialstr);
+ else
+ seq_printf(m, " Serial Number Signature Not Present.\n");
- if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
- len = asc_prt_line(cp, leftlen,
- " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
- ep_3550->adapter_scsi_id,
- ep_3550->max_host_qng, ep_3550->max_dvc_qng);
- ASC_PRT_NEXT();
- } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
- len = asc_prt_line(cp, leftlen,
- " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
- ep_38C0800->adapter_scsi_id,
- ep_38C0800->max_host_qng,
- ep_38C0800->max_dvc_qng);
- ASC_PRT_NEXT();
- } else {
- len = asc_prt_line(cp, leftlen,
- " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
- ep_38C1600->adapter_scsi_id,
- ep_38C1600->max_host_qng,
- ep_38C1600->max_dvc_qng);
- ASC_PRT_NEXT();
- }
+ if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550)
+ seq_printf(m,
+ " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
+ ep_3550->adapter_scsi_id,
+ ep_3550->max_host_qng, ep_3550->max_dvc_qng);
+ else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800)
+ seq_printf(m,
+ " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
+ ep_38C0800->adapter_scsi_id,
+ ep_38C0800->max_host_qng,
+ ep_38C0800->max_dvc_qng);
+ else
+ seq_printf(m,
+ " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
+ ep_38C1600->adapter_scsi_id,
+ ep_38C1600->max_host_qng,
+ ep_38C1600->max_dvc_qng);
if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
word = ep_3550->termination;
} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
@@ -3352,34 +3193,26 @@ static int asc_prt_adv_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen
break;
}
- if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
- len = asc_prt_line(cp, leftlen,
- " termination: %u (%s), bios_ctrl: 0x%x\n",
- ep_3550->termination, termstr,
- ep_3550->bios_ctrl);
- ASC_PRT_NEXT();
- } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
- len = asc_prt_line(cp, leftlen,
- " termination: %u (%s), bios_ctrl: 0x%x\n",
- ep_38C0800->termination_lvd, termstr,
- ep_38C0800->bios_ctrl);
- ASC_PRT_NEXT();
- } else {
- len = asc_prt_line(cp, leftlen,
- " termination: %u (%s), bios_ctrl: 0x%x\n",
- ep_38C1600->termination_lvd, termstr,
- ep_38C1600->bios_ctrl);
- ASC_PRT_NEXT();
- }
+ if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550)
+ seq_printf(m,
+ " termination: %u (%s), bios_ctrl: 0x%x\n",
+ ep_3550->termination, termstr,
+ ep_3550->bios_ctrl);
+ else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800)
+ seq_printf(m,
+ " termination: %u (%s), bios_ctrl: 0x%x\n",
+ ep_38C0800->termination_lvd, termstr,
+ ep_38C0800->bios_ctrl);
+ else
+ seq_printf(m,
+ " termination: %u (%s), bios_ctrl: 0x%x\n",
+ ep_38C1600->termination_lvd, termstr,
+ ep_38C1600->bios_ctrl);
- len = asc_prt_line(cp, leftlen, " Target ID: ");
- ASC_PRT_NEXT();
- for (i = 0; i <= ADV_MAX_TID; i++) {
- len = asc_prt_line(cp, leftlen, " %X", i);
- ASC_PRT_NEXT();
- }
- len = asc_prt_line(cp, leftlen, "\n");
- ASC_PRT_NEXT();
+ seq_printf(m, " Target ID: ");
+ for (i = 0; i <= ADV_MAX_TID; i++)
+ seq_printf(m, " %X", i);
+ seq_printf(m, "\n");
if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
word = ep_3550->disc_enable;
@@ -3388,15 +3221,11 @@ static int asc_prt_adv_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen
} else {
word = ep_38C1600->disc_enable;
}
- len = asc_prt_line(cp, leftlen, " Disconnects: ");
- ASC_PRT_NEXT();
- for (i = 0; i <= ADV_MAX_TID; i++) {
- len = asc_prt_line(cp, leftlen, " %c",
- (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
- ASC_PRT_NEXT();
- }
- len = asc_prt_line(cp, leftlen, "\n");
- ASC_PRT_NEXT();
+ seq_printf(m, " Disconnects: ");
+ for (i = 0; i <= ADV_MAX_TID; i++)
+ seq_printf(m, " %c",
+ (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ seq_printf(m, "\n");
if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
word = ep_3550->tagqng_able;
@@ -3405,15 +3234,11 @@ static int asc_prt_adv_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen
} else {
word = ep_38C1600->tagqng_able;
}
- len = asc_prt_line(cp, leftlen, " Command Queuing: ");
- ASC_PRT_NEXT();
- for (i = 0; i <= ADV_MAX_TID; i++) {
- len = asc_prt_line(cp, leftlen, " %c",
- (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
- ASC_PRT_NEXT();
- }
- len = asc_prt_line(cp, leftlen, "\n");
- ASC_PRT_NEXT();
+ seq_printf(m, " Command Queuing: ");
+ for (i = 0; i <= ADV_MAX_TID; i++)
+ seq_printf(m, " %c",
+ (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ seq_printf(m, "\n");
if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
word = ep_3550->start_motor;
@@ -3422,42 +3247,28 @@ static int asc_prt_adv_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen
} else {
word = ep_38C1600->start_motor;
}
- len = asc_prt_line(cp, leftlen, " Start Motor: ");
- ASC_PRT_NEXT();
- for (i = 0; i <= ADV_MAX_TID; i++) {
- len = asc_prt_line(cp, leftlen, " %c",
- (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
- ASC_PRT_NEXT();
- }
- len = asc_prt_line(cp, leftlen, "\n");
- ASC_PRT_NEXT();
+ seq_printf(m, " Start Motor: ");
+ for (i = 0; i <= ADV_MAX_TID; i++)
+ seq_printf(m, " %c",
+ (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ seq_printf(m, "\n");
if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
- len = asc_prt_line(cp, leftlen, " Synchronous Transfer:");
- ASC_PRT_NEXT();
- for (i = 0; i <= ADV_MAX_TID; i++) {
- len = asc_prt_line(cp, leftlen, " %c",
- (ep_3550->
- sdtr_able & ADV_TID_TO_TIDMASK(i)) ?
- 'Y' : 'N');
- ASC_PRT_NEXT();
- }
- len = asc_prt_line(cp, leftlen, "\n");
- ASC_PRT_NEXT();
+ seq_printf(m, " Synchronous Transfer:");
+ for (i = 0; i <= ADV_MAX_TID; i++)
+ seq_printf(m, " %c",
+ (ep_3550->sdtr_able & ADV_TID_TO_TIDMASK(i)) ?
+ 'Y' : 'N');
+ seq_printf(m, "\n");
}
if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
- len = asc_prt_line(cp, leftlen, " Ultra Transfer: ");
- ASC_PRT_NEXT();
- for (i = 0; i <= ADV_MAX_TID; i++) {
- len = asc_prt_line(cp, leftlen, " %c",
- (ep_3550->
- ultra_able & ADV_TID_TO_TIDMASK(i))
- ? 'Y' : 'N');
- ASC_PRT_NEXT();
- }
- len = asc_prt_line(cp, leftlen, "\n");
- ASC_PRT_NEXT();
+ seq_printf(m, " Ultra Transfer: ");
+ for (i = 0; i <= ADV_MAX_TID; i++)
+ seq_printf(m, " %c",
+ (ep_3550->ultra_able & ADV_TID_TO_TIDMASK(i))
+ ? 'Y' : 'N');
+ seq_printf(m, "\n");
}
if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
@@ -3467,21 +3278,16 @@ static int asc_prt_adv_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen
} else {
word = ep_38C1600->wdtr_able;
}
- len = asc_prt_line(cp, leftlen, " Wide Transfer: ");
- ASC_PRT_NEXT();
- for (i = 0; i <= ADV_MAX_TID; i++) {
- len = asc_prt_line(cp, leftlen, " %c",
- (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
- ASC_PRT_NEXT();
- }
- len = asc_prt_line(cp, leftlen, "\n");
- ASC_PRT_NEXT();
+ seq_printf(m, " Wide Transfer: ");
+ for (i = 0; i <= ADV_MAX_TID; i++)
+ seq_printf(m, " %c",
+ (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ seq_printf(m, "\n");
if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800 ||
adv_dvc_varp->chip_type == ADV_CHIP_ASC38C1600) {
- len = asc_prt_line(cp, leftlen,
- " Synchronous Transfer Speed (Mhz):\n ");
- ASC_PRT_NEXT();
+ seq_printf(m,
+ " Synchronous Transfer Speed (Mhz):\n ");
for (i = 0; i <= ADV_MAX_TID; i++) {
char *speed_str;
@@ -3517,99 +3323,64 @@ static int asc_prt_adv_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen
speed_str = "Unk";
break;
}
- len = asc_prt_line(cp, leftlen, "%X:%s ", i, speed_str);
- ASC_PRT_NEXT();
- if (i == 7) {
- len = asc_prt_line(cp, leftlen, "\n ");
- ASC_PRT_NEXT();
- }
+ seq_printf(m, "%X:%s ", i, speed_str);
+ if (i == 7)
+ seq_printf(m, "\n ");
sdtr_speed >>= 4;
}
- len = asc_prt_line(cp, leftlen, "\n");
- ASC_PRT_NEXT();
+ seq_printf(m, "\n");
}
-
- return totlen;
}
/*
* asc_prt_driver_conf()
- *
- * Note: no single line should be greater than ASC_PRTLINE_SIZE,
- * cf. asc_prt_line().
- *
- * Return the number of characters copied into 'cp'. No more than
- * 'cplen' characters will be copied to 'cp'.
*/
-static int asc_prt_driver_conf(struct Scsi_Host *shost, char *cp, int cplen)
+static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost)
{
struct asc_board *boardp = shost_priv(shost);
- int leftlen;
- int totlen;
- int len;
int chip_scsi_id;
- leftlen = cplen;
- totlen = len = 0;
+ seq_printf(m,
+ "\nLinux Driver Configuration and Information for AdvanSys SCSI Host %d:\n",
+ shost->host_no);
- len = asc_prt_line(cp, leftlen,
- "\nLinux Driver Configuration and Information for AdvanSys SCSI Host %d:\n",
- shost->host_no);
- ASC_PRT_NEXT();
+ seq_printf(m,
+ " host_busy %u, last_reset %lu, max_id %u, max_lun %u, max_channel %u\n",
+ shost->host_busy, shost->last_reset, shost->max_id,
+ shost->max_lun, shost->max_channel);
- len = asc_prt_line(cp, leftlen,
- " host_busy %u, last_reset %u, max_id %u, max_lun %u, max_channel %u\n",
- shost->host_busy, shost->last_reset, shost->max_id,
- shost->max_lun, shost->max_channel);
- ASC_PRT_NEXT();
+ seq_printf(m,
+ " unique_id %d, can_queue %d, this_id %d, sg_tablesize %u, cmd_per_lun %u\n",
+ shost->unique_id, shost->can_queue, shost->this_id,
+ shost->sg_tablesize, shost->cmd_per_lun);
- len = asc_prt_line(cp, leftlen,
- " unique_id %d, can_queue %d, this_id %d, sg_tablesize %u, cmd_per_lun %u\n",
- shost->unique_id, shost->can_queue, shost->this_id,
- shost->sg_tablesize, shost->cmd_per_lun);
- ASC_PRT_NEXT();
+ seq_printf(m,
+ " unchecked_isa_dma %d, use_clustering %d\n",
+ shost->unchecked_isa_dma, shost->use_clustering);
- len = asc_prt_line(cp, leftlen,
- " unchecked_isa_dma %d, use_clustering %d\n",
- shost->unchecked_isa_dma, shost->use_clustering);
- ASC_PRT_NEXT();
+ seq_printf(m,
+ " flags 0x%x, last_reset 0x%lx, jiffies 0x%lx, asc_n_io_port 0x%x\n",
+ boardp->flags, boardp->last_reset, jiffies,
+ boardp->asc_n_io_port);
- len = asc_prt_line(cp, leftlen,
- " flags 0x%x, last_reset 0x%x, jiffies 0x%x, asc_n_io_port 0x%x\n",
- boardp->flags, boardp->last_reset, jiffies,
- boardp->asc_n_io_port);
- ASC_PRT_NEXT();
-
- len = asc_prt_line(cp, leftlen, " io_port 0x%x\n", shost->io_port);
- ASC_PRT_NEXT();
+ seq_printf(m, " io_port 0x%lx\n", shost->io_port);
if (ASC_NARROW_BOARD(boardp)) {
chip_scsi_id = boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id;
} else {
chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id;
}
-
- return totlen;
}
/*
* asc_prt_asc_board_info()
*
* Print dynamic board configuration information.
- *
- * Note: no single line should be greater than ASC_PRTLINE_SIZE,
- * cf. asc_prt_line().
- *
- * Return the number of characters copied into 'cp'. No more than
- * 'cplen' characters will be copied to 'cp'.
*/
-static int asc_prt_asc_board_info(struct Scsi_Host *shost, char *cp, int cplen)
+static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
{
struct asc_board *boardp = shost_priv(shost);
int chip_scsi_id;
- int leftlen;
- int totlen;
- int len;
ASC_DVC_VAR *v;
ASC_DVC_CFG *c;
int i;
@@ -3619,105 +3390,79 @@ static int asc_prt_asc_board_info(struct Scsi_Host *shost, char *cp, int cplen)
c = &boardp->dvc_cfg.asc_dvc_cfg;
chip_scsi_id = c->chip_scsi_id;
- leftlen = cplen;
- totlen = len = 0;
+ seq_printf(m,
+ "\nAsc Library Configuration and Statistics for AdvanSys SCSI Host %d:\n",
+ shost->host_no);
- len = asc_prt_line(cp, leftlen,
- "\nAsc Library Configuration and Statistics for AdvanSys SCSI Host %d:\n",
- shost->host_no);
- ASC_PRT_NEXT();
-
- len = asc_prt_line(cp, leftlen, " chip_version %u, mcode_date 0x%x, "
- "mcode_version 0x%x, err_code %u\n",
- c->chip_version, c->mcode_date, c->mcode_version,
- v->err_code);
- ASC_PRT_NEXT();
+ seq_printf(m, " chip_version %u, mcode_date 0x%x, "
+ "mcode_version 0x%x, err_code %u\n",
+ c->chip_version, c->mcode_date, c->mcode_version,
+ v->err_code);
/* Current number of commands waiting for the host. */
- len = asc_prt_line(cp, leftlen,
- " Total Command Pending: %d\n", v->cur_total_qng);
- ASC_PRT_NEXT();
+ seq_printf(m,
+ " Total Command Pending: %d\n", v->cur_total_qng);
- len = asc_prt_line(cp, leftlen, " Command Queuing:");
- ASC_PRT_NEXT();
+ seq_printf(m, " Command Queuing:");
for (i = 0; i <= ASC_MAX_TID; i++) {
if ((chip_scsi_id == i) ||
((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
continue;
}
- len = asc_prt_line(cp, leftlen, " %X:%c",
- i,
- (v->
- use_tagged_qng & ADV_TID_TO_TIDMASK(i)) ?
- 'Y' : 'N');
- ASC_PRT_NEXT();
+ seq_printf(m, " %X:%c",
+ i,
+ (v->use_tagged_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
}
- len = asc_prt_line(cp, leftlen, "\n");
- ASC_PRT_NEXT();
+ seq_printf(m, "\n");
/* Current number of commands waiting for a device. */
- len = asc_prt_line(cp, leftlen, " Command Queue Pending:");
- ASC_PRT_NEXT();
+ seq_printf(m, " Command Queue Pending:");
for (i = 0; i <= ASC_MAX_TID; i++) {
if ((chip_scsi_id == i) ||
((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
continue;
}
- len = asc_prt_line(cp, leftlen, " %X:%u", i, v->cur_dvc_qng[i]);
- ASC_PRT_NEXT();
+ seq_printf(m, " %X:%u", i, v->cur_dvc_qng[i]);
}
- len = asc_prt_line(cp, leftlen, "\n");
- ASC_PRT_NEXT();
+ seq_printf(m, "\n");
/* Current limit on number of commands that can be sent to a device. */
- len = asc_prt_line(cp, leftlen, " Command Queue Limit:");
- ASC_PRT_NEXT();
+ seq_printf(m, " Command Queue Limit:");
for (i = 0; i <= ASC_MAX_TID; i++) {
if ((chip_scsi_id == i) ||
((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
continue;
}
- len = asc_prt_line(cp, leftlen, " %X:%u", i, v->max_dvc_qng[i]);
- ASC_PRT_NEXT();
+ seq_printf(m, " %X:%u", i, v->max_dvc_qng[i]);
}
- len = asc_prt_line(cp, leftlen, "\n");
- ASC_PRT_NEXT();
+ seq_printf(m, "\n");
/* Indicate whether the device has returned queue full status. */
- len = asc_prt_line(cp, leftlen, " Command Queue Full:");
- ASC_PRT_NEXT();
+ seq_printf(m, " Command Queue Full:");
for (i = 0; i <= ASC_MAX_TID; i++) {
if ((chip_scsi_id == i) ||
((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
continue;
}
- if (boardp->queue_full & ADV_TID_TO_TIDMASK(i)) {
- len = asc_prt_line(cp, leftlen, " %X:Y-%d",
- i, boardp->queue_full_cnt[i]);
- } else {
- len = asc_prt_line(cp, leftlen, " %X:N", i);
- }
- ASC_PRT_NEXT();
+ if (boardp->queue_full & ADV_TID_TO_TIDMASK(i))
+ seq_printf(m, " %X:Y-%d",
+ i, boardp->queue_full_cnt[i]);
+ else
+ seq_printf(m, " %X:N", i);
}
- len = asc_prt_line(cp, leftlen, "\n");
- ASC_PRT_NEXT();
+ seq_printf(m, "\n");
- len = asc_prt_line(cp, leftlen, " Synchronous Transfer:");
- ASC_PRT_NEXT();
+ seq_printf(m, " Synchronous Transfer:");
for (i = 0; i <= ASC_MAX_TID; i++) {
if ((chip_scsi_id == i) ||
((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
continue;
}
- len = asc_prt_line(cp, leftlen, " %X:%c",
- i,
- (v->
- sdtr_done & ADV_TID_TO_TIDMASK(i)) ? 'Y' :
- 'N');
- ASC_PRT_NEXT();
+ seq_printf(m, " %X:%c",
+ i,
+ (v->sdtr_done & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
}
- len = asc_prt_line(cp, leftlen, "\n");
- ASC_PRT_NEXT();
+ seq_printf(m, "\n");
for (i = 0; i <= ASC_MAX_TID; i++) {
uchar syn_period_ix;
@@ -3728,69 +3473,48 @@ static int asc_prt_asc_board_info(struct Scsi_Host *shost, char *cp, int cplen)
continue;
}
- len = asc_prt_line(cp, leftlen, " %X:", i);
- ASC_PRT_NEXT();
+ seq_printf(m, " %X:", i);
if ((boardp->sdtr_data[i] & ASC_SYN_MAX_OFFSET) == 0) {
- len = asc_prt_line(cp, leftlen, " Asynchronous");
- ASC_PRT_NEXT();
+ seq_printf(m, " Asynchronous");
} else {
syn_period_ix =
(boardp->sdtr_data[i] >> 4) & (v->max_sdtr_index -
1);
- len = asc_prt_line(cp, leftlen,
- " Transfer Period Factor: %d (%d.%d Mhz),",
- v->sdtr_period_tbl[syn_period_ix],
- 250 /
- v->sdtr_period_tbl[syn_period_ix],
- ASC_TENTHS(250,
- v->
- sdtr_period_tbl
- [syn_period_ix]));
- ASC_PRT_NEXT();
+ seq_printf(m,
+ " Transfer Period Factor: %d (%d.%d Mhz),",
+ v->sdtr_period_tbl[syn_period_ix],
+ 250 / v->sdtr_period_tbl[syn_period_ix],
+ ASC_TENTHS(250,
+ v->sdtr_period_tbl[syn_period_ix]));
- len = asc_prt_line(cp, leftlen, " REQ/ACK Offset: %d",
- boardp->
- sdtr_data[i] & ASC_SYN_MAX_OFFSET);
- ASC_PRT_NEXT();
+ seq_printf(m, " REQ/ACK Offset: %d",
+ boardp->sdtr_data[i] & ASC_SYN_MAX_OFFSET);
}
if ((v->sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
- len = asc_prt_line(cp, leftlen, "*\n");
+ seq_printf(m, "*\n");
renegotiate = 1;
} else {
- len = asc_prt_line(cp, leftlen, "\n");
+ seq_printf(m, "\n");
}
- ASC_PRT_NEXT();
}
if (renegotiate) {
- len = asc_prt_line(cp, leftlen,
- " * = Re-negotiation pending before next command.\n");
- ASC_PRT_NEXT();
+ seq_printf(m,
+ " * = Re-negotiation pending before next command.\n");
}
-
- return totlen;
}
/*
* asc_prt_adv_board_info()
*
* Print dynamic board configuration information.
- *
- * Note: no single line should be greater than ASC_PRTLINE_SIZE,
- * cf. asc_prt_line().
- *
- * Return the number of characters copied into 'cp'. No more than
- * 'cplen' characters will be copied to 'cp'.
*/
-static int asc_prt_adv_board_info(struct Scsi_Host *shost, char *cp, int cplen)
+static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
{
struct asc_board *boardp = shost_priv(shost);
- int leftlen;
- int totlen;
- int len;
int i;
ADV_DVC_VAR *v;
ADV_DVC_CFG *c;
@@ -3809,47 +3533,35 @@ static int asc_prt_adv_board_info(struct Scsi_Host *shost, char *cp, int cplen)
iop_base = v->iop_base;
chip_scsi_id = v->chip_scsi_id;
- leftlen = cplen;
- totlen = len = 0;
-
- len = asc_prt_line(cp, leftlen,
- "\nAdv Library Configuration and Statistics for AdvanSys SCSI Host %d:\n",
- shost->host_no);
- ASC_PRT_NEXT();
+ seq_printf(m,
+ "\nAdv Library Configuration and Statistics for AdvanSys SCSI Host %d:\n",
+ shost->host_no);
- len = asc_prt_line(cp, leftlen,
- " iop_base 0x%lx, cable_detect: %X, err_code %u\n",
- v->iop_base,
- AdvReadWordRegister(iop_base,
- IOPW_SCSI_CFG1) & CABLE_DETECT,
- v->err_code);
- ASC_PRT_NEXT();
+ seq_printf(m,
+ " iop_base 0x%lx, cable_detect: %X, err_code %u\n",
+ (unsigned long)v->iop_base,
+ AdvReadWordRegister(iop_base,IOPW_SCSI_CFG1) & CABLE_DETECT,
+ v->err_code);
- len = asc_prt_line(cp, leftlen, " chip_version %u, mcode_date 0x%x, "
- "mcode_version 0x%x\n", c->chip_version,
- c->mcode_date, c->mcode_version);
- ASC_PRT_NEXT();
+ seq_printf(m, " chip_version %u, mcode_date 0x%x, "
+ "mcode_version 0x%x\n", c->chip_version,
+ c->mcode_date, c->mcode_version);
AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able);
- len = asc_prt_line(cp, leftlen, " Queuing Enabled:");
- ASC_PRT_NEXT();
+ seq_printf(m, " Queuing Enabled:");
for (i = 0; i <= ADV_MAX_TID; i++) {
if ((chip_scsi_id == i) ||
((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
continue;
}
- len = asc_prt_line(cp, leftlen, " %X:%c",
- i,
- (tagqng_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' :
- 'N');
- ASC_PRT_NEXT();
+ seq_printf(m, " %X:%c",
+ i,
+ (tagqng_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
}
- len = asc_prt_line(cp, leftlen, "\n");
- ASC_PRT_NEXT();
+ seq_printf(m, "\n");
- len = asc_prt_line(cp, leftlen, " Queue Limit:");
- ASC_PRT_NEXT();
+ seq_printf(m, " Queue Limit:");
for (i = 0; i <= ADV_MAX_TID; i++) {
if ((chip_scsi_id == i) ||
((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3859,14 +3571,11 @@ static int asc_prt_adv_board_info(struct Scsi_Host *shost, char *cp, int cplen)
AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + i,
lrambyte);
- len = asc_prt_line(cp, leftlen, " %X:%d", i, lrambyte);
- ASC_PRT_NEXT();
+ seq_printf(m, " %X:%d", i, lrambyte);
}
- len = asc_prt_line(cp, leftlen, "\n");
- ASC_PRT_NEXT();
+ seq_printf(m, "\n");
- len = asc_prt_line(cp, leftlen, " Command Pending:");
- ASC_PRT_NEXT();
+ seq_printf(m, " Command Pending:");
for (i = 0; i <= ADV_MAX_TID; i++) {
if ((chip_scsi_id == i) ||
((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3876,33 +3585,26 @@ static int asc_prt_adv_board_info(struct Scsi_Host *shost, char *cp, int cplen)
AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_QUEUED_CMD + i,
lrambyte);
- len = asc_prt_line(cp, leftlen, " %X:%d", i, lrambyte);
- ASC_PRT_NEXT();
+ seq_printf(m, " %X:%d", i, lrambyte);
}
- len = asc_prt_line(cp, leftlen, "\n");
- ASC_PRT_NEXT();
+ seq_printf(m, "\n");
AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
- len = asc_prt_line(cp, leftlen, " Wide Enabled:");
- ASC_PRT_NEXT();
+ seq_printf(m, " Wide Enabled:");
for (i = 0; i <= ADV_MAX_TID; i++) {
if ((chip_scsi_id == i) ||
((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
continue;
}
- len = asc_prt_line(cp, leftlen, " %X:%c",
- i,
- (wdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' :
- 'N');
- ASC_PRT_NEXT();
+ seq_printf(m, " %X:%c",
+ i,
+ (wdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
}
- len = asc_prt_line(cp, leftlen, "\n");
- ASC_PRT_NEXT();
+ seq_printf(m, "\n");
AdvReadWordLram(iop_base, ASC_MC_WDTR_DONE, wdtr_done);
- len = asc_prt_line(cp, leftlen, " Transfer Bit Width:");
- ASC_PRT_NEXT();
+ seq_printf(m, " Transfer Bit Width:");
for (i = 0; i <= ADV_MAX_TID; i++) {
if ((chip_scsi_id == i) ||
((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3913,37 +3615,30 @@ static int asc_prt_adv_board_info(struct Scsi_Host *shost, char *cp, int cplen)
ASC_MC_DEVICE_HSHK_CFG_TABLE + (2 * i),
lramword);
- len = asc_prt_line(cp, leftlen, " %X:%d",
- i, (lramword & 0x8000) ? 16 : 8);
- ASC_PRT_NEXT();
+ seq_printf(m, " %X:%d",
+ i, (lramword & 0x8000) ? 16 : 8);
if ((wdtr_able & ADV_TID_TO_TIDMASK(i)) &&
(wdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
- len = asc_prt_line(cp, leftlen, "*");
- ASC_PRT_NEXT();
+ seq_printf(m, "*");
renegotiate = 1;
}
}
- len = asc_prt_line(cp, leftlen, "\n");
- ASC_PRT_NEXT();
+ seq_printf(m, "\n");
AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
- len = asc_prt_line(cp, leftlen, " Synchronous Enabled:");
- ASC_PRT_NEXT();
+ seq_printf(m, " Synchronous Enabled:");
for (i = 0; i <= ADV_MAX_TID; i++) {
if ((chip_scsi_id == i) ||
((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
continue;
}
- len = asc_prt_line(cp, leftlen, " %X:%c",
- i,
- (sdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' :
- 'N');
- ASC_PRT_NEXT();
+ seq_printf(m, " %X:%c",
+ i,
+ (sdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
}
- len = asc_prt_line(cp, leftlen, "\n");
- ASC_PRT_NEXT();
+ seq_printf(m, "\n");
AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, sdtr_done);
for (i = 0; i <= ADV_MAX_TID; i++) {
@@ -3959,358 +3654,170 @@ static int asc_prt_adv_board_info(struct Scsi_Host *shost, char *cp, int cplen)
continue;
}
- len = asc_prt_line(cp, leftlen, " %X:", i);
- ASC_PRT_NEXT();
+ seq_printf(m, " %X:", i);
if ((lramword & 0x1F) == 0) { /* Check for REQ/ACK Offset 0. */
- len = asc_prt_line(cp, leftlen, " Asynchronous");
- ASC_PRT_NEXT();
+ seq_printf(m, " Asynchronous");
} else {
- len =
- asc_prt_line(cp, leftlen,
- " Transfer Period Factor: ");
- ASC_PRT_NEXT();
+ seq_printf(m, " Transfer Period Factor: ");
if ((lramword & 0x1F00) == 0x1100) { /* 80 Mhz */
- len =
- asc_prt_line(cp, leftlen, "9 (80.0 Mhz),");
- ASC_PRT_NEXT();
+ seq_printf(m, "9 (80.0 Mhz),");
} else if ((lramword & 0x1F00) == 0x1000) { /* 40 Mhz */
- len =
- asc_prt_line(cp, leftlen, "10 (40.0 Mhz),");
- ASC_PRT_NEXT();
+ seq_printf(m, "10 (40.0 Mhz),");
} else { /* 20 Mhz or below. */
period = (((lramword >> 8) * 25) + 50) / 4;
if (period == 0) { /* Should never happen. */
- len =
- asc_prt_line(cp, leftlen,
- "%d (? Mhz), ");
- ASC_PRT_NEXT();
+ seq_printf(m, "%d (? Mhz), ", period);
} else {
- len = asc_prt_line(cp, leftlen,
- "%d (%d.%d Mhz),",
- period, 250 / period,
- ASC_TENTHS(250,
- period));
- ASC_PRT_NEXT();
+ seq_printf(m,
+ "%d (%d.%d Mhz),",
+ period, 250 / period,
+ ASC_TENTHS(250, period));
}
}
- len = asc_prt_line(cp, leftlen, " REQ/ACK Offset: %d",
- lramword & 0x1F);
- ASC_PRT_NEXT();
+ seq_printf(m, " REQ/ACK Offset: %d",
+ lramword & 0x1F);
}
if ((sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
- len = asc_prt_line(cp, leftlen, "*\n");
+ seq_printf(m, "*\n");
renegotiate = 1;
} else {
- len = asc_prt_line(cp, leftlen, "\n");
+ seq_printf(m, "\n");
}
- ASC_PRT_NEXT();
}
if (renegotiate) {
- len = asc_prt_line(cp, leftlen,
- " * = Re-negotiation pending before next command.\n");
- ASC_PRT_NEXT();
+ seq_printf(m,
+ " * = Re-negotiation pending before next command.\n");
}
-
- return totlen;
-}
-
-/*
- * asc_proc_copy()
- *
- * Copy proc information to a read buffer taking into account the current
- * read offset in the file and the remaining space in the read buffer.
- */
-static int
-asc_proc_copy(off_t advoffset, off_t offset, char *curbuf, int leftlen,
- char *cp, int cplen)
-{
- int cnt = 0;
-
- ASC_DBG(2, "offset %d, advoffset %d, cplen %d\n",
- (unsigned)offset, (unsigned)advoffset, cplen);
- if (offset <= advoffset) {
- /* Read offset below current offset, copy everything. */
- cnt = min(cplen, leftlen);
- ASC_DBG(2, "curbuf 0x%lx, cp 0x%lx, cnt %d\n",
- (ulong)curbuf, (ulong)cp, cnt);
- memcpy(curbuf, cp, cnt);
- } else if (offset < advoffset + cplen) {
- /* Read offset within current range, partial copy. */
- cnt = (advoffset + cplen) - offset;
- cp = (cp + cplen) - cnt;
- cnt = min(cnt, leftlen);
- ASC_DBG(2, "curbuf 0x%lx, cp 0x%lx, cnt %d\n",
- (ulong)curbuf, (ulong)cp, cnt);
- memcpy(curbuf, cp, cnt);
- }
- return cnt;
}
#ifdef ADVANSYS_STATS
/*
* asc_prt_board_stats()
- *
- * Note: no single line should be greater than ASC_PRTLINE_SIZE,
- * cf. asc_prt_line().
- *
- * Return the number of characters copied into 'cp'. No more than
- * 'cplen' characters will be copied to 'cp'.
*/
-static int asc_prt_board_stats(struct Scsi_Host *shost, char *cp, int cplen)
+static void asc_prt_board_stats(struct seq_file *m, struct Scsi_Host *shost)
{
struct asc_board *boardp = shost_priv(shost);
struct asc_stats *s = &boardp->asc_stats;
- int leftlen = cplen;
- int len, totlen = 0;
+ seq_printf(m,
+ "\nLinux Driver Statistics for AdvanSys SCSI Host %d:\n",
+ shost->host_no);
- len = asc_prt_line(cp, leftlen,
- "\nLinux Driver Statistics for AdvanSys SCSI Host %d:\n",
- shost->host_no);
- ASC_PRT_NEXT();
+ seq_printf(m,
+ " queuecommand %u, reset %u, biosparam %u, interrupt %u\n",
+ s->queuecommand, s->reset, s->biosparam,
+ s->interrupt);
- len = asc_prt_line(cp, leftlen,
- " queuecommand %lu, reset %lu, biosparam %lu, interrupt %lu\n",
- s->queuecommand, s->reset, s->biosparam,
- s->interrupt);
- ASC_PRT_NEXT();
+ seq_printf(m,
+ " callback %u, done %u, build_error %u, build_noreq %u, build_nosg %u\n",
+ s->callback, s->done, s->build_error,
+ s->adv_build_noreq, s->adv_build_nosg);
- len = asc_prt_line(cp, leftlen,
- " callback %lu, done %lu, build_error %lu, build_noreq %lu, build_nosg %lu\n",
- s->callback, s->done, s->build_error,
- s->adv_build_noreq, s->adv_build_nosg);
- ASC_PRT_NEXT();
-
- len = asc_prt_line(cp, leftlen,
- " exe_noerror %lu, exe_busy %lu, exe_error %lu, exe_unknown %lu\n",
- s->exe_noerror, s->exe_busy, s->exe_error,
- s->exe_unknown);
- ASC_PRT_NEXT();
+ seq_printf(m,
+ " exe_noerror %u, exe_busy %u, exe_error %u, exe_unknown %u\n",
+ s->exe_noerror, s->exe_busy, s->exe_error,
+ s->exe_unknown);
/*
* Display data transfer statistics.
*/
if (s->xfer_cnt > 0) {
- len = asc_prt_line(cp, leftlen, " xfer_cnt %lu, xfer_elem %lu, ",
- s->xfer_cnt, s->xfer_elem);
- ASC_PRT_NEXT();
+ seq_printf(m, " xfer_cnt %u, xfer_elem %u, ",
+ s->xfer_cnt, s->xfer_elem);
- len = asc_prt_line(cp, leftlen, "xfer_bytes %lu.%01lu kb\n",
- s->xfer_sect / 2, ASC_TENTHS(s->xfer_sect, 2));
- ASC_PRT_NEXT();
+ seq_printf(m, "xfer_bytes %u.%01u kb\n",
+ s->xfer_sect / 2, ASC_TENTHS(s->xfer_sect, 2));
/* Scatter gather transfer statistics */
- len = asc_prt_line(cp, leftlen, " avg_num_elem %lu.%01lu, ",
- s->xfer_elem / s->xfer_cnt,
- ASC_TENTHS(s->xfer_elem, s->xfer_cnt));
- ASC_PRT_NEXT();
+ seq_printf(m, " avg_num_elem %u.%01u, ",
+ s->xfer_elem / s->xfer_cnt,
+ ASC_TENTHS(s->xfer_elem, s->xfer_cnt));
- len = asc_prt_line(cp, leftlen, "avg_elem_size %lu.%01lu kb, ",
- (s->xfer_sect / 2) / s->xfer_elem,
- ASC_TENTHS((s->xfer_sect / 2), s->xfer_elem));
- ASC_PRT_NEXT();
+ seq_printf(m, "avg_elem_size %u.%01u kb, ",
+ (s->xfer_sect / 2) / s->xfer_elem,
+ ASC_TENTHS((s->xfer_sect / 2), s->xfer_elem));
- len = asc_prt_line(cp, leftlen, "avg_xfer_size %lu.%01lu kb\n",
- (s->xfer_sect / 2) / s->xfer_cnt,
- ASC_TENTHS((s->xfer_sect / 2), s->xfer_cnt));
- ASC_PRT_NEXT();
+ seq_printf(m, "avg_xfer_size %u.%01u kb\n",
+ (s->xfer_sect / 2) / s->xfer_cnt,
+ ASC_TENTHS((s->xfer_sect / 2), s->xfer_cnt));
}
-
- return totlen;
}
#endif /* ADVANSYS_STATS */
/*
- * advansys_proc_info() - /proc/scsi/advansys/{0,1,2,3,...}
+ * advansys_show_info() - /proc/scsi/advansys/{0,1,2,3,...}
*
- * *buffer: I/O buffer
- * **start: if inout == FALSE pointer into buffer where user read should start
- * offset: current offset into a /proc/scsi/advansys/[0...] file
- * length: length of buffer
- * hostno: Scsi_Host host_no
- * inout: TRUE - user is writing; FALSE - user is reading
+ * m: seq_file to print into
+ * shost: Scsi_Host
*
* Return the number of bytes read from or written to a
* /proc/scsi/advansys/[0...] file.
- *
- * Note: This function uses the per board buffer 'prtbuf' which is
- * allocated when the board is initialized in advansys_detect(). The
- * buffer is ASC_PRTBUF_SIZE bytes. The function asc_proc_copy() is
- * used to write to the buffer. The way asc_proc_copy() is written
- * if 'prtbuf' is too small it will not be overwritten. Instead the
- * user just won't get all the available statistics.
*/
static int
-advansys_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
- off_t offset, int length, int inout)
+advansys_show_info(struct seq_file *m, struct Scsi_Host *shost)
{
struct asc_board *boardp = shost_priv(shost);
- char *cp;
- int cplen;
- int cnt;
- int totcnt;
- int leftlen;
- char *curbuf;
- off_t advoffset;
ASC_DBG(1, "begin\n");
/*
- * User write not supported.
- */
- if (inout == TRUE)
- return -ENOSYS;
-
- /*
* User read of /proc/scsi/advansys/[0...] file.
*/
- /* Copy read data starting at the beginning of the buffer. */
- *start = buffer;
- curbuf = buffer;
- advoffset = 0;
- totcnt = 0;
- leftlen = length;
-
/*
* Get board configuration information.
*
* advansys_info() returns the board string from its own static buffer.
*/
- cp = (char *)advansys_info(shost);
- strcat(cp, "\n");
- cplen = strlen(cp);
/* Copy board information. */
- cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
- totcnt += cnt;
- leftlen -= cnt;
- if (leftlen == 0) {
- ASC_DBG(1, "totcnt %d\n", totcnt);
- return totcnt;
- }
- advoffset += cplen;
- curbuf += cnt;
-
+ seq_printf(m, "%s\n", (char *)advansys_info(shost));
/*
* Display Wide Board BIOS Information.
*/
- if (!ASC_NARROW_BOARD(boardp)) {
- cp = boardp->prtbuf;
- cplen = asc_prt_adv_bios(shost, cp, ASC_PRTBUF_SIZE);
- BUG_ON(cplen >= ASC_PRTBUF_SIZE);
- cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp,
- cplen);
- totcnt += cnt;
- leftlen -= cnt;
- if (leftlen == 0) {
- ASC_DBG(1, "totcnt %d\n", totcnt);
- return totcnt;
- }
- advoffset += cplen;
- curbuf += cnt;
- }
+ if (!ASC_NARROW_BOARD(boardp))
+ asc_prt_adv_bios(m, shost);
/*
* Display driver information for each device attached to the board.
*/
- cp = boardp->prtbuf;
- cplen = asc_prt_board_devices(shost, cp, ASC_PRTBUF_SIZE);
- BUG_ON(cplen >= ASC_PRTBUF_SIZE);
- cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
- totcnt += cnt;
- leftlen -= cnt;
- if (leftlen == 0) {
- ASC_DBG(1, "totcnt %d\n", totcnt);
- return totcnt;
- }
- advoffset += cplen;
- curbuf += cnt;
+ asc_prt_board_devices(m, shost);
/*
* Display EEPROM configuration for the board.
*/
- cp = boardp->prtbuf;
- if (ASC_NARROW_BOARD(boardp)) {
- cplen = asc_prt_asc_board_eeprom(shost, cp, ASC_PRTBUF_SIZE);
- } else {
- cplen = asc_prt_adv_board_eeprom(shost, cp, ASC_PRTBUF_SIZE);
- }
- BUG_ON(cplen >= ASC_PRTBUF_SIZE);
- cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
- totcnt += cnt;
- leftlen -= cnt;
- if (leftlen == 0) {
- ASC_DBG(1, "totcnt %d\n", totcnt);
- return totcnt;
- }
- advoffset += cplen;
- curbuf += cnt;
+ if (ASC_NARROW_BOARD(boardp))
+ asc_prt_asc_board_eeprom(m, shost);
+ else
+ asc_prt_adv_board_eeprom(m, shost);
/*
* Display driver configuration and information for the board.
*/
- cp = boardp->prtbuf;
- cplen = asc_prt_driver_conf(shost, cp, ASC_PRTBUF_SIZE);
- BUG_ON(cplen >= ASC_PRTBUF_SIZE);
- cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
- totcnt += cnt;
- leftlen -= cnt;
- if (leftlen == 0) {
- ASC_DBG(1, "totcnt %d\n", totcnt);
- return totcnt;
- }
- advoffset += cplen;
- curbuf += cnt;
+ asc_prt_driver_conf(m, shost);
#ifdef ADVANSYS_STATS
/*
* Display driver statistics for the board.
*/
- cp = boardp->prtbuf;
- cplen = asc_prt_board_stats(shost, cp, ASC_PRTBUF_SIZE);
- BUG_ON(cplen >= ASC_PRTBUF_SIZE);
- cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
- totcnt += cnt;
- leftlen -= cnt;
- if (leftlen == 0) {
- ASC_DBG(1, "totcnt %d\n", totcnt);
- return totcnt;
- }
- advoffset += cplen;
- curbuf += cnt;
+ asc_prt_board_stats(m, shost);
#endif /* ADVANSYS_STATS */
/*
* Display Asc Library dynamic configuration information
* for the board.
*/
- cp = boardp->prtbuf;
- if (ASC_NARROW_BOARD(boardp)) {
- cplen = asc_prt_asc_board_info(shost, cp, ASC_PRTBUF_SIZE);
- } else {
- cplen = asc_prt_adv_board_info(shost, cp, ASC_PRTBUF_SIZE);
- }
- BUG_ON(cplen >= ASC_PRTBUF_SIZE);
- cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
- totcnt += cnt;
- leftlen -= cnt;
- if (leftlen == 0) {
- ASC_DBG(1, "totcnt %d\n", totcnt);
- return totcnt;
- }
- advoffset += cplen;
- curbuf += cnt;
-
- ASC_DBG(1, "totcnt %d\n", totcnt);
-
- return totcnt;
+ if (ASC_NARROW_BOARD(boardp))
+ asc_prt_asc_board_info(m, shost);
+ else
+ asc_prt_adv_board_info(m, shost);
+ return 0;
}
#endif /* CONFIG_PROC_FS */
@@ -11743,7 +11250,7 @@ static int AdvInitGetConfig(struct pci_dev *pdev, struct Scsi_Host *shost)
static struct scsi_host_template advansys_template = {
.proc_name = DRV_NAME,
#ifdef CONFIG_PROC_FS
- .proc_info = advansys_proc_info,
+ .show_info = advansys_show_info,
#endif
.name = DRV_NAME,
.info = advansys_info,
@@ -11939,20 +11446,6 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
#endif /* CONFIG_PCI */
}
-#ifdef CONFIG_PROC_FS
- /*
- * Allocate buffer for printing information from
- * /proc/scsi/advansys/[0...].
- */
- boardp->prtbuf = kmalloc(ASC_PRTBUF_SIZE, GFP_KERNEL);
- if (!boardp->prtbuf) {
- shost_printk(KERN_ERR, shost, "kmalloc(%d) returned NULL\n",
- ASC_PRTBUF_SIZE);
- ret = -ENOMEM;
- goto err_unmap;
- }
-#endif /* CONFIG_PROC_FS */
-
if (ASC_NARROW_BOARD(boardp)) {
/*
* Set the board bus type and PCI IRQ before
@@ -12010,7 +11503,7 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
}
if (ret)
- goto err_free_proc;
+ goto err_unmap;
/*
* Save the EEPROM configuration so that it can be displayed
@@ -12055,7 +11548,7 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
ASC_DBG(2, "AscInitSetConfig()\n");
ret = AscInitSetConfig(pdev, shost) ? -ENODEV : 0;
if (ret)
- goto err_free_proc;
+ goto err_unmap;
} else {
ADVEEP_3550_CONFIG *ep_3550;
ADVEEP_38C0800_CONFIG *ep_38C0800;
@@ -12290,7 +11783,7 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
shost_printk(KERN_ERR, shost, "request_dma() "
"%d failed %d\n",
shost->dma_channel, ret);
- goto err_free_proc;
+ goto err_unmap;
}
AscEnableIsaDma(shost->dma_channel);
}
@@ -12371,8 +11864,6 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
if (shost->dma_channel != NO_ISA_DMA)
free_dma(shost->dma_channel);
#endif
- err_free_proc:
- kfree(boardp->prtbuf);
err_unmap:
if (boardp->ioremap_addr)
iounmap(boardp->ioremap_addr);
@@ -12406,7 +11897,6 @@ static int advansys_release(struct Scsi_Host *shost)
iounmap(board->ioremap_addr);
advansys_wide_free_mem(board);
}
- kfree(board->prtbuf);
scsi_host_put(shost);
ASC_DBG(1, "end\n");
return 0;
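
The advansys conversion above is the template for every driver touched in this series: the per-board prtbuf allocation, asc_proc_copy()'s offset windowing and the BUG_ON() length checks all go away because the seq_file core buffers, grows and paginates the output itself, so the show routine just prints everything and returns 0. A minimal sketch of the resulting hook pair, using hypothetical names (my_show_info, my_write_info, my_template) rather than any of the drivers in this patch:

#include <linux/seq_file.h>
#include <scsi/scsi_host.h>

/* Read side of /proc/scsi/<name>/<host_no>: print everything each time;
 * the seq_file core handles partial reads, offsets and buffer sizing. */
static int my_show_info(struct seq_file *m, struct Scsi_Host *shost)
{
	seq_printf(m, "example output for host %d\n", shost->host_no);
	return 0;
}

/* Write side: replaces the old "inout == TRUE" branch of proc_info(). */
static int my_write_info(struct Scsi_Host *shost, char *buffer, int length)
{
	/* parse 'buffer' (length bytes) and apply settings here */
	return length;
}

static struct scsi_host_template my_template = {
	.proc_name	= "example",
	.show_info	= my_show_info,
	.write_info	= my_write_info,
	/* .name, .queuecommand, error handlers, etc. as usual */
};
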
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index a284be1..3f7b6fe 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -2977,11 +2977,10 @@ static void show_queues(struct Scsi_Host *shpnt)
}
#undef SPRINTF
-#define SPRINTF(args...) pos += sprintf(pos, ## args)
+#define SPRINTF(args...) seq_printf(m, ##args)
-static int get_command(char *pos, Scsi_Cmnd * ptr)
+static void get_command(struct seq_file *m, Scsi_Cmnd * ptr)
{
- char *start = pos;
int i;
SPRINTF("%p: target=%d; lun=%d; cmnd=( ",
@@ -3011,13 +3010,10 @@ static int get_command(char *pos, Scsi_Cmnd * ptr)
if (ptr->SCp.phase & syncneg)
SPRINTF("syncneg|");
SPRINTF("; next=0x%p\n", SCNEXT(ptr));
-
- return (pos - start);
}
-static int get_ports(struct Scsi_Host *shpnt, char *pos)
+static void get_ports(struct seq_file *m, struct Scsi_Host *shpnt)
{
- char *start = pos;
int s;
SPRINTF("\n%s: %s(%s) ", CURRENT_SC ? "on bus" : "waiting", states[STATE].name, states[PREVSTATE].name);
@@ -3273,11 +3269,9 @@ static int get_ports(struct Scsi_Host *shpnt, char *pos)
if (s & ENREQINIT)
SPRINTF("ENREQINIT ");
SPRINTF(")\n");
-
- return (pos - start);
}
-static int aha152x_set_info(char *buffer, int length, struct Scsi_Host *shpnt)
+static int aha152x_set_info(struct Scsi_Host *shpnt, char *buffer, int length)
{
if(!shpnt || !buffer || length<8 || strncmp("aha152x ", buffer, 8)!=0)
return -EINVAL;
@@ -3320,26 +3314,11 @@ static int aha152x_set_info(char *buffer, int length, struct Scsi_Host *shpnt)
return length;
}
-#undef SPRINTF
-#define SPRINTF(args...) \
- do { if(pos < buffer + length) pos += sprintf(pos, ## args); } while(0)
-
-static int aha152x_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start,
- off_t offset, int length, int inout)
+static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt)
{
int i;
- char *pos = buffer;
Scsi_Cmnd *ptr;
unsigned long flags;
- int thislength;
-
- DPRINTK(debug_procinfo,
- KERN_DEBUG "aha152x_proc_info: buffer=%p offset=%ld length=%d hostno=%d inout=%d\n",
- buffer, offset, length, shpnt->host_no, inout);
-
-
- if (inout)
- return aha152x_set_info(buffer, length, shpnt);
SPRINTF(AHA152X_REVID "\n");
@@ -3392,25 +3371,25 @@ static int aha152x_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start
if (ISSUE_SC) {
SPRINTF("not yet issued commands:\n");
for (ptr = ISSUE_SC; ptr; ptr = SCNEXT(ptr))
- pos += get_command(pos, ptr);
+ get_command(m, ptr);
} else
SPRINTF("no not yet issued commands\n");
DO_UNLOCK(flags);
if (CURRENT_SC) {
SPRINTF("current command:\n");
- pos += get_command(pos, CURRENT_SC);
+ get_command(m, CURRENT_SC);
} else
SPRINTF("no current command\n");
if (DISCONNECTED_SC) {
SPRINTF("disconnected commands:\n");
for (ptr = DISCONNECTED_SC; ptr; ptr = SCNEXT(ptr))
- pos += get_command(pos, ptr);
+ get_command(m, ptr);
} else
SPRINTF("no disconnected commands\n");
- pos += get_ports(shpnt, pos);
+ get_ports(m, shpnt);
#if defined(AHA152X_STAT)
SPRINTF("statistics:\n"
@@ -3440,24 +3419,7 @@ static int aha152x_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start
HOSTDATA(shpnt)->time[i]);
}
#endif
-
- DPRINTK(debug_procinfo, KERN_DEBUG "aha152x_proc_info: pos=%p\n", pos);
-
- thislength = pos - (buffer + offset);
- DPRINTK(debug_procinfo, KERN_DEBUG "aha152x_proc_info: length=%d thislength=%d\n", length, thislength);
-
- if(thislength<0) {
- DPRINTK(debug_procinfo, KERN_DEBUG "aha152x_proc_info: output too short\n");
- *start = NULL;
- return 0;
- }
-
- thislength = thislength<length ? thislength : length;
-
- DPRINTK(debug_procinfo, KERN_DEBUG "aha152x_proc_info: return %d\n", thislength);
-
- *start = buffer + offset;
- return thislength < length ? thislength : length;
+ return 0;
}
static int aha152x_adjust_queue(struct scsi_device *device)
@@ -3470,7 +3432,8 @@ static struct scsi_host_template aha152x_driver_template = {
.module = THIS_MODULE,
.name = AHA152X_REVID,
.proc_name = "aha152x",
- .proc_info = aha152x_proc_info,
+ .show_info = aha152x_show_info,
+ .write_info = aha152x_set_info,
.queuecommand = aha152x_queue,
.eh_abort_handler = aha152x_abort,
.eh_device_reset_handler = aha152x_device_reset,
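
The SPRINTF rewrite in the aha152x hunks above leans on an implicit convention: the macro now expands to seq_printf(m, ...), so it only compiles inside functions that have the seq_file in scope under the name m, which is why get_command() and get_ports() grew a struct seq_file *m parameter. A small hedged sketch of that convention with a hypothetical helper:

#include <linux/seq_file.h>

/* Expands against whatever 'm' names in the enclosing scope. */
#define SPRINTF(args...)	seq_printf(m, ##args)

static void example_helper(struct seq_file *m)	/* hypothetical helper */
{
	SPRINTF("phase: %s\n", "free");	/* becomes seq_printf(m, "phase: %s\n", "free") */
}
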
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index df775e6..5f31017 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -106,33 +106,14 @@ static inline dma_addr_t ecb_cpu_to_dma (struct Scsi_Host *host, void *cpu)
return hdata->ecb_dma_addr + offset;
}
-static int aha1740_proc_info(struct Scsi_Host *shpnt, char *buffer,
- char **start, off_t offset,
- int length, int inout)
+static int aha1740_show_info(struct seq_file *m, struct Scsi_Host *shpnt)
{
- int len;
- struct aha1740_hostdata *host;
-
- if (inout)
- return-ENOSYS;
-
- host = HOSTDATA(shpnt);
-
- len = sprintf(buffer, "aha174x at IO:%lx, IRQ %d, SLOT %d.\n"
+ struct aha1740_hostdata *host = HOSTDATA(shpnt);
+ seq_printf(m, "aha174x at IO:%lx, IRQ %d, SLOT %d.\n"
"Extended translation %sabled.\n",
shpnt->io_port, shpnt->irq, host->edev->slot,
host->translation ? "en" : "dis");
-
- if (offset > len) {
- *start = buffer;
- return 0;
- }
-
- *start = buffer + offset;
- len -= offset;
- if (len > length)
- len = length;
- return len;
+ return 0;
}
static int aha1740_makecode(unchar *sense, unchar *status)
@@ -556,7 +537,7 @@ static int aha1740_eh_abort_handler (Scsi_Cmnd *dummy)
static struct scsi_host_template aha1740_template = {
.module = THIS_MODULE,
.proc_name = "aha1740",
- .proc_info = aha1740_proc_info,
+ .show_info = aha1740_show_info,
.name = "Adaptec 174x (EISA)",
.queuecommand = aha1740_queuecommand,
.bios_param = aha1740_biosparam,
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 9328121..69d5c43 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -906,7 +906,8 @@ struct scsi_host_template aic79xx_driver_template = {
.module = THIS_MODULE,
.name = "aic79xx",
.proc_name = "aic79xx",
- .proc_info = ahd_linux_proc_info,
+ .show_info = ahd_linux_show_info,
+ .write_info = ahd_proc_write_seeprom,
.info = ahd_linux_info,
.queuecommand = ahd_linux_queue,
.eh_abort_handler = ahd_linux_abort,
@@ -1702,19 +1703,13 @@ ahd_send_async(struct ahd_softc *ahd, char channel,
switch (code) {
case AC_TRANSFER_NEG:
{
- char buf[80];
struct scsi_target *starget;
- struct info_str info;
struct ahd_initiator_tinfo *tinfo;
struct ahd_tmode_tstate *tstate;
unsigned int target_ppr_options;
BUG_ON(target == CAM_TARGET_WILDCARD);
- info.buffer = buf;
- info.length = sizeof(buf);
- info.offset = 0;
- info.pos = 0;
tinfo = ahd_fetch_transinfo(ahd, channel, ahd->our_id,
target, &tstate);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index 28e4349..c58fa33 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -379,14 +379,6 @@ void ahd_insb(struct ahd_softc * ahd, long port,
int ahd_linux_register_host(struct ahd_softc *,
struct scsi_host_template *);
-/*************************** Pretty Printing **********************************/
-struct info_str {
- char *buffer;
- int length;
- off_t offset;
- int pos;
-};
-
/******************************** Locking *************************************/
static inline void
ahd_lockinit(struct ahd_softc *ahd)
@@ -513,8 +505,8 @@ ahd_flush_device_writes(struct ahd_softc *ahd)
}
/**************************** Proc FS Support *********************************/
-int ahd_linux_proc_info(struct Scsi_Host *, char *, char **,
- off_t, int, int);
+int ahd_proc_write_seeprom(struct Scsi_Host *, char *, int);
+int ahd_linux_show_info(struct seq_file *,struct Scsi_Host *);
/*********************** Transaction Access Wrappers **************************/
static inline void ahd_cmd_set_transaction_status(struct scsi_cmnd *, uint32_t);
diff --git a/drivers/scsi/aic7xxx/aic79xx_proc.c b/drivers/scsi/aic7xxx/aic79xx_proc.c
index 59c85d5..e9778b4 100644
--- a/drivers/scsi/aic7xxx/aic79xx_proc.c
+++ b/drivers/scsi/aic7xxx/aic79xx_proc.c
@@ -42,16 +42,12 @@
#include "aic79xx_osm.h"
#include "aic79xx_inline.h"
-static void copy_mem_info(struct info_str *info, char *data, int len);
-static int copy_info(struct info_str *info, char *fmt, ...);
static void ahd_dump_target_state(struct ahd_softc *ahd,
- struct info_str *info,
+ struct seq_file *m,
u_int our_id, char channel,
u_int target_id);
-static void ahd_dump_device_state(struct info_str *info,
+static void ahd_dump_device_state(struct seq_file *m,
struct scsi_device *sdev);
-static int ahd_proc_write_seeprom(struct ahd_softc *ahd,
- char *buffer, int length);
/*
* Table of syncrates that don't follow the "divisible by 4"
@@ -93,58 +89,15 @@ ahd_calc_syncsrate(u_int period_factor)
return (10000000 / (period_factor * 4 * 10));
}
-
-static void
-copy_mem_info(struct info_str *info, char *data, int len)
-{
- if (info->pos + len > info->offset + info->length)
- len = info->offset + info->length - info->pos;
-
- if (info->pos + len < info->offset) {
- info->pos += len;
- return;
- }
-
- if (info->pos < info->offset) {
- off_t partial;
-
- partial = info->offset - info->pos;
- data += partial;
- info->pos += partial;
- len -= partial;
- }
-
- if (len > 0) {
- memcpy(info->buffer, data, len);
- info->pos += len;
- info->buffer += len;
- }
-}
-
-static int
-copy_info(struct info_str *info, char *fmt, ...)
-{
- va_list args;
- char buf[256];
- int len;
-
- va_start(args, fmt);
- len = vsprintf(buf, fmt, args);
- va_end(args);
-
- copy_mem_info(info, buf, len);
- return (len);
-}
-
static void
-ahd_format_transinfo(struct info_str *info, struct ahd_transinfo *tinfo)
+ahd_format_transinfo(struct seq_file *m, struct ahd_transinfo *tinfo)
{
u_int speed;
u_int freq;
u_int mb;
if (tinfo->period == AHD_PERIOD_UNKNOWN) {
- copy_info(info, "Renegotiation Pending\n");
+ seq_printf(m, "Renegotiation Pending\n");
return;
}
speed = 3300;
@@ -156,34 +109,34 @@ ahd_format_transinfo(struct info_str *info, struct ahd_transinfo *tinfo)
speed *= (0x01 << tinfo->width);
mb = speed / 1000;
if (mb > 0)
- copy_info(info, "%d.%03dMB/s transfers", mb, speed % 1000);
+ seq_printf(m, "%d.%03dMB/s transfers", mb, speed % 1000);
else
- copy_info(info, "%dKB/s transfers", speed);
+ seq_printf(m, "%dKB/s transfers", speed);
if (freq != 0) {
int printed_options;
printed_options = 0;
- copy_info(info, " (%d.%03dMHz", freq / 1000, freq % 1000);
+ seq_printf(m, " (%d.%03dMHz", freq / 1000, freq % 1000);
if ((tinfo->ppr_options & MSG_EXT_PPR_RD_STRM) != 0) {
- copy_info(info, " RDSTRM");
+ seq_printf(m, " RDSTRM");
printed_options++;
}
if ((tinfo->ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
- copy_info(info, "%s", printed_options ? "|DT" : " DT");
+ seq_printf(m, "%s", printed_options ? "|DT" : " DT");
printed_options++;
}
if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
- copy_info(info, "%s", printed_options ? "|IU" : " IU");
+ seq_printf(m, "%s", printed_options ? "|IU" : " IU");
printed_options++;
}
if ((tinfo->ppr_options & MSG_EXT_PPR_RTI) != 0) {
- copy_info(info, "%s",
+ seq_printf(m, "%s",
printed_options ? "|RTI" : " RTI");
printed_options++;
}
if ((tinfo->ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) {
- copy_info(info, "%s",
+ seq_printf(m, "%s",
printed_options ? "|QAS" : " QAS");
printed_options++;
}
@@ -191,19 +144,19 @@ ahd_format_transinfo(struct info_str *info, struct ahd_transinfo *tinfo)
if (tinfo->width > 0) {
if (freq != 0) {
- copy_info(info, ", ");
+ seq_printf(m, ", ");
} else {
- copy_info(info, " (");
+ seq_printf(m, " (");
}
- copy_info(info, "%dbit)", 8 * (0x01 << tinfo->width));
+ seq_printf(m, "%dbit)", 8 * (0x01 << tinfo->width));
} else if (freq != 0) {
- copy_info(info, ")");
+ seq_printf(m, ")");
}
- copy_info(info, "\n");
+ seq_printf(m, "\n");
}
static void
-ahd_dump_target_state(struct ahd_softc *ahd, struct info_str *info,
+ahd_dump_target_state(struct ahd_softc *ahd, struct seq_file *m,
u_int our_id, char channel, u_int target_id)
{
struct scsi_target *starget;
@@ -213,17 +166,17 @@ ahd_dump_target_state(struct ahd_softc *ahd, struct info_str *info,
tinfo = ahd_fetch_transinfo(ahd, channel, our_id,
target_id, &tstate);
- copy_info(info, "Target %d Negotiation Settings\n", target_id);
- copy_info(info, "\tUser: ");
- ahd_format_transinfo(info, &tinfo->user);
+ seq_printf(m, "Target %d Negotiation Settings\n", target_id);
+ seq_printf(m, "\tUser: ");
+ ahd_format_transinfo(m, &tinfo->user);
starget = ahd->platform_data->starget[target_id];
if (starget == NULL)
return;
- copy_info(info, "\tGoal: ");
- ahd_format_transinfo(info, &tinfo->goal);
- copy_info(info, "\tCurr: ");
- ahd_format_transinfo(info, &tinfo->curr);
+ seq_printf(m, "\tGoal: ");
+ ahd_format_transinfo(m, &tinfo->goal);
+ seq_printf(m, "\tCurr: ");
+ ahd_format_transinfo(m, &tinfo->curr);
for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
struct scsi_device *dev;
@@ -233,29 +186,30 @@ ahd_dump_target_state(struct ahd_softc *ahd, struct info_str *info,
if (dev == NULL)
continue;
- ahd_dump_device_state(info, dev);
+ ahd_dump_device_state(m, dev);
}
}
static void
-ahd_dump_device_state(struct info_str *info, struct scsi_device *sdev)
+ahd_dump_device_state(struct seq_file *m, struct scsi_device *sdev)
{
struct ahd_linux_device *dev = scsi_transport_device_data(sdev);
- copy_info(info, "\tChannel %c Target %d Lun %d Settings\n",
+ seq_printf(m, "\tChannel %c Target %d Lun %d Settings\n",
sdev->sdev_target->channel + 'A',
sdev->sdev_target->id, sdev->lun);
- copy_info(info, "\t\tCommands Queued %ld\n", dev->commands_issued);
- copy_info(info, "\t\tCommands Active %d\n", dev->active);
- copy_info(info, "\t\tCommand Openings %d\n", dev->openings);
- copy_info(info, "\t\tMax Tagged Openings %d\n", dev->maxtags);
- copy_info(info, "\t\tDevice Queue Frozen Count %d\n", dev->qfrozen);
+ seq_printf(m, "\t\tCommands Queued %ld\n", dev->commands_issued);
+ seq_printf(m, "\t\tCommands Active %d\n", dev->active);
+ seq_printf(m, "\t\tCommand Openings %d\n", dev->openings);
+ seq_printf(m, "\t\tMax Tagged Openings %d\n", dev->maxtags);
+ seq_printf(m, "\t\tDevice Queue Frozen Count %d\n", dev->qfrozen);
}
-static int
-ahd_proc_write_seeprom(struct ahd_softc *ahd, char *buffer, int length)
+int
+ahd_proc_write_seeprom(struct Scsi_Host *shost, char *buffer, int length)
{
+ struct ahd_softc *ahd = *(struct ahd_softc **)shost->hostdata;
ahd_mode_state saved_modes;
int have_seeprom;
u_long s;
@@ -319,64 +273,45 @@ done:
* Return information to handle /proc support for the driver.
*/
int
-ahd_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
- off_t offset, int length, int inout)
+ahd_linux_show_info(struct seq_file *m, struct Scsi_Host *shost)
{
struct ahd_softc *ahd = *(struct ahd_softc **)shost->hostdata;
- struct info_str info;
char ahd_info[256];
u_int max_targ;
u_int i;
- int retval;
- /* Has data been written to the file? */
- if (inout == TRUE) {
- retval = ahd_proc_write_seeprom(ahd, buffer, length);
- goto done;
- }
-
- if (start)
- *start = buffer;
-
- info.buffer = buffer;
- info.length = length;
- info.offset = offset;
- info.pos = 0;
-
- copy_info(&info, "Adaptec AIC79xx driver version: %s\n",
+ seq_printf(m, "Adaptec AIC79xx driver version: %s\n",
AIC79XX_DRIVER_VERSION);
- copy_info(&info, "%s\n", ahd->description);
+ seq_printf(m, "%s\n", ahd->description);
ahd_controller_info(ahd, ahd_info);
- copy_info(&info, "%s\n", ahd_info);
- copy_info(&info, "Allocated SCBs: %d, SG List Length: %d\n\n",
+ seq_printf(m, "%s\n", ahd_info);
+ seq_printf(m, "Allocated SCBs: %d, SG List Length: %d\n\n",
ahd->scb_data.numscbs, AHD_NSEG);
max_targ = 16;
if (ahd->seep_config == NULL)
- copy_info(&info, "No Serial EEPROM\n");
+ seq_printf(m, "No Serial EEPROM\n");
else {
- copy_info(&info, "Serial EEPROM:\n");
+ seq_printf(m, "Serial EEPROM:\n");
for (i = 0; i < sizeof(*ahd->seep_config)/2; i++) {
if (((i % 8) == 0) && (i != 0)) {
- copy_info(&info, "\n");
+ seq_printf(m, "\n");
}
- copy_info(&info, "0x%.4x ",
+ seq_printf(m, "0x%.4x ",
((uint16_t*)ahd->seep_config)[i]);
}
- copy_info(&info, "\n");
+ seq_printf(m, "\n");
}
- copy_info(&info, "\n");
+ seq_printf(m, "\n");
if ((ahd->features & AHD_WIDE) == 0)
max_targ = 8;
for (i = 0; i < max_targ; i++) {
- ahd_dump_target_state(ahd, &info, ahd->our_id, 'A',
+ ahd_dump_target_state(ahd, m, ahd->our_id, 'A',
/*target_id*/i);
}
- retval = info.pos > info.offset ? info.pos - info.offset : 0;
-done:
- return (retval);
+ return 0;
}
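
One detail in the aic79xx hunks above: ahd_proc_write_seeprom() is now exported taking a struct Scsi_Host * and recovers the softc via *(struct ahd_softc **)shost->hostdata, because the midlayer invokes .write_info with only the host. A hedged sketch of that double-pointer recovery, with hypothetical names:

#include <scsi/scsi_host.h>

/* Sketch: the driver stores a pointer to its per-HBA state in hostdata,
 * so exported proc handlers can get it back from the Scsi_Host alone. */
struct example_softc {			/* hypothetical per-HBA state */
	int example_field;
};

static int example_write_info(struct Scsi_Host *shost, char *buffer, int length)
{
	struct example_softc *sc = *(struct example_softc **)shost->hostdata;

	(void)sc;	/* parse 'buffer' and update '*sc' here */
	return length;
}
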
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 5a477cd..c0c6258 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -803,7 +803,8 @@ struct scsi_host_template aic7xxx_driver_template = {
.module = THIS_MODULE,
.name = "aic7xxx",
.proc_name = "aic7xxx",
- .proc_info = ahc_linux_proc_info,
+ .show_info = ahc_linux_show_info,
+ .write_info = ahc_proc_write_seeprom,
.info = ahc_linux_info,
.queuecommand = ahc_linux_queue,
.eh_abort_handler = ahc_linux_abort,
@@ -1631,10 +1632,8 @@ ahc_send_async(struct ahc_softc *ahc, char channel,
switch (code) {
case AC_TRANSFER_NEG:
{
- char buf[80];
struct scsi_target *starget;
struct ahc_linux_target *targ;
- struct info_str info;
struct ahc_initiator_tinfo *tinfo;
struct ahc_tmode_tstate *tstate;
int target_offset;
@@ -1642,10 +1641,6 @@ ahc_send_async(struct ahc_softc *ahc, char channel,
BUG_ON(target == CAM_TARGET_WILDCARD);
- info.buffer = buf;
- info.length = sizeof(buf);
- info.offset = 0;
- info.pos = 0;
tinfo = ahc_fetch_transinfo(ahc, channel,
channel == 'A' ? ahc->our_id
: ahc->our_id_b,
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index bca0fb83..bc4cca9 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -383,14 +383,6 @@ void ahc_insb(struct ahc_softc * ahc, long port,
int ahc_linux_register_host(struct ahc_softc *,
struct scsi_host_template *);
-/*************************** Pretty Printing **********************************/
-struct info_str {
- char *buffer;
- int length;
- off_t offset;
- int pos;
-};
-
/******************************** Locking *************************************/
/* Lock protecting internal data structures */
@@ -523,8 +515,8 @@ ahc_flush_device_writes(struct ahc_softc *ahc)
}
/**************************** Proc FS Support *********************************/
-int ahc_linux_proc_info(struct Scsi_Host *, char *, char **,
- off_t, int, int);
+int ahc_proc_write_seeprom(struct Scsi_Host *, char *, int);
+int ahc_linux_show_info(struct seq_file *, struct Scsi_Host *);
/*************************** Domain Validation ********************************/
/*********************** Transaction Access Wrappers *************************/
diff --git a/drivers/scsi/aic7xxx/aic7xxx_proc.c b/drivers/scsi/aic7xxx/aic7xxx_proc.c
index f2525f8..383a3d1 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_proc.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_proc.c
@@ -43,16 +43,12 @@
#include "aic7xxx_inline.h"
#include "aic7xxx_93cx6.h"
-static void copy_mem_info(struct info_str *info, char *data, int len);
-static int copy_info(struct info_str *info, char *fmt, ...);
static void ahc_dump_target_state(struct ahc_softc *ahc,
- struct info_str *info,
+ struct seq_file *m,
u_int our_id, char channel,
u_int target_id, u_int target_offset);
-static void ahc_dump_device_state(struct info_str *info,
+static void ahc_dump_device_state(struct seq_file *m,
struct scsi_device *dev);
-static int ahc_proc_write_seeprom(struct ahc_softc *ahc,
- char *buffer, int length);
/*
* Table of syncrates that don't follow the "divisible by 4"
@@ -94,51 +90,8 @@ ahc_calc_syncsrate(u_int period_factor)
return (10000000 / (period_factor * 4 * 10));
}
-
-static void
-copy_mem_info(struct info_str *info, char *data, int len)
-{
- if (info->pos + len > info->offset + info->length)
- len = info->offset + info->length - info->pos;
-
- if (info->pos + len < info->offset) {
- info->pos += len;
- return;
- }
-
- if (info->pos < info->offset) {
- off_t partial;
-
- partial = info->offset - info->pos;
- data += partial;
- info->pos += partial;
- len -= partial;
- }
-
- if (len > 0) {
- memcpy(info->buffer, data, len);
- info->pos += len;
- info->buffer += len;
- }
-}
-
-static int
-copy_info(struct info_str *info, char *fmt, ...)
-{
- va_list args;
- char buf[256];
- int len;
-
- va_start(args, fmt);
- len = vsprintf(buf, fmt, args);
- va_end(args);
-
- copy_mem_info(info, buf, len);
- return (len);
-}
-
static void
-ahc_format_transinfo(struct info_str *info, struct ahc_transinfo *tinfo)
+ahc_format_transinfo(struct seq_file *m, struct ahc_transinfo *tinfo)
{
u_int speed;
u_int freq;
@@ -153,12 +106,12 @@ ahc_format_transinfo(struct info_str *info, struct ahc_transinfo *tinfo)
speed *= (0x01 << tinfo->width);
mb = speed / 1000;
if (mb > 0)
- copy_info(info, "%d.%03dMB/s transfers", mb, speed % 1000);
+ seq_printf(m, "%d.%03dMB/s transfers", mb, speed % 1000);
else
- copy_info(info, "%dKB/s transfers", speed);
+ seq_printf(m, "%dKB/s transfers", speed);
if (freq != 0) {
- copy_info(info, " (%d.%03dMHz%s, offset %d",
+ seq_printf(m, " (%d.%03dMHz%s, offset %d",
freq / 1000, freq % 1000,
(tinfo->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
? " DT" : "", tinfo->offset);
@@ -166,19 +119,19 @@ ahc_format_transinfo(struct info_str *info, struct ahc_transinfo *tinfo)
if (tinfo->width > 0) {
if (freq != 0) {
- copy_info(info, ", ");
+ seq_printf(m, ", ");
} else {
- copy_info(info, " (");
+ seq_printf(m, " (");
}
- copy_info(info, "%dbit)", 8 * (0x01 << tinfo->width));
+ seq_printf(m, "%dbit)", 8 * (0x01 << tinfo->width));
} else if (freq != 0) {
- copy_info(info, ")");
+ seq_printf(m, ")");
}
- copy_info(info, "\n");
+ seq_printf(m, "\n");
}
static void
-ahc_dump_target_state(struct ahc_softc *ahc, struct info_str *info,
+ahc_dump_target_state(struct ahc_softc *ahc, struct seq_file *m,
u_int our_id, char channel, u_int target_id,
u_int target_offset)
{
@@ -190,18 +143,18 @@ ahc_dump_target_state(struct ahc_softc *ahc, struct info_str *info,
tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
target_id, &tstate);
if ((ahc->features & AHC_TWIN) != 0)
- copy_info(info, "Channel %c ", channel);
- copy_info(info, "Target %d Negotiation Settings\n", target_id);
- copy_info(info, "\tUser: ");
- ahc_format_transinfo(info, &tinfo->user);
+ seq_printf(m, "Channel %c ", channel);
+ seq_printf(m, "Target %d Negotiation Settings\n", target_id);
+ seq_printf(m, "\tUser: ");
+ ahc_format_transinfo(m, &tinfo->user);
starget = ahc->platform_data->starget[target_offset];
if (!starget)
return;
- copy_info(info, "\tGoal: ");
- ahc_format_transinfo(info, &tinfo->goal);
- copy_info(info, "\tCurr: ");
- ahc_format_transinfo(info, &tinfo->curr);
+ seq_printf(m, "\tGoal: ");
+ ahc_format_transinfo(m, &tinfo->goal);
+ seq_printf(m, "\tCurr: ");
+ ahc_format_transinfo(m, &tinfo->curr);
for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
struct scsi_device *sdev;
@@ -211,29 +164,30 @@ ahc_dump_target_state(struct ahc_softc *ahc, struct info_str *info,
if (sdev == NULL)
continue;
- ahc_dump_device_state(info, sdev);
+ ahc_dump_device_state(m, sdev);
}
}
static void
-ahc_dump_device_state(struct info_str *info, struct scsi_device *sdev)
+ahc_dump_device_state(struct seq_file *m, struct scsi_device *sdev)
{
struct ahc_linux_device *dev = scsi_transport_device_data(sdev);
- copy_info(info, "\tChannel %c Target %d Lun %d Settings\n",
+ seq_printf(m, "\tChannel %c Target %d Lun %d Settings\n",
sdev->sdev_target->channel + 'A',
sdev->sdev_target->id, sdev->lun);
- copy_info(info, "\t\tCommands Queued %ld\n", dev->commands_issued);
- copy_info(info, "\t\tCommands Active %d\n", dev->active);
- copy_info(info, "\t\tCommand Openings %d\n", dev->openings);
- copy_info(info, "\t\tMax Tagged Openings %d\n", dev->maxtags);
- copy_info(info, "\t\tDevice Queue Frozen Count %d\n", dev->qfrozen);
+ seq_printf(m, "\t\tCommands Queued %ld\n", dev->commands_issued);
+ seq_printf(m, "\t\tCommands Active %d\n", dev->active);
+ seq_printf(m, "\t\tCommand Openings %d\n", dev->openings);
+ seq_printf(m, "\t\tMax Tagged Openings %d\n", dev->maxtags);
+ seq_printf(m, "\t\tDevice Queue Frozen Count %d\n", dev->qfrozen);
}
-static int
-ahc_proc_write_seeprom(struct ahc_softc *ahc, char *buffer, int length)
+int
+ahc_proc_write_seeprom(struct Scsi_Host *shost, char *buffer, int length)
{
+ struct ahc_softc *ahc = *(struct ahc_softc **)shost->hostdata;
struct seeprom_descriptor sd;
int have_seeprom;
u_long s;
@@ -332,53 +286,36 @@ done:
* Return information to handle /proc support for the driver.
*/
int
-ahc_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
- off_t offset, int length, int inout)
+ahc_linux_show_info(struct seq_file *m, struct Scsi_Host *shost)
{
struct ahc_softc *ahc = *(struct ahc_softc **)shost->hostdata;
- struct info_str info;
char ahc_info[256];
u_int max_targ;
u_int i;
- int retval;
- /* Has data been written to the file? */
- if (inout == TRUE) {
- retval = ahc_proc_write_seeprom(ahc, buffer, length);
- goto done;
- }
-
- if (start)
- *start = buffer;
-
- info.buffer = buffer;
- info.length = length;
- info.offset = offset;
- info.pos = 0;
-
- copy_info(&info, "Adaptec AIC7xxx driver version: %s\n",
+ seq_printf(m, "Adaptec AIC7xxx driver version: %s\n",
AIC7XXX_DRIVER_VERSION);
- copy_info(&info, "%s\n", ahc->description);
+ seq_printf(m, "%s\n", ahc->description);
ahc_controller_info(ahc, ahc_info);
- copy_info(&info, "%s\n", ahc_info);
- copy_info(&info, "Allocated SCBs: %d, SG List Length: %d\n\n",
+ seq_printf(m, "%s\n", ahc_info);
+ seq_printf(m, "Allocated SCBs: %d, SG List Length: %d\n\n",
ahc->scb_data->numscbs, AHC_NSEG);
if (ahc->seep_config == NULL)
- copy_info(&info, "No Serial EEPROM\n");
+ seq_printf(m, "No Serial EEPROM\n");
else {
- copy_info(&info, "Serial EEPROM:\n");
+ seq_printf(m, "Serial EEPROM:\n");
for (i = 0; i < sizeof(*ahc->seep_config)/2; i++) {
if (((i % 8) == 0) && (i != 0)) {
- copy_info(&info, "\n");
+ seq_printf(m, "\n");
}
- copy_info(&info, "0x%.4x ",
+ seq_printf(m, "0x%.4x ",
((uint16_t*)ahc->seep_config)[i]);
}
- copy_info(&info, "\n");
+ seq_printf(m, "\n");
}
- copy_info(&info, "\n");
+ seq_printf(m, "\n");
max_targ = 16;
if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0)
@@ -398,10 +335,8 @@ ahc_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
target_id = i % 8;
}
- ahc_dump_target_state(ahc, &info, our_id,
+ ahc_dump_target_state(ahc, m, our_id,
channel, target_id, i);
}
- retval = info.pos > info.offset ? info.pos - info.offset : 0;
-done:
- return (retval);
+ return 0;
}
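
Both aic7xxx conversions delete copy_mem_info()/copy_info() and struct info_str, whose only job was to window the output against the read offset and the remaining buffer length. With seq_file that bookkeeping lives in the core. Outside the SCSI midlayer (which wires .show_info up on the driver's behalf), the same idea is what single_open() provides; a hedged, self-contained sketch with hypothetical names:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Print a few lines; the seq_file core tracks the offsets, partial reads
 * and buffer growth that info_str/copy_mem_info() handled by hand. */
static int example_show(struct seq_file *m, void *v)
{
	seq_printf(m, "line one\n");
	seq_printf(m, "line two\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Registered elsewhere with proc_create("example", 0, NULL, &example_fops). */
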
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
index 5b212f0..33ec9c6 100644
--- a/drivers/scsi/aic7xxx_old.c
+++ b/drivers/scsi/aic7xxx_old.c
@@ -11108,7 +11108,7 @@ MODULE_VERSION(AIC7XXX_H_VERSION);
static struct scsi_host_template driver_template = {
- .proc_info = aic7xxx_proc_info,
+ .show_info = aic7xxx_show_info,
.detect = aic7xxx_detect,
.release = aic7xxx_release,
.info = aic7xxx_info,
diff --git a/drivers/scsi/aic7xxx_old/aic7xxx_proc.c b/drivers/scsi/aic7xxx_old/aic7xxx_proc.c
index b07e4f0..976f45c 100644
--- a/drivers/scsi/aic7xxx_old/aic7xxx_proc.c
+++ b/drivers/scsi/aic7xxx_old/aic7xxx_proc.c
@@ -30,62 +30,23 @@
*-M*************************************************************************/
-#define BLS (&aic7xxx_buffer[size])
#define HDRB \
" 0 - 4K 4 - 16K 16 - 64K 64 - 256K 256K - 1M 1M+"
-#ifdef PROC_DEBUG
-extern int vsprintf(char *, const char *, va_list);
-
-static void
-proc_debug(const char *fmt, ...)
-{
- va_list ap;
- char buf[256];
-
- va_start(ap, fmt);
- vsprintf(buf, fmt, ap);
- printk(buf);
- va_end(ap);
-}
-#else /* PROC_DEBUG */
-# define proc_debug(fmt, args...)
-#endif /* PROC_DEBUG */
-
-static int aic7xxx_buffer_size = 0;
-static char *aic7xxx_buffer = NULL;
-
/*+F*************************************************************************
* Function:
- * aic7xxx_set_info
- *
- * Description:
- * Set parameters for the driver from the /proc filesystem.
- *-F*************************************************************************/
-static int
-aic7xxx_set_info(char *buffer, int length, struct Scsi_Host *HBAptr)
-{
- proc_debug("aic7xxx_set_info(): %s\n", buffer);
- return (-ENOSYS); /* Currently this is a no-op */
-}
-
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_proc_info
+ * aic7xxx_show_info
*
* Description:
* Return information to handle /proc support for the driver.
*-F*************************************************************************/
int
-aic7xxx_proc_info ( struct Scsi_Host *HBAptr, char *buffer, char **start, off_t offset, int length,
- int inout)
+aic7xxx_show_info(struct seq_file *m, struct Scsi_Host *HBAptr)
{
struct aic7xxx_host *p;
struct aic_dev_data *aic_dev;
struct scsi_device *sdptr;
- int size = 0;
unsigned char i;
unsigned char tindex;
@@ -94,66 +55,21 @@ aic7xxx_proc_info ( struct Scsi_Host *HBAptr, char *buffer, char **start, off_t
if (!p)
{
- size += sprintf(buffer, "Can't find adapter for host number %d\n", HBAptr->host_no);
- if (size > length)
- {
- return (size);
- }
- else
- {
- return (length);
- }
- }
-
- if (inout == TRUE) /* Has data been written to the file? */
- {
- return (aic7xxx_set_info(buffer, length, HBAptr));
+ seq_printf(m, "Can't find adapter for host number %d\n", HBAptr->host_no);
+ return 0;
}
p = (struct aic7xxx_host *) HBAptr->hostdata;
- /*
- * It takes roughly 1K of space to hold all relevant card info, not
- * counting any proc stats, so we start out with a 1.5k buffer size and
- * if proc_stats is defined, then we sweep the stats structure to see
- * how many drives we will be printing out for and add 384 bytes per
- * device with active stats.
- *
- * Hmmmm...that 1.5k seems to keep growing as items get added so they
- * can be easily viewed for debugging purposes. So, we bumped that
- * 1.5k to 4k so we can quit having to bump it all the time.
- */
-
- size = 4096;
- list_for_each_entry(aic_dev, &p->aic_devs, list)
- size += 512;
- if (aic7xxx_buffer_size != size)
- {
- if (aic7xxx_buffer != NULL)
- {
- kfree(aic7xxx_buffer);
- aic7xxx_buffer_size = 0;
- }
- aic7xxx_buffer = kmalloc(size, GFP_KERNEL);
- }
- if (aic7xxx_buffer == NULL)
- {
- size = sprintf(buffer, "AIC7xxx - kmalloc error at line %d\n",
- __LINE__);
- return size;
- }
- aic7xxx_buffer_size = size;
-
- size = 0;
- size += sprintf(BLS, "Adaptec AIC7xxx driver version: ");
- size += sprintf(BLS, "%s/", AIC7XXX_C_VERSION);
- size += sprintf(BLS, "%s", AIC7XXX_H_VERSION);
- size += sprintf(BLS, "\n");
- size += sprintf(BLS, "Adapter Configuration:\n");
- size += sprintf(BLS, " SCSI Adapter: %s\n",
+ seq_printf(m, "Adaptec AIC7xxx driver version: ");
+ seq_printf(m, "%s/", AIC7XXX_C_VERSION);
+ seq_printf(m, "%s", AIC7XXX_H_VERSION);
+ seq_printf(m, "\n");
+ seq_printf(m, "Adapter Configuration:\n");
+ seq_printf(m, " SCSI Adapter: %s\n",
board_names[p->board_name_index]);
if (p->flags & AHC_TWIN)
- size += sprintf(BLS, " Twin Channel Controller ");
+ seq_printf(m, " Twin Channel Controller ");
else
{
char *channel = "";
@@ -184,86 +100,86 @@ aic7xxx_proc_info ( struct Scsi_Host *HBAptr, char *buffer, char **start, off_t
ultra = "Ultra-2 LVD/SE ";
else if (p->features & AHC_ULTRA)
ultra = "Ultra ";
- size += sprintf(BLS, " %s%sController%s ",
+ seq_printf(m, " %s%sController%s ",
ultra, wide, channel);
}
switch(p->chip & ~AHC_CHIPID_MASK)
{
case AHC_VL:
- size += sprintf(BLS, "at VLB slot %d\n", p->pci_device_fn);
+ seq_printf(m, "at VLB slot %d\n", p->pci_device_fn);
break;
case AHC_EISA:
- size += sprintf(BLS, "at EISA slot %d\n", p->pci_device_fn);
+ seq_printf(m, "at EISA slot %d\n", p->pci_device_fn);
break;
default:
- size += sprintf(BLS, "at PCI %d/%d/%d\n", p->pci_bus,
+ seq_printf(m, "at PCI %d/%d/%d\n", p->pci_bus,
PCI_SLOT(p->pci_device_fn), PCI_FUNC(p->pci_device_fn));
break;
}
if( !(p->maddr) )
{
- size += sprintf(BLS, " Programmed I/O Base: %lx\n", p->base);
+ seq_printf(m, " Programmed I/O Base: %lx\n", p->base);
}
else
{
- size += sprintf(BLS, " PCI MMAPed I/O Base: 0x%lx\n", p->mbase);
+ seq_printf(m, " PCI MMAPed I/O Base: 0x%lx\n", p->mbase);
}
if( (p->chip & (AHC_VL | AHC_EISA)) )
{
- size += sprintf(BLS, " BIOS Memory Address: 0x%08x\n", p->bios_address);
+ seq_printf(m, " BIOS Memory Address: 0x%08x\n", p->bios_address);
}
- size += sprintf(BLS, " Adapter SEEPROM Config: %s\n",
+ seq_printf(m, " Adapter SEEPROM Config: %s\n",
(p->flags & AHC_SEEPROM_FOUND) ? "SEEPROM found and used." :
((p->flags & AHC_USEDEFAULTS) ? "SEEPROM not found, using defaults." :
"SEEPROM not found, using leftover BIOS values.") );
- size += sprintf(BLS, " Adaptec SCSI BIOS: %s\n",
+ seq_printf(m, " Adaptec SCSI BIOS: %s\n",
(p->flags & AHC_BIOS_ENABLED) ? "Enabled" : "Disabled");
- size += sprintf(BLS, " IRQ: %d\n", HBAptr->irq);
- size += sprintf(BLS, " SCBs: Active %d, Max Active %d,\n",
+ seq_printf(m, " IRQ: %d\n", HBAptr->irq);
+ seq_printf(m, " SCBs: Active %d, Max Active %d,\n",
p->activescbs, p->max_activescbs);
- size += sprintf(BLS, " Allocated %d, HW %d, "
+ seq_printf(m, " Allocated %d, HW %d, "
"Page %d\n", p->scb_data->numscbs, p->scb_data->maxhscbs,
p->scb_data->maxscbs);
if (p->flags & AHC_EXTERNAL_SRAM)
- size += sprintf(BLS, " Using External SCB SRAM\n");
- size += sprintf(BLS, " Interrupts: %ld", p->isr_count);
+ seq_printf(m, " Using External SCB SRAM\n");
+ seq_printf(m, " Interrupts: %ld", p->isr_count);
if (p->chip & AHC_EISA)
{
- size += sprintf(BLS, " %s\n",
+ seq_printf(m, " %s\n",
(p->pause & IRQMS) ? "(Level Sensitive)" : "(Edge Triggered)");
}
else
{
- size += sprintf(BLS, "\n");
+ seq_printf(m, "\n");
}
- size += sprintf(BLS, " BIOS Control Word: 0x%04x\n",
+ seq_printf(m, " BIOS Control Word: 0x%04x\n",
p->bios_control);
- size += sprintf(BLS, " Adapter Control Word: 0x%04x\n",
+ seq_printf(m, " Adapter Control Word: 0x%04x\n",
p->adapter_control);
- size += sprintf(BLS, " Extended Translation: %sabled\n",
+ seq_printf(m, " Extended Translation: %sabled\n",
(p->flags & AHC_EXTEND_TRANS_A) ? "En" : "Dis");
- size += sprintf(BLS, "Disconnect Enable Flags: 0x%04x\n", p->discenable);
+ seq_printf(m, "Disconnect Enable Flags: 0x%04x\n", p->discenable);
if (p->features & (AHC_ULTRA | AHC_ULTRA2))
{
- size += sprintf(BLS, " Ultra Enable Flags: 0x%04x\n", p->ultraenb);
+ seq_printf(m, " Ultra Enable Flags: 0x%04x\n", p->ultraenb);
}
- size += sprintf(BLS, "Default Tag Queue Depth: %d\n", aic7xxx_default_queue_depth);
- size += sprintf(BLS, " Tagged Queue By Device array for aic7xxx host "
+ seq_printf(m, "Default Tag Queue Depth: %d\n", aic7xxx_default_queue_depth);
+ seq_printf(m, " Tagged Queue By Device array for aic7xxx host "
"instance %d:\n", p->instance);
- size += sprintf(BLS, " {");
+ seq_printf(m, " {");
for(i=0; i < (MAX_TARGETS - 1); i++)
- size += sprintf(BLS, "%d,",aic7xxx_tag_info[p->instance].tag_commands[i]);
- size += sprintf(BLS, "%d}\n",aic7xxx_tag_info[p->instance].tag_commands[i]);
+ seq_printf(m, "%d,",aic7xxx_tag_info[p->instance].tag_commands[i]);
+ seq_printf(m, "%d}\n",aic7xxx_tag_info[p->instance].tag_commands[i]);
- size += sprintf(BLS, "\n");
- size += sprintf(BLS, "Statistics:\n\n");
+ seq_printf(m, "\n");
+ seq_printf(m, "Statistics:\n\n");
list_for_each_entry(aic_dev, &p->aic_devs, list)
{
sdptr = aic_dev->SDptr;
tindex = sdptr->channel << 3 | sdptr->id;
- size += sprintf(BLS, "(scsi%d:%d:%d:%d)\n",
+ seq_printf(m, "(scsi%d:%d:%d:%d)\n",
p->host_no, sdptr->channel, sdptr->id, sdptr->lun);
- size += sprintf(BLS, " Device using %s/%s",
+ seq_printf(m, " Device using %s/%s",
(aic_dev->cur.width == MSG_EXT_WDTR_BUS_16_BIT) ?
"Wide" : "Narrow",
(aic_dev->cur.offset != 0) ?
@@ -279,78 +195,59 @@ aic7xxx_proc_info ( struct Scsi_Host *HBAptr, char *buffer, char **start, off_t
sync_rate = aic7xxx_find_syncrate(p, &period, 0, &options);
if (sync_rate != NULL)
{
- size += sprintf(BLS, "%s MByte/sec, offset %d\n",
+ seq_printf(m, "%s MByte/sec, offset %d\n",
sync_rate->rate[rate],
aic_dev->cur.offset );
}
else
{
- size += sprintf(BLS, "3.3 MByte/sec, offset %d\n",
+ seq_printf(m, "3.3 MByte/sec, offset %d\n",
aic_dev->cur.offset );
}
}
- size += sprintf(BLS, " Transinfo settings: ");
- size += sprintf(BLS, "current(%d/%d/%d/%d), ",
+ seq_printf(m, " Transinfo settings: ");
+ seq_printf(m, "current(%d/%d/%d/%d), ",
aic_dev->cur.period,
aic_dev->cur.offset,
aic_dev->cur.width,
aic_dev->cur.options);
- size += sprintf(BLS, "goal(%d/%d/%d/%d), ",
+ seq_printf(m, "goal(%d/%d/%d/%d), ",
aic_dev->goal.period,
aic_dev->goal.offset,
aic_dev->goal.width,
aic_dev->goal.options);
- size += sprintf(BLS, "user(%d/%d/%d/%d)\n",
+ seq_printf(m, "user(%d/%d/%d/%d)\n",
p->user[tindex].period,
p->user[tindex].offset,
p->user[tindex].width,
p->user[tindex].options);
if(sdptr->simple_tags)
{
- size += sprintf(BLS, " Tagged Command Queueing Enabled, Ordered Tags %s, Depth %d/%d\n", sdptr->ordered_tags ? "Enabled" : "Disabled", sdptr->queue_depth, aic_dev->max_q_depth);
+ seq_printf(m, " Tagged Command Queueing Enabled, Ordered Tags %s, Depth %d/%d\n", sdptr->ordered_tags ? "Enabled" : "Disabled", sdptr->queue_depth, aic_dev->max_q_depth);
}
if(aic_dev->barrier_total)
- size += sprintf(BLS, " Total transfers %ld:\n (%ld/%ld/%ld/%ld reads/writes/REQ_BARRIER/Ordered Tags)\n",
+ seq_printf(m, " Total transfers %ld:\n (%ld/%ld/%ld/%ld reads/writes/REQ_BARRIER/Ordered Tags)\n",
aic_dev->r_total+aic_dev->w_total, aic_dev->r_total, aic_dev->w_total,
aic_dev->barrier_total, aic_dev->ordered_total);
else
- size += sprintf(BLS, " Total transfers %ld:\n (%ld/%ld reads/writes)\n",
+ seq_printf(m, " Total transfers %ld:\n (%ld/%ld reads/writes)\n",
aic_dev->r_total+aic_dev->w_total, aic_dev->r_total, aic_dev->w_total);
- size += sprintf(BLS, "%s\n", HDRB);
- size += sprintf(BLS, " Reads:");
+ seq_printf(m, "%s\n", HDRB);
+ seq_printf(m, " Reads:");
for (i = 0; i < ARRAY_SIZE(aic_dev->r_bins); i++)
{
- size += sprintf(BLS, " %10ld", aic_dev->r_bins[i]);
+ seq_printf(m, " %10ld", aic_dev->r_bins[i]);
}
- size += sprintf(BLS, "\n");
- size += sprintf(BLS, " Writes:");
+ seq_printf(m, "\n");
+ seq_printf(m, " Writes:");
for (i = 0; i < ARRAY_SIZE(aic_dev->w_bins); i++)
{
- size += sprintf(BLS, " %10ld", aic_dev->w_bins[i]);
+ seq_printf(m, " %10ld", aic_dev->w_bins[i]);
}
- size += sprintf(BLS, "\n");
- size += sprintf(BLS, "\n\n");
- }
- if (size >= aic7xxx_buffer_size)
- {
- printk(KERN_WARNING "aic7xxx: Overflow in aic7xxx_proc.c\n");
- }
-
- if (offset > size - 1)
- {
- kfree(aic7xxx_buffer);
- aic7xxx_buffer = NULL;
- aic7xxx_buffer_size = length = 0;
- *start = NULL;
+ seq_printf(m, "\n");
+ seq_printf(m, "\n\n");
}
- else
- {
- *start = buffer;
- length = min_t(int, length, size - offset);
- memcpy(buffer, &aic7xxx_buffer[offset], length);
- }
-
- return (length);
+ return 0;
}
/*
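
Note how the conversion above drops all of the hand-rolled buffer management (the static aic7xxx_buffer, the 4K size estimate, the kmalloc, and the offset/length trimming at the end). With seq_file the core grows its own buffer and serves partial reads itself, so a handler can simply iterate and print. A minimal sketch of the resulting shape, assuming <scsi/scsi_host.h>, <scsi/scsi_device.h> and <linux/seq_file.h>, with a hypothetical name:

    static int demo_show_info(struct seq_file *m, struct Scsi_Host *shost)
    {
            struct scsi_device *sdev;

            seq_printf(m, "Adapter on IRQ %u\n", shost->irq);
            shost_for_each_device(sdev, shost)
                    seq_printf(m, "  target %d lun %d: queue depth %d\n",
                               sdev->id, sdev->lun, sdev->queue_depth);
            return 0;   /* offset/length handling is left to the seq_file core */
    }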
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 3e1172a..09ba186 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -2836,20 +2836,15 @@ char *acornscsi_info(struct Scsi_Host *host)
return string;
}
-int acornscsi_proc_info(struct Scsi_Host *instance, char *buffer, char **start, off_t offset,
- int length, int inout)
+static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance)
{
- int pos, begin = 0, devidx;
+ int devidx;
struct scsi_device *scd;
AS_Host *host;
- char *p = buffer;
-
- if (inout == 1)
- return -EINVAL;
host = (AS_Host *)instance->hostdata;
- p += sprintf(p, "AcornSCSI driver v%d.%d.%d"
+ seq_printf(m, "AcornSCSI driver v%d.%d.%d"
#ifdef CONFIG_SCSI_ACORNSCSI_SYNC
" SYNC"
#endif
@@ -2864,14 +2859,14 @@ int acornscsi_proc_info(struct Scsi_Host *instance, char *buffer, char **start,
#endif
"\n\n", VER_MAJOR, VER_MINOR, VER_PATCH);
- p += sprintf(p, "SBIC: WD33C93A Address: %p IRQ : %d\n",
+ seq_printf(m, "SBIC: WD33C93A Address: %p IRQ : %d\n",
host->base + SBIC_REGIDX, host->scsi.irq);
#ifdef USE_DMAC
- p += sprintf(p, "DMAC: uPC71071 Address: %p IRQ : %d\n\n",
+ seq_printf(m, "DMAC: uPC71071 Address: %p IRQ : %d\n\n",
host->base + DMAC_OFFSET, host->scsi.irq);
#endif
- p += sprintf(p, "Statistics:\n"
+ seq_printf(m, "Statistics:\n"
"Queued commands: %-10u Issued commands: %-10u\n"
"Done commands : %-10u Reads : %-10u\n"
"Writes : %-10u Others : %-10u\n"
@@ -2886,7 +2881,7 @@ int acornscsi_proc_info(struct Scsi_Host *instance, char *buffer, char **start,
for (devidx = 0; devidx < 9; devidx ++) {
unsigned int statptr, prev;
- p += sprintf(p, "\n%c:", devidx == 8 ? 'H' : ('0' + devidx));
+ seq_printf(m, "\n%c:", devidx == 8 ? 'H' : ('0' + devidx));
statptr = host->status_ptr[devidx] - 10;
if ((signed int)statptr < 0)
@@ -2896,7 +2891,7 @@ int acornscsi_proc_info(struct Scsi_Host *instance, char *buffer, char **start,
for (; statptr != host->status_ptr[devidx]; statptr = (statptr + 1) & (STATUS_BUFFER_SIZE - 1)) {
if (host->status[devidx][statptr].when) {
- p += sprintf(p, "%c%02X:%02X+%2ld",
+ seq_printf(m, "%c%02X:%02X+%2ld",
host->status[devidx][statptr].irq ? '-' : ' ',
host->status[devidx][statptr].ph,
host->status[devidx][statptr].ssr,
@@ -2907,51 +2902,32 @@ int acornscsi_proc_info(struct Scsi_Host *instance, char *buffer, char **start,
}
}
- p += sprintf(p, "\nAttached devices:\n");
+ seq_printf(m, "\nAttached devices:\n");
shost_for_each_device(scd, instance) {
- p += sprintf(p, "Device/Lun TaggedQ Sync\n");
- p += sprintf(p, " %d/%d ", scd->id, scd->lun);
+ seq_printf(m, "Device/Lun TaggedQ Sync\n");
+ seq_printf(m, " %d/%d ", scd->id, scd->lun);
if (scd->tagged_supported)
- p += sprintf(p, "%3sabled(%3d) ",
+ seq_printf(m, "%3sabled(%3d) ",
scd->simple_tags ? "en" : "dis",
scd->current_tag);
else
- p += sprintf(p, "unsupported ");
+ seq_printf(m, "unsupported ");
if (host->device[scd->id].sync_xfer & 15)
- p += sprintf(p, "offset %d, %d ns\n",
+ seq_printf(m, "offset %d, %d ns\n",
host->device[scd->id].sync_xfer & 15,
acornscsi_getperiod(host->device[scd->id].sync_xfer));
else
- p += sprintf(p, "async\n");
+ seq_printf(m, "async\n");
- pos = p - buffer;
- if (pos + begin < offset) {
- begin += pos;
- p = buffer;
- }
- pos = p - buffer;
- if (pos + begin > offset + length) {
- scsi_device_put(scd);
- break;
- }
}
-
- pos = p - buffer;
-
- *start = buffer + (offset - begin);
- pos -= offset - begin;
-
- if (pos > length)
- pos = length;
-
- return pos;
+ return 0;
}
static struct scsi_host_template acornscsi_template = {
.module = THIS_MODULE,
- .proc_info = acornscsi_proc_info,
+ .show_info = acornscsi_show_info,
.name = "AcornSCSI",
.info = acornscsi_info,
.queuecommand = acornscsi_queuecmd,
diff --git a/drivers/scsi/arm/arxescsi.c b/drivers/scsi/arm/arxescsi.c
index 9274510..32d2321 100644
--- a/drivers/scsi/arm/arxescsi.c
+++ b/drivers/scsi/arm/arxescsi.c
@@ -220,47 +220,21 @@ static const char *arxescsi_info(struct Scsi_Host *host)
return string;
}
-/*
- * Function: int arxescsi_proc_info(char *buffer, char **start, off_t offset,
- * int length, int host_no, int inout)
- * Purpose : Return information about the driver to a user process accessing
- * the /proc filesystem.
- * Params : buffer - a buffer to write information to
- * start - a pointer into this buffer set by this routine to the start
- * of the required information.
- * offset - offset into information that we have read up to.
- * length - length of buffer
- * host_no - host number to return information for
- * inout - 0 for reading, 1 for writing.
- * Returns : length of data written to buffer.
- */
static int
-arxescsi_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length,
- int inout)
+arxescsi_show_info(struct seq_file *m, struct Scsi_Host *host)
{
struct arxescsi_info *info;
- char *p = buffer;
- int pos;
-
info = (struct arxescsi_info *)host->hostdata;
- if (inout == 1)
- return -EINVAL;
-
- p += sprintf(p, "ARXE 16-bit SCSI driver v%s\n", VERSION);
- p += fas216_print_host(&info->info, p);
- p += fas216_print_stats(&info->info, p);
- p += fas216_print_devices(&info->info, p);
-
- *start = buffer + offset;
- pos = p - buffer - offset;
- if (pos > length)
- pos = length;
- return pos;
+ seq_printf(m, "ARXE 16-bit SCSI driver v%s\n", VERSION);
+ fas216_print_host(&info->info, m);
+ fas216_print_stats(&info->info, m);
+ fas216_print_devices(&info->info, m);
+ return 0;
}
static struct scsi_host_template arxescsi_template = {
- .proc_info = arxescsi_proc_info,
+ .show_info = arxescsi_show_info,
.name = "ARXE SCSI card",
.info = arxescsi_info,
.queuecommand = fas216_noqueue_command,
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index c93938b..b679778 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -30,7 +30,6 @@
#define NCR5380_write(reg, value) cumanascsi_write(_instance, reg, value)
#define NCR5380_intr cumanascsi_intr
#define NCR5380_queue_command cumanascsi_queue_command
-#define NCR5380_proc_info cumanascsi_proc_info
#define NCR5380_implementation_fields \
unsigned ctrl; \
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index e3bae93..58915f2 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -337,50 +337,25 @@ cumanascsi_2_set_proc_info(struct Scsi_Host *host, char *buffer, int length)
return ret;
}
-/* Prototype: int cumanascsi_2_proc_info(char *buffer, char **start, off_t offset,
- * int length, int host_no, int inout)
- * Purpose : Return information about the driver to a user process accessing
- * the /proc filesystem.
- * Params : buffer - a buffer to write information to
- * start - a pointer into this buffer set by this routine to the start
- * of the required information.
- * offset - offset into information that we have read up to.
- * length - length of buffer
- * host_no - host number to return information for
- * inout - 0 for reading, 1 for writing.
- * Returns : length of data written to buffer.
- */
-int cumanascsi_2_proc_info (struct Scsi_Host *host, char *buffer, char **start, off_t offset,
- int length, int inout)
+static int cumanascsi_2_show_info(struct seq_file *m, struct Scsi_Host *host)
{
struct cumanascsi2_info *info;
- char *p = buffer;
- int pos;
-
- if (inout == 1)
- return cumanascsi_2_set_proc_info(host, buffer, length);
-
info = (struct cumanascsi2_info *)host->hostdata;
- p += sprintf(p, "Cumana SCSI II driver v%s\n", VERSION);
- p += fas216_print_host(&info->info, p);
- p += sprintf(p, "Term : o%s\n",
+ seq_printf(m, "Cumana SCSI II driver v%s\n", VERSION);
+ fas216_print_host(&info->info, m);
+ seq_printf(m, "Term : o%s\n",
info->terms ? "n" : "ff");
- p += fas216_print_stats(&info->info, p);
- p += fas216_print_devices(&info->info, p);
-
- *start = buffer + offset;
- pos = p - buffer - offset;
- if (pos > length)
- pos = length;
-
- return pos;
+ fas216_print_stats(&info->info, m);
+ fas216_print_devices(&info->info, m);
+ return 0;
}
static struct scsi_host_template cumanascsi2_template = {
.module = THIS_MODULE,
- .proc_info = cumanascsi_2_proc_info,
+ .show_info = cumanascsi_2_show_info,
+ .write_info = cumanascsi_2_set_proc_info,
.name = "Cumana SCSI II",
.info = cumanascsi_2_info,
.queuecommand = fas216_queue_command,
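
cumana_2 is the first driver in this section to keep its write path: the old "inout == 1" branch of ->proc_info becomes a separate .write_info hook with the (host, buffer, length) signature seen above. A hedged sketch of such a handler, with a hypothetical command and flag rather than this driver's actual parsing:

    /* Hypothetical handler; only the signature matches the new .write_info hook. */
    static int mydrv_write_info(struct Scsi_Host *host, char *buffer, int length)
    {
            if (length >= 5 && strncmp(buffer, "debug", 5) == 0) {
                    /* flip some driver-private debug flag here */
                    return length;          /* whole write consumed */
            }
            return -EINVAL;
    }

On success the drivers converted here return the full length; errors are negative errnos.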
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
index 8e36908..5bf3c0d 100644
--- a/drivers/scsi/arm/eesox.c
+++ b/drivers/scsi/arm/eesox.c
@@ -422,45 +422,20 @@ eesoxscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length)
return ret;
}
-/* Prototype: int eesoxscsi_proc_info(char *buffer, char **start, off_t offset,
- * int length, int host_no, int inout)
- * Purpose : Return information about the driver to a user process accessing
- * the /proc filesystem.
- * Params : buffer - a buffer to write information to
- * start - a pointer into this buffer set by this routine to the start
- * of the required information.
- * offset - offset into information that we have read up to.
- * length - length of buffer
- * host_no - host number to return information for
- * inout - 0 for reading, 1 for writing.
- * Returns : length of data written to buffer.
- */
-int eesoxscsi_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
- int length, int inout)
+static int eesoxscsi_show_info(struct seq_file *m, struct Scsi_Host *host)
{
struct eesoxscsi_info *info;
- char *p = buffer;
- int pos;
-
- if (inout == 1)
- return eesoxscsi_set_proc_info(host, buffer, length);
info = (struct eesoxscsi_info *)host->hostdata;
- p += sprintf(p, "EESOX SCSI driver v%s\n", VERSION);
- p += fas216_print_host(&info->info, p);
- p += sprintf(p, "Term : o%s\n",
+ seq_printf(m, "EESOX SCSI driver v%s\n", VERSION);
+ fas216_print_host(&info->info, m);
+ seq_printf(m, "Term : o%s\n",
info->control & EESOX_TERM_ENABLE ? "n" : "ff");
- p += fas216_print_stats(&info->info, p);
- p += fas216_print_devices(&info->info, p);
-
- *start = buffer + offset;
- pos = p - buffer - offset;
- if (pos > length)
- pos = length;
-
- return pos;
+ fas216_print_stats(&info->info, m);
+ fas216_print_devices(&info->info, m);
+ return 0;
}
static ssize_t eesoxscsi_show_term(struct device *dev, struct device_attribute *attr, char *buf)
@@ -498,7 +473,8 @@ static DEVICE_ATTR(bus_term, S_IRUGO | S_IWUSR,
static struct scsi_host_template eesox_template = {
.module = THIS_MODULE,
- .proc_info = eesoxscsi_proc_info,
+ .show_info = eesoxscsi_show_info,
+ .write_info = eesoxscsi_set_proc_info,
.name = "EESOX SCSI",
.info = eesoxscsi_info,
.queuecommand = fas216_queue_command,
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 737554c..b46a6f6 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2958,9 +2958,9 @@ void fas216_release(struct Scsi_Host *host)
queue_free(&info->queues.issue);
}
-int fas216_print_host(FAS216_Info *info, char *buffer)
+void fas216_print_host(FAS216_Info *info, struct seq_file *m)
{
- return sprintf(buffer,
+ seq_printf(m,
"\n"
"Chip : %s\n"
" Address: 0x%p\n"
@@ -2970,11 +2970,9 @@ int fas216_print_host(FAS216_Info *info, char *buffer)
info->scsi.irq, info->scsi.dma);
}
-int fas216_print_stats(FAS216_Info *info, char *buffer)
+void fas216_print_stats(FAS216_Info *info, struct seq_file *m)
{
- char *p = buffer;
-
- p += sprintf(p, "\n"
+ seq_printf(m, "\n"
"Command Statistics:\n"
" Queued : %u\n"
" Issued : %u\n"
@@ -2991,38 +2989,33 @@ int fas216_print_stats(FAS216_Info *info, char *buffer)
info->stats.writes, info->stats.miscs,
info->stats.disconnects, info->stats.aborts,
info->stats.bus_resets, info->stats.host_resets);
-
- return p - buffer;
}
-int fas216_print_devices(FAS216_Info *info, char *buffer)
+void fas216_print_devices(FAS216_Info *info, struct seq_file *m)
{
struct fas216_device *dev;
struct scsi_device *scd;
- char *p = buffer;
- p += sprintf(p, "Device/Lun TaggedQ Parity Sync\n");
+ seq_printf(m, "Device/Lun TaggedQ Parity Sync\n");
shost_for_each_device(scd, info->host) {
dev = &info->device[scd->id];
- p += sprintf(p, " %d/%d ", scd->id, scd->lun);
+ seq_printf(m, " %d/%d ", scd->id, scd->lun);
if (scd->tagged_supported)
- p += sprintf(p, "%3sabled(%3d) ",
+ seq_printf(m, "%3sabled(%3d) ",
scd->simple_tags ? "en" : "dis",
scd->current_tag);
else
- p += sprintf(p, "unsupported ");
+ seq_printf(m, "unsupported ");
- p += sprintf(p, "%3sabled ", dev->parity_enabled ? "en" : "dis");
+ seq_printf(m, "%3sabled ", dev->parity_enabled ? "en" : "dis");
if (dev->sof)
- p += sprintf(p, "offset %d, %d ns\n",
+ seq_printf(m, "offset %d, %d ns\n",
dev->sof, dev->period * 4);
else
- p += sprintf(p, "async\n");
+ seq_printf(m, "async\n");
}
-
- return p - buffer;
}
EXPORT_SYMBOL(fas216_init);
diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h
index df2e1b3..c57c16e 100644
--- a/drivers/scsi/arm/fas216.h
+++ b/drivers/scsi/arm/fas216.h
@@ -358,9 +358,9 @@ extern void fas216_remove (struct Scsi_Host *instance);
*/
extern void fas216_release (struct Scsi_Host *instance);
-extern int fas216_print_host(FAS216_Info *info, char *buffer);
-extern int fas216_print_stats(FAS216_Info *info, char *buffer);
-extern int fas216_print_devices(FAS216_Info *info, char *buffer);
+extern void fas216_print_host(FAS216_Info *info, struct seq_file *m);
+extern void fas216_print_stats(FAS216_Info *info, struct seq_file *m);
+extern void fas216_print_devices(FAS216_Info *info, struct seq_file *m);
/* Function: int fas216_eh_abort(struct scsi_cmnd *SCpnt)
* Purpose : abort this command
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index 48facdc..4266eef 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -31,7 +31,8 @@
#define NCR5380_write(reg, value) writeb(value, _base + ((reg) << 2))
#define NCR5380_intr oakscsi_intr
#define NCR5380_queue_command oakscsi_queue_command
-#define NCR5380_proc_info oakscsi_proc_info
+#define NCR5380_show_info oakscsi_show_info
+#define NCR5380_write_info oakscsi_write_info
#define NCR5380_implementation_fields \
void __iomem *base
@@ -115,7 +116,8 @@ printk("reading %p len %d\n", addr, len);
static struct scsi_host_template oakscsi_template = {
.module = THIS_MODULE,
- .proc_info = oakscsi_proc_info,
+ .show_info = oakscsi_show_info,
+ .write_info = oakscsi_write_info,
.name = "Oak 16-bit SCSI",
.info = oakscsi_info,
.queuecommand = oakscsi_queue_command,
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index 246600b..abc9593 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -237,32 +237,20 @@ powertecscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length)
* inout - 0 for reading, 1 for writing.
* Returns : length of data written to buffer.
*/
-int powertecscsi_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
- int length, int inout)
+static int powertecscsi_show_info(struct seq_file *m, struct Scsi_Host *host)
{
struct powertec_info *info;
- char *p = buffer;
- int pos;
-
- if (inout == 1)
- return powertecscsi_set_proc_info(host, buffer, length);
info = (struct powertec_info *)host->hostdata;
- p += sprintf(p, "PowerTec SCSI driver v%s\n", VERSION);
- p += fas216_print_host(&info->info, p);
- p += sprintf(p, "Term : o%s\n",
+ seq_printf(m, "PowerTec SCSI driver v%s\n", VERSION);
+ fas216_print_host(&info->info, m);
+ seq_printf(m, "Term : o%s\n",
info->term_ctl ? "n" : "ff");
- p += fas216_print_stats(&info->info, p);
- p += fas216_print_devices(&info->info, p);
-
- *start = buffer + offset;
- pos = p - buffer - offset;
- if (pos > length)
- pos = length;
-
- return pos;
+ fas216_print_stats(&info->info, m);
+ fas216_print_devices(&info->info, m);
+ return 0;
}
static ssize_t powertecscsi_show_term(struct device *dev, struct device_attribute *attr, char *buf)
@@ -291,7 +279,8 @@ static DEVICE_ATTR(bus_term, S_IRUGO | S_IWUSR,
static struct scsi_host_template powertecscsi_template = {
.module = THIS_MODULE,
- .proc_info = powertecscsi_proc_info,
+ .show_info = powertecscsi_show_info,
+ .write_info = powertecscsi_set_proc_info,
.name = "PowerTec SCSI",
.info = powertecscsi_info,
.queuecommand = fas216_queue_command,
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index 2db79b4..0f3cdbc 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -719,119 +719,94 @@ static void __init NCR5380_print_options(struct Scsi_Host *instance)
* Inputs : instance, pointer to this instance.
*/
-static void NCR5380_print_status(struct Scsi_Host *instance)
+static void lprint_Scsi_Cmnd(Scsi_Cmnd *cmd)
{
- char *pr_bfr;
- char *start;
- int len;
-
- NCR_PRINT(NDEBUG_ANY);
- NCR_PRINT_PHASE(NDEBUG_ANY);
-
- pr_bfr = (char *)__get_free_page(GFP_ATOMIC);
- if (!pr_bfr) {
- printk("NCR5380_print_status: no memory for print buffer\n");
- return;
- }
- len = NCR5380_proc_info(instance, pr_bfr, &start, 0, PAGE_SIZE, 0);
- pr_bfr[len] = 0;
- printk("\n%s\n", pr_bfr);
- free_page((unsigned long)pr_bfr);
+ int i, s;
+ unsigned char *command;
+ printk("scsi%d: destination target %d, lun %d\n",
+ H_NO(cmd), cmd->device->id, cmd->device->lun);
+ printk(KERN_CONT " command = ");
+ command = cmd->cmnd;
+ printk(KERN_CONT "%2d (0x%02x)", command[0], command[0]);
+ for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
+ printk(KERN_CONT " %02x", command[i]);
+ printk("\n");
}
-
-/******************************************/
-/*
- * /proc/scsi/[dtc pas16 t128 generic]/[0-ASC_NUM_BOARD_SUPPORTED]
- *
- * *buffer: I/O buffer
- * **start: if inout == FALSE pointer into buffer where user read should start
- * offset: current offset
- * length: length of buffer
- * hostno: Scsi_Host host_no
- * inout: TRUE - user is writing; FALSE - user is reading
- *
- * Return the number of bytes read from or written
-*/
-
-#undef SPRINTF
-#define SPRINTF(fmt,args...) \
- do { \
- if (pos + strlen(fmt) + 20 /* slop */ < buffer + length) \
- pos += sprintf(pos, fmt , ## args); \
- } while(0)
-static char *lprint_Scsi_Cmnd(Scsi_Cmnd *cmd, char *pos, char *buffer, int length);
-
-static int NCR5380_proc_info(struct Scsi_Host *instance, char *buffer,
- char **start, off_t offset, int length, int inout)
+static void NCR5380_print_status(struct Scsi_Host *instance)
{
- char *pos = buffer;
struct NCR5380_hostdata *hostdata;
Scsi_Cmnd *ptr;
unsigned long flags;
- off_t begin = 0;
-#define check_offset() \
- do { \
- if (pos - buffer < offset - begin) { \
- begin += pos - buffer; \
- pos = buffer; \
- } \
- } while (0)
+
+ NCR_PRINT(NDEBUG_ANY);
+ NCR_PRINT_PHASE(NDEBUG_ANY);
hostdata = (struct NCR5380_hostdata *)instance->hostdata;
- if (inout) /* Has data been written to the file ? */
- return -ENOSYS; /* Currently this is a no-op */
- SPRINTF("NCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE);
- check_offset();
+ printk("\nNCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE);
local_irq_save(flags);
- SPRINTF("NCR5380: coroutine is%s running.\n",
+ printk("NCR5380: coroutine is%s running.\n",
main_running ? "" : "n't");
- check_offset();
if (!hostdata->connected)
- SPRINTF("scsi%d: no currently connected command\n", HOSTNO);
+ printk("scsi%d: no currently connected command\n", HOSTNO);
else
- pos = lprint_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected,
- pos, buffer, length);
- SPRINTF("scsi%d: issue_queue\n", HOSTNO);
- check_offset();
- for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr)) {
- pos = lprint_Scsi_Cmnd(ptr, pos, buffer, length);
- check_offset();
- }
+ lprint_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected);
+ printk("scsi%d: issue_queue\n", HOSTNO);
+ for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr))
+ lprint_Scsi_Cmnd(ptr);
- SPRINTF("scsi%d: disconnected_queue\n", HOSTNO);
- check_offset();
+ printk("scsi%d: disconnected_queue\n", HOSTNO);
for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr;
- ptr = NEXT(ptr)) {
- pos = lprint_Scsi_Cmnd(ptr, pos, buffer, length);
- check_offset();
- }
+ ptr = NEXT(ptr))
+ lprint_Scsi_Cmnd(ptr);
local_irq_restore(flags);
- *start = buffer + (offset - begin);
- if (pos - buffer < offset - begin)
- return 0;
- else if (pos - buffer - (offset - begin) < length)
- return pos - buffer - (offset - begin);
- return length;
+ printk("\n");
}
-static char *lprint_Scsi_Cmnd(Scsi_Cmnd *cmd, char *pos, char *buffer, int length)
+static void show_Scsi_Cmnd(Scsi_Cmnd *cmd, struct seq_file *m)
{
int i, s;
unsigned char *command;
- SPRINTF("scsi%d: destination target %d, lun %d\n",
+ seq_printf(m, "scsi%d: destination target %d, lun %d\n",
H_NO(cmd), cmd->device->id, cmd->device->lun);
- SPRINTF(" command = ");
+ seq_printf(m, " command = ");
command = cmd->cmnd;
- SPRINTF("%2d (0x%02x)", command[0], command[0]);
+ seq_printf(m, "%2d (0x%02x)", command[0], command[0]);
for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
- SPRINTF(" %02x", command[i]);
- SPRINTF("\n");
- return pos;
+ seq_printf(m, " %02x", command[i]);
+ seq_printf(m, "\n");
}
+static int NCR5380_show_info(struct seq_file *m, struct Scsi_Host *instance)
+{
+ struct NCR5380_hostdata *hostdata;
+ Scsi_Cmnd *ptr;
+ unsigned long flags;
+
+ hostdata = (struct NCR5380_hostdata *)instance->hostdata;
+
+ seq_printf(m, "NCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE);
+ local_irq_save(flags);
+ seq_printf(m, "NCR5380: coroutine is%s running.\n",
+ main_running ? "" : "n't");
+ if (!hostdata->connected)
+ seq_printf(m, "scsi%d: no currently connected command\n", HOSTNO);
+ else
+ show_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected, m);
+ seq_printf(m, "scsi%d: issue_queue\n", HOSTNO);
+ for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr))
+ show_Scsi_Cmnd(ptr, m);
+
+ seq_printf(m, "scsi%d: disconnected_queue\n", HOSTNO);
+ for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr;
+ ptr = NEXT(ptr))
+ show_Scsi_Cmnd(ptr, m);
+
+ local_irq_restore(flags);
+ return 0;
+}
/*
* Function : void NCR5380_init (struct Scsi_Host *instance)
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index df740cb..a3e6c8a 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -1100,7 +1100,7 @@ static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value)
#include "atari_NCR5380.c"
static struct scsi_host_template driver_template = {
- .proc_info = atari_scsi_proc_info,
+ .show_info = atari_scsi_show_info,
.name = "Atari native SCSI",
.detect = atari_scsi_detect,
.release = atari_scsi_release,
diff --git a/drivers/scsi/atari_scsi.h b/drivers/scsi/atari_scsi.h
index bd52df7..11c624b 100644
--- a/drivers/scsi/atari_scsi.h
+++ b/drivers/scsi/atari_scsi.h
@@ -47,7 +47,7 @@
#define NCR5380_intr atari_scsi_intr
#define NCR5380_queue_command atari_scsi_queue_command
#define NCR5380_abort atari_scsi_abort
-#define NCR5380_proc_info atari_scsi_proc_info
+#define NCR5380_show_info atari_scsi_show_info
#define NCR5380_dma_read_setup(inst,d,c) atari_scsi_dma_setup (inst, d, c, 0)
#define NCR5380_dma_write_setup(inst,d,c) atari_scsi_dma_setup (inst, d, c, 1)
#define NCR5380_dma_residual(inst) atari_scsi_dma_residual( inst )
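
The 5380-based boards (oak, dtc, the Atari and Cumana 1 cards) do not implement their own proc output; they set NCR5380_show_info / NCR5380_write_info to driver-specific names before including the shared NCR5380.c core, which then compiles its generic handler under those names. Schematically, with a made-up "foo" driver standing in for the real ones:

    #define NCR5380_show_info   fooscsi_show_info
    #define NCR5380_write_info  fooscsi_write_info
    #include "NCR5380.c"        /* core defines fooscsi_show_info() et al. */

    static struct scsi_host_template fooscsi_template = {
            .show_info  = fooscsi_show_info,
            .write_info = fooscsi_write_info,
            /* ... */
    };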
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index cfc7304..15a629d 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -3099,38 +3099,14 @@ static const char *atp870u_info(struct Scsi_Host *notused)
return buffer;
}
-#define BLS buffer + len + size
-static int atp870u_proc_info(struct Scsi_Host *HBAptr, char *buffer,
- char **start, off_t offset, int length, int inout)
+static int atp870u_show_info(struct seq_file *m, struct Scsi_Host *HBAptr)
{
- static u8 buff[512];
- int size = 0;
- int len = 0;
- off_t begin = 0;
- off_t pos = 0;
-
- if (inout)
- return -EINVAL;
- if (offset == 0)
- memset(buff, 0, sizeof(buff));
- size += sprintf(BLS, "ACARD AEC-671X Driver Version: 2.6+ac\n");
- len += size;
- pos = begin + len;
- size = 0;
-
- size += sprintf(BLS, "\n");
- size += sprintf(BLS, "Adapter Configuration:\n");
- size += sprintf(BLS, " Base IO: %#.4lx\n", HBAptr->io_port);
- size += sprintf(BLS, " IRQ: %d\n", HBAptr->irq);
- len += size;
- pos = begin + len;
-
- *start = buffer + (offset - begin); /* Start of wanted data */
- len -= (offset - begin); /* Start slop */
- if (len > length) {
- len = length; /* Ending slop */
- }
- return (len);
+ seq_printf(m, "ACARD AEC-671X Driver Version: 2.6+ac\n");
+ seq_printf(m, "\n");
+ seq_printf(m, "Adapter Configuration:\n");
+ seq_printf(m, " Base IO: %#.4lx\n", HBAptr->io_port);
+ seq_printf(m, " IRQ: %d\n", HBAptr->irq);
+ return 0;
}
@@ -3177,7 +3153,7 @@ static struct scsi_host_template atp870u_template = {
.module = THIS_MODULE,
.name = "atp870u" /* name */,
.proc_name = "atp870u",
- .proc_info = atp870u_proc_info,
+ .show_info = atp870u_show_info,
.info = atp870u_info /* info */,
.queuecommand = atp870u_queuecommand /* queuecommand */,
.eh_abort_handler = atp870u_abort /* abort */,
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index fed486bf..694e13c 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -4616,26 +4616,21 @@ static void adapter_uninit(struct AdapterCtlBlk *acb)
#undef SPRINTF
-#define SPRINTF(args...) pos += sprintf(pos, args)
+#define SPRINTF(args...) seq_printf(m,##args)
#undef YESNO
#define YESNO(YN) \
if (YN) SPRINTF(" Yes ");\
else SPRINTF(" No ")
-static int dc395x_proc_info(struct Scsi_Host *host, char *buffer,
- char **start, off_t offset, int length, int inout)
+static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host)
{
struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
int spd, spd1;
- char *pos = buffer;
struct DeviceCtlBlk *dcb;
unsigned long flags;
int dev;
- if (inout) /* Has data been written to the file ? */
- return -EPERM;
-
SPRINTF(DC395X_BANNER " PCI SCSI Host Adapter\n");
SPRINTF(" Driver Version " DC395X_VERSION "\n");
@@ -4735,22 +4730,15 @@ static int dc395x_proc_info(struct Scsi_Host *host, char *buffer,
SPRINTF("END\n");
}
- *start = buffer + offset;
DC395x_UNLOCK_IO(acb->scsi_host, flags);
-
- if (pos - buffer < offset)
- return 0;
- else if (pos - buffer - offset < length)
- return pos - buffer - offset;
- else
- return length;
+ return 0;
}
static struct scsi_host_template dc395x_driver_template = {
.module = THIS_MODULE,
.proc_name = DC395X_NAME,
- .proc_info = dc395x_proc_info,
+ .show_info = dc395x_show_info,
.name = DC395X_BANNER " " DC395X_VERSION,
.queuecommand = dc395x_queue_command,
.bios_param = dc395x_bios_param,
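
dc395x keeps its local SPRINTF() wrapper but repoints it at seq_printf(). The named GNU variadic form simply forwards whatever was passed after the seq_file pointer (the ## is the GNU comma-swallowing extension; every SPRINTF() call here supplies at least a format string), for example:

    #define SPRINTF(args...) seq_printf(m, ##args)

    SPRINTF(" Driver Version " DC395X_VERSION "\n");
    /* expands to: seq_printf(m, " Driver Version " DC395X_VERSION "\n"); */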
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index b6e2700..19e1b42 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -553,36 +553,14 @@ static const char *adpt_info(struct Scsi_Host *host)
return (char *) (pHba->detail);
}
-static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
- int length, int inout)
+static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
struct adpt_device* d;
int id;
int chan;
- int len = 0;
- int begin = 0;
- int pos = 0;
adpt_hba* pHba;
int unit;
- *start = buffer;
- if (inout == TRUE) {
- /*
- * The user has done a write and wants us to take the
- * data in the buffer and do something with it.
- * proc_scsiwrite calls us with inout = 1
- *
- * Read data from buffer (writing to us) - NOT SUPPORTED
- */
- return -EINVAL;
- }
-
- /*
- * inout = 0 means the user has done a read and wants information
- * returned, so we write information about the cards into the buffer
- * proc_scsiread() calls us with inout = 0
- */
-
// Find HBA (host bus adapter) we are looking for
mutex_lock(&adpt_configuration_lock);
for (pHba = hba_chain; pHba; pHba = pHba->next) {
@@ -596,86 +574,30 @@ static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, of
}
host = pHba->host;
- len = sprintf(buffer , "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
- len += sprintf(buffer+len, "%s\n", pHba->detail);
- len += sprintf(buffer+len, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
+ seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
+ seq_printf(m, "%s\n", pHba->detail);
+ seq_printf(m, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
pHba->host->host_no, pHba->name, host->irq);
- len += sprintf(buffer+len, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
+ seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
- pos = begin + len;
-
- /* CHECKPOINT */
- if(pos > offset + length) {
- goto stop_output;
- }
- if(pos <= offset) {
- /*
- * If we haven't even written to where we last left
- * off (the last time we were called), reset the
- * beginning pointer.
- */
- len = 0;
- begin = pos;
- }
- len += sprintf(buffer+len, "Devices:\n");
+ seq_printf(m, "Devices:\n");
for(chan = 0; chan < MAX_CHANNEL; chan++) {
for(id = 0; id < MAX_ID; id++) {
d = pHba->channel[chan].device[id];
- while(d){
- len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
- len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
- pos = begin + len;
-
-
- /* CHECKPOINT */
- if(pos > offset + length) {
- goto stop_output;
- }
- if(pos <= offset) {
- len = 0;
- begin = pos;
- }
+ while(d) {
+ seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
+ seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);
unit = d->pI2o_dev->lct_data.tid;
- len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d) (%s)\n\n",
+ seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d) (%s)\n\n",
unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
scsi_device_online(d->pScsi_dev)? "online":"offline");
- pos = begin + len;
-
- /* CHECKPOINT */
- if(pos > offset + length) {
- goto stop_output;
- }
- if(pos <= offset) {
- len = 0;
- begin = pos;
- }
-
d = d->next_lun;
}
}
}
-
- /*
- * begin is where we last checked our position with regards to offset
- * begin is always less than offset. len is relative to begin. It
- * is the number of bytes written past begin
- *
- */
-stop_output:
- /* stop the output and calculate the correct length */
- *(buffer + len) = '\0';
-
- *start = buffer + (offset - begin); /* Start of wanted data */
- len -= (offset - begin);
- if(len > length) {
- len = length;
- } else if(len < 0){
- len = 0;
- **start = '\0';
- }
- return len;
+ return 0;
}
/*
@@ -3639,7 +3561,7 @@ static struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.name = "dpt_i2o",
.proc_name = "dpt_i2o",
- .proc_info = adpt_proc_info,
+ .show_info = adpt_show_info,
.info = adpt_info,
.queuecommand = adpt_queue,
.eh_abort_handler = adpt_abort,
diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c
index 4b11bb0..d01f016 100644
--- a/drivers/scsi/dtc.c
+++ b/drivers/scsi/dtc.c
@@ -216,7 +216,8 @@ static int __init dtc_detect(struct scsi_host_template * tpnt)
int sig, count;
tpnt->proc_name = "dtc3x80";
- tpnt->proc_info = &dtc_proc_info;
+ tpnt->show_info = dtc_show_info;
+ tpnt->write_info = dtc_write_info;
for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
addr = 0;
diff --git a/drivers/scsi/dtc.h b/drivers/scsi/dtc.h
index cdc6212..92d7cfc 100644
--- a/drivers/scsi/dtc.h
+++ b/drivers/scsi/dtc.h
@@ -88,7 +88,8 @@ static int dtc_bus_reset(Scsi_Cmnd *);
#define NCR5380_queue_command dtc_queue_command
#define NCR5380_abort dtc_abort
#define NCR5380_bus_reset dtc_bus_reset
-#define NCR5380_proc_info dtc_proc_info
+#define NCR5380_show_info dtc_show_info
+#define NCR5380_write_info dtc_write_info
/* 15 12 11 10
1001 1100 0000 0000 */
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index d5f8362..356def4 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -92,58 +92,22 @@ static unsigned long queue_counter;
static struct scsi_host_template driver_template;
-/*
- * eata_proc_info
- * inout : decides on the direction of the dataflow and the meaning of the
- * variables
- * buffer: If inout==FALSE data is being written to it else read from it
- * *start: If inout==FALSE start of the valid data in the buffer
- * offset: If inout==FALSE offset from the beginning of the imaginary file
- * from which we start writing into the buffer
- * length: If inout==FALSE max number of bytes to be written into the buffer
- * else number of bytes in the buffer
- */
-static int eata_pio_proc_info(struct Scsi_Host *shost, char *buffer, char **start, off_t offset,
- int length, int rw)
+static int eata_pio_show_info(struct seq_file *m, struct Scsi_Host *shost)
{
- int len = 0;
- off_t begin = 0, pos = 0;
-
- if (rw)
- return -ENOSYS;
-
- len += sprintf(buffer+len, "EATA (Extended Attachment) PIO driver version: "
+ seq_printf(m, "EATA (Extended Attachment) PIO driver version: "
"%d.%d%s\n",VER_MAJOR, VER_MINOR, VER_SUB);
- len += sprintf(buffer + len, "queued commands: %10ld\n"
+ seq_printf(m, "queued commands: %10ld\n"
"processed interrupts:%10ld\n", queue_counter, int_counter);
- len += sprintf(buffer + len, "\nscsi%-2d: HBA %.10s\n",
+ seq_printf(m, "\nscsi%-2d: HBA %.10s\n",
shost->host_no, SD(shost)->name);
- len += sprintf(buffer + len, "Firmware revision: v%s\n",
+ seq_printf(m, "Firmware revision: v%s\n",
SD(shost)->revision);
- len += sprintf(buffer + len, "IO: PIO\n");
- len += sprintf(buffer + len, "Base IO : %#.4x\n", (u32) shost->base);
- len += sprintf(buffer + len, "Host Bus: %s\n",
+ seq_printf(m, "IO: PIO\n");
+ seq_printf(m, "Base IO : %#.4x\n", (u32) shost->base);
+ seq_printf(m, "Host Bus: %s\n",
(SD(shost)->bustype == 'P')?"PCI ":
(SD(shost)->bustype == 'E')?"EISA":"ISA ");
-
- pos = begin + len;
-
- if (pos < offset) {
- len = 0;
- begin = pos;
- }
- if (pos > offset + length)
- goto stop_output;
-
-stop_output:
- DBG(DBG_PROC, printk("2pos: %ld offset: %ld len: %d\n", pos, offset, len));
- *start = buffer + (offset - begin); /* Start of wanted data */
- len -= (offset - begin); /* Start slop */
- if (len > length)
- len = length; /* Ending slop */
- DBG(DBG_PROC, printk("3pos: %ld offset: %ld len: %d\n", pos, offset, len));
-
- return len;
+ return 0;
}
static int eata_pio_release(struct Scsi_Host *sh)
@@ -985,7 +949,7 @@ static int eata_pio_detect(struct scsi_host_template *tpnt)
static struct scsi_host_template driver_template = {
.proc_name = "eata_pio",
.name = "EATA (Extended Attachment) PIO driver",
- .proc_info = eata_pio_proc_info,
+ .show_info = eata_pio_show_info,
.detect = eata_pio_detect,
.release = eata_pio_release,
.queuecommand = eata_pio_queue,
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 5041f92..5cec6c6 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -745,42 +745,36 @@ static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src,
#include "NCR5380.c"
-#define PRINTP(x) len += sprintf(buffer+len, x)
+#define PRINTP(x) seq_printf(m, x)
#define ANDP ,
-static int sprint_opcode(char *buffer, int len, int opcode)
+static void sprint_opcode(struct seq_file *m, int opcode)
{
- int start = len;
PRINTP("0x%02x " ANDP opcode);
- return len - start;
}
-static int sprint_command(char *buffer, int len, unsigned char *command)
+static void sprint_command(struct seq_file *m, unsigned char *command)
{
- int i, s, start = len;
- len += sprint_opcode(buffer, len, command[0]);
+ int i, s;
+ sprint_opcode(m, command[0]);
for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
PRINTP("%02x " ANDP command[i]);
PRINTP("\n");
- return len - start;
}
/**
* sprintf_Scsi_Cmnd - print a scsi command
- * @buffer: buffr to print into
- * @len: buffer length
+ * @m: seq_file to print into
* @cmd: SCSI command block
*
* Print out the target and command data in hex
*/
-static int sprint_Scsi_Cmnd(char *buffer, int len, Scsi_Cmnd * cmd)
+static void sprint_Scsi_Cmnd(struct seq_file *m, Scsi_Cmnd * cmd)
{
- int start = len;
PRINTP("host number %d destination target %d, lun %d\n" ANDP cmd->device->host->host_no ANDP cmd->device->id ANDP cmd->device->lun);
PRINTP(" command = ");
- len += sprint_command(buffer, len, cmd->cmnd);
- return len - start;
+ sprint_command(m, cmd->cmnd);
}
/**
@@ -800,9 +794,8 @@ static int sprint_Scsi_Cmnd(char *buffer, int len, Scsi_Cmnd * cmd)
* Locks: global cli/lock for queue walk
*/
-static int generic_NCR5380_proc_info(struct Scsi_Host *scsi_ptr, char *buffer, char **start, off_t offset, int length, int inout)
+static int generic_NCR5380_show_info(struct seq_file *m, struct Scsi_Host *scsi_ptr)
{
- int len = 0;
NCR5380_local_declare();
unsigned long flags;
unsigned char status;
@@ -853,16 +846,16 @@ static int generic_NCR5380_proc_info(struct Scsi_Host *scsi_ptr, char *buffer, c
PRINTP(" T:%d %s " ANDP dev->id ANDP scsi_device_type(dev->type));
for (i = 0; i < 8; i++)
if (dev->vendor[i] >= 0x20)
- *(buffer + (len++)) = dev->vendor[i];
- *(buffer + (len++)) = ' ';
+ seq_putc(m, dev->vendor[i]);
+ seq_putc(m, ' ');
for (i = 0; i < 16; i++)
if (dev->model[i] >= 0x20)
- *(buffer + (len++)) = dev->model[i];
- *(buffer + (len++)) = ' ';
+ seq_putc(m, dev->model[i]);
+ seq_putc(m, ' ');
for (i = 0; i < 4; i++)
if (dev->rev[i] >= 0x20)
- *(buffer + (len++)) = dev->rev[i];
- *(buffer + (len++)) = ' ';
+ seq_putc(m, dev->rev[i]);
+ seq_putc(m, ' ');
PRINTP("\n%10ld kb read in %5ld secs" ANDP br / 1024 ANDP tr);
if (tr)
@@ -886,32 +879,28 @@ static int generic_NCR5380_proc_info(struct Scsi_Host *scsi_ptr, char *buffer, c
if (!hostdata->connected) {
PRINTP("No currently connected command\n");
} else {
- len += sprint_Scsi_Cmnd(buffer, len, (Scsi_Cmnd *) hostdata->connected);
+ sprint_Scsi_Cmnd(m, (Scsi_Cmnd *) hostdata->connected);
}
PRINTP("issue_queue\n");
for (ptr = (Scsi_Cmnd *) hostdata->issue_queue; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
- len += sprint_Scsi_Cmnd(buffer, len, ptr);
+ sprint_Scsi_Cmnd(m, ptr);
PRINTP("disconnected_queue\n");
for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
- len += sprint_Scsi_Cmnd(buffer, len, ptr);
+ sprint_Scsi_Cmnd(m, ptr);
- *start = buffer + offset;
- len -= offset;
- if (len > length)
- len = length;
spin_unlock_irqrestore(scsi_ptr->host_lock, flags);
- return len;
+ return 0;
}
#undef PRINTP
#undef ANDP
static struct scsi_host_template driver_template = {
- .proc_info = generic_NCR5380_proc_info,
+ .show_info = generic_NCR5380_show_info,
.name = "Generic NCR5380/NCR53C400 Scsi Driver",
.detect = generic_NCR5380_detect,
.release = generic_NCR5380_release_resources,
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 59bceac..6d55b4e 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -4676,7 +4676,8 @@ static struct scsi_host_template gdth_template = {
.eh_bus_reset_handler = gdth_eh_bus_reset,
.slave_configure = gdth_slave_configure,
.bios_param = gdth_bios_param,
- .proc_info = gdth_proc_info,
+ .show_info = gdth_show_info,
+ .write_info = gdth_set_info,
.eh_timed_out = gdth_timed_out,
.proc_name = "gdth",
.can_queue = GDTH_MAXCMDS,
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index fbf6f0f..3fd8b83 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -1007,6 +1007,7 @@ typedef struct {
/* function prototyping */
-int gdth_proc_info(struct Scsi_Host *, char *,char **,off_t,int,int);
+int gdth_show_info(struct seq_file *, struct Scsi_Host *);
+int gdth_set_info(struct Scsi_Host *, char *, int);
#endif
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index 6527543..9fb6326 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -5,23 +5,9 @@
#include <linux/completion.h>
#include <linux/slab.h>
-int gdth_proc_info(struct Scsi_Host *host, char *buffer,char **start,off_t offset,int length,
- int inout)
+int gdth_set_info(struct Scsi_Host *host, char *buffer, int length)
{
gdth_ha_str *ha = shost_priv(host);
-
- TRACE2(("gdth_proc_info() length %d offs %d inout %d\n",
- length,(int)offset,inout));
-
- if (inout)
- return(gdth_set_info(buffer,length,host,ha));
- else
- return(gdth_get_info(buffer,start,offset,length,host,ha));
-}
-
-static int gdth_set_info(char *buffer,int length,struct Scsi_Host *host,
- gdth_ha_str *ha)
-{
int ret_val = -EINVAL;
TRACE2(("gdth_set_info() ha %d\n",ha->hanum,));
@@ -149,12 +135,10 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
return(-EINVAL);
}
-static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
- struct Scsi_Host *host, gdth_ha_str *ha)
+int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
{
- int size = 0,len = 0;
+ gdth_ha_str *ha = shost_priv(host);
int hlen;
- off_t begin = 0,pos = 0;
int id, i, j, k, sec, flag;
int no_mdrv = 0, drv_no, is_mirr;
u32 cnt;
@@ -189,8 +173,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
/* request is i.e. "cat /proc/scsi/gdth/0" */
/* format: %-15s\t%-10s\t%-15s\t%s */
/* driver parameters */
- size = sprintf(buffer+len,"Driver Parameters:\n");
- len += size; pos = begin + len;
+ seq_printf(m, "Driver Parameters:\n");
if (reserve_list[0] == 0xff)
strcpy(hrec, "--");
else {
@@ -201,69 +184,50 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
hlen += snprintf(hrec + hlen , 161 - hlen, ",%d", reserve_list[i]);
}
}
- size = sprintf(buffer+len,
+ seq_printf(m,
" reserve_mode: \t%d \treserve_list: \t%s\n",
reserve_mode, hrec);
- len += size; pos = begin + len;
- size = sprintf(buffer+len,
+ seq_printf(m,
" max_ids: \t%-3d \thdr_channel: \t%d\n",
max_ids, hdr_channel);
- len += size; pos = begin + len;
/* controller information */
- size = sprintf(buffer+len,"\nDisk Array Controller Information:\n");
- len += size; pos = begin + len;
- strcpy(hrec, ha->binfo.type_string);
- size = sprintf(buffer+len,
+ seq_printf(m,"\nDisk Array Controller Information:\n");
+ seq_printf(m,
" Number: \t%d \tName: \t%s\n",
- ha->hanum, hrec);
- len += size; pos = begin + len;
+ ha->hanum, ha->binfo.type_string);
+ seq_printf(m,
+ " Driver Ver.: \t%-10s\tFirmware Ver.: \t",
+ GDTH_VERSION_STR);
if (ha->more_proc)
- sprintf(hrec, "%d.%02d.%02d-%c%03X",
+ seq_printf(m, "%d.%02d.%02d-%c%03X\n",
(u8)(ha->binfo.upd_fw_ver>>24),
(u8)(ha->binfo.upd_fw_ver>>16),
(u8)(ha->binfo.upd_fw_ver),
ha->bfeat.raid ? 'R':'N',
ha->binfo.upd_revision);
else
- sprintf(hrec, "%d.%02d", (u8)(ha->cpar.version>>8),
+ seq_printf(m, "%d.%02d\n", (u8)(ha->cpar.version>>8),
(u8)(ha->cpar.version));
-
- size = sprintf(buffer+len,
- " Driver Ver.: \t%-10s\tFirmware Ver.: \t%s\n",
- GDTH_VERSION_STR, hrec);
- len += size; pos = begin + len;
- if (ha->more_proc) {
+ if (ha->more_proc)
/* more information: 1. about controller */
- size = sprintf(buffer+len,
+ seq_printf(m,
" Serial No.: \t0x%8X\tCache RAM size:\t%d KB\n",
ha->binfo.ser_no, ha->binfo.memsize / 1024);
- len += size; pos = begin + len;
- }
#ifdef GDTH_DMA_STATISTICS
/* controller statistics */
- size = sprintf(buffer+len,"\nController Statistics:\n");
- len += size; pos = begin + len;
- size = sprintf(buffer+len,
+ seq_printf(m,"\nController Statistics:\n");
+ seq_printf(m,
" 32-bit DMA buffer:\t%lu\t64-bit DMA buffer:\t%lu\n",
ha->dma32_cnt, ha->dma64_cnt);
- len += size; pos = begin + len;
#endif
- if (pos < offset) {
- len = 0;
- begin = pos;
- }
- if (pos > offset + length)
- goto stop_output;
-
if (ha->more_proc) {
/* more information: 2. about physical devices */
- size = sprintf(buffer+len,"\nPhysical Devices:");
- len += size; pos = begin + len;
+ seq_printf(m, "\nPhysical Devices:");
flag = FALSE;
buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr);
@@ -309,21 +273,19 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
strncpy(hrec+8,pdi->product,16);
strncpy(hrec+24,pdi->revision,4);
hrec[28] = 0;
- size = sprintf(buffer+len,
+ seq_printf(m,
"\n Chn/ID/LUN: \t%c/%02d/%d \tName: \t%s\n",
'A'+i,pdi->target_id,pdi->lun,hrec);
- len += size; pos = begin + len;
flag = TRUE;
pdi->no_ldrive &= 0xffff;
if (pdi->no_ldrive == 0xffff)
strcpy(hrec,"--");
else
sprintf(hrec,"%d",pdi->no_ldrive);
- size = sprintf(buffer+len,
+ seq_printf(m,
" Capacity [MB]:\t%-6d \tTo Log. Drive: \t%s\n",
pdi->blkcnt/(1024*1024/pdi->blksize),
hrec);
- len += size; pos = begin + len;
} else {
pdi->devtype = 0xff;
}
@@ -333,11 +295,10 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
for (k = 0; k < pds->count; ++k) {
if (pds->list[k].tid == pdi->target_id &&
pds->list[k].lun == pdi->lun) {
- size = sprintf(buffer+len,
+ seq_printf(m,
" Retries: \t%-6d \tReassigns: \t%d\n",
pds->list[k].retries,
pds->list[k].reassigns);
- len += size; pos = begin + len;
break;
}
}
@@ -355,32 +316,20 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
pdef->sddc_type = 0x08;
if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) == S_OK) {
- size = sprintf(buffer+len,
+ seq_printf(m,
" Grown Defects:\t%d\n",
pdef->sddc_cnt);
- len += size; pos = begin + len;
}
}
- if (pos < offset) {
- len = 0;
- begin = pos;
- }
- if (pos > offset + length) {
- gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
- goto stop_output;
- }
}
}
gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
- if (!flag) {
- size = sprintf(buffer+len, "\n --\n");
- len += size; pos = begin + len;
- }
+ if (!flag)
+ seq_printf(m, "\n --\n");
/* 3. about logical drives */
- size = sprintf(buffer+len,"\nLogical Drives:");
- len += size; pos = begin + len;
+ seq_printf(m,"\nLogical Drives:");
flag = FALSE;
buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr);
@@ -418,10 +367,9 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
}
if (drv_no == i) {
- size = sprintf(buffer+len,
+ seq_printf(m,
"\n Number: \t%-2d \tStatus: \t%s\n",
drv_no, hrec);
- len += size; pos = begin + len;
flag = TRUE;
no_mdrv = pcdi->cd_ldcnt;
if (no_mdrv > 1 || pcdi->ld_slave != -1) {
@@ -436,61 +384,37 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
} else {
strcpy(hrec, "???");
}
- size = sprintf(buffer+len,
+ seq_printf(m,
" Capacity [MB]:\t%-6d \tType: \t%s\n",
pcdi->ld_blkcnt/(1024*1024/pcdi->ld_blksize),
hrec);
- len += size; pos = begin + len;
} else {
- size = sprintf(buffer+len,
+ seq_printf(m,
" Slave Number: \t%-2d \tStatus: \t%s\n",
drv_no & 0x7fff, hrec);
- len += size; pos = begin + len;
}
drv_no = pcdi->ld_slave;
- if (pos < offset) {
- len = 0;
- begin = pos;
- }
- if (pos > offset + length) {
- gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
- goto stop_output;
- }
} while (drv_no != -1);
- if (is_mirr) {
- size = sprintf(buffer+len,
+ if (is_mirr)
+ seq_printf(m,
" Missing Drv.: \t%-2d \tInvalid Drv.: \t%d\n",
no_mdrv - j - k, k);
- len += size; pos = begin + len;
- }
-
+
if (!ha->hdr[i].is_arraydrv)
strcpy(hrec, "--");
else
sprintf(hrec, "%d", ha->hdr[i].master_no);
- size = sprintf(buffer+len,
+ seq_printf(m,
" To Array Drv.:\t%s\n", hrec);
- len += size; pos = begin + len;
- if (pos < offset) {
- len = 0;
- begin = pos;
- }
- if (pos > offset + length) {
- gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
- goto stop_output;
- }
}
gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
- if (!flag) {
- size = sprintf(buffer+len, "\n --\n");
- len += size; pos = begin + len;
- }
+ if (!flag)
+ seq_printf(m, "\n --\n");
/* 4. about array drives */
- size = sprintf(buffer+len,"\nArray Drives:");
- len += size; pos = begin + len;
+ seq_printf(m,"\nArray Drives:");
flag = FALSE;
buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr);
@@ -525,10 +449,9 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
strcat(hrec, "/expand");
else if (pai->ai_ext_state & 0x1)
strcat(hrec, "/patch");
- size = sprintf(buffer+len,
+ seq_printf(m,
"\n Number: \t%-2d \tStatus: \t%s\n",
i,hrec);
- len += size; pos = begin + len;
flag = TRUE;
if (pai->ai_type == 0)
@@ -539,31 +462,19 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
strcpy(hrec, "RAID-5");
else
strcpy(hrec, "RAID-10");
- size = sprintf(buffer+len,
+ seq_printf(m,
" Capacity [MB]:\t%-6d \tType: \t%s\n",
pai->ai_size/(1024*1024/pai->ai_secsize),
hrec);
- len += size; pos = begin + len;
- if (pos < offset) {
- len = 0;
- begin = pos;
- }
- if (pos > offset + length) {
- gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
- goto stop_output;
- }
}
}
gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
- if (!flag) {
- size = sprintf(buffer+len, "\n --\n");
- len += size; pos = begin + len;
- }
+ if (!flag)
+ seq_printf(m, "\n --\n");
/* 5. about host drives */
- size = sprintf(buffer+len,"\nHost Drives:");
- len += size; pos = begin + len;
+ seq_printf(m,"\nHost Drives:");
flag = FALSE;
buf = gdth_ioctl_alloc(ha, sizeof(gdth_hget_str), FALSE, &paddr);
@@ -605,33 +516,22 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
if (!(ha->hdr[i].present))
continue;
- size = sprintf(buffer+len,
+ seq_printf(m,
"\n Number: \t%-2d \tArr/Log. Drive:\t%d\n",
i, ha->hdr[i].ldr_no);
- len += size; pos = begin + len;
flag = TRUE;
- size = sprintf(buffer+len,
+ seq_printf(m,
" Capacity [MB]:\t%-6d \tStart Sector: \t%d\n",
(u32)(ha->hdr[i].size/2048), ha->hdr[i].start_sec);
- len += size; pos = begin + len;
- if (pos < offset) {
- len = 0;
- begin = pos;
- }
- if (pos > offset + length)
- goto stop_output;
}
- if (!flag) {
- size = sprintf(buffer+len, "\n --\n");
- len += size; pos = begin + len;
- }
+ if (!flag)
+ seq_printf(m, "\n --\n");
}
/* controller events */
- size = sprintf(buffer+len,"\nController Events:\n");
- len += size; pos = begin + len;
+ seq_printf(m,"\nController Events:\n");
for (id = -1;;) {
id = gdth_read_event(ha, id, estr);
@@ -643,29 +543,14 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
do_gettimeofday(&tv);
sec = (int)(tv.tv_sec - estr->first_stamp);
if (sec < 0) sec = 0;
- size = sprintf(buffer+len," date- %02d:%02d:%02d\t%s\n",
+ seq_printf(m," date- %02d:%02d:%02d\t%s\n",
sec/3600, sec%3600/60, sec%60, hrec);
- len += size; pos = begin + len;
- if (pos < offset) {
- len = 0;
- begin = pos;
- }
- if (pos > offset + length)
- goto stop_output;
}
if (id == -1)
break;
}
-
stop_output:
- *start = buffer +(offset-begin);
- len -= (offset-begin);
- if (len > length)
- len = length;
- TRACE2(("get_info() len %d pos %d begin %d offset %d length %d size %d\n",
- len,(int)pos,(int)begin,(int)offset,length,size));
- rc = len;
-
+ rc = 0;
free_fail:
kfree(gdtcmd);
kfree(estr);
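
The gdth conversion above is the general shape of this series: the old read_proc-style bookkeeping (len/size/pos/begin, the offset window checks, the stop_output trimming) disappears because the seq_file core handles buffering and partial reads itself, so a show routine only emits text and returns 0. A minimal sketch of that pattern, with a hypothetical foo driver standing in for any of the converted ones (struct foo_hostdata, FOO_VERSION and the field names are illustrative, not taken from the patch):

#include <linux/seq_file.h>
#include <scsi/scsi_host.h>

#define FOO_VERSION "1.0"			/* placeholder version string */

struct foo_hostdata {				/* hypothetical per-host state */
	int queued_cmds;
};

/* Emit everything unconditionally; no offset/length arithmetic is needed
 * because seq_read() slices the buffer for userspace. */
static int foo_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct foo_hostdata *hd = shost_priv(host);

	seq_printf(m, "Driver version : %s\n", FOO_VERSION);
	seq_printf(m, "Queued commands: %d\n", hd->queued_cmds);
	return 0;				/* 0 on success */
}
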
diff --git a/drivers/scsi/gdth_proc.h b/drivers/scsi/gdth_proc.h
index dab15f5..aaa6181 100644
--- a/drivers/scsi/gdth_proc.h
+++ b/drivers/scsi/gdth_proc.h
@@ -8,11 +8,6 @@
int gdth_execute(struct Scsi_Host *shost, gdth_cmd_str *gdtcmd, char *cmnd,
int timeout, u32 *info);
-static int gdth_set_info(char *buffer,int length,struct Scsi_Host *host,
- gdth_ha_str *ha);
-static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
- struct Scsi_Host *host, gdth_ha_str *ha);
-
static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
int length, gdth_ha_str *ha);
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index dbe4cc6..2203ac2 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -191,7 +191,8 @@ static int gvp11_bus_reset(struct scsi_cmnd *cmd)
static struct scsi_host_template gvp11_scsi_template = {
.module = THIS_MODULE,
.name = "GVP Series II SCSI",
- .proc_info = wd33c93_proc_info,
+ .show_info = wd33c93_show_info,
+ .write_info = wd33c93_write_info,
.proc_name = "GVP11",
.queuecommand = wd33c93_queuecommand,
.eh_abort_handler = wd33c93_abort,
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 26cd9d1..89a8266 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -121,45 +121,26 @@ static inline void imm_pb_release(imm_struct *dev)
* testing...
* Also gives a method to use a script to obtain optimum timings (TODO)
*/
-static inline int imm_proc_write(imm_struct *dev, char *buffer, int length)
+static int imm_write_info(struct Scsi_Host *host, char *buffer, int length)
{
- unsigned long x;
+ imm_struct *dev = imm_dev(host);
if ((length > 5) && (strncmp(buffer, "mode=", 5) == 0)) {
- x = simple_strtoul(buffer + 5, NULL, 0);
- dev->mode = x;
+ dev->mode = simple_strtoul(buffer + 5, NULL, 0);
return length;
}
printk("imm /proc: invalid variable\n");
- return (-EINVAL);
+ return -EINVAL;
}
-static int imm_proc_info(struct Scsi_Host *host, char *buffer, char **start,
- off_t offset, int length, int inout)
+static int imm_show_info(struct seq_file *m, struct Scsi_Host *host)
{
imm_struct *dev = imm_dev(host);
- int len = 0;
-
- if (inout)
- return imm_proc_write(dev, buffer, length);
-
- len += sprintf(buffer + len, "Version : %s\n", IMM_VERSION);
- len +=
- sprintf(buffer + len, "Parport : %s\n",
- dev->dev->port->name);
- len +=
- sprintf(buffer + len, "Mode : %s\n",
- IMM_MODE_STRING[dev->mode]);
- /* Request for beyond end of buffer */
- if (offset > len)
- return 0;
-
- *start = buffer + offset;
- len -= offset;
- if (len > length)
- len = length;
- return len;
+ seq_printf(m, "Version : %s\n", IMM_VERSION);
+ seq_printf(m, "Parport : %s\n", dev->dev->port->name);
+ seq_printf(m, "Mode : %s\n", IMM_MODE_STRING[dev->mode]);
+ return 0;
}
#if IMM_DEBUG > 0
@@ -1118,7 +1099,8 @@ static int imm_adjust_queue(struct scsi_device *device)
static struct scsi_host_template imm_template = {
.module = THIS_MODULE,
.proc_name = "imm",
- .proc_info = imm_proc_info,
+ .show_info = imm_show_info,
+ .write_info = imm_write_info,
.name = "Iomega VPI2 (imm) interface",
.queuecommand = imm_queuecommand,
.eh_abort_handler = imm_abort,
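
imm shows both halves of the replacement interface in one place: show_info renders the proc file through a seq_file, while write_info receives the raw buffer userspace wrote and returns the consumed length or an error. A hedged sketch of how a host template wires the pair up, following the same "mode=N" parsing idea as imm_write_info (the foo names and the template field values below are placeholders, not from any driver in this patch):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <scsi/scsi_host.h>

static int foo_mode;				/* hypothetical tunable */

static int foo_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	seq_printf(m, "Mode : %d\n", foo_mode);
	return 0;
}

static int foo_write_info(struct Scsi_Host *host, char *buffer, int length)
{
	/* accept "mode=N", reject anything else, as imm does */
	if (length > 5 && strncmp(buffer, "mode=", 5) == 0) {
		foo_mode = simple_strtoul(buffer + 5, NULL, 0);
		return length;
	}
	return -EINVAL;
}

static struct scsi_host_template foo_template = {
	.module		= THIS_MODULE,
	.proc_name	= "foo",
	.name		= "example adapter",
	.show_info	= foo_show_info,	/* read side */
	.write_info	= foo_write_info,	/* write side */
	.this_id	= -1,
};
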
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
index deb5b6d..bf02821 100644
--- a/drivers/scsi/in2000.c
+++ b/drivers/scsi/in2000.c
@@ -2166,152 +2166,117 @@ static int in2000_biosparam(struct scsi_device *sdev, struct block_device *bdev,
}
-static int in2000_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off, int len, int in)
+static int in2000_write_info(struct Scsi_Host *instance, char *buf, int len)
{
#ifdef PROC_INTERFACE
char *bp;
- char tbuf[128];
- unsigned long flags;
struct IN2000_hostdata *hd;
- Scsi_Cmnd *cmd;
int x, i;
- static int stop = 0;
hd = (struct IN2000_hostdata *) instance->hostdata;
-/* If 'in' is TRUE we need to _read_ the proc file. We accept the following
- * keywords (same format as command-line, but only ONE per read):
- * debug
- * disconnect
- * period
- * resync
- * proc
- */
-
- if (in) {
- buf[len] = '\0';
- bp = buf;
- if (!strncmp(bp, "debug:", 6)) {
- bp += 6;
- hd->args = simple_strtoul(bp, NULL, 0) & DB_MASK;
- } else if (!strncmp(bp, "disconnect:", 11)) {
- bp += 11;
- x = simple_strtoul(bp, NULL, 0);
- if (x < DIS_NEVER || x > DIS_ALWAYS)
- x = DIS_ADAPTIVE;
- hd->disconnect = x;
- } else if (!strncmp(bp, "period:", 7)) {
- bp += 7;
- x = simple_strtoul(bp, NULL, 0);
- hd->default_sx_per = sx_table[round_period((unsigned int) x)].period_ns;
- } else if (!strncmp(bp, "resync:", 7)) {
- bp += 7;
- x = simple_strtoul(bp, NULL, 0);
- for (i = 0; i < 7; i++)
- if (x & (1 << i))
- hd->sync_stat[i] = SS_UNSET;
- } else if (!strncmp(bp, "proc:", 5)) {
- bp += 5;
- hd->proc = simple_strtoul(bp, NULL, 0);
- } else if (!strncmp(bp, "level2:", 7)) {
- bp += 7;
- hd->level2 = simple_strtoul(bp, NULL, 0);
- }
- return len;
+ buf[len] = '\0';
+ bp = buf;
+ if (!strncmp(bp, "debug:", 6)) {
+ bp += 6;
+ hd->args = simple_strtoul(bp, NULL, 0) & DB_MASK;
+ } else if (!strncmp(bp, "disconnect:", 11)) {
+ bp += 11;
+ x = simple_strtoul(bp, NULL, 0);
+ if (x < DIS_NEVER || x > DIS_ALWAYS)
+ x = DIS_ADAPTIVE;
+ hd->disconnect = x;
+ } else if (!strncmp(bp, "period:", 7)) {
+ bp += 7;
+ x = simple_strtoul(bp, NULL, 0);
+ hd->default_sx_per = sx_table[round_period((unsigned int) x)].period_ns;
+ } else if (!strncmp(bp, "resync:", 7)) {
+ bp += 7;
+ x = simple_strtoul(bp, NULL, 0);
+ for (i = 0; i < 7; i++)
+ if (x & (1 << i))
+ hd->sync_stat[i] = SS_UNSET;
+ } else if (!strncmp(bp, "proc:", 5)) {
+ bp += 5;
+ hd->proc = simple_strtoul(bp, NULL, 0);
+ } else if (!strncmp(bp, "level2:", 7)) {
+ bp += 7;
+ hd->level2 = simple_strtoul(bp, NULL, 0);
}
+#endif
+ return len;
+}
+
+static int in2000_show_info(struct seq_file *m, struct Scsi_Host *instance)
+{
+
+#ifdef PROC_INTERFACE
+ unsigned long flags;
+ struct IN2000_hostdata *hd;
+ Scsi_Cmnd *cmd;
+ int x;
+
+ hd = (struct IN2000_hostdata *) instance->hostdata;
spin_lock_irqsave(instance->host_lock, flags);
- bp = buf;
- *bp = '\0';
- if (hd->proc & PR_VERSION) {
- sprintf(tbuf, "\nVersion %s - %s.", IN2000_VERSION, IN2000_DATE);
- strcat(bp, tbuf);
- }
+ if (hd->proc & PR_VERSION)
+ seq_printf(m, "\nVersion %s - %s.", IN2000_VERSION, IN2000_DATE);
+
if (hd->proc & PR_INFO) {
- sprintf(tbuf, "\ndip_switch=%02x: irq=%d io=%02x floppy=%s sync/DOS5=%s", (hd->dip_switch & 0x7f), instance->irq, hd->io_base, (hd->dip_switch & 0x40) ? "Yes" : "No", (hd->dip_switch & 0x20) ? "Yes" : "No");
- strcat(bp, tbuf);
- strcat(bp, "\nsync_xfer[] = ");
- for (x = 0; x < 7; x++) {
- sprintf(tbuf, "\t%02x", hd->sync_xfer[x]);
- strcat(bp, tbuf);
- }
- strcat(bp, "\nsync_stat[] = ");
- for (x = 0; x < 7; x++) {
- sprintf(tbuf, "\t%02x", hd->sync_stat[x]);
- strcat(bp, tbuf);
- }
+ seq_printf(m, "\ndip_switch=%02x: irq=%d io=%02x floppy=%s sync/DOS5=%s", (hd->dip_switch & 0x7f), instance->irq, hd->io_base, (hd->dip_switch & 0x40) ? "Yes" : "No", (hd->dip_switch & 0x20) ? "Yes" : "No");
+ seq_printf(m, "\nsync_xfer[] = ");
+ for (x = 0; x < 7; x++)
+ seq_printf(m, "\t%02x", hd->sync_xfer[x]);
+ seq_printf(m, "\nsync_stat[] = ");
+ for (x = 0; x < 7; x++)
+ seq_printf(m, "\t%02x", hd->sync_stat[x]);
}
#ifdef PROC_STATISTICS
if (hd->proc & PR_STATISTICS) {
- strcat(bp, "\ncommands issued: ");
- for (x = 0; x < 7; x++) {
- sprintf(tbuf, "\t%ld", hd->cmd_cnt[x]);
- strcat(bp, tbuf);
- }
- strcat(bp, "\ndisconnects allowed:");
- for (x = 0; x < 7; x++) {
- sprintf(tbuf, "\t%ld", hd->disc_allowed_cnt[x]);
- strcat(bp, tbuf);
- }
- strcat(bp, "\ndisconnects done: ");
- for (x = 0; x < 7; x++) {
- sprintf(tbuf, "\t%ld", hd->disc_done_cnt[x]);
- strcat(bp, tbuf);
- }
- sprintf(tbuf, "\ninterrupts: \t%ld", hd->int_cnt);
- strcat(bp, tbuf);
+ seq_printf(m, "\ncommands issued: ");
+ for (x = 0; x < 7; x++)
+ seq_printf(m, "\t%ld", hd->cmd_cnt[x]);
+ seq_printf(m, "\ndisconnects allowed:");
+ for (x = 0; x < 7; x++)
+ seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]);
+ seq_printf(m, "\ndisconnects done: ");
+ for (x = 0; x < 7; x++)
+ seq_printf(m, "\t%ld", hd->disc_done_cnt[x]);
+ seq_printf(m, "\ninterrupts: \t%ld", hd->int_cnt);
}
#endif
if (hd->proc & PR_CONNECTED) {
- strcat(bp, "\nconnected: ");
+ seq_printf(m, "\nconnected: ");
if (hd->connected) {
cmd = (Scsi_Cmnd *) hd->connected;
- sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
- strcat(bp, tbuf);
+ seq_printf(m, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
}
}
if (hd->proc & PR_INPUTQ) {
- strcat(bp, "\ninput_Q: ");
+ seq_printf(m, "\ninput_Q: ");
cmd = (Scsi_Cmnd *) hd->input_Q;
while (cmd) {
- sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
- strcat(bp, tbuf);
+ seq_printf(m, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
cmd = (Scsi_Cmnd *) cmd->host_scribble;
}
}
if (hd->proc & PR_DISCQ) {
- strcat(bp, "\ndisconnected_Q:");
+ seq_printf(m, "\ndisconnected_Q:");
cmd = (Scsi_Cmnd *) hd->disconnected_Q;
while (cmd) {
- sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
- strcat(bp, tbuf);
+ seq_printf(m, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
cmd = (Scsi_Cmnd *) cmd->host_scribble;
}
}
if (hd->proc & PR_TEST) {
; /* insert your own custom function here */
}
- strcat(bp, "\n");
+ seq_printf(m, "\n");
spin_unlock_irqrestore(instance->host_lock, flags);
- *start = buf;
- if (stop) {
- stop = 0;
- return 0; /* return 0 to signal end-of-file */
- }
- if (off > 0x40000) /* ALWAYS stop after 256k bytes have been read */
- stop = 1;
- if (hd->proc & PR_STOP) /* stop every other time */
- stop = 1;
- return strlen(bp);
-
-#else /* PROC_INTERFACE */
-
- return 0;
-
#endif /* PROC_INTERFACE */
-
+ return 0;
}
MODULE_LICENSE("GPL");
@@ -2319,7 +2284,8 @@ MODULE_LICENSE("GPL");
static struct scsi_host_template driver_template = {
.proc_name = "in2000",
- .proc_info = in2000_proc_info,
+ .write_info = in2000_write_info,
+ .show_info = in2000_show_info,
.name = "Always IN2000",
.detect = in2000_detect,
.release = in2000_release,
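
in2000_show_info keeps the host lock held while it walks the connected, input and disconnected command lists; that is safe because seq_printf() only formats into the buffer seq_read() has already allocated and does not sleep. A small sketch of the same shape under that assumption (the example_queue pointer and the foo name are hypothetical):

#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static struct scsi_cmnd *example_queue;	/* hypothetical driver-private list */

/* Print one "id:lun(opcode)" entry per queued command, holding the host
 * lock so the list cannot change underneath us; seq_printf() just fills
 * the preallocated seq_file buffer, so nothing here sleeps. */
static int foo_show_queue(struct seq_file *m, struct Scsi_Host *host)
{
	struct scsi_cmnd *cmd;
	unsigned long flags;

	spin_lock_irqsave(host->host_lock, flags);
	for (cmd = example_queue; cmd;
	     cmd = (struct scsi_cmnd *)cmd->host_scribble)
		seq_printf(m, " %d:%d(%02x)\n", cmd->device->id,
			   cmd->device->lun, cmd->cmnd[0]);
	spin_unlock_irqrestore(host->host_lock, flags);
	return 0;
}
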
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 9aa86a3..8d5ea8a 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -326,10 +326,9 @@ static void ips_scmd_buf_write(struct scsi_cmnd * scmd, void *data,
static void ips_scmd_buf_read(struct scsi_cmnd * scmd, void *data,
unsigned int count);
-static int ips_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int);
-static int ips_host_info(ips_ha_t *, char *, off_t, int);
-static void copy_mem_info(IPS_INFOSTR *, char *, int);
-static int copy_info(IPS_INFOSTR *, char *, ...);
+static int ips_write_info(struct Scsi_Host *, char *, int);
+static int ips_show_info(struct seq_file *, struct Scsi_Host *);
+static int ips_host_info(ips_ha_t *, struct seq_file *);
static int ips_abort_init(ips_ha_t * ha, int index);
static int ips_init_phase2(int index);
@@ -367,7 +366,8 @@ static struct scsi_host_template ips_driver_template = {
.eh_abort_handler = ips_eh_abort,
.eh_host_reset_handler = ips_eh_reset,
.proc_name = "ips",
- .proc_info = ips_proc_info,
+ .show_info = ips_show_info,
+ .write_info = ips_write_info,
.slave_configure = ips_slave_configure,
.bios_param = ips_biosparam,
.this_id = -1,
@@ -1433,25 +1433,12 @@ ips_info(struct Scsi_Host *SH)
return (bp);
}
-/****************************************************************************/
-/* */
-/* Routine Name: ips_proc_info */
-/* */
-/* Routine Description: */
-/* */
-/* The passthru interface for the driver */
-/* */
-/****************************************************************************/
static int
-ips_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
- int length, int func)
+ips_write_info(struct Scsi_Host *host, char *buffer, int length)
{
int i;
- int ret;
ips_ha_t *ha = NULL;
- METHOD_TRACE("ips_proc_info", 1);
-
/* Find our host structure */
for (i = 0; i < ips_next_controller; i++) {
if (ips_sh[i]) {
@@ -1465,18 +1452,29 @@ ips_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
if (!ha)
return (-EINVAL);
- if (func) {
- /* write */
- return (0);
- } else {
- /* read */
- if (start)
- *start = buffer;
+ return 0;
+}
- ret = ips_host_info(ha, buffer, offset, length);
+static int
+ips_show_info(struct seq_file *m, struct Scsi_Host *host)
+{
+ int i;
+ ips_ha_t *ha = NULL;
- return (ret);
+ /* Find our host structure */
+ for (i = 0; i < ips_next_controller; i++) {
+ if (ips_sh[i]) {
+ if (ips_sh[i] == host) {
+ ha = (ips_ha_t *) ips_sh[i]->hostdata;
+ break;
+ }
+ }
}
+
+ if (!ha)
+ return (-EINVAL);
+
+ return ips_host_info(ha, m);
}
/*--------------------------------------------------------------------------*/
@@ -2035,183 +2033,113 @@ ips_cleanup_passthru(ips_ha_t * ha, ips_scb_t * scb)
/* */
/****************************************************************************/
static int
-ips_host_info(ips_ha_t * ha, char *ptr, off_t offset, int len)
+ips_host_info(ips_ha_t *ha, struct seq_file *m)
{
- IPS_INFOSTR info;
-
METHOD_TRACE("ips_host_info", 1);
- info.buffer = ptr;
- info.length = len;
- info.offset = offset;
- info.pos = 0;
- info.localpos = 0;
-
- copy_info(&info, "\nIBM ServeRAID General Information:\n\n");
+ seq_printf(m, "\nIBM ServeRAID General Information:\n\n");
if ((le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) &&
(le16_to_cpu(ha->nvram->adapter_type) != 0))
- copy_info(&info, "\tController Type : %s\n",
+ seq_printf(m, "\tController Type : %s\n",
ips_adapter_name[ha->ad_type - 1]);
else
- copy_info(&info,
+ seq_printf(m,
"\tController Type : Unknown\n");
if (ha->io_addr)
- copy_info(&info,
- "\tIO region : 0x%lx (%d bytes)\n",
+ seq_printf(m,
+ "\tIO region : 0x%x (%d bytes)\n",
ha->io_addr, ha->io_len);
if (ha->mem_addr) {
- copy_info(&info,
- "\tMemory region : 0x%lx (%d bytes)\n",
+ seq_printf(m,
+ "\tMemory region : 0x%x (%d bytes)\n",
ha->mem_addr, ha->mem_len);
- copy_info(&info,
+ seq_printf(m,
"\tShared memory address : 0x%lx\n",
- ha->mem_ptr);
+ (unsigned long)ha->mem_ptr);
}
- copy_info(&info, "\tIRQ number : %d\n", ha->pcidev->irq);
+ seq_printf(m, "\tIRQ number : %d\n", ha->pcidev->irq);
/* For the Next 3 lines Check for Binary 0 at the end and don't include it if it's there. */
/* That keeps everything happy for "text" operations on the proc file. */
if (le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) {
if (ha->nvram->bios_low[3] == 0) {
- copy_info(&info,
- "\tBIOS Version : %c%c%c%c%c%c%c\n",
- ha->nvram->bios_high[0], ha->nvram->bios_high[1],
- ha->nvram->bios_high[2], ha->nvram->bios_high[3],
- ha->nvram->bios_low[0], ha->nvram->bios_low[1],
- ha->nvram->bios_low[2]);
+ seq_printf(m,
+ "\tBIOS Version : %c%c%c%c%c%c%c\n",
+ ha->nvram->bios_high[0], ha->nvram->bios_high[1],
+ ha->nvram->bios_high[2], ha->nvram->bios_high[3],
+ ha->nvram->bios_low[0], ha->nvram->bios_low[1],
+ ha->nvram->bios_low[2]);
} else {
- copy_info(&info,
- "\tBIOS Version : %c%c%c%c%c%c%c%c\n",
- ha->nvram->bios_high[0], ha->nvram->bios_high[1],
- ha->nvram->bios_high[2], ha->nvram->bios_high[3],
- ha->nvram->bios_low[0], ha->nvram->bios_low[1],
- ha->nvram->bios_low[2], ha->nvram->bios_low[3]);
+ seq_printf(m,
+ "\tBIOS Version : %c%c%c%c%c%c%c%c\n",
+ ha->nvram->bios_high[0], ha->nvram->bios_high[1],
+ ha->nvram->bios_high[2], ha->nvram->bios_high[3],
+ ha->nvram->bios_low[0], ha->nvram->bios_low[1],
+ ha->nvram->bios_low[2], ha->nvram->bios_low[3]);
}
}
if (ha->enq->CodeBlkVersion[7] == 0) {
- copy_info(&info,
- "\tFirmware Version : %c%c%c%c%c%c%c\n",
- ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1],
- ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3],
- ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5],
- ha->enq->CodeBlkVersion[6]);
+ seq_printf(m,
+ "\tFirmware Version : %c%c%c%c%c%c%c\n",
+ ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1],
+ ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3],
+ ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5],
+ ha->enq->CodeBlkVersion[6]);
} else {
- copy_info(&info,
- "\tFirmware Version : %c%c%c%c%c%c%c%c\n",
- ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1],
- ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3],
- ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5],
- ha->enq->CodeBlkVersion[6], ha->enq->CodeBlkVersion[7]);
+ seq_printf(m,
+ "\tFirmware Version : %c%c%c%c%c%c%c%c\n",
+ ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1],
+ ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3],
+ ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5],
+ ha->enq->CodeBlkVersion[6], ha->enq->CodeBlkVersion[7]);
}
if (ha->enq->BootBlkVersion[7] == 0) {
- copy_info(&info,
- "\tBoot Block Version : %c%c%c%c%c%c%c\n",
- ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1],
- ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3],
- ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5],
- ha->enq->BootBlkVersion[6]);
+ seq_printf(m,
+ "\tBoot Block Version : %c%c%c%c%c%c%c\n",
+ ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1],
+ ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3],
+ ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5],
+ ha->enq->BootBlkVersion[6]);
} else {
- copy_info(&info,
- "\tBoot Block Version : %c%c%c%c%c%c%c%c\n",
- ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1],
- ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3],
- ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5],
- ha->enq->BootBlkVersion[6], ha->enq->BootBlkVersion[7]);
+ seq_printf(m,
+ "\tBoot Block Version : %c%c%c%c%c%c%c%c\n",
+ ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1],
+ ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3],
+ ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5],
+ ha->enq->BootBlkVersion[6], ha->enq->BootBlkVersion[7]);
}
- copy_info(&info, "\tDriver Version : %s%s\n",
+ seq_printf(m, "\tDriver Version : %s%s\n",
IPS_VERSION_HIGH, IPS_VERSION_LOW);
- copy_info(&info, "\tDriver Build : %d\n",
+ seq_printf(m, "\tDriver Build : %d\n",
IPS_BUILD_IDENT);
- copy_info(&info, "\tMax Physical Devices : %d\n",
+ seq_printf(m, "\tMax Physical Devices : %d\n",
ha->enq->ucMaxPhysicalDevices);
- copy_info(&info, "\tMax Active Commands : %d\n",
+ seq_printf(m, "\tMax Active Commands : %d\n",
ha->max_cmds);
- copy_info(&info, "\tCurrent Queued Commands : %d\n",
+ seq_printf(m, "\tCurrent Queued Commands : %d\n",
ha->scb_waitlist.count);
- copy_info(&info, "\tCurrent Active Commands : %d\n",
+ seq_printf(m, "\tCurrent Active Commands : %d\n",
ha->scb_activelist.count - ha->num_ioctl);
- copy_info(&info, "\tCurrent Queued PT Commands : %d\n",
+ seq_printf(m, "\tCurrent Queued PT Commands : %d\n",
ha->copp_waitlist.count);
- copy_info(&info, "\tCurrent Active PT Commands : %d\n",
+ seq_printf(m, "\tCurrent Active PT Commands : %d\n",
ha->num_ioctl);
- copy_info(&info, "\n");
-
- return (info.localpos);
-}
-
-/****************************************************************************/
-/* */
-/* Routine Name: copy_mem_info */
-/* */
-/* Routine Description: */
-/* */
-/* Copy data into an IPS_INFOSTR structure */
-/* */
-/****************************************************************************/
-static void
-copy_mem_info(IPS_INFOSTR * info, char *data, int len)
-{
- METHOD_TRACE("copy_mem_info", 1);
-
- if (info->pos + len < info->offset) {
- info->pos += len;
- return;
- }
-
- if (info->pos < info->offset) {
- data += (info->offset - info->pos);
- len -= (info->offset - info->pos);
- info->pos += (info->offset - info->pos);
- }
-
- if (info->localpos + len > info->length)
- len = info->length - info->localpos;
+ seq_printf(m, "\n");
- if (len > 0) {
- memcpy(info->buffer + info->localpos, data, len);
- info->pos += len;
- info->localpos += len;
- }
-}
-
-/****************************************************************************/
-/* */
-/* Routine Name: copy_info */
-/* */
-/* Routine Description: */
-/* */
-/* printf style wrapper for an info structure */
-/* */
-/****************************************************************************/
-static int
-copy_info(IPS_INFOSTR * info, char *fmt, ...)
-{
- va_list args;
- char buf[128];
- int len;
-
- METHOD_TRACE("copy_info", 1);
-
- va_start(args, fmt);
- len = vsprintf(buf, fmt, args);
- va_end(args);
-
- copy_mem_info(info, buf, len);
-
- return (len);
+ return 0;
}
/****************************************************************************/
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index f2df059..45b9566 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -416,7 +416,6 @@
/*
* Scsi_Host Template
*/
- static int ips_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int);
static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
sector_t capacity, int geom[]);
static int ips_slave_configure(struct scsi_device *SDptr);
@@ -959,14 +958,6 @@ typedef union {
IPS_ENH_SG_LIST *enh_list;
} IPS_SG_LIST;
-typedef struct _IPS_INFOSTR {
- char *buffer;
- int length;
- int offset;
- int pos;
- int localpos;
-} IPS_INFOSTR;
-
typedef struct {
char *option_name;
int *option_flag;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 74cc4a0..446b851 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -2163,10 +2163,10 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
}
/* do we need to support multiple segments? */
- if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
+ if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) {
printk("%s: multiple segments req %u %u, rsp %u %u\n",
- __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
- rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
+ __func__, bio_segments(req->bio), blk_rq_bytes(req),
+ bio_segments(rsp->bio), blk_rq_bytes(rsp));
return -EINVAL;
}
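
The libsas hunk is the one non-proc change in this span: it switches from reading bio->bi_vcnt directly to the bio_segments() accessor when deciding whether an SMP request spans more than one segment, so the check does not depend on how the segment count is stored inside the bio. A trivial hedged sketch of the same test (the function name is made up):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Reject requests whose bio covers more than one segment, using the
 * accessor instead of poking at bi_vcnt directly. */
static int foo_require_single_segment(struct request *req)
{
	if (bio_segments(req->bio) > 1)
		return -EINVAL;
	return 0;
}
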
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 3b574a0..cb465b2 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -10797,36 +10797,6 @@ lpfc_io_resume(struct pci_dev *pdev)
return;
}
-/**
- * lpfc_mgmt_open - method called when 'lpfcmgmt' is opened from userspace
- * @inode: pointer to the inode representing the lpfcmgmt device
- * @filep: pointer to the file representing the open lpfcmgmt device
- *
- * This routine puts a reference count on the lpfc module whenever the
- * character device is opened
- **/
-static int
-lpfc_mgmt_open(struct inode *inode, struct file *filep)
-{
- try_module_get(THIS_MODULE);
- return 0;
-}
-
-/**
- * lpfc_mgmt_release - method called when 'lpfcmgmt' is closed in userspace
- * @inode: pointer to the inode representing the lpfcmgmt device
- * @filep: pointer to the file representing the open lpfcmgmt device
- *
- * This routine removes a reference count from the lpfc module when the
- * character device is closed
- **/
-static int
-lpfc_mgmt_release(struct inode *inode, struct file *filep)
-{
- module_put(THIS_MODULE);
- return 0;
-}
-
static struct pci_device_id lpfc_id_table[] = {
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
PCI_ANY_ID, PCI_ANY_ID, },
@@ -10944,8 +10914,7 @@ static struct pci_driver lpfc_driver = {
};
static const struct file_operations lpfc_mgmt_fop = {
- .open = lpfc_mgmt_open,
- .release = lpfc_mgmt_release,
+ .owner = THIS_MODULE,
};
static struct miscdevice lpfc_mgmt_dev = {
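
The lpfc hunk removes an open/release pair whose only job was try_module_get()/module_put(): with .owner set in the file_operations, the character-device core takes and drops that module reference itself whenever the node is opened and closed. A minimal sketch of the resulting idiom (the foo_mgmt names are placeholders):

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

/* No open()/release() needed just for refcounting: the open path pins
 * the module named in .owner for the lifetime of the file. */
static const struct file_operations foo_mgmt_fops = {
	.owner = THIS_MODULE,
};

static struct miscdevice foo_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "foo_mgmt",
	.fops  = &foo_mgmt_fops,
};
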
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 24828b5..8580757 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -561,7 +561,8 @@ static int macscsi_pwrite (struct Scsi_Host *instance,
static struct scsi_host_template driver_template = {
.proc_name = "Mac5380",
- .proc_info = macscsi_proc_info,
+ .show_info = macscsi_show_info,
+ .write_info = macscsi_write_info,
.name = "Macintosh NCR5380 SCSI",
.detect = macscsi_detect,
.release = macscsi_release,
diff --git a/drivers/scsi/mac_scsi.h b/drivers/scsi/mac_scsi.h
index d26e331..7dc62fc 100644
--- a/drivers/scsi/mac_scsi.h
+++ b/drivers/scsi/mac_scsi.h
@@ -72,7 +72,8 @@
#define NCR5380_queue_command macscsi_queue_command
#define NCR5380_abort macscsi_abort
#define NCR5380_bus_reset macscsi_bus_reset
-#define NCR5380_proc_info macscsi_proc_info
+#define NCR5380_show_info macscsi_show_info
+#define NCR5380_write_info macscsi_write_info
#define BOARD_NORMAL 0
#define BOARD_NCR53C400 1
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 9504ec0..846f475f 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -39,6 +39,7 @@
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/reboot.h>
#include <linux/module.h>
#include <linux/list.h>
@@ -2069,385 +2070,201 @@ mega_free_inquiry(void *inquiry, dma_addr_t dma_handle, struct pci_dev *pdev)
#ifdef CONFIG_PROC_FS
/* Following code handles /proc fs */
-#define CREATE_READ_PROC(string, func) create_proc_read_entry(string, \
- S_IRUSR | S_IFREG, \
- controller_proc_dir_entry, \
- func, adapter)
-
-/**
- * mega_create_proc_entry()
- * @index - index in soft state array
- * @parent - parent node for this /proc entry
- *
- * Creates /proc entries for our controllers.
- */
-static void
-mega_create_proc_entry(int index, struct proc_dir_entry *parent)
-{
- struct proc_dir_entry *controller_proc_dir_entry = NULL;
- u8 string[64] = { 0 };
- adapter_t *adapter = hba_soft_state[index];
-
- sprintf(string, "hba%d", adapter->host->host_no);
-
- controller_proc_dir_entry =
- adapter->controller_proc_dir_entry = proc_mkdir(string, parent);
-
- if(!controller_proc_dir_entry) {
- printk(KERN_WARNING "\nmegaraid: proc_mkdir failed\n");
- return;
- }
- adapter->proc_read = CREATE_READ_PROC("config", proc_read_config);
- adapter->proc_stat = CREATE_READ_PROC("stat", proc_read_stat);
- adapter->proc_mbox = CREATE_READ_PROC("mailbox", proc_read_mbox);
-#if MEGA_HAVE_ENH_PROC
- adapter->proc_rr = CREATE_READ_PROC("rebuild-rate", proc_rebuild_rate);
- adapter->proc_battery = CREATE_READ_PROC("battery-status",
- proc_battery);
-
- /*
- * Display each physical drive on its channel
- */
- adapter->proc_pdrvstat[0] = CREATE_READ_PROC("diskdrives-ch0",
- proc_pdrv_ch0);
- adapter->proc_pdrvstat[1] = CREATE_READ_PROC("diskdrives-ch1",
- proc_pdrv_ch1);
- adapter->proc_pdrvstat[2] = CREATE_READ_PROC("diskdrives-ch2",
- proc_pdrv_ch2);
- adapter->proc_pdrvstat[3] = CREATE_READ_PROC("diskdrives-ch3",
- proc_pdrv_ch3);
-
- /*
- * Display a set of up to 10 logical drive through each of following
- * /proc entries
- */
- adapter->proc_rdrvstat[0] = CREATE_READ_PROC("raiddrives-0-9",
- proc_rdrv_10);
- adapter->proc_rdrvstat[1] = CREATE_READ_PROC("raiddrives-10-19",
- proc_rdrv_20);
- adapter->proc_rdrvstat[2] = CREATE_READ_PROC("raiddrives-20-29",
- proc_rdrv_30);
- adapter->proc_rdrvstat[3] = CREATE_READ_PROC("raiddrives-30-39",
- proc_rdrv_40);
-#endif
-}
-
-
/**
- * proc_read_config()
- * @page - buffer to write the data in
- * @start - where the actual data has been written in page
- * @offset - same meaning as the read system call
- * @count - same meaning as the read system call
- * @eof - set if no more data needs to be returned
- * @data - pointer to our soft state
+ * proc_show_config()
+ * @m - Synthetic file construction data
+ * @v - File iterator
*
* Display configuration information about the controller.
*/
static int
-proc_read_config(char *page, char **start, off_t offset, int count, int *eof,
- void *data)
+proc_show_config(struct seq_file *m, void *v)
{
- adapter_t *adapter = (adapter_t *)data;
- int len = 0;
-
- len += sprintf(page+len, "%s", MEGARAID_VERSION);
+ adapter_t *adapter = m->private;
+ seq_puts(m, MEGARAID_VERSION);
if(adapter->product_info.product_name[0])
- len += sprintf(page+len, "%s\n",
- adapter->product_info.product_name);
-
- len += sprintf(page+len, "Controller Type: ");
+ seq_printf(m, "%s\n", adapter->product_info.product_name);
- if( adapter->flag & BOARD_MEMMAP ) {
- len += sprintf(page+len,
- "438/466/467/471/493/518/520/531/532\n");
- }
- else {
- len += sprintf(page+len,
- "418/428/434\n");
- }
+ seq_puts(m, "Controller Type: ");
- if(adapter->flag & BOARD_40LD) {
- len += sprintf(page+len,
- "Controller Supports 40 Logical Drives\n");
- }
+ if( adapter->flag & BOARD_MEMMAP )
+ seq_puts(m, "438/466/467/471/493/518/520/531/532\n");
+ else
+ seq_puts(m, "418/428/434\n");
- if(adapter->flag & BOARD_64BIT) {
- len += sprintf(page+len,
- "Controller capable of 64-bit memory addressing\n");
- }
- if( adapter->has_64bit_addr ) {
- len += sprintf(page+len,
- "Controller using 64-bit memory addressing\n");
- }
- else {
- len += sprintf(page+len,
- "Controller is not using 64-bit memory addressing\n");
- }
+ if(adapter->flag & BOARD_40LD)
+ seq_puts(m, "Controller Supports 40 Logical Drives\n");
- len += sprintf(page+len, "Base = %08lx, Irq = %d, ", adapter->base,
- adapter->host->irq);
-
- len += sprintf(page+len, "Logical Drives = %d, Channels = %d\n",
- adapter->numldrv, adapter->product_info.nchannels);
-
- len += sprintf(page+len, "Version =%s:%s, DRAM = %dMb\n",
- adapter->fw_version, adapter->bios_version,
- adapter->product_info.dram_size);
-
- len += sprintf(page+len,
- "Controller Queue Depth = %d, Driver Queue Depth = %d\n",
- adapter->product_info.max_commands, adapter->max_cmds);
-
- len += sprintf(page+len, "support_ext_cdb = %d\n",
- adapter->support_ext_cdb);
- len += sprintf(page+len, "support_random_del = %d\n",
- adapter->support_random_del);
- len += sprintf(page+len, "boot_ldrv_enabled = %d\n",
- adapter->boot_ldrv_enabled);
- len += sprintf(page+len, "boot_ldrv = %d\n",
- adapter->boot_ldrv);
- len += sprintf(page+len, "boot_pdrv_enabled = %d\n",
- adapter->boot_pdrv_enabled);
- len += sprintf(page+len, "boot_pdrv_ch = %d\n",
- adapter->boot_pdrv_ch);
- len += sprintf(page+len, "boot_pdrv_tgt = %d\n",
- adapter->boot_pdrv_tgt);
- len += sprintf(page+len, "quiescent = %d\n",
- atomic_read(&adapter->quiescent));
- len += sprintf(page+len, "has_cluster = %d\n",
- adapter->has_cluster);
-
- len += sprintf(page+len, "\nModule Parameters:\n");
- len += sprintf(page+len, "max_cmd_per_lun = %d\n",
- max_cmd_per_lun);
- len += sprintf(page+len, "max_sectors_per_io = %d\n",
- max_sectors_per_io);
-
- *eof = 1;
-
- return len;
+ if(adapter->flag & BOARD_64BIT)
+ seq_puts(m, "Controller capable of 64-bit memory addressing\n");
+ if( adapter->has_64bit_addr )
+ seq_puts(m, "Controller using 64-bit memory addressing\n");
+ else
+ seq_puts(m, "Controller is not using 64-bit memory addressing\n");
+
+ seq_printf(m, "Base = %08lx, Irq = %d, ",
+ adapter->base, adapter->host->irq);
+
+ seq_printf(m, "Logical Drives = %d, Channels = %d\n",
+ adapter->numldrv, adapter->product_info.nchannels);
+
+ seq_printf(m, "Version =%s:%s, DRAM = %dMb\n",
+ adapter->fw_version, adapter->bios_version,
+ adapter->product_info.dram_size);
+
+ seq_printf(m, "Controller Queue Depth = %d, Driver Queue Depth = %d\n",
+ adapter->product_info.max_commands, adapter->max_cmds);
+
+ seq_printf(m, "support_ext_cdb = %d\n", adapter->support_ext_cdb);
+ seq_printf(m, "support_random_del = %d\n", adapter->support_random_del);
+ seq_printf(m, "boot_ldrv_enabled = %d\n", adapter->boot_ldrv_enabled);
+ seq_printf(m, "boot_ldrv = %d\n", adapter->boot_ldrv);
+ seq_printf(m, "boot_pdrv_enabled = %d\n", adapter->boot_pdrv_enabled);
+ seq_printf(m, "boot_pdrv_ch = %d\n", adapter->boot_pdrv_ch);
+ seq_printf(m, "boot_pdrv_tgt = %d\n", adapter->boot_pdrv_tgt);
+ seq_printf(m, "quiescent = %d\n",
+ atomic_read(&adapter->quiescent));
+ seq_printf(m, "has_cluster = %d\n", adapter->has_cluster);
+
+ seq_puts(m, "\nModule Parameters:\n");
+ seq_printf(m, "max_cmd_per_lun = %d\n", max_cmd_per_lun);
+ seq_printf(m, "max_sectors_per_io = %d\n", max_sectors_per_io);
+ return 0;
}
-
-
/**
- * proc_read_stat()
- * @page - buffer to write the data in
- * @start - where the actual data has been written in page
- * @offset - same meaning as the read system call
- * @count - same meaning as the read system call
- * @eof - set if no more data needs to be returned
- * @data - pointer to our soft state
+ * proc_show_stat()
+ * @m - Synthetic file construction data
+ * @v - File iterator
*
- * Diaplay statistical information about the I/O activity.
+ * Display statistical information about the I/O activity.
*/
static int
-proc_read_stat(char *page, char **start, off_t offset, int count, int *eof,
- void *data)
+proc_show_stat(struct seq_file *m, void *v)
{
- adapter_t *adapter;
- int len;
+ adapter_t *adapter = m->private;
+#if MEGA_HAVE_STATS
int i;
+#endif
- i = 0; /* avoid compilation warnings */
- len = 0;
- adapter = (adapter_t *)data;
-
- len = sprintf(page, "Statistical Information for this controller\n");
- len += sprintf(page+len, "pend_cmds = %d\n",
- atomic_read(&adapter->pend_cmds));
+ seq_puts(m, "Statistical Information for this controller\n");
+ seq_printf(m, "pend_cmds = %d\n", atomic_read(&adapter->pend_cmds));
#if MEGA_HAVE_STATS
for(i = 0; i < adapter->numldrv; i++) {
- len += sprintf(page+len, "Logical Drive %d:\n", i);
-
- len += sprintf(page+len,
- "\tReads Issued = %lu, Writes Issued = %lu\n",
- adapter->nreads[i], adapter->nwrites[i]);
-
- len += sprintf(page+len,
- "\tSectors Read = %lu, Sectors Written = %lu\n",
- adapter->nreadblocks[i], adapter->nwriteblocks[i]);
-
- len += sprintf(page+len,
- "\tRead errors = %lu, Write errors = %lu\n\n",
- adapter->rd_errors[i], adapter->wr_errors[i]);
+ seq_printf(m, "Logical Drive %d:\n", i);
+ seq_printf(m, "\tReads Issued = %lu, Writes Issued = %lu\n",
+ adapter->nreads[i], adapter->nwrites[i]);
+ seq_printf(m, "\tSectors Read = %lu, Sectors Written = %lu\n",
+ adapter->nreadblocks[i], adapter->nwriteblocks[i]);
+ seq_printf(m, "\tRead errors = %lu, Write errors = %lu\n\n",
+ adapter->rd_errors[i], adapter->wr_errors[i]);
}
#else
- len += sprintf(page+len,
- "IO and error counters not compiled in driver.\n");
+ seq_puts(m, "IO and error counters not compiled in driver.\n");
#endif
-
- *eof = 1;
-
- return len;
+ return 0;
}
/**
- * proc_read_mbox()
- * @page - buffer to write the data in
- * @start - where the actual data has been written in page
- * @offset - same meaning as the read system call
- * @count - same meaning as the read system call
- * @eof - set if no more data needs to be returned
- * @data - pointer to our soft state
+ * proc_show_mbox()
+ * @m - Synthetic file construction data
+ * @v - File iterator
*
* Display mailbox information for the last command issued. This information
* is good for debugging.
*/
static int
-proc_read_mbox(char *page, char **start, off_t offset, int count, int *eof,
- void *data)
+proc_show_mbox(struct seq_file *m, void *v)
{
-
- adapter_t *adapter = (adapter_t *)data;
+ adapter_t *adapter = m->private;
volatile mbox_t *mbox = adapter->mbox;
- int len = 0;
-
- len = sprintf(page, "Contents of Mail Box Structure\n");
- len += sprintf(page+len, " Fw Command = 0x%02x\n",
- mbox->m_out.cmd);
- len += sprintf(page+len, " Cmd Sequence = 0x%02x\n",
- mbox->m_out.cmdid);
- len += sprintf(page+len, " No of Sectors= %04d\n",
- mbox->m_out.numsectors);
- len += sprintf(page+len, " LBA = 0x%02x\n",
- mbox->m_out.lba);
- len += sprintf(page+len, " DTA = 0x%08x\n",
- mbox->m_out.xferaddr);
- len += sprintf(page+len, " Logical Drive= 0x%02x\n",
- mbox->m_out.logdrv);
- len += sprintf(page+len, " No of SG Elmt= 0x%02x\n",
- mbox->m_out.numsgelements);
- len += sprintf(page+len, " Busy = %01x\n",
- mbox->m_in.busy);
- len += sprintf(page+len, " Status = 0x%02x\n",
- mbox->m_in.status);
-
- *eof = 1;
-
- return len;
+
+ seq_puts(m, "Contents of Mail Box Structure\n");
+ seq_printf(m, " Fw Command = 0x%02x\n", mbox->m_out.cmd);
+ seq_printf(m, " Cmd Sequence = 0x%02x\n", mbox->m_out.cmdid);
+ seq_printf(m, " No of Sectors= %04d\n", mbox->m_out.numsectors);
+ seq_printf(m, " LBA = 0x%02x\n", mbox->m_out.lba);
+ seq_printf(m, " DTA = 0x%08x\n", mbox->m_out.xferaddr);
+ seq_printf(m, " Logical Drive= 0x%02x\n", mbox->m_out.logdrv);
+ seq_printf(m, " No of SG Elmt= 0x%02x\n", mbox->m_out.numsgelements);
+ seq_printf(m, " Busy = %01x\n", mbox->m_in.busy);
+ seq_printf(m, " Status = 0x%02x\n", mbox->m_in.status);
+ return 0;
}
/**
- * proc_rebuild_rate()
- * @page - buffer to write the data in
- * @start - where the actual data has been written in page
- * @offset - same meaning as the read system call
- * @count - same meaning as the read system call
- * @eof - set if no more data needs to be returned
- * @data - pointer to our soft state
+ * proc_show_rebuild_rate()
+ * @m - Synthetic file construction data
+ * @v - File iterator
*
* Display current rebuild rate
*/
static int
-proc_rebuild_rate(char *page, char **start, off_t offset, int count, int *eof,
- void *data)
+proc_show_rebuild_rate(struct seq_file *m, void *v)
{
- adapter_t *adapter = (adapter_t *)data;
+ adapter_t *adapter = m->private;
dma_addr_t dma_handle;
caddr_t inquiry;
struct pci_dev *pdev;
- int len = 0;
- if( make_local_pdev(adapter, &pdev) != 0 ) {
- *eof = 1;
- return len;
- }
+ if( make_local_pdev(adapter, &pdev) != 0 )
+ return 0;
- if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) {
- free_local_pdev(pdev);
- *eof = 1;
- return len;
- }
+ if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
+ goto free_pdev;
if( mega_adapinq(adapter, dma_handle) != 0 ) {
-
- len = sprintf(page, "Adapter inquiry failed.\n");
-
+ seq_puts(m, "Adapter inquiry failed.\n");
printk(KERN_WARNING "megaraid: inquiry failed.\n");
-
- mega_free_inquiry(inquiry, dma_handle, pdev);
-
- free_local_pdev(pdev);
-
- *eof = 1;
-
- return len;
+ goto free_inquiry;
}
- if( adapter->flag & BOARD_40LD ) {
- len = sprintf(page, "Rebuild Rate: [%d%%]\n",
- ((mega_inquiry3 *)inquiry)->rebuild_rate);
- }
- else {
- len = sprintf(page, "Rebuild Rate: [%d%%]\n",
+ if( adapter->flag & BOARD_40LD )
+ seq_printf(m, "Rebuild Rate: [%d%%]\n",
+ ((mega_inquiry3 *)inquiry)->rebuild_rate);
+ else
+ seq_printf(m, "Rebuild Rate: [%d%%]\n",
((mraid_ext_inquiry *)
- inquiry)->raid_inq.adapter_info.rebuild_rate);
- }
-
+ inquiry)->raid_inq.adapter_info.rebuild_rate);
+free_inquiry:
mega_free_inquiry(inquiry, dma_handle, pdev);
-
+free_pdev:
free_local_pdev(pdev);
-
- *eof = 1;
-
- return len;
+ return 0;
}
/**
- * proc_battery()
- * @page - buffer to write the data in
- * @start - where the actual data has been written in page
- * @offset - same meaning as the read system call
- * @count - same meaning as the read system call
- * @eof - set if no more data needs to be returned
- * @data - pointer to our soft state
+ * proc_show_battery()
+ * @m - Synthetic file construction data
+ * @v - File iterator
*
* Display information about the battery module on the controller.
*/
static int
-proc_battery(char *page, char **start, off_t offset, int count, int *eof,
- void *data)
+proc_show_battery(struct seq_file *m, void *v)
{
- adapter_t *adapter = (adapter_t *)data;
+ adapter_t *adapter = m->private;
dma_addr_t dma_handle;
caddr_t inquiry;
struct pci_dev *pdev;
- u8 battery_status = 0;
- char str[256];
- int len = 0;
+ u8 battery_status;
- if( make_local_pdev(adapter, &pdev) != 0 ) {
- *eof = 1;
- return len;
- }
+ if( make_local_pdev(adapter, &pdev) != 0 )
+ return 0;
- if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) {
- free_local_pdev(pdev);
- *eof = 1;
- return len;
- }
+ if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
+ goto free_pdev;
if( mega_adapinq(adapter, dma_handle) != 0 ) {
-
- len = sprintf(page, "Adapter inquiry failed.\n");
-
+ seq_printf(m, "Adapter inquiry failed.\n");
printk(KERN_WARNING "megaraid: inquiry failed.\n");
-
- mega_free_inquiry(inquiry, dma_handle, pdev);
-
- free_local_pdev(pdev);
-
- *eof = 1;
-
- return len;
+ goto free_inquiry;
}
if( adapter->flag & BOARD_40LD ) {
@@ -2461,146 +2278,80 @@ proc_battery(char *page, char **start, off_t offset, int count, int *eof,
/*
* Decode the battery status
*/
- sprintf(str, "Battery Status:[%d]", battery_status);
+ seq_printf(m, "Battery Status:[%d]", battery_status);
if(battery_status == MEGA_BATT_CHARGE_DONE)
- strcat(str, " Charge Done");
+ seq_puts(m, " Charge Done");
if(battery_status & MEGA_BATT_MODULE_MISSING)
- strcat(str, " Module Missing");
+ seq_puts(m, " Module Missing");
if(battery_status & MEGA_BATT_LOW_VOLTAGE)
- strcat(str, " Low Voltage");
+ seq_puts(m, " Low Voltage");
if(battery_status & MEGA_BATT_TEMP_HIGH)
- strcat(str, " Temperature High");
+ seq_puts(m, " Temperature High");
if(battery_status & MEGA_BATT_PACK_MISSING)
- strcat(str, " Pack Missing");
+ seq_puts(m, " Pack Missing");
if(battery_status & MEGA_BATT_CHARGE_INPROG)
- strcat(str, " Charge In-progress");
+ seq_puts(m, " Charge In-progress");
if(battery_status & MEGA_BATT_CHARGE_FAIL)
- strcat(str, " Charge Fail");
+ seq_puts(m, " Charge Fail");
if(battery_status & MEGA_BATT_CYCLES_EXCEEDED)
- strcat(str, " Cycles Exceeded");
-
- len = sprintf(page, "%s\n", str);
+ seq_puts(m, " Cycles Exceeded");
+ seq_putc(m, '\n');
+free_inquiry:
mega_free_inquiry(inquiry, dma_handle, pdev);
-
+free_pdev:
free_local_pdev(pdev);
-
- *eof = 1;
-
- return len;
-}
-
-
-/**
- * proc_pdrv_ch0()
- * @page - buffer to write the data in
- * @start - where the actual data has been written in page
- * @offset - same meaning as the read system call
- * @count - same meaning as the read system call
- * @eof - set if no more data needs to be returned
- * @data - pointer to our soft state
- *
- * Display information about the physical drives on physical channel 0.
- */
-static int
-proc_pdrv_ch0(char *page, char **start, off_t offset, int count, int *eof,
- void *data)
-{
- adapter_t *adapter = (adapter_t *)data;
-
- *eof = 1;
-
- return (proc_pdrv(adapter, page, 0));
-}
-
-
-/**
- * proc_pdrv_ch1()
- * @page - buffer to write the data in
- * @start - where the actual data has been written in page
- * @offset - same meaning as the read system call
- * @count - same meaning as the read system call
- * @eof - set if no more data needs to be returned
- * @data - pointer to our soft state
- *
- * Display information about the physical drives on physical channel 1.
- */
-static int
-proc_pdrv_ch1(char *page, char **start, off_t offset, int count, int *eof,
- void *data)
-{
- adapter_t *adapter = (adapter_t *)data;
-
- *eof = 1;
-
- return (proc_pdrv(adapter, page, 1));
+ return 0;
}
-/**
- * proc_pdrv_ch2()
- * @page - buffer to write the data in
- * @start - where the actual data has been written in page
- * @offset - same meaning as the read system call
- * @count - same meaning as the read system call
- * @eof - set if no more data needs to be returned
- * @data - pointer to our soft state
- *
- * Display information about the physical drives on physical channel 2.
+/*
+ * Display scsi inquiry
*/
-static int
-proc_pdrv_ch2(char *page, char **start, off_t offset, int count, int *eof,
- void *data)
+static void
+mega_print_inquiry(struct seq_file *m, char *scsi_inq)
{
- adapter_t *adapter = (adapter_t *)data;
-
- *eof = 1;
-
- return (proc_pdrv(adapter, page, 2));
-}
+ int i;
+ seq_puts(m, " Vendor: ");
+ seq_write(m, scsi_inq + 8, 8);
+ seq_puts(m, " Model: ");
+ seq_write(m, scsi_inq + 16, 16);
+ seq_puts(m, " Rev: ");
+ seq_write(m, scsi_inq + 32, 4);
+ seq_putc(m, '\n');
-/**
- * proc_pdrv_ch3()
- * @page - buffer to write the data in
- * @start - where the actual data has been written in page
- * @offset - same meaning as the read system call
- * @count - same meaning as the read system call
- * @eof - set if no more data needs to be returned
- * @data - pointer to our soft state
- *
- * Display information about the physical drives on physical channel 3.
- */
-static int
-proc_pdrv_ch3(char *page, char **start, off_t offset, int count, int *eof,
- void *data)
-{
- adapter_t *adapter = (adapter_t *)data;
+ i = scsi_inq[0] & 0x1f;
+ seq_printf(m, " Type: %s ", scsi_device_type(i));
- *eof = 1;
+ seq_printf(m, " ANSI SCSI revision: %02x",
+ scsi_inq[2] & 0x07);
- return (proc_pdrv(adapter, page, 3));
+ if( (scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1 )
+ seq_puts(m, " CCS\n");
+ else
+ seq_putc(m, '\n');
}
-
/**
- * proc_pdrv()
+ * proc_show_pdrv()
+ * @m - Synthetic file construction data
* @page - buffer to write the data in
* @adapter - pointer to our soft state
*
* Display information about the physical drives.
*/
static int
-proc_pdrv(adapter_t *adapter, char *page, int channel)
+proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel)
{
dma_addr_t dma_handle;
char *scsi_inq;
@@ -2611,32 +2362,24 @@ proc_pdrv(adapter_t *adapter, char *page, int channel)
u8 state;
int tgt;
int max_channels;
- int len = 0;
- char str[80];
int i;
- if( make_local_pdev(adapter, &pdev) != 0 ) {
- return len;
- }
+ if( make_local_pdev(adapter, &pdev) != 0 )
+ return 0;
- if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) {
+ if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
goto free_pdev;
- }
if( mega_adapinq(adapter, dma_handle) != 0 ) {
- len = sprintf(page, "Adapter inquiry failed.\n");
-
+ seq_puts(m, "Adapter inquiry failed.\n");
printk(KERN_WARNING "megaraid: inquiry failed.\n");
-
goto free_inquiry;
}
scsi_inq = pci_alloc_consistent(pdev, 256, &scsi_inq_dma_handle);
-
if( scsi_inq == NULL ) {
- len = sprintf(page, "memory not available for scsi inq.\n");
-
+ seq_puts(m, "memory not available for scsi inq.\n");
goto free_inquiry;
}
@@ -2659,39 +2402,31 @@ proc_pdrv(adapter_t *adapter, char *page, int channel)
i = channel*16 + tgt;
state = *(pdrv_state + i);
-
switch( state & 0x0F ) {
-
case PDRV_ONLINE:
- sprintf(str,
- "Channel:%2d Id:%2d State: Online",
- channel, tgt);
+ seq_printf(m, "Channel:%2d Id:%2d State: Online",
+ channel, tgt);
break;
case PDRV_FAILED:
- sprintf(str,
- "Channel:%2d Id:%2d State: Failed",
- channel, tgt);
+ seq_printf(m, "Channel:%2d Id:%2d State: Failed",
+ channel, tgt);
break;
case PDRV_RBLD:
- sprintf(str,
- "Channel:%2d Id:%2d State: Rebuild",
- channel, tgt);
+ seq_printf(m, "Channel:%2d Id:%2d State: Rebuild",
+ channel, tgt);
break;
case PDRV_HOTSPARE:
- sprintf(str,
- "Channel:%2d Id:%2d State: Hot spare",
- channel, tgt);
+ seq_printf(m, "Channel:%2d Id:%2d State: Hot spare",
+ channel, tgt);
break;
default:
- sprintf(str,
- "Channel:%2d Id:%2d State: Un-configured",
- channel, tgt);
+ seq_printf(m, "Channel:%2d Id:%2d State: Un-configured",
+ channel, tgt);
break;
-
}
/*
@@ -2710,11 +2445,8 @@ proc_pdrv(adapter_t *adapter, char *page, int channel)
* Check for overflow. We print less than 240
* characters for inquiry
*/
- if( (len + 240) >= PAGE_SIZE ) break;
-
- len += sprintf(page+len, "%s.\n", str);
-
- len += mega_print_inquiry(page+len, scsi_inq);
+ seq_puts(m, ".\n");
+ mega_print_inquiry(m, scsi_inq);
}
free_pci:
@@ -2723,150 +2455,68 @@ free_inquiry:
mega_free_inquiry(inquiry, dma_handle, pdev);
free_pdev:
free_local_pdev(pdev);
-
- return len;
-}
-
-
-/*
- * Display scsi inquiry
- */
-static int
-mega_print_inquiry(char *page, char *scsi_inq)
-{
- int len = 0;
- int i;
-
- len = sprintf(page, " Vendor: ");
- for( i = 8; i < 16; i++ ) {
- len += sprintf(page+len, "%c", scsi_inq[i]);
- }
-
- len += sprintf(page+len, " Model: ");
-
- for( i = 16; i < 32; i++ ) {
- len += sprintf(page+len, "%c", scsi_inq[i]);
- }
-
- len += sprintf(page+len, " Rev: ");
-
- for( i = 32; i < 36; i++ ) {
- len += sprintf(page+len, "%c", scsi_inq[i]);
- }
-
- len += sprintf(page+len, "\n");
-
- i = scsi_inq[0] & 0x1f;
-
- len += sprintf(page+len, " Type: %s ", scsi_device_type(i));
-
- len += sprintf(page+len,
- " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
-
- if( (scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1 )
- len += sprintf(page+len, " CCS\n");
- else
- len += sprintf(page+len, "\n");
-
- return len;
+ return 0;
}
-
/**
- * proc_rdrv_10()
- * @page - buffer to write the data in
- * @start - where the actual data has been written in page
- * @offset - same meaning as the read system call
- * @count - same meaning as the read system call
- * @eof - set if no more data needs to be returned
- * @data - pointer to our soft state
+ * proc_show_pdrv_ch0()
+ * @m - Synthetic file construction data
+ * @v - File iterator
*
- * Display real time information about the logical drives 0 through 9.
+ * Display information about the physical drives on physical channel 0.
*/
static int
-proc_rdrv_10(char *page, char **start, off_t offset, int count, int *eof,
- void *data)
+proc_show_pdrv_ch0(struct seq_file *m, void *v)
{
- adapter_t *adapter = (adapter_t *)data;
-
- *eof = 1;
-
- return (proc_rdrv(adapter, page, 0, 9));
+ return proc_show_pdrv(m, m->private, 0);
}
/**
- * proc_rdrv_20()
- * @page - buffer to write the data in
- * @start - where the actual data has been written in page
- * @offset - same meaning as the read system call
- * @count - same meaning as the read system call
- * @eof - set if no more data needs to be returned
- * @data - pointer to our soft state
+ * proc_show_pdrv_ch1()
+ * @m - Synthetic file construction data
+ * @v - File iterator
*
- * Display real time information about the logical drives 0 through 9.
+ * Display information about the physical drives on physical channel 1.
*/
static int
-proc_rdrv_20(char *page, char **start, off_t offset, int count, int *eof,
- void *data)
+proc_show_pdrv_ch1(struct seq_file *m, void *v)
{
- adapter_t *adapter = (adapter_t *)data;
-
- *eof = 1;
-
- return (proc_rdrv(adapter, page, 10, 19));
+ return proc_show_pdrv(m, m->private, 1);
}
/**
- * proc_rdrv_30()
- * @page - buffer to write the data in
- * @start - where the actual data has been written in page
- * @offset - same meaning as the read system call
- * @count - same meaning as the read system call
- * @eof - set if no more data needs to be returned
- * @data - pointer to our soft state
+ * proc_show_pdrv_ch2()
+ * @m - Synthetic file construction data
+ * @v - File iterator
*
- * Display real time information about the logical drives 0 through 9.
+ * Display information about the physical drives on physical channel 2.
*/
static int
-proc_rdrv_30(char *page, char **start, off_t offset, int count, int *eof,
- void *data)
+proc_show_pdrv_ch2(struct seq_file *m, void *v)
{
- adapter_t *adapter = (adapter_t *)data;
-
- *eof = 1;
-
- return (proc_rdrv(adapter, page, 20, 29));
+ return proc_show_pdrv(m, m->private, 2);
}
/**
- * proc_rdrv_40()
- * @page - buffer to write the data in
- * @start - where the actual data has been written in page
- * @offset - same meaning as the read system call
- * @count - same meaning as the read system call
- * @eof - set if no more data needs to be returned
- * @data - pointer to our soft state
+ * proc_show_pdrv_ch3()
+ * @m - Synthetic file construction data
+ * @v - File iterator
*
- * Display real time information about the logical drives 0 through 9.
+ * Display information about the physical drives on physical channel 3.
*/
static int
-proc_rdrv_40(char *page, char **start, off_t offset, int count, int *eof,
- void *data)
+proc_show_pdrv_ch3(struct seq_file *m, void *v)
{
- adapter_t *adapter = (adapter_t *)data;
-
- *eof = 1;
-
- return (proc_rdrv(adapter, page, 30, 39));
+ return proc_show_pdrv(m, m->private, 3);
}
/**
- * proc_rdrv()
- * @page - buffer to write the data in
+ * proc_show_rdrv()
+ * @m - Synthetic file construction data
* @adapter - pointer to our soft state
* @start - starting logical drive to display
* @end - ending logical drive to display
@@ -2875,7 +2525,7 @@ proc_rdrv_40(char *page, char **start, off_t offset, int count, int *eof,
* /proc/scsi/scsi interface
*/
static int
-proc_rdrv(adapter_t *adapter, char *page, int start, int end )
+proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end )
{
dma_addr_t dma_handle;
logdrv_param *lparam;
@@ -2887,29 +2537,18 @@ proc_rdrv(adapter_t *adapter, char *page, int start, int end )
u8 *rdrv_state;
int num_ldrv;
u32 array_sz;
- int len = 0;
int i;
- if( make_local_pdev(adapter, &pdev) != 0 ) {
- return len;
- }
+ if( make_local_pdev(adapter, &pdev) != 0 )
+ return 0;
- if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) {
- free_local_pdev(pdev);
- return len;
- }
+ if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
+ goto free_pdev;
if( mega_adapinq(adapter, dma_handle) != 0 ) {
-
- len = sprintf(page, "Adapter inquiry failed.\n");
-
+ seq_puts(m, "Adapter inquiry failed.\n");
printk(KERN_WARNING "megaraid: inquiry failed.\n");
-
- mega_free_inquiry(inquiry, dma_handle, pdev);
-
- free_local_pdev(pdev);
-
- return len;
+ goto free_inquiry;
}
memset(&mc, 0, sizeof(megacmd_t));
@@ -2935,13 +2574,8 @@ proc_rdrv(adapter_t *adapter, char *page, int start, int end )
&disk_array_dma_handle);
if( disk_array == NULL ) {
- len = sprintf(page, "memory not available.\n");
-
- mega_free_inquiry(inquiry, dma_handle, pdev);
-
- free_local_pdev(pdev);
-
- return len;
+ seq_puts(m, "memory not available.\n");
+ goto free_inquiry;
}
mc.xferaddr = (u32)disk_array_dma_handle;
@@ -2951,17 +2585,8 @@ proc_rdrv(adapter_t *adapter, char *page, int start, int end )
mc.opcode = OP_DCMD_READ_CONFIG;
if( mega_internal_command(adapter, &mc, NULL) ) {
-
- len = sprintf(page, "40LD read config failed.\n");
-
- mega_free_inquiry(inquiry, dma_handle, pdev);
-
- pci_free_consistent(pdev, array_sz, disk_array,
- disk_array_dma_handle);
-
- free_local_pdev(pdev);
-
- return len;
+ seq_puts(m, "40LD read config failed.\n");
+ goto free_pci;
}
}
@@ -2969,24 +2594,10 @@ proc_rdrv(adapter_t *adapter, char *page, int start, int end )
mc.cmd = NEW_READ_CONFIG_8LD;
if( mega_internal_command(adapter, &mc, NULL) ) {
-
mc.cmd = READ_CONFIG_8LD;
-
- if( mega_internal_command(adapter, &mc,
- NULL) ){
-
- len = sprintf(page,
- "8LD read config failed.\n");
-
- mega_free_inquiry(inquiry, dma_handle, pdev);
-
- pci_free_consistent(pdev, array_sz,
- disk_array,
- disk_array_dma_handle);
-
- free_local_pdev(pdev);
-
- return len;
+ if( mega_internal_command(adapter, &mc, NULL) ) {
+ seq_puts(m, "8LD read config failed.\n");
+ goto free_pci;
}
}
}
@@ -3006,29 +2617,23 @@ proc_rdrv(adapter_t *adapter, char *page, int start, int end )
* Check for overflow. We print less than 240 characters for
* information about each logical drive.
*/
- if( (len + 240) >= PAGE_SIZE ) break;
-
- len += sprintf(page+len, "Logical drive:%2d:, ", i);
+ seq_printf(m, "Logical drive:%2d:, ", i);
switch( rdrv_state[i] & 0x0F ) {
case RDRV_OFFLINE:
- len += sprintf(page+len, "state: offline");
+ seq_puts(m, "state: offline");
break;
-
case RDRV_DEGRADED:
- len += sprintf(page+len, "state: degraded");
+ seq_puts(m, "state: degraded");
break;
-
case RDRV_OPTIMAL:
- len += sprintf(page+len, "state: optimal");
+ seq_puts(m, "state: optimal");
break;
-
case RDRV_DELETED:
- len += sprintf(page+len, "state: deleted");
+ seq_puts(m, "state: deleted");
break;
-
default:
- len += sprintf(page+len, "state: unknown");
+ seq_puts(m, "state: unknown");
break;
}
@@ -3036,84 +2641,203 @@ proc_rdrv(adapter_t *adapter, char *page, int start, int end )
* Check if check consistency or initialization is going on
* for this logical drive.
*/
- if( (rdrv_state[i] & 0xF0) == 0x20 ) {
- len += sprintf(page+len,
- ", check-consistency in progress");
- }
- else if( (rdrv_state[i] & 0xF0) == 0x10 ) {
- len += sprintf(page+len,
- ", initialization in progress");
- }
+ if( (rdrv_state[i] & 0xF0) == 0x20 )
+ seq_puts(m, ", check-consistency in progress");
+ else if( (rdrv_state[i] & 0xF0) == 0x10 )
+ seq_puts(m, ", initialization in progress");
- len += sprintf(page+len, "\n");
-
- len += sprintf(page+len, "Span depth:%3d, ",
- lparam->span_depth);
-
- len += sprintf(page+len, "RAID level:%3d, ",
- lparam->level);
-
- len += sprintf(page+len, "Stripe size:%3d, ",
- lparam->stripe_sz ? lparam->stripe_sz/2: 128);
-
- len += sprintf(page+len, "Row size:%3d\n",
- lparam->row_size);
+ seq_putc(m, '\n');
+ seq_printf(m, "Span depth:%3d, ", lparam->span_depth);
+ seq_printf(m, "RAID level:%3d, ", lparam->level);
+ seq_printf(m, "Stripe size:%3d, ",
+ lparam->stripe_sz ? lparam->stripe_sz/2: 128);
+ seq_printf(m, "Row size:%3d\n", lparam->row_size);
- len += sprintf(page+len, "Read Policy: ");
-
+ seq_puts(m, "Read Policy: ");
switch(lparam->read_ahead) {
-
case NO_READ_AHEAD:
- len += sprintf(page+len, "No read ahead, ");
+ seq_puts(m, "No read ahead, ");
break;
-
case READ_AHEAD:
- len += sprintf(page+len, "Read ahead, ");
+ seq_puts(m, "Read ahead, ");
break;
-
case ADAP_READ_AHEAD:
- len += sprintf(page+len, "Adaptive, ");
+ seq_puts(m, "Adaptive, ");
break;
}
- len += sprintf(page+len, "Write Policy: ");
-
+ seq_puts(m, "Write Policy: ");
switch(lparam->write_mode) {
-
case WRMODE_WRITE_THRU:
- len += sprintf(page+len, "Write thru, ");
+ seq_puts(m, "Write thru, ");
break;
-
case WRMODE_WRITE_BACK:
- len += sprintf(page+len, "Write back, ");
+ seq_puts(m, "Write back, ");
break;
}
- len += sprintf(page+len, "Cache Policy: ");
-
+ seq_puts(m, "Cache Policy: ");
switch(lparam->direct_io) {
-
case CACHED_IO:
- len += sprintf(page+len, "Cached IO\n\n");
+ seq_puts(m, "Cached IO\n\n");
break;
-
case DIRECT_IO:
- len += sprintf(page+len, "Direct IO\n\n");
+ seq_puts(m, "Direct IO\n\n");
break;
}
}
- mega_free_inquiry(inquiry, dma_handle, pdev);
-
+free_pci:
pci_free_consistent(pdev, array_sz, disk_array,
disk_array_dma_handle);
-
+free_inquiry:
+ mega_free_inquiry(inquiry, dma_handle, pdev);
+free_pdev:
free_local_pdev(pdev);
+ return 0;
+}
+
+/**
+ * proc_show_rdrv_10()
+ * @m - Synthetic file construction data
+ * @v - File iterator
+ *
+ * Display real time information about the logical drives 0 through 9.
+ */
+static int
+proc_show_rdrv_10(struct seq_file *m, void *v)
+{
+ return proc_show_rdrv(m, m->private, 0, 9);
+}
+
+
+/**
+ * proc_show_rdrv_20()
+ * @m - Synthetic file construction data
+ * @v - File iterator
+ *
+ * Display real time information about the logical drives 10 through 19.
+ */
+static int
+proc_show_rdrv_20(struct seq_file *m, void *v)
+{
+ return proc_show_rdrv(m, m->private, 10, 19);
+}
+
+
+/**
+ * proc_show_rdrv_30()
+ * @m - Synthetic file construction data
+ * @v - File iterator
+ *
+ * Display real time information about the logical drives 20 through 29.
+ */
+static int
+proc_show_rdrv_30(struct seq_file *m, void *v)
+{
+ return proc_show_rdrv(m, m->private, 20, 29);
+}
+
+
+/**
+ * proc_show_rdrv_40()
+ * @m - Synthetic file construction data
+ * @v - File iterator
+ *
+ * Display real time information about the logical drives 30 through 39.
+ */
+static int
+proc_show_rdrv_40(struct seq_file *m, void *v)
+{
+ return proc_show_rdrv(m, m->private, 30, 39);
+}
+
+
+/*
+ * seq_file wrappers for procfile show routines.
+ */
+static int mega_proc_open(struct inode *inode, struct file *file)
+{
+ adapter_t *adapter = proc_get_parent_data(inode);
+ int (*show)(struct seq_file *, void *) = PDE_DATA(inode);
+
+ return single_open(file, show, adapter);
+}
+
+static const struct file_operations mega_proc_fops = {
+ .open = mega_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*
+ * Table of proc files we need to create.
+ */
+struct mega_proc_file {
+ const char *name;
+ unsigned short ptr_offset;
+ int (*show) (struct seq_file *m, void *v);
+};
+
+static const struct mega_proc_file mega_proc_files[] = {
+ { "config", offsetof(adapter_t, proc_read), proc_show_config },
+ { "stat", offsetof(adapter_t, proc_stat), proc_show_stat },
+ { "mailbox", offsetof(adapter_t, proc_mbox), proc_show_mbox },
+#if MEGA_HAVE_ENH_PROC
+ { "rebuild-rate", offsetof(adapter_t, proc_rr), proc_show_rebuild_rate },
+ { "battery-status", offsetof(adapter_t, proc_battery), proc_show_battery },
+ { "diskdrives-ch0", offsetof(adapter_t, proc_pdrvstat[0]), proc_show_pdrv_ch0 },
+ { "diskdrives-ch1", offsetof(adapter_t, proc_pdrvstat[1]), proc_show_pdrv_ch1 },
+ { "diskdrives-ch2", offsetof(adapter_t, proc_pdrvstat[2]), proc_show_pdrv_ch2 },
+ { "diskdrives-ch3", offsetof(adapter_t, proc_pdrvstat[3]), proc_show_pdrv_ch3 },
+ { "raiddrives-0-9", offsetof(adapter_t, proc_rdrvstat[0]), proc_show_rdrv_10 },
+ { "raiddrives-10-19", offsetof(adapter_t, proc_rdrvstat[1]), proc_show_rdrv_20 },
+ { "raiddrives-20-29", offsetof(adapter_t, proc_rdrvstat[2]), proc_show_rdrv_30 },
+ { "raiddrives-30-39", offsetof(adapter_t, proc_rdrvstat[3]), proc_show_rdrv_40 },
+#endif
+ { NULL }
+};
- return len;
+/**
+ * mega_create_proc_entry()
+ * @index - index in soft state array
+ * @parent - parent node for this /proc entry
+ *
+ * Creates /proc entries for our controllers.
+ */
+static void
+mega_create_proc_entry(int index, struct proc_dir_entry *parent)
+{
+ const struct mega_proc_file *f;
+ adapter_t *adapter = hba_soft_state[index];
+ struct proc_dir_entry *dir, *de, **ppde;
+ u8 string[16];
+
+ sprintf(string, "hba%d", adapter->host->host_no);
+
+ dir = adapter->controller_proc_dir_entry =
+ proc_mkdir_data(string, 0, parent, adapter);
+ if(!dir) {
+ printk(KERN_WARNING "\nmegaraid: proc_mkdir failed\n");
+ return;
+ }
+
+ for (f = mega_proc_files; f->name; f++) {
+ de = proc_create_data(f->name, S_IRUSR, dir, &mega_proc_fops,
+ f->show);
+ if (!de) {
+ printk(KERN_WARNING "\nmegaraid: proc_create failed\n");
+ return;
+ }
+
+ ppde = (void *)adapter + f->ptr_offset;
+ *ppde = de;
+ }
}
+
#else
static inline void mega_create_proc_entry(int index, struct proc_dir_entry *parent)
{
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index 4fb2adf..4d0ce4e 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -987,24 +987,7 @@ static int mega_init_scb (adapter_t *);
static int mega_is_bios_enabled (adapter_t *);
#ifdef CONFIG_PROC_FS
-static int mega_print_inquiry(char *, char *);
static void mega_create_proc_entry(int, struct proc_dir_entry *);
-static int proc_read_config(char *, char **, off_t, int, int *, void *);
-static int proc_read_stat(char *, char **, off_t, int, int *, void *);
-static int proc_read_mbox(char *, char **, off_t, int, int *, void *);
-static int proc_rebuild_rate(char *, char **, off_t, int, int *, void *);
-static int proc_battery(char *, char **, off_t, int, int *, void *);
-static int proc_pdrv_ch0(char *, char **, off_t, int, int *, void *);
-static int proc_pdrv_ch1(char *, char **, off_t, int, int *, void *);
-static int proc_pdrv_ch2(char *, char **, off_t, int, int *, void *);
-static int proc_pdrv_ch3(char *, char **, off_t, int, int *, void *);
-static int proc_pdrv(adapter_t *, char *, int);
-static int proc_rdrv_10(char *, char **, off_t, int, int *, void *);
-static int proc_rdrv_20(char *, char **, off_t, int, int *, void *);
-static int proc_rdrv_30(char *, char **, off_t, int, int *, void *);
-static int proc_rdrv_40(char *, char **, off_t, int, int *, void *);
-static int proc_rdrv(adapter_t *, char *, int, int);
-
static int mega_adapinq(adapter_t *, dma_addr_t);
static int mega_internal_dev_inquiry(adapter_t *, u8, u8, dma_addr_t);
#endif
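The megaraid changes above all funnel through one seq_file pattern: proc_create_data() stores per-file data on the entry, single_open() binds a show routine to it, and seq_file does the buffering that the old page/offset arithmetic did by hand (megaraid's own twist is to store the show routine as the entry data and the adapter on the parent directory). A minimal, self-contained sketch of the generic pattern, with hypothetical names that are not part of the driver, might look like:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Hypothetical example state; stands in for the adapter soft state. */
static int demo_value = 42;

static int demo_show(struct seq_file *m, void *v)
{
	/* m->private is whatever was handed to single_open(). */
	seq_printf(m, "value: %d\n", *(int *)m->private);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	/* PDE_DATA() recovers the pointer given to proc_create_data(). */
	return single_open(file, demo_show, PDE_DATA(inode));
}

static const struct file_operations demo_fops = {
	.open		= demo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init demo_init(void)
{
	if (!proc_create_data("demo", S_IRUSR, NULL, &demo_fops, &demo_value))
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");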
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 08685c4..eec052c 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -505,19 +505,6 @@ _ctl_fasync(int fd, struct file *filep, int mode)
}
/**
- * _ctl_release -
- * @inode -
- * @filep -
- *
- * Called when application releases the fasyn callback handler.
- */
-static int
-_ctl_release(struct inode *inode, struct file *filep)
-{
- return fasync_helper(-1, filep, 0, &async_queue);
-}
-
-/**
* _ctl_poll -
* @file -
* @wait -
@@ -3027,7 +3014,6 @@ struct device_attribute *mpt2sas_dev_attrs[] = {
static const struct file_operations ctl_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = _ctl_ioctl,
- .release = _ctl_release,
.poll = _ctl_poll,
.fasync = _ctl_fasync,
#ifdef CONFIG_COMPAT
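The deleted _ctl_release() only undid the fasync registration, and the VFS already does that: on the final fput() of a file that still has FASYNC set, __fput() invokes the driver's ->fasync() hook with on == 0, which performs the same fasync_helper(-1, ...) teardown (behaviour paraphrased from fs/file_table.c of this era). A minimal, hypothetical char device relying on this could look like:

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static struct fasync_struct *demo_async_queue;

static int demo_fasync(int fd, struct file *filp, int on)
{
	/* Registers (on != 0) or removes (on == 0) this file on the async
	 * queue; the VFS calls this with fd == -1, on == 0 on last close,
	 * so no explicit ->release is needed just for this cleanup. */
	return fasync_helper(fd, filp, on, &demo_async_queue);
}

static const struct file_operations demo_fops = {
	.owner	= THIS_MODULE,
	.fasync	= demo_fasync,
	/* intentionally no .release */
};

static struct miscdevice demo_dev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "demo_fasync",
	.fops	= &demo_fops,
};

static int __init demo_init(void)
{
	return misc_register(&demo_dev);
}

static void __exit demo_exit(void)
{
	misc_deregister(&demo_dev);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");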
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 8c2ffbe..193e7ae 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -1939,7 +1939,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
ioc->transport_cmds.status = MPT2_CMD_PENDING;
/* Check if the request is split across multiple segments */
- if (req->bio->bi_vcnt > 1) {
+ if (bio_segments(req->bio) > 1) {
u32 offset = 0;
/* Allocate memory and copy the request */
@@ -1971,7 +1971,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
/* Check if the response needs to be populated across
* multiple segments */
- if (rsp->bio->bi_vcnt > 1) {
+ if (bio_segments(rsp->bio) > 1) {
pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
&pci_dma_in);
if (!pci_addr_in) {
@@ -2038,7 +2038,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
- if (req->bio->bi_vcnt > 1) {
+ if (bio_segments(req->bio) > 1) {
ioc->base_add_sg_single(psge, sgl_flags |
(blk_rq_bytes(req) - 4), pci_dma_out);
} else {
@@ -2054,7 +2054,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
MPI2_SGE_FLAGS_END_OF_LIST);
sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
- if (rsp->bio->bi_vcnt > 1) {
+ if (bio_segments(rsp->bio) > 1) {
ioc->base_add_sg_single(psge, sgl_flags |
(blk_rq_bytes(rsp) + 4), pci_dma_in);
} else {
@@ -2099,7 +2099,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
le16_to_cpu(mpi_reply->ResponseDataLength);
/* check if the resp needs to be copied from the allocated
* pci mem */
- if (rsp->bio->bi_vcnt > 1) {
+ if (bio_segments(rsp->bio) > 1) {
u32 offset = 0;
u32 bytes_to_copy =
le16_to_cpu(mpi_reply->ResponseDataLength);
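bio->bi_vcnt is the number of bio_vecs stored in the bio, not the number of segments still to be transferred; bio_segments() accounts for the current index, which is what the SMP passthrough code actually wants when deciding whether the payload is contiguous. In kernels of this vintage the helper reduces to roughly the following (an approximation for illustration, not a verbatim quote of <linux/bio.h>):

/*
 * Approximate shape of the helper used above: segments still to be
 * processed, i.e. the vector count minus the already-consumed index.
 */
#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)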
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 054d523..0b402b6 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -503,19 +503,6 @@ _ctl_fasync(int fd, struct file *filep, int mode)
}
/**
- * _ctl_release -
- * @inode -
- * @filep -
- *
- * Called when application releases the fasyn callback handler.
- */
-static int
-_ctl_release(struct inode *inode, struct file *filep)
-{
- return fasync_helper(-1, filep, 0, &async_queue);
-}
-
-/**
* _ctl_poll -
* @file -
* @wait -
@@ -3233,7 +3220,6 @@ struct device_attribute *mpt3sas_dev_attrs[] = {
static const struct file_operations ctl_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = _ctl_ioctl,
- .release = _ctl_release,
.poll = _ctl_poll,
.fasync = _ctl_fasync,
#ifdef CONFIG_COMPAT
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c
index c29d0db..e7f6661 100644
--- a/drivers/scsi/mvme147.c
+++ b/drivers/scsi/mvme147.c
@@ -76,7 +76,8 @@ int mvme147_detect(struct scsi_host_template *tpnt)
called++;
tpnt->proc_name = "MVME147";
- tpnt->proc_info = &wd33c93_proc_info;
+ tpnt->show_info = wd33c93_show_info;
+ tpnt->write_info = wd33c93_write_info;
instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
if (!instance)
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 1cc0c1c..1e3879d 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -192,7 +192,7 @@ static int __init init_nsp32 (void);
static void __exit exit_nsp32 (void);
/* struct struct scsi_host_template */
-static int nsp32_proc_info (struct Scsi_Host *, char *, char **, off_t, int, int);
+static int nsp32_show_info (struct seq_file *, struct Scsi_Host *);
static int nsp32_detect (struct pci_dev *pdev);
static int nsp32_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
@@ -268,7 +268,7 @@ static void nsp32_dmessage(const char *, int, int, char *, ...);
static struct scsi_host_template nsp32_template = {
.proc_name = "nsp32",
.name = "Workbit NinjaSCSI-32Bi/UDE",
- .proc_info = nsp32_proc_info,
+ .show_info = nsp32_show_info,
.info = nsp32_info,
.queuecommand = nsp32_queuecommand,
.can_queue = 1,
@@ -1442,19 +1442,10 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
}
#undef SPRINTF
-#define SPRINTF(args...) \
- do { \
- if(length > (pos - buffer)) { \
- pos += snprintf(pos, length - (pos - buffer) + 1, ## args); \
- nsp32_dbg(NSP32_DEBUG_PROC, "buffer=0x%p pos=0x%p length=%d %d\n", buffer, pos, length, length - (pos - buffer));\
- } \
- } while(0)
-
-static int nsp32_proc_info(struct Scsi_Host *host, char *buffer, char **start,
- off_t offset, int length, int inout)
+#define SPRINTF(args...) seq_printf(m, ##args)
+
+static int nsp32_show_info(struct seq_file *m, struct Scsi_Host *host)
{
- char *pos = buffer;
- int thislength;
unsigned long flags;
nsp32_hw_data *data;
int hostno;
@@ -1463,11 +1454,6 @@ static int nsp32_proc_info(struct Scsi_Host *host, char *buffer, char **start,
int id, speed;
long model;
- /* Write is not supported, just return. */
- if (inout == TRUE) {
- return -EINVAL;
- }
-
hostno = host->host_no;
data = (nsp32_hw_data *)host->hostdata;
base = host->io_port;
@@ -1527,20 +1513,7 @@ static int nsp32_proc_info(struct Scsi_Host *host, char *buffer, char **start,
}
SPRINTF("\n");
}
-
-
- thislength = pos - (buffer + offset);
-
- if(thislength < 0) {
- *start = NULL;
- return 0;
- }
-
-
- thislength = min(thislength, length);
- *start = buffer + offset;
-
- return thislength;
+ return 0;
}
#undef SPRINTF
diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c
index 2f72c98..62f1a60 100644
--- a/drivers/scsi/pas16.c
+++ b/drivers/scsi/pas16.c
@@ -388,7 +388,8 @@ int __init pas16_detect(struct scsi_host_template * tpnt)
int count;
tpnt->proc_name = "pas16";
- tpnt->proc_info = &pas16_proc_info;
+ tpnt->show_info = pas16_show_info;
+ tpnt->write_info = pas16_write_info;
if (pas16_addr != 0) {
overrides[0].io_port = pas16_addr;
diff --git a/drivers/scsi/pas16.h b/drivers/scsi/pas16.h
index a04281c..3721342 100644
--- a/drivers/scsi/pas16.h
+++ b/drivers/scsi/pas16.h
@@ -163,7 +163,8 @@ static int pas16_bus_reset(Scsi_Cmnd *);
#define NCR5380_queue_command pas16_queue_command
#define NCR5380_abort pas16_abort
#define NCR5380_bus_reset pas16_bus_reset
-#define NCR5380_proc_info pas16_proc_info
+#define NCR5380_show_info pas16_show_info
+#define NCR5380_write_info pas16_write_info
/* 15 14 12 10 7 5 3
1101 0100 1010 1000 */
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index b61a753..987fbb1 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -76,7 +76,7 @@ MODULE_PARM_DESC(free_ports, "Release IO ports after configuration? (default: 0
static struct scsi_host_template nsp_driver_template = {
.proc_name = "nsp_cs",
- .proc_info = nsp_proc_info,
+ .show_info = nsp_show_info,
.name = "WorkBit NinjaSCSI-3/32Bi(16bit)",
.info = nsp_info,
.queuecommand = nsp_queuecommand,
@@ -1365,33 +1365,19 @@ static const char *nsp_info(struct Scsi_Host *shpnt)
}
#undef SPRINTF
-#define SPRINTF(args...) \
- do { \
- if(length > (pos - buffer)) { \
- pos += snprintf(pos, length - (pos - buffer) + 1, ## args); \
- nsp_dbg(NSP_DEBUG_PROC, "buffer=0x%p pos=0x%p length=%d %d\n", buffer, pos, length, length - (pos - buffer));\
- } \
- } while(0)
-
-static int nsp_proc_info(struct Scsi_Host *host, char *buffer, char **start,
- off_t offset, int length, int inout)
+#define SPRINTF(args...) seq_printf(m, ##args)
+
+static int nsp_show_info(struct seq_file *m, struct Scsi_Host *host)
{
int id;
- char *pos = buffer;
- int thislength;
int speed;
unsigned long flags;
nsp_hw_data *data;
int hostno;
- if (inout) {
- return -EINVAL;
- }
-
hostno = host->host_no;
data = (nsp_hw_data *)host->hostdata;
-
SPRINTF("NinjaSCSI status\n\n");
SPRINTF("Driver version: $Revision: 1.23 $\n");
SPRINTF("SCSI host No.: %d\n", hostno);
@@ -1458,19 +1444,7 @@ static int nsp_proc_info(struct Scsi_Host *host, char *buffer, char **start,
}
SPRINTF("\n");
}
-
- thislength = pos - (buffer + offset);
-
- if(thislength < 0) {
- *start = NULL;
- return 0;
- }
-
-
- thislength = min(thislength, length);
- *start = buffer + offset;
-
- return thislength;
+ return 0;
}
#undef SPRINTF
diff --git a/drivers/scsi/pcmcia/nsp_cs.h b/drivers/scsi/pcmcia/nsp_cs.h
index 7fc9a9d..afd64f0 100644
--- a/drivers/scsi/pcmcia/nsp_cs.h
+++ b/drivers/scsi/pcmcia/nsp_cs.h
@@ -292,13 +292,8 @@ static int nsp_cs_config (struct pcmcia_device *link);
/* Linux SCSI subsystem specific functions */
static struct Scsi_Host *nsp_detect (struct scsi_host_template *sht);
static const char *nsp_info (struct Scsi_Host *shpnt);
-static int nsp_proc_info (
- struct Scsi_Host *host,
- char *buffer,
- char **start,
- off_t offset,
- int length,
- int inout);
+static int nsp_show_info (struct seq_file *m,
+ struct Scsi_Host *host);
static int nsp_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *SCpnt);
/* Error handler */
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index b46f5e9..8e1b737 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3599,19 +3599,6 @@ static int pmcraid_chr_open(struct inode *inode, struct file *filep)
}
/**
- * pmcraid_release - char node "release" entry point
- */
-static int pmcraid_chr_release(struct inode *inode, struct file *filep)
-{
- struct pmcraid_instance *pinstance = filep->private_data;
-
- filep->private_data = NULL;
- fasync_helper(-1, filep, 0, &pinstance->aen_queue);
-
- return 0;
-}
-
-/**
* pmcraid_fasync - Async notifier registration from applications
*
* This function adds the calling process to a driver global queue. When an
@@ -4167,7 +4154,6 @@ static long pmcraid_chr_ioctl(
static const struct file_operations pmcraid_fops = {
.owner = THIS_MODULE,
.open = pmcraid_chr_open,
- .release = pmcraid_chr_release,
.fasync = pmcraid_chr_fasync,
.unlocked_ioctl = pmcraid_chr_ioctl,
#ifdef CONFIG_COMPAT
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index d164c96..1db8b26 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -118,8 +118,9 @@ static inline void ppa_pb_release(ppa_struct *dev)
* Also gives a method to use a script to obtain optimum timings (TODO)
*/
-static inline int ppa_proc_write(ppa_struct *dev, char *buffer, int length)
+static inline int ppa_write_info(struct Scsi_Host *host, char *buffer, int length)
{
+ ppa_struct *dev = ppa_dev(host);
unsigned long x;
if ((length > 5) && (strncmp(buffer, "mode=", 5) == 0)) {
@@ -137,35 +138,17 @@ static inline int ppa_proc_write(ppa_struct *dev, char *buffer, int length)
return -EINVAL;
}
-static int ppa_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int inout)
+static int ppa_show_info(struct seq_file *m, struct Scsi_Host *host)
{
- int len = 0;
ppa_struct *dev = ppa_dev(host);
- if (inout)
- return ppa_proc_write(dev, buffer, length);
-
- len += sprintf(buffer + len, "Version : %s\n", PPA_VERSION);
- len +=
- sprintf(buffer + len, "Parport : %s\n",
- dev->dev->port->name);
- len +=
- sprintf(buffer + len, "Mode : %s\n",
- PPA_MODE_STRING[dev->mode]);
+ seq_printf(m, "Version : %s\n", PPA_VERSION);
+ seq_printf(m, "Parport : %s\n", dev->dev->port->name);
+ seq_printf(m, "Mode : %s\n", PPA_MODE_STRING[dev->mode]);
#if PPA_DEBUG > 0
- len +=
- sprintf(buffer + len, "recon_tmo : %lu\n", dev->recon_tmo);
+ seq_printf(m, "recon_tmo : %lu\n", dev->recon_tmo);
#endif
-
- /* Request for beyond end of buffer */
- if (offset > length)
- return 0;
-
- *start = buffer + offset;
- len -= offset;
- if (len > length)
- len = length;
- return len;
+ return 0;
}
static int device_check(ppa_struct *dev);
@@ -981,7 +964,8 @@ static int ppa_adjust_queue(struct scsi_device *device)
static struct scsi_host_template ppa_template = {
.module = THIS_MODULE,
.proc_name = "ppa",
- .proc_info = ppa_proc_info,
+ .show_info = ppa_show_info,
+ .write_info = ppa_write_info,
.name = "Iomega VPI0 (ppa) interface",
.queuecommand = ppa_queuecommand,
.eh_abort_handler = ppa_abort,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 7b621c2..ad72c1d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -5539,7 +5539,7 @@ static struct pci_driver qla2xxx_pci_driver = {
.err_handler = &qla2xxx_err_handler,
};
-static struct file_operations apidev_fops = {
+static const struct file_operations apidev_fops = {
.owner = THIS_MODULE,
.llseek = noop_llseek,
};
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 154d987..0a537a0 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -2828,31 +2828,27 @@ static const char * scsi_debug_info(struct Scsi_Host * shp)
/* scsi_debug_proc_info
* Used if the driver currently has no own support for /proc/scsi
*/
-static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
- int length, int inout)
+static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
{
- int len, pos, begin;
- int orig_length;
+ char arr[16];
+ int opts;
+ int minLen = length > 15 ? 15 : length;
- orig_length = length;
-
- if (inout == 1) {
- char arr[16];
- int minLen = length > 15 ? 15 : length;
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+ return -EACCES;
+ memcpy(arr, buffer, minLen);
+ arr[minLen] = '\0';
+ if (1 != sscanf(arr, "%d", &opts))
+ return -EINVAL;
+ scsi_debug_opts = opts;
+ if (scsi_debug_every_nth != 0)
+ scsi_debug_cmnd_count = 0;
+ return length;
+}
- if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
- return -EACCES;
- memcpy(arr, buffer, minLen);
- arr[minLen] = '\0';
- if (1 != sscanf(arr, "%d", &pos))
- return -EINVAL;
- scsi_debug_opts = pos;
- if (scsi_debug_every_nth != 0)
- scsi_debug_cmnd_count = 0;
- return length;
- }
- begin = 0;
- pos = len = sprintf(buffer, "scsi_debug adapter driver, version "
+static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
+{
+ seq_printf(m, "scsi_debug adapter driver, version "
"%s [%s]\n"
"num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
"every_nth=%d(curr:%d)\n"
@@ -2867,15 +2863,7 @@ static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **sta
scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
num_host_resets, dix_reads, dix_writes, dif_errors);
- if (pos < offset) {
- len = 0;
- begin = pos;
- }
- *start = buffer + (offset - begin); /* Start of wanted data */
- len -= (offset - begin);
- if (len > length)
- len = length;
- return len;
+ return 0;
}
static ssize_t sdebug_delay_show(struct device_driver * ddp, char * buf)
@@ -3960,7 +3948,8 @@ write:
static DEF_SCSI_QCMD(scsi_debug_queuecommand)
static struct scsi_host_template sdebug_driver_template = {
- .proc_info = scsi_debug_proc_info,
+ .show_info = scsi_debug_show_info,
+ .write_info = scsi_debug_write_info,
.proc_name = sdebug_proc_name,
.name = "SCSI DEBUG",
.info = scsi_debug_info,
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index ad747dc..db66357 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -45,58 +45,50 @@ static struct proc_dir_entry *proc_scsi;
/* Protect sht->present and sht->proc_dir */
static DEFINE_MUTEX(global_host_template_mutex);
-/**
- * proc_scsi_read - handle read from /proc by calling host's proc_info() command
- * @buffer: passed to proc_info
- * @start: passed to proc_info
- * @offset: passed to proc_info
- * @length: passed to proc_info
- * @eof: returns whether length read was less than requested
- * @data: pointer to a &struct Scsi_Host
- */
-
-static int proc_scsi_read(char *buffer, char **start, off_t offset,
- int length, int *eof, void *data)
-{
- struct Scsi_Host *shost = data;
- int n;
-
- n = shost->hostt->proc_info(shost, buffer, start, offset, length, 0);
- *eof = (n < length);
-
- return n;
-}
-
-/**
- * proc_scsi_write_proc - Handle write to /proc by calling host's proc_info()
- * @file: not used
- * @buf: source of data to write.
- * @count: number of bytes (at most PROC_BLOCK_SIZE) to write.
- * @data: pointer to &struct Scsi_Host
- */
-static int proc_scsi_write_proc(struct file *file, const char __user *buf,
- unsigned long count, void *data)
+static ssize_t proc_scsi_host_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
{
- struct Scsi_Host *shost = data;
+ struct Scsi_Host *shost = PDE_DATA(file_inode(file));
ssize_t ret = -ENOMEM;
char *page;
- char *start;
if (count > PROC_BLOCK_SIZE)
return -EOVERFLOW;
+ if (!shost->hostt->write_info)
+ return -EINVAL;
+
page = (char *)__get_free_page(GFP_KERNEL);
if (page) {
ret = -EFAULT;
if (copy_from_user(page, buf, count))
goto out;
- ret = shost->hostt->proc_info(shost, page, &start, 0, count, 1);
+ ret = shost->hostt->write_info(shost, page, count);
}
out:
free_page((unsigned long)page);
return ret;
}
+static int proc_scsi_show(struct seq_file *m, void *v)
+{
+ struct Scsi_Host *shost = m->private;
+ return shost->hostt->show_info(m, shost);
+}
+
+static int proc_scsi_host_open(struct inode *inode, struct file *file)
+{
+ return single_open_size(file, proc_scsi_show, PDE_DATA(inode),
+ 4 * PAGE_SIZE);
+}
+
+static const struct file_operations proc_scsi_fops = {
+ .open = proc_scsi_host_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = proc_scsi_host_write
+};
+
/**
* scsi_proc_hostdir_add - Create directory in /proc for a scsi host
* @sht: owner of this directory
@@ -106,7 +98,7 @@ out:
void scsi_proc_hostdir_add(struct scsi_host_template *sht)
{
- if (!sht->proc_info)
+ if (!sht->show_info)
return;
mutex_lock(&global_host_template_mutex);
@@ -125,7 +117,7 @@ void scsi_proc_hostdir_add(struct scsi_host_template *sht)
*/
void scsi_proc_hostdir_rm(struct scsi_host_template *sht)
{
- if (!sht->proc_info)
+ if (!sht->show_info)
return;
mutex_lock(&global_host_template_mutex);
@@ -151,16 +143,12 @@ void scsi_proc_host_add(struct Scsi_Host *shost)
return;
sprintf(name,"%d", shost->host_no);
- p = create_proc_read_entry(name, S_IFREG | S_IRUGO | S_IWUSR,
- sht->proc_dir, proc_scsi_read, shost);
- if (!p) {
+ p = proc_create_data(name, S_IRUGO | S_IWUSR,
+ sht->proc_dir, &proc_scsi_fops, shost);
+ if (!p)
printk(KERN_ERR "%s: Failed to register host %d in"
"%s\n", __func__, shost->host_no,
sht->proc_name);
- return;
- }
-
- p->write_proc = proc_scsi_write_proc;
}
/**
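On the driver side, the single proc_info() hook is split into the two methods that scsi_proc.c consumes above: show_info() renders /proc/scsi/<proc_name>/<host_no> through a seq_file, and write_info() handles writes to it. A minimal sketch of a template wiring them up (hypothetical driver name; the other mandatory host template methods are omitted):

#include <linux/module.h>
#include <linux/seq_file.h>
#include <scsi/scsi_host.h>

static int demo_show_info(struct seq_file *m, struct Scsi_Host *shost)
{
	/* seq_file handles all offset/length bookkeeping for us. */
	seq_printf(m, "demo adapter, host %u\n", shost->host_no);
	return 0;
}

static int demo_write_info(struct Scsi_Host *shost, char *buffer, int length)
{
	/* 'buffer' is a kernel copy of the user write, 'length' bytes;
	 * return the number of bytes consumed or a negative errno. */
	return length;
}

static struct scsi_host_template demo_template = {
	.module		= THIS_MODULE,
	.name		= "demo",
	.proc_name	= "demo",
	.show_info	= demo_show_info,
	.write_info	= demo_write_info,
	/* .queuecommand and the rest omitted for brevity */
};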
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 82910cc..196240d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1203,7 +1203,7 @@ error_autopm:
*
* Locking: called with bdev->bd_mutex held.
**/
-static int sd_release(struct gendisk *disk, fmode_t mode)
+static void sd_release(struct gendisk *disk, fmode_t mode)
{
struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdev = sdkp->device;
@@ -1222,7 +1222,6 @@ static int sd_release(struct gendisk *disk, fmode_t mode)
scsi_autopm_put_device(sdev);
scsi_disk_put(sdkp);
- return 0;
}
static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 9f0c465..df5e961 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -35,6 +35,7 @@ static int sg_version_num = 30534; /* 2 digits for each component */
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
+#include <linux/aio.h>
#include <linux/errno.h>
#include <linux/mtio.h>
#include <linux/ioctl.h>
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index f2884ee..119d67f 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -541,14 +541,13 @@ static int sr_block_open(struct block_device *bdev, fmode_t mode)
return ret;
}
-static int sr_block_release(struct gendisk *disk, fmode_t mode)
+static void sr_block_release(struct gendisk *disk, fmode_t mode)
{
struct scsi_cd *cd = scsi_cd(disk);
mutex_lock(&sr_mutex);
cdrom_release(&cd->cdi, mode);
scsi_cd_put(cd);
mutex_unlock(&sr_mutex);
- return 0;
}
static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
index 7e12a2e..636bbe0 100644
--- a/drivers/scsi/sun3_NCR5380.c
+++ b/drivers/scsi/sun3_NCR5380.c
@@ -661,121 +661,94 @@ static void __init NCR5380_print_options (struct Scsi_Host *instance)
* Inputs : instance, pointer to this instance.
*/
-static void NCR5380_print_status (struct Scsi_Host *instance)
+static void lprint_Scsi_Cmnd(Scsi_Cmnd *cmd)
{
- char *pr_bfr;
- char *start;
- int len;
-
- NCR_PRINT(NDEBUG_ANY);
- NCR_PRINT_PHASE(NDEBUG_ANY);
-
- pr_bfr = (char *) __get_free_page(GFP_ATOMIC);
- if (!pr_bfr) {
- printk("NCR5380_print_status: no memory for print buffer\n");
- return;
- }
- len = NCR5380_proc_info(instance, pr_bfr, &start, 0, PAGE_SIZE, 0);
- pr_bfr[len] = 0;
- printk("\n%s\n", pr_bfr);
- free_page((unsigned long) pr_bfr);
+ int i, s;
+ unsigned char *command;
+ printk("scsi%d: destination target %d, lun %d\n",
+ H_NO(cmd), cmd->device->id, cmd->device->lun);
+ printk(KERN_CONT " command = ");
+ command = cmd->cmnd;
+ printk(KERN_CONT "%2d (0x%02x)", command[0], command[0]);
+ for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
+ printk(KERN_CONT " %02x", command[i]);
+ printk("\n");
}
-
-/******************************************/
-/*
- * /proc/scsi/[dtc pas16 t128 generic]/[0-ASC_NUM_BOARD_SUPPORTED]
- *
- * *buffer: I/O buffer
- * **start: if inout == FALSE pointer into buffer where user read should start
- * offset: current offset
- * length: length of buffer
- * hostno: Scsi_Host host_no
- * inout: TRUE - user is writing; FALSE - user is reading
- *
- * Return the number of bytes read from or written
-*/
-
-#undef SPRINTF
-#define SPRINTF(fmt,args...) \
- do { if (pos + strlen(fmt) + 20 /* slop */ < buffer + length) \
- pos += sprintf(pos, fmt , ## args); } while(0)
-static
-char *lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, char *pos, char *buffer,
- int length);
-
-static int NCR5380_proc_info(struct Scsi_Host *instance, char *buffer,
- char **start, off_t offset, int length, int inout)
+static void NCR5380_print_status(struct Scsi_Host *instance)
{
- char *pos = buffer;
- struct NCR5380_hostdata *hostdata;
- struct scsi_cmnd *ptr;
- unsigned long flags;
- off_t begin = 0;
-#define check_offset() \
- do { \
- if (pos - buffer < offset - begin) { \
- begin += pos - buffer; \
- pos = buffer; \
- } \
- } while (0)
-
- hostdata = (struct NCR5380_hostdata *)instance->hostdata;
-
- if (inout) { /* Has data been written to the file ? */
- return(-ENOSYS); /* Currently this is a no-op */
- }
- SPRINTF("NCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE);
- check_offset();
- local_irq_save(flags);
- SPRINTF("NCR5380: coroutine is%s running.\n", main_running ? "" : "n't");
- check_offset();
- if (!hostdata->connected)
- SPRINTF("scsi%d: no currently connected command\n", HOSTNO);
- else
- pos = lprint_Scsi_Cmnd ((struct scsi_cmnd *) hostdata->connected,
- pos, buffer, length);
- SPRINTF("scsi%d: issue_queue\n", HOSTNO);
- check_offset();
- for (ptr = (struct scsi_cmnd *) hostdata->issue_queue; ptr; ptr = NEXT(ptr))
- {
- pos = lprint_Scsi_Cmnd (ptr, pos, buffer, length);
- check_offset();
- }
-
- SPRINTF("scsi%d: disconnected_queue\n", HOSTNO);
- check_offset();
- for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr;
- ptr = NEXT(ptr)) {
- pos = lprint_Scsi_Cmnd (ptr, pos, buffer, length);
- check_offset();
- }
+ struct NCR5380_hostdata *hostdata;
+ Scsi_Cmnd *ptr;
+ unsigned long flags;
+
+ NCR_PRINT(NDEBUG_ANY);
+ NCR_PRINT_PHASE(NDEBUG_ANY);
+
+ hostdata = (struct NCR5380_hostdata *)instance->hostdata;
+
+ printk("\nNCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE);
+ local_irq_save(flags);
+ printk("NCR5380: coroutine is%s running.\n",
+ main_running ? "" : "n't");
+ if (!hostdata->connected)
+ printk("scsi%d: no currently connected command\n", HOSTNO);
+ else
+ lprint_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected);
+ printk("scsi%d: issue_queue\n", HOSTNO);
+ for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr))
+ lprint_Scsi_Cmnd(ptr);
+
+ printk("scsi%d: disconnected_queue\n", HOSTNO);
+ for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr;
+ ptr = NEXT(ptr))
+ lprint_Scsi_Cmnd(ptr);
- local_irq_restore(flags);
- *start = buffer + (offset - begin);
- if (pos - buffer < offset - begin)
- return 0;
- else if (pos - buffer - (offset - begin) < length)
- return pos - buffer - (offset - begin);
- return length;
+ local_irq_restore(flags);
+ printk("\n");
}
-static char *lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, char *pos, char *buffer,
- int length)
+static void show_Scsi_Cmnd(Scsi_Cmnd *cmd, struct seq_file *m)
{
- int i, s;
- unsigned char *command;
- SPRINTF("scsi%d: destination target %d, lun %d\n",
- H_NO(cmd), cmd->device->id, cmd->device->lun);
- SPRINTF(" command = ");
- command = cmd->cmnd;
- SPRINTF("%2d (0x%02x)", command[0], command[0]);
- for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
- SPRINTF(" %02x", command[i]);
- SPRINTF("\n");
- return pos;
+ int i, s;
+ unsigned char *command;
+ seq_printf(m, "scsi%d: destination target %d, lun %d\n",
+ H_NO(cmd), cmd->device->id, cmd->device->lun);
+ seq_printf(m, " command = ");
+ command = cmd->cmnd;
+ seq_printf(m, "%2d (0x%02x)", command[0], command[0]);
+ for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
+ seq_printf(m, " %02x", command[i]);
+ seq_printf(m, "\n");
}
+static int NCR5380_show_info(struct seq_file *m, struct Scsi_Host *instance)
+{
+ struct NCR5380_hostdata *hostdata;
+ Scsi_Cmnd *ptr;
+ unsigned long flags;
+
+ hostdata = (struct NCR5380_hostdata *)instance->hostdata;
+
+ seq_printf(m, "NCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE);
+ local_irq_save(flags);
+ seq_printf(m, "NCR5380: coroutine is%s running.\n",
+ main_running ? "" : "n't");
+ if (!hostdata->connected)
+ seq_printf(m, "scsi%d: no currently connected command\n", HOSTNO);
+ else
+ show_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected, m);
+ seq_printf(m, "scsi%d: issue_queue\n", HOSTNO);
+ for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr))
+ show_Scsi_Cmnd(ptr, m);
+
+ seq_printf(m, "scsi%d: disconnected_queue\n", HOSTNO);
+ for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr;
+ ptr = NEXT(ptr))
+ show_Scsi_Cmnd(ptr, m);
+
+ local_irq_restore(flags);
+ return 0;
+}
/*
* Function : void NCR5380_init (struct Scsi_Host *instance)
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 6e25889..e2c009b 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -626,6 +626,7 @@ static int sun3scsi_dma_finish(int write_flag)
#include "sun3_NCR5380.c"
static struct scsi_host_template driver_template = {
+ .show_info = sun3scsi_show_info,
.name = SUN3_SCSI_NAME,
.detect = sun3scsi_detect,
.release = sun3scsi_release,
diff --git a/drivers/scsi/sun3_scsi.h b/drivers/scsi/sun3_scsi.h
index bcefd84..a8da9c7 100644
--- a/drivers/scsi/sun3_scsi.h
+++ b/drivers/scsi/sun3_scsi.h
@@ -100,7 +100,7 @@ static int sun3scsi_release (struct Scsi_Host *);
#define NCR5380_queue_command sun3scsi_queue_command
#define NCR5380_bus_reset sun3scsi_bus_reset
#define NCR5380_abort sun3scsi_abort
-#define NCR5380_proc_info sun3scsi_proc_info
+#define NCR5380_show_info sun3scsi_show_info
#define NCR5380_dma_xfer_len(i, cmd, phase) \
sun3scsi_dma_xfer_len(cmd->SCp.this_residual,cmd,((phase) & SR_IO) ? 0 : 1)
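The pas16, t128 and sun3 headers propagate the new hooks through the usual NCR5380 convention: the board header #defines the generic NCR5380_* names before the shared core file is textually included, so the single core implementation is compiled per driver under a board-specific symbol. Schematically (simplified sketch; file and symbol names as used by the drivers above):

/* Board header (e.g. t128.h): rename the core entry points. */
#define NCR5380_show_info	t128_show_info
#define NCR5380_write_info	t128_write_info

/* Board driver (e.g. t128.c): pull in the shared core, which defines
 * NCR5380_show_info() -- after macro expansion, t128_show_info(). */
#include "NCR5380.c"

/* The board's detect routine (or host template) then exposes that symbol: */
tpnt->show_info  = t128_show_info;
tpnt->write_info = t128_write_info;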
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 5995682..bac55f7 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -1171,112 +1171,36 @@ printk("sym_user_command: data=%ld\n", uc->data);
#endif /* SYM_LINUX_USER_COMMAND_SUPPORT */
-#ifdef SYM_LINUX_USER_INFO_SUPPORT
-/*
- * Informations through the proc file system.
- */
-struct info_str {
- char *buffer;
- int length;
- int offset;
- int pos;
-};
-
-static void copy_mem_info(struct info_str *info, char *data, int len)
-{
- if (info->pos + len > info->length)
- len = info->length - info->pos;
-
- if (info->pos + len < info->offset) {
- info->pos += len;
- return;
- }
- if (info->pos < info->offset) {
- data += (info->offset - info->pos);
- len -= (info->offset - info->pos);
- }
-
- if (len > 0) {
- memcpy(info->buffer + info->pos, data, len);
- info->pos += len;
- }
-}
-
-static int copy_info(struct info_str *info, char *fmt, ...)
-{
- va_list args;
- char buf[81];
- int len;
-
- va_start(args, fmt);
- len = vsprintf(buf, fmt, args);
- va_end(args);
-
- copy_mem_info(info, buf, len);
- return len;
-}
-
/*
* Copy formatted information into the input buffer.
*/
-static int sym_host_info(struct Scsi_Host *shost, char *ptr, off_t offset, int len)
+static int sym_show_info(struct seq_file *m, struct Scsi_Host *shost)
{
+#ifdef SYM_LINUX_USER_INFO_SUPPORT
struct sym_data *sym_data = shost_priv(shost);
struct pci_dev *pdev = sym_data->pdev;
struct sym_hcb *np = sym_data->ncb;
- struct info_str info;
-
- info.buffer = ptr;
- info.length = len;
- info.offset = offset;
- info.pos = 0;
- copy_info(&info, "Chip " NAME53C "%s, device id 0x%x, "
- "revision id 0x%x\n", np->s.chip_name,
- pdev->device, pdev->revision);
- copy_info(&info, "At PCI address %s, IRQ %u\n",
+ seq_printf(m, "Chip " NAME53C "%s, device id 0x%x, "
+ "revision id 0x%x\n", np->s.chip_name,
+ pdev->device, pdev->revision);
+ seq_printf(m, "At PCI address %s, IRQ %u\n",
pci_name(pdev), pdev->irq);
- copy_info(&info, "Min. period factor %d, %s SCSI BUS%s\n",
- (int) (np->minsync_dt ? np->minsync_dt : np->minsync),
- np->maxwide ? "Wide" : "Narrow",
- np->minsync_dt ? ", DT capable" : "");
+ seq_printf(m, "Min. period factor %d, %s SCSI BUS%s\n",
+ (int) (np->minsync_dt ? np->minsync_dt : np->minsync),
+ np->maxwide ? "Wide" : "Narrow",
+ np->minsync_dt ? ", DT capable" : "");
- copy_info(&info, "Max. started commands %d, "
- "max. commands per LUN %d\n",
- SYM_CONF_MAX_START, SYM_CONF_MAX_TAG);
+ seq_printf(m, "Max. started commands %d, "
+ "max. commands per LUN %d\n",
+ SYM_CONF_MAX_START, SYM_CONF_MAX_TAG);
- return info.pos > info.offset? info.pos - info.offset : 0;
-}
-#endif /* SYM_LINUX_USER_INFO_SUPPORT */
-
-/*
- * Entry point of the scsi proc fs of the driver.
- * - func = 0 means read (returns adapter infos)
- * - func = 1 means write (not yet merget from sym53c8xx)
- */
-static int sym53c8xx_proc_info(struct Scsi_Host *shost, char *buffer,
- char **start, off_t offset, int length, int func)
-{
- int retv;
-
- if (func) {
-#ifdef SYM_LINUX_USER_COMMAND_SUPPORT
- retv = sym_user_command(shost, buffer, length);
-#else
- retv = -EINVAL;
-#endif
- } else {
- if (start)
- *start = buffer;
-#ifdef SYM_LINUX_USER_INFO_SUPPORT
- retv = sym_host_info(shost, buffer, offset, length);
+ return 0;
#else
- retv = -EINVAL;
-#endif
- }
-
- return retv;
+ return -EINVAL;
+#endif /* SYM_LINUX_USER_INFO_SUPPORT */
}
+
#endif /* SYM_LINUX_PROC_INFO_SUPPORT */
/*
@@ -1742,7 +1666,10 @@ static struct scsi_host_template sym2_template = {
.use_clustering = ENABLE_CLUSTERING,
.max_sectors = 0xFFFF,
#ifdef SYM_LINUX_PROC_INFO_SUPPORT
- .proc_info = sym53c8xx_proc_info,
+ .show_info = sym_show_info,
+#ifdef SYM_LINUX_USER_COMMAND_SUPPORT
+ .write_info = sym_user_command,
+#endif
.proc_name = NAME53C8XX,
#endif
};
diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c
index d672d97..f1e4b41 100644
--- a/drivers/scsi/t128.c
+++ b/drivers/scsi/t128.c
@@ -201,7 +201,8 @@ int __init t128_detect(struct scsi_host_template * tpnt){
int sig, count;
tpnt->proc_name = "t128";
- tpnt->proc_info = &t128_proc_info;
+ tpnt->show_info = t128_show_info;
+ tpnt->write_info = t128_write_info;
for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
base = 0;
diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h
index ada1115..1df82c28 100644
--- a/drivers/scsi/t128.h
+++ b/drivers/scsi/t128.h
@@ -140,7 +140,8 @@ static int t128_bus_reset(struct scsi_cmnd *);
#define NCR5380_queue_command t128_queue_command
#define NCR5380_abort t128_abort
#define NCR5380_bus_reset t128_bus_reset
-#define NCR5380_proc_info t128_proc_info
+#define NCR5380_show_info t128_show_info
+#define NCR5380_write_info t128_write_info
/* 15 14 12 10 7 5 3
1101 0100 1010 1000 */
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 3449a1f..2168258 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -13,6 +13,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>
@@ -20,12 +22,14 @@
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_scsi.h>
+#include <linux/cpu.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#define VIRTIO_SCSI_MEMPOOL_SZ 64
#define VIRTIO_SCSI_EVENT_LEN 8
+#define VIRTIO_SCSI_VQ_BASE 2
/* Command queue element */
struct virtio_scsi_cmd {
@@ -57,27 +61,61 @@ struct virtio_scsi_vq {
struct virtqueue *vq;
};
-/* Per-target queue state */
+/*
+ * Per-target queue state.
+ *
+ * This struct holds the data needed by the queue steering policy. When a
+ * target is sent multiple requests, we need to drive them to the same queue so
+ * that FIFO processing order is kept. However, if a target was idle, we can
+ * choose a queue arbitrarily. In this case the queue is chosen according to
+ * the current VCPU, so the driver expects the number of request queues to be
+ * equal to the number of VCPUs. This makes it easy and fast to select the
+ * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
+ * (each virtqueue's affinity is set to the CPU that "owns" the queue).
+ *
+ * An interesting effect of this policy is that only writes to req_vq need to
+ * take the tgt_lock. Reads can be done outside the lock because:
+ *
+ * - writes of req_vq only occur when atomic_inc_return(&tgt->reqs) returns 1.
+ * In that case, no other CPU is reading req_vq: even if they were in
+ * virtscsi_queuecommand_multi, they would be spinning on tgt_lock.
+ *
+ * - reads of req_vq only occur when the target is not idle (reqs != 0).
+ * A CPU that enters virtscsi_queuecommand_multi will not modify req_vq.
+ *
+ * Similarly, decrements of reqs are never concurrent with writes of req_vq.
+ * Thus they can happen outside the tgt_lock, provided of course we make reqs
+ * an atomic_t.
+ */
struct virtio_scsi_target_state {
- /* Protects sg. Lock hierarchy is tgt_lock -> vq_lock. */
+ /* This spinlock never held at the same time as vq_lock. */
spinlock_t tgt_lock;
- /* For sglist construction when adding commands to the virtqueue. */
- struct scatterlist sg[];
+ /* Count of outstanding requests. */
+ atomic_t reqs;
+
+ /* Currently active virtqueue for requests sent to this target. */
+ struct virtio_scsi_vq *req_vq;
};
/* Driver instance state */
struct virtio_scsi {
struct virtio_device *vdev;
- struct virtio_scsi_vq ctrl_vq;
- struct virtio_scsi_vq event_vq;
- struct virtio_scsi_vq req_vq;
-
/* Get some buffers ready for event vq */
struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];
- struct virtio_scsi_target_state *tgt[];
+ u32 num_queues;
+
+ /* If the affinity hint is set for virtqueues */
+ bool affinity_hint_set;
+
+ /* CPU hotplug notifier */
+ struct notifier_block nb;
+
+ struct virtio_scsi_vq ctrl_vq;
+ struct virtio_scsi_vq event_vq;
+ struct virtio_scsi_vq req_vqs[];
};
static struct kmem_cache *virtscsi_cmd_cache;
@@ -107,11 +145,13 @@ static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
*
* Called with vq_lock held.
*/
-static void virtscsi_complete_cmd(void *buf)
+static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
{
struct virtio_scsi_cmd *cmd = buf;
struct scsi_cmnd *sc = cmd->sc;
struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
+ struct virtio_scsi_target_state *tgt =
+ scsi_target(sc->device)->hostdata;
dev_dbg(&sc->device->sdev_gendev,
"cmd %p response %u status %#02x sense_len %u\n",
@@ -166,32 +206,71 @@ static void virtscsi_complete_cmd(void *buf)
mempool_free(cmd, virtscsi_cmd_pool);
sc->scsi_done(sc);
+
+ atomic_dec(&tgt->reqs);
}
-static void virtscsi_vq_done(struct virtqueue *vq, void (*fn)(void *buf))
+static void virtscsi_vq_done(struct virtio_scsi *vscsi,
+ struct virtio_scsi_vq *virtscsi_vq,
+ void (*fn)(struct virtio_scsi *vscsi, void *buf))
{
void *buf;
unsigned int len;
+ unsigned long flags;
+ struct virtqueue *vq = virtscsi_vq->vq;
+ spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
do {
virtqueue_disable_cb(vq);
while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
- fn(buf);
+ fn(vscsi, buf);
} while (!virtqueue_enable_cb(vq));
+ spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
}
static void virtscsi_req_done(struct virtqueue *vq)
{
struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
struct virtio_scsi *vscsi = shost_priv(sh);
- unsigned long flags;
+ int index = vq->index - VIRTIO_SCSI_VQ_BASE;
+ struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];
- spin_lock_irqsave(&vscsi->req_vq.vq_lock, flags);
- virtscsi_vq_done(vq, virtscsi_complete_cmd);
- spin_unlock_irqrestore(&vscsi->req_vq.vq_lock, flags);
+ /*
+ * Read req_vq before decrementing the reqs field in
+ * virtscsi_complete_cmd.
+ *
+ * With barriers:
+ *
+ * CPU #0 virtscsi_queuecommand_multi (CPU #1)
+ * ------------------------------------------------------------
+ * lock vq_lock
+ * read req_vq
+ * read reqs (reqs = 1)
+ * write reqs (reqs = 0)
+ * increment reqs (reqs = 1)
+ * write req_vq
+ *
+ * Possible reordering without barriers:
+ *
+ * CPU #0 virtscsi_queuecommand_multi (CPU #1)
+ * ------------------------------------------------------------
+ * lock vq_lock
+ * read reqs (reqs = 1)
+ * write reqs (reqs = 0)
+ * increment reqs (reqs = 1)
+ * write req_vq
+ * read (wrong) req_vq
+ *
+ * We do not need a full smp_rmb, because req_vq is required to get
+ * to tgt->reqs: tgt is scsi_target(sc->device)->hostdata, where sc is
+ * stored in the virtqueue as the user token.
+ */
+ smp_read_barrier_depends();
+
+ virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
};
-static void virtscsi_complete_free(void *buf)
+static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
{
struct virtio_scsi_cmd *cmd = buf;
@@ -205,11 +284,8 @@ static void virtscsi_ctrl_done(struct virtqueue *vq)
{
struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
struct virtio_scsi *vscsi = shost_priv(sh);
- unsigned long flags;
- spin_lock_irqsave(&vscsi->ctrl_vq.vq_lock, flags);
- virtscsi_vq_done(vq, virtscsi_complete_free);
- spin_unlock_irqrestore(&vscsi->ctrl_vq.vq_lock, flags);
+ virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
};
static int virtscsi_kick_event(struct virtio_scsi *vscsi,
@@ -223,8 +299,8 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
- err = virtqueue_add_buf(vscsi->event_vq.vq, &sg, 0, 1, event_node,
- GFP_ATOMIC);
+ err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node,
+ GFP_ATOMIC);
if (!err)
virtqueue_kick(vscsi->event_vq.vq);
@@ -254,7 +330,7 @@ static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
}
static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
- struct virtio_scsi_event *event)
+ struct virtio_scsi_event *event)
{
struct scsi_device *sdev;
struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
@@ -332,7 +408,7 @@ static void virtscsi_handle_event(struct work_struct *work)
virtscsi_kick_event(vscsi, event_node);
}
-static void virtscsi_complete_event(void *buf)
+static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
{
struct virtio_scsi_event_node *event_node = buf;
@@ -344,82 +420,65 @@ static void virtscsi_event_done(struct virtqueue *vq)
{
struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
struct virtio_scsi *vscsi = shost_priv(sh);
- unsigned long flags;
- spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
- virtscsi_vq_done(vq, virtscsi_complete_event);
- spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
+ virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
};
-static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx,
- struct scsi_data_buffer *sdb)
-{
- struct sg_table *table = &sdb->table;
- struct scatterlist *sg_elem;
- unsigned int idx = *p_idx;
- int i;
-
- for_each_sg(table->sgl, sg_elem, table->nents, i)
- sg[idx++] = *sg_elem;
-
- *p_idx = idx;
-}
-
/**
- * virtscsi_map_cmd - map a scsi_cmd to a virtqueue scatterlist
- * @vscsi : virtio_scsi state
+ * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue
+ * @vq : the struct virtqueue we're talking about
* @cmd : command structure
- * @out_num : number of read-only elements
- * @in_num : number of write-only elements
* @req_size : size of the request buffer
* @resp_size : size of the response buffer
- *
- * Called with tgt_lock held.
+ * @gfp : flags to use for memory allocations
*/
-static void virtscsi_map_cmd(struct virtio_scsi_target_state *tgt,
- struct virtio_scsi_cmd *cmd,
- unsigned *out_num, unsigned *in_num,
- size_t req_size, size_t resp_size)
+static int virtscsi_add_cmd(struct virtqueue *vq,
+ struct virtio_scsi_cmd *cmd,
+ size_t req_size, size_t resp_size, gfp_t gfp)
{
struct scsi_cmnd *sc = cmd->sc;
- struct scatterlist *sg = tgt->sg;
- unsigned int idx = 0;
+ struct scatterlist *sgs[4], req, resp;
+ struct sg_table *out, *in;
+ unsigned out_num = 0, in_num = 0;
+
+ out = in = NULL;
+
+ if (sc && sc->sc_data_direction != DMA_NONE) {
+ if (sc->sc_data_direction != DMA_FROM_DEVICE)
+ out = &scsi_out(sc)->table;
+ if (sc->sc_data_direction != DMA_TO_DEVICE)
+ in = &scsi_in(sc)->table;
+ }
/* Request header. */
- sg_set_buf(&sg[idx++], &cmd->req, req_size);
+ sg_init_one(&req, &cmd->req, req_size);
+ sgs[out_num++] = &req;
/* Data-out buffer. */
- if (sc && sc->sc_data_direction != DMA_FROM_DEVICE)
- virtscsi_map_sgl(sg, &idx, scsi_out(sc));
-
- *out_num = idx;
+ if (out)
+ sgs[out_num++] = out->sgl;
/* Response header. */
- sg_set_buf(&sg[idx++], &cmd->resp, resp_size);
+ sg_init_one(&resp, &cmd->resp, resp_size);
+ sgs[out_num + in_num++] = &resp;
/* Data-in buffer */
- if (sc && sc->sc_data_direction != DMA_TO_DEVICE)
- virtscsi_map_sgl(sg, &idx, scsi_in(sc));
+ if (in)
+ sgs[out_num + in_num++] = in->sgl;
- *in_num = idx - *out_num;
+ return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, gfp);
}
-static int virtscsi_kick_cmd(struct virtio_scsi_target_state *tgt,
- struct virtio_scsi_vq *vq,
+static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
struct virtio_scsi_cmd *cmd,
size_t req_size, size_t resp_size, gfp_t gfp)
{
- unsigned int out_num, in_num;
unsigned long flags;
int err;
bool needs_kick = false;
- spin_lock_irqsave(&tgt->tgt_lock, flags);
- virtscsi_map_cmd(tgt, cmd, &out_num, &in_num, req_size, resp_size);
-
- spin_lock(&vq->vq_lock);
- err = virtqueue_add_buf(vq->vq, tgt->sg, out_num, in_num, cmd, gfp);
- spin_unlock(&tgt->tgt_lock);
+ spin_lock_irqsave(&vq->vq_lock, flags);
+ err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size, gfp);
if (!err)
needs_kick = virtqueue_kick_prepare(vq->vq);
@@ -430,10 +489,10 @@ static int virtscsi_kick_cmd(struct virtio_scsi_target_state *tgt,
return err;
}
-static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
+static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
+ struct virtio_scsi_vq *req_vq,
+ struct scsi_cmnd *sc)
{
- struct virtio_scsi *vscsi = shost_priv(sh);
- struct virtio_scsi_target_state *tgt = vscsi->tgt[sc->device->id];
struct virtio_scsi_cmd *cmd;
int ret;
@@ -467,7 +526,7 @@ static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
- if (virtscsi_kick_cmd(tgt, &vscsi->req_vq, cmd,
+ if (virtscsi_kick_cmd(req_vq, cmd,
sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
GFP_ATOMIC) == 0)
ret = 0;
@@ -478,14 +537,62 @@ out:
return ret;
}
+static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
+ struct scsi_cmnd *sc)
+{
+ struct virtio_scsi *vscsi = shost_priv(sh);
+ struct virtio_scsi_target_state *tgt =
+ scsi_target(sc->device)->hostdata;
+
+ atomic_inc(&tgt->reqs);
+ return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
+}
+
+static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
+ struct virtio_scsi_target_state *tgt)
+{
+ struct virtio_scsi_vq *vq;
+ unsigned long flags;
+ u32 queue_num;
+
+ spin_lock_irqsave(&tgt->tgt_lock, flags);
+
+ /*
+ * The memory barrier after atomic_inc_return matches
+ * the smp_read_barrier_depends() in virtscsi_req_done.
+ */
+ if (atomic_inc_return(&tgt->reqs) > 1)
+ vq = ACCESS_ONCE(tgt->req_vq);
+ else {
+ queue_num = smp_processor_id();
+ while (unlikely(queue_num >= vscsi->num_queues))
+ queue_num -= vscsi->num_queues;
+
+ tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
+ }
+
+ spin_unlock_irqrestore(&tgt->tgt_lock, flags);
+ return vq;
+}
+
+static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
+ struct scsi_cmnd *sc)
+{
+ struct virtio_scsi *vscsi = shost_priv(sh);
+ struct virtio_scsi_target_state *tgt =
+ scsi_target(sc->device)->hostdata;
+ struct virtio_scsi_vq *req_vq = virtscsi_pick_vq(vscsi, tgt);
+
+ return virtscsi_queuecommand(vscsi, req_vq, sc);
+}
+
static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
{
DECLARE_COMPLETION_ONSTACK(comp);
- struct virtio_scsi_target_state *tgt = vscsi->tgt[cmd->sc->device->id];
int ret = FAILED;
cmd->comp = &comp;
- if (virtscsi_kick_cmd(tgt, &vscsi->ctrl_vq, cmd,
+ if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd,
sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
GFP_NOIO) < 0)
goto out;
@@ -547,18 +654,57 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
return virtscsi_tmf(vscsi, cmd);
}
-static struct scsi_host_template virtscsi_host_template = {
+static int virtscsi_target_alloc(struct scsi_target *starget)
+{
+ struct virtio_scsi_target_state *tgt =
+ kmalloc(sizeof(*tgt), GFP_KERNEL);
+ if (!tgt)
+ return -ENOMEM;
+
+ spin_lock_init(&tgt->tgt_lock);
+ atomic_set(&tgt->reqs, 0);
+ tgt->req_vq = NULL;
+
+ starget->hostdata = tgt;
+ return 0;
+}
+
+static void virtscsi_target_destroy(struct scsi_target *starget)
+{
+ struct virtio_scsi_target_state *tgt = starget->hostdata;
+ kfree(tgt);
+}
+
+static struct scsi_host_template virtscsi_host_template_single = {
+ .module = THIS_MODULE,
+ .name = "Virtio SCSI HBA",
+ .proc_name = "virtio_scsi",
+ .this_id = -1,
+ .queuecommand = virtscsi_queuecommand_single,
+ .eh_abort_handler = virtscsi_abort,
+ .eh_device_reset_handler = virtscsi_device_reset,
+
+ .can_queue = 1024,
+ .dma_boundary = UINT_MAX,
+ .use_clustering = ENABLE_CLUSTERING,
+ .target_alloc = virtscsi_target_alloc,
+ .target_destroy = virtscsi_target_destroy,
+};
+
+static struct scsi_host_template virtscsi_host_template_multi = {
.module = THIS_MODULE,
.name = "Virtio SCSI HBA",
.proc_name = "virtio_scsi",
- .queuecommand = virtscsi_queuecommand,
.this_id = -1,
+ .queuecommand = virtscsi_queuecommand_multi,
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,
.can_queue = 1024,
.dma_boundary = UINT_MAX,
.use_clustering = ENABLE_CLUSTERING,
+ .target_alloc = virtscsi_target_alloc,
+ .target_destroy = virtscsi_target_destroy,
};
#define virtscsi_config_get(vdev, fld) \
@@ -578,29 +724,69 @@ static struct scsi_host_template virtscsi_host_template = {
&__val, sizeof(__val)); \
})
-static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
- struct virtqueue *vq)
+static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
{
- spin_lock_init(&virtscsi_vq->vq_lock);
- virtscsi_vq->vq = vq;
+ int i;
+ int cpu;
+
+ /*
+ * In multiqueue mode, when the number of CPUs is equal
+ * to the number of request queues, we let each queue be
+ * private to one CPU by setting the affinity hint, to
+ * eliminate contention.
+ */
+ if ((vscsi->num_queues == 1 ||
+ vscsi->num_queues != num_online_cpus()) && affinity) {
+ if (vscsi->affinity_hint_set)
+ affinity = false;
+ else
+ return;
+ }
+
+ if (affinity) {
+ i = 0;
+ for_each_online_cpu(cpu) {
+ virtqueue_set_affinity(vscsi->req_vqs[i].vq, cpu);
+ i++;
+ }
+
+ vscsi->affinity_hint_set = true;
+ } else {
+ for (i = 0; i < vscsi->num_queues - VIRTIO_SCSI_VQ_BASE; i++)
+ virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
+
+ vscsi->affinity_hint_set = false;
+ }
}
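
The effect of that policy, sketched in user space (hypothetical helper, not part of this patch): queue i is pinned to the i-th online CPU only when the counts match; otherwise no hints are set and virtscsi_pick_vq() picks a queue from the submitting CPU at run time.

#include <stdio.h>

/* illustrative only: models when affinity hints would be set */
static void assign_affinity(int num_queues, int num_online_cpus)
{
	int i;

	if (num_queues == 1 || num_queues != num_online_cpus) {
		printf("%d queue(s), %d cpu(s): no affinity hints\n",
		       num_queues, num_online_cpus);
		return;
	}
	for (i = 0; i < num_queues; i++)
		printf("request vq %d -> cpu %d\n", i, i);
}

int main(void)
{
	assign_affinity(4, 4); /* one queue per CPU: hints are set */
	assign_affinity(2, 8); /* mismatch: rely on run-time queue picking */
	return 0;
}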
-static struct virtio_scsi_target_state *virtscsi_alloc_tgt(
- struct virtio_device *vdev, int sg_elems)
+static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
{
- struct virtio_scsi_target_state *tgt;
- gfp_t gfp_mask = GFP_KERNEL;
-
- /* We need extra sg elements at head and tail. */
- tgt = kmalloc(sizeof(*tgt) + sizeof(tgt->sg[0]) * (sg_elems + 2),
- gfp_mask);
+ get_online_cpus();
+ __virtscsi_set_affinity(vscsi, affinity);
+ put_online_cpus();
+}
- if (!tgt)
- return NULL;
+static int virtscsi_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ struct virtio_scsi *vscsi = container_of(nfb, struct virtio_scsi, nb);
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ __virtscsi_set_affinity(vscsi, true);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
- spin_lock_init(&tgt->tgt_lock);
- sg_init_table(tgt->sg, sg_elems + 2);
- return tgt;
+static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
+ struct virtqueue *vq)
+{
+ spin_lock_init(&virtscsi_vq->vq_lock);
+ virtscsi_vq->vq = vq;
}
static void virtscsi_scan(struct virtio_device *vdev)
@@ -614,46 +800,56 @@ static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
struct Scsi_Host *sh = virtio_scsi_host(vdev);
struct virtio_scsi *vscsi = shost_priv(sh);
- u32 i, num_targets;
+
+ virtscsi_set_affinity(vscsi, false);
/* Stop all the virtqueues. */
vdev->config->reset(vdev);
- num_targets = sh->max_id;
- for (i = 0; i < num_targets; i++) {
- kfree(vscsi->tgt[i]);
- vscsi->tgt[i] = NULL;
- }
-
vdev->config->del_vqs(vdev);
}
static int virtscsi_init(struct virtio_device *vdev,
- struct virtio_scsi *vscsi, int num_targets)
+ struct virtio_scsi *vscsi)
{
int err;
- struct virtqueue *vqs[3];
- u32 i, sg_elems;
+ u32 i;
+ u32 num_vqs;
+ vq_callback_t **callbacks;
+ const char **names;
+ struct virtqueue **vqs;
+
+ num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
+ vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL);
+ callbacks = kmalloc(num_vqs * sizeof(vq_callback_t *), GFP_KERNEL);
+ names = kmalloc(num_vqs * sizeof(char *), GFP_KERNEL);
+
+ if (!callbacks || !vqs || !names) {
+ err = -ENOMEM;
+ goto out;
+ }
- vq_callback_t *callbacks[] = {
- virtscsi_ctrl_done,
- virtscsi_event_done,
- virtscsi_req_done
- };
- const char *names[] = {
- "control",
- "event",
- "request"
- };
+ callbacks[0] = virtscsi_ctrl_done;
+ callbacks[1] = virtscsi_event_done;
+ names[0] = "control";
+ names[1] = "event";
+ for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) {
+ callbacks[i] = virtscsi_req_done;
+ names[i] = "request";
+ }
/* Discover virtqueues and write information to configuration. */
- err = vdev->config->find_vqs(vdev, 3, vqs, callbacks, names);
+ err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
if (err)
- return err;
+ goto out;
virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
- virtscsi_init_vq(&vscsi->req_vq, vqs[2]);
+ for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++)
+ virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
+ vqs[i]);
+
+ virtscsi_set_affinity(vscsi, true);
virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
@@ -661,19 +857,12 @@ static int virtscsi_init(struct virtio_device *vdev,
if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
virtscsi_kick_event_all(vscsi);
- /* We need to know how many segments before we allocate. */
- sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
-
- for (i = 0; i < num_targets; i++) {
- vscsi->tgt[i] = virtscsi_alloc_tgt(vdev, sg_elems);
- if (!vscsi->tgt[i]) {
- err = -ENOMEM;
- goto out;
- }
- }
err = 0;
out:
+ kfree(names);
+ kfree(callbacks);
+ kfree(vqs);
if (err)
virtscsi_remove_vqs(vdev);
return err;
@@ -686,13 +875,21 @@ static int virtscsi_probe(struct virtio_device *vdev)
int err;
u32 sg_elems, num_targets;
u32 cmd_per_lun;
+ u32 num_queues;
+ struct scsi_host_template *hostt;
+
+ /* We need to know how many queues before we allocate. */
+ num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
- /* Allocate memory and link the structs together. */
num_targets = virtscsi_config_get(vdev, max_target) + 1;
- shost = scsi_host_alloc(&virtscsi_host_template,
- sizeof(*vscsi)
- + num_targets * sizeof(struct virtio_scsi_target_state));
+ if (num_queues == 1)
+ hostt = &virtscsi_host_template_single;
+ else
+ hostt = &virtscsi_host_template_multi;
+
+ shost = scsi_host_alloc(hostt,
+ sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues);
if (!shost)
return -ENOMEM;
@@ -700,12 +897,20 @@ static int virtscsi_probe(struct virtio_device *vdev)
shost->sg_tablesize = sg_elems;
vscsi = shost_priv(shost);
vscsi->vdev = vdev;
+ vscsi->num_queues = num_queues;
vdev->priv = shost;
- err = virtscsi_init(vdev, vscsi, num_targets);
+ err = virtscsi_init(vdev, vscsi);
if (err)
goto virtscsi_init_failed;
+ vscsi->nb.notifier_call = &virtscsi_cpu_callback;
+ err = register_hotcpu_notifier(&vscsi->nb);
+ if (err) {
+ pr_err("registering cpu notifier failed\n");
+ goto scsi_add_host_failed;
+ }
+
cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
@@ -743,6 +948,8 @@ static void virtscsi_remove(struct virtio_device *vdev)
scsi_remove_host(shost);
+ unregister_hotcpu_notifier(&vscsi->nb);
+
virtscsi_remove_vqs(vdev);
scsi_host_put(shost);
}
@@ -759,7 +966,7 @@ static int virtscsi_restore(struct virtio_device *vdev)
struct Scsi_Host *sh = virtio_scsi_host(vdev);
struct virtio_scsi *vscsi = shost_priv(sh);
- return virtscsi_init(vdev, vscsi, sh->max_id);
+ return virtscsi_init(vdev, vscsi);
}
#endif
@@ -794,8 +1001,7 @@ static int __init init(void)
virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
if (!virtscsi_cmd_cache) {
- printk(KERN_ERR "kmem_cache_create() for "
- "virtscsi_cmd_cache failed\n");
+ pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n");
goto error;
}
@@ -804,8 +1010,7 @@ static int __init init(void)
mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
virtscsi_cmd_cache);
if (!virtscsi_cmd_pool) {
- printk(KERN_ERR "mempool_create() for"
- "virtscsi_cmd_pool failed\n");
+ pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
goto error;
}
ret = register_virtio_driver(&virtio_scsi_driver);
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index c0ee4ea..41883a8 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -2054,22 +2054,16 @@ wd33c93_init(struct Scsi_Host *instance, const wd33c93_regs regs,
printk(" Version %s - %s\n", WD33C93_VERSION, WD33C93_DATE);
}
-int
-wd33c93_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off, int len, int in)
+int wd33c93_write_info(struct Scsi_Host *instance, char *buf, int len)
{
-
#ifdef PROC_INTERFACE
-
char *bp;
- char tbuf[128];
struct WD33C93_hostdata *hd;
- struct scsi_cmnd *cmd;
int x;
- static int stop = 0;
hd = (struct WD33C93_hostdata *) instance->hostdata;
-/* If 'in' is TRUE we need to _read_ the proc file. We accept the following
+/* We accept the following
* keywords (same format as command-line, but arguments are not optional):
* debug
* disconnect
@@ -2083,145 +2077,124 @@ wd33c93_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off
* nosync
*/
- if (in) {
- buf[len] = '\0';
- for (bp = buf; *bp; ) {
- while (',' == *bp || ' ' == *bp)
- ++bp;
- if (!strncmp(bp, "debug:", 6)) {
- hd->args = simple_strtoul(bp+6, &bp, 0) & DB_MASK;
- } else if (!strncmp(bp, "disconnect:", 11)) {
- x = simple_strtoul(bp+11, &bp, 0);
- if (x < DIS_NEVER || x > DIS_ALWAYS)
- x = DIS_ADAPTIVE;
- hd->disconnect = x;
- } else if (!strncmp(bp, "period:", 7)) {
+ buf[len] = '\0';
+ for (bp = buf; *bp; ) {
+ while (',' == *bp || ' ' == *bp)
+ ++bp;
+ if (!strncmp(bp, "debug:", 6)) {
+ hd->args = simple_strtoul(bp+6, &bp, 0) & DB_MASK;
+ } else if (!strncmp(bp, "disconnect:", 11)) {
+ x = simple_strtoul(bp+11, &bp, 0);
+ if (x < DIS_NEVER || x > DIS_ALWAYS)
+ x = DIS_ADAPTIVE;
+ hd->disconnect = x;
+ } else if (!strncmp(bp, "period:", 7)) {
+ x = simple_strtoul(bp+7, &bp, 0);
+ hd->default_sx_per =
+ hd->sx_table[round_period((unsigned int) x,
+ hd->sx_table)].period_ns;
+ } else if (!strncmp(bp, "resync:", 7)) {
+ set_resync(hd, (int)simple_strtoul(bp+7, &bp, 0));
+ } else if (!strncmp(bp, "proc:", 5)) {
+ hd->proc = simple_strtoul(bp+5, &bp, 0);
+ } else if (!strncmp(bp, "nodma:", 6)) {
+ hd->no_dma = simple_strtoul(bp+6, &bp, 0);
+ } else if (!strncmp(bp, "level2:", 7)) {
+ hd->level2 = simple_strtoul(bp+7, &bp, 0);
+ } else if (!strncmp(bp, "burst:", 6)) {
+ hd->dma_mode =
+ simple_strtol(bp+6, &bp, 0) ? CTRL_BURST:CTRL_DMA;
+ } else if (!strncmp(bp, "fast:", 5)) {
+ x = !!simple_strtol(bp+5, &bp, 0);
+ if (x != hd->fast)
+ set_resync(hd, 0xff);
+ hd->fast = x;
+ } else if (!strncmp(bp, "nosync:", 7)) {
x = simple_strtoul(bp+7, &bp, 0);
- hd->default_sx_per =
- hd->sx_table[round_period((unsigned int) x,
- hd->sx_table)].period_ns;
- } else if (!strncmp(bp, "resync:", 7)) {
- set_resync(hd, (int)simple_strtoul(bp+7, &bp, 0));
- } else if (!strncmp(bp, "proc:", 5)) {
- hd->proc = simple_strtoul(bp+5, &bp, 0);
- } else if (!strncmp(bp, "nodma:", 6)) {
- hd->no_dma = simple_strtoul(bp+6, &bp, 0);
- } else if (!strncmp(bp, "level2:", 7)) {
- hd->level2 = simple_strtoul(bp+7, &bp, 0);
- } else if (!strncmp(bp, "burst:", 6)) {
- hd->dma_mode =
- simple_strtol(bp+6, &bp, 0) ? CTRL_BURST:CTRL_DMA;
- } else if (!strncmp(bp, "fast:", 5)) {
- x = !!simple_strtol(bp+5, &bp, 0);
- if (x != hd->fast)
- set_resync(hd, 0xff);
- hd->fast = x;
- } else if (!strncmp(bp, "nosync:", 7)) {
- x = simple_strtoul(bp+7, &bp, 0);
- set_resync(hd, x ^ hd->no_sync);
- hd->no_sync = x;
- } else {
- break; /* unknown keyword,syntax-error,... */
- }
+ set_resync(hd, x ^ hd->no_sync);
+ hd->no_sync = x;
+ } else {
+ break; /* unknown keyword,syntax-error,... */
}
- return len;
}
+ return len;
+#else
+ return 0;
+#endif
+}
+
+int
+wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance)
+{
+#ifdef PROC_INTERFACE
+ struct WD33C93_hostdata *hd;
+ struct scsi_cmnd *cmd;
+ int x;
+
+ hd = (struct WD33C93_hostdata *) instance->hostdata;
spin_lock_irq(&hd->lock);
- bp = buf;
- *bp = '\0';
- if (hd->proc & PR_VERSION) {
- sprintf(tbuf, "\nVersion %s - %s.",
+ if (hd->proc & PR_VERSION)
+ seq_printf(m, "\nVersion %s - %s.",
WD33C93_VERSION, WD33C93_DATE);
- strcat(bp, tbuf);
- }
+
if (hd->proc & PR_INFO) {
- sprintf(tbuf, "\nclock_freq=%02x no_sync=%02x no_dma=%d"
+ seq_printf(m, "\nclock_freq=%02x no_sync=%02x no_dma=%d"
" dma_mode=%02x fast=%d",
hd->clock_freq, hd->no_sync, hd->no_dma, hd->dma_mode, hd->fast);
- strcat(bp, tbuf);
- strcat(bp, "\nsync_xfer[] = ");
- for (x = 0; x < 7; x++) {
- sprintf(tbuf, "\t%02x", hd->sync_xfer[x]);
- strcat(bp, tbuf);
- }
- strcat(bp, "\nsync_stat[] = ");
- for (x = 0; x < 7; x++) {
- sprintf(tbuf, "\t%02x", hd->sync_stat[x]);
- strcat(bp, tbuf);
- }
+ seq_printf(m, "\nsync_xfer[] = ");
+ for (x = 0; x < 7; x++)
+ seq_printf(m, "\t%02x", hd->sync_xfer[x]);
+ seq_printf(m, "\nsync_stat[] = ");
+ for (x = 0; x < 7; x++)
+ seq_printf(m, "\t%02x", hd->sync_stat[x]);
}
#ifdef PROC_STATISTICS
if (hd->proc & PR_STATISTICS) {
- strcat(bp, "\ncommands issued: ");
- for (x = 0; x < 7; x++) {
- sprintf(tbuf, "\t%ld", hd->cmd_cnt[x]);
- strcat(bp, tbuf);
- }
- strcat(bp, "\ndisconnects allowed:");
- for (x = 0; x < 7; x++) {
- sprintf(tbuf, "\t%ld", hd->disc_allowed_cnt[x]);
- strcat(bp, tbuf);
- }
- strcat(bp, "\ndisconnects done: ");
- for (x = 0; x < 7; x++) {
- sprintf(tbuf, "\t%ld", hd->disc_done_cnt[x]);
- strcat(bp, tbuf);
- }
- sprintf(tbuf,
+ seq_printf(m, "\ncommands issued: ");
+ for (x = 0; x < 7; x++)
+ seq_printf(m, "\t%ld", hd->cmd_cnt[x]);
+ seq_printf(m, "\ndisconnects allowed:");
+ for (x = 0; x < 7; x++)
+ seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]);
+ seq_printf(m, "\ndisconnects done: ");
+ for (x = 0; x < 7; x++)
+ seq_printf(m, "\t%ld", hd->disc_done_cnt[x]);
+ seq_printf(m,
"\ninterrupts: %ld, DATA_PHASE ints: %ld DMA, %ld PIO",
hd->int_cnt, hd->dma_cnt, hd->pio_cnt);
- strcat(bp, tbuf);
}
#endif
if (hd->proc & PR_CONNECTED) {
- strcat(bp, "\nconnected: ");
+ seq_printf(m, "\nconnected: ");
if (hd->connected) {
cmd = (struct scsi_cmnd *) hd->connected;
- sprintf(tbuf, " %d:%d(%02x)",
+ seq_printf(m, " %d:%d(%02x)",
cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
- strcat(bp, tbuf);
}
}
if (hd->proc & PR_INPUTQ) {
- strcat(bp, "\ninput_Q: ");
+ seq_printf(m, "\ninput_Q: ");
cmd = (struct scsi_cmnd *) hd->input_Q;
while (cmd) {
- sprintf(tbuf, " %d:%d(%02x)",
+ seq_printf(m, " %d:%d(%02x)",
cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
- strcat(bp, tbuf);
cmd = (struct scsi_cmnd *) cmd->host_scribble;
}
}
if (hd->proc & PR_DISCQ) {
- strcat(bp, "\ndisconnected_Q:");
+ seq_printf(m, "\ndisconnected_Q:");
cmd = (struct scsi_cmnd *) hd->disconnected_Q;
while (cmd) {
- sprintf(tbuf, " %d:%d(%02x)",
+ seq_printf(m, " %d:%d(%02x)",
cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
- strcat(bp, tbuf);
cmd = (struct scsi_cmnd *) cmd->host_scribble;
}
}
- strcat(bp, "\n");
+ seq_printf(m, "\n");
spin_unlock_irq(&hd->lock);
- *start = buf;
- if (stop) {
- stop = 0;
- return 0;
- }
- if (off > 0x40000) /* ALWAYS stop after 256k bytes have been read */
- stop = 1;
- if (hd->proc & PR_STOP) /* stop every other time */
- stop = 1;
- return strlen(bp);
-
-#else /* PROC_INTERFACE */
-
- return 0;
-
#endif /* PROC_INTERFACE */
-
+ return 0;
}
EXPORT_SYMBOL(wd33c93_host_reset);
@@ -2229,4 +2202,5 @@ EXPORT_SYMBOL(wd33c93_init);
EXPORT_SYMBOL(wd33c93_abort);
EXPORT_SYMBOL(wd33c93_queuecommand);
EXPORT_SYMBOL(wd33c93_intr);
-EXPORT_SYMBOL(wd33c93_proc_info);
+EXPORT_SYMBOL(wd33c93_show_info);
+EXPORT_SYMBOL(wd33c93_write_info);
diff --git a/drivers/scsi/wd33c93.h b/drivers/scsi/wd33c93.h
index 3b463d7..08abe50 100644
--- a/drivers/scsi/wd33c93.h
+++ b/drivers/scsi/wd33c93.h
@@ -345,7 +345,8 @@ void wd33c93_init (struct Scsi_Host *instance, const wd33c93_regs regs,
int wd33c93_abort (struct scsi_cmnd *cmd);
int wd33c93_queuecommand (struct Scsi_Host *h, struct scsi_cmnd *cmd);
void wd33c93_intr (struct Scsi_Host *instance);
-int wd33c93_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int);
+int wd33c93_show_info(struct seq_file *, struct Scsi_Host *);
+int wd33c93_write_info(struct Scsi_Host *, char *, int);
int wd33c93_host_reset (struct scsi_cmnd *);
#endif /* WD33C93_H */
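
For reference, a hedged sketch of the new-style hooks this conversion targets: the scsi_host_template now points .show_info at a seq_file printer and .write_info at a plain buffer parser, instead of one multiplexed proc_info routine. The demo_* names below are hypothetical and not part of this patch.

#include <linux/module.h>
#include <linux/seq_file.h>
#include <scsi/scsi_host.h>

static int demo_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	seq_printf(m, "demo host no %d\n", host->host_no);
	return 0;	/* seq_file handles buffering and offsets */
}

static int demo_write_info(struct Scsi_Host *host, char *buf, int len)
{
	/* parse 'len' bytes of keyword settings from 'buf' here */
	return len;
}

static struct scsi_host_template demo_template = {
	.module     = THIS_MODULE,
	.name       = "demo",
	.proc_name  = "demo",
	.show_info  = demo_show_info,
	.write_info = demo_write_info,
	/* .queuecommand etc. omitted; the template is passed to
	 * scsi_host_alloc() in the driver's probe path */
};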
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index d89a5df..f9a6e4b 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1296,9 +1296,9 @@ static void wd7000_revision(Adapter * host)
#undef SPRINTF
-#define SPRINTF(args...) { if (pos < (buffer + length)) pos += sprintf (pos, ## args); }
+#define SPRINTF(args...) { seq_printf(m, ## args); }
-static int wd7000_set_info(char *buffer, int length, struct Scsi_Host *host)
+static int wd7000_set_info(struct Scsi_Host *host, char *buffer, int length)
{
dprintk("Buffer = <%.*s>, length = %d\n", length, buffer, length);
@@ -1310,22 +1310,15 @@ static int wd7000_set_info(char *buffer, int length, struct Scsi_Host *host)
}
-static int wd7000_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int inout)
+static int wd7000_show_info(struct seq_file *m, struct Scsi_Host *host)
{
Adapter *adapter = (Adapter *)host->hostdata;
unsigned long flags;
- char *pos = buffer;
#ifdef WD7000_DEBUG
Mailbox *ogmbs, *icmbs;
short count;
#endif
- /*
- * Has data been written to the file ?
- */
- if (inout)
- return (wd7000_set_info(buffer, length, host));
-
spin_lock_irqsave(host->host_lock, flags);
SPRINTF("Host scsi%d: Western Digital WD-7000 (rev %d.%d)\n", host->host_no, adapter->rev1, adapter->rev2);
SPRINTF(" IO base: 0x%x\n", adapter->iobase);
@@ -1368,17 +1361,7 @@ static int wd7000_proc_info(struct Scsi_Host *host, char *buffer, char **start,
spin_unlock_irqrestore(host->host_lock, flags);
- /*
- * Calculate start of next buffer, and return value.
- */
- *start = buffer + offset;
-
- if ((pos - buffer) < offset)
- return (0);
- else if ((pos - buffer - offset) < length)
- return (pos - buffer - offset);
- else
- return (length);
+ return 0;
}
@@ -1413,7 +1396,8 @@ static __init int wd7000_detect(struct scsi_host_template *tpnt)
for (i = 0; i < NUM_CONFIGS; biosptr[i++] = -1);
tpnt->proc_name = "wd7000";
- tpnt->proc_info = &wd7000_proc_info;
+ tpnt->show_info = &wd7000_show_info;
+ tpnt->write_info = wd7000_set_info;
/*
* Set up SCB free list, which is shared by all adapters
@@ -1658,7 +1642,8 @@ MODULE_LICENSE("GPL");
static struct scsi_host_template driver_template = {
.proc_name = "wd7000",
- .proc_info = wd7000_proc_info,
+ .show_info = wd7000_show_info,
+ .write_info = wd7000_set_info,
.name = "Western Digital WD-7000",
.detect = wd7000_detect,
.release = wd7000_release,
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 8234d22..2e8f24a 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -776,10 +776,10 @@ rx_dma_failed:
#if defined(CONFIG_OF)
static const struct of_device_id davinci_spi_of_match[] = {
{
- .compatible = "ti,dm644x-spi",
+ .compatible = "ti,dm6441-spi",
},
{
- .compatible = "ti,da8xx-spi",
+ .compatible = "ti,da830-spi",
.data = (void *)SPI_VERSION_2,
},
{ },
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index a1d5778..8498276 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -490,21 +490,6 @@ static int mxs_spi_transfer_one(struct spi_master *master,
return status;
}
-static bool mxs_ssp_dma_filter(struct dma_chan *chan, void *param)
-{
- struct mxs_ssp *ssp = param;
-
- if (!mxs_dma_is_apbh(chan))
- return false;
-
- if (chan->chan_id != ssp->dma_channel)
- return false;
-
- chan->private = &ssp->dma_data;
-
- return true;
-}
-
static const struct of_device_id mxs_spi_dt_ids[] = {
{ .compatible = "fsl,imx23-spi", .data = (void *) IMX23_SSP, },
{ .compatible = "fsl,imx28-spi", .data = (void *) IMX28_SSP, },
@@ -520,13 +505,12 @@ static int mxs_spi_probe(struct platform_device *pdev)
struct spi_master *master;
struct mxs_spi *spi;
struct mxs_ssp *ssp;
- struct resource *iores, *dmares;
+ struct resource *iores;
struct pinctrl *pinctrl;
struct clk *clk;
void __iomem *base;
- int devid, dma_channel, clk_freq;
- int ret = 0, irq_err, irq_dma;
- dma_cap_mask_t mask;
+ int devid, clk_freq;
+ int ret = 0, irq_err;
/*
* Default clock speed for the SPI core. 160MHz seems to
@@ -537,8 +521,7 @@ static int mxs_spi_probe(struct platform_device *pdev)
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq_err = platform_get_irq(pdev, 0);
- irq_dma = platform_get_irq(pdev, 1);
- if (!iores || irq_err < 0 || irq_dma < 0)
+ if (!iores || irq_err < 0)
return -EINVAL;
base = devm_ioremap_resource(&pdev->dev, iores);
@@ -553,32 +536,11 @@ static int mxs_spi_probe(struct platform_device *pdev)
if (IS_ERR(clk))
return PTR_ERR(clk);
- if (np) {
- devid = (enum mxs_ssp_id) of_id->data;
- /*
- * TODO: This is a temporary solution and should be changed
- * to use generic DMA binding later when the helpers get in.
- */
- ret = of_property_read_u32(np, "fsl,ssp-dma-channel",
- &dma_channel);
- if (ret) {
- dev_err(&pdev->dev,
- "Failed to get DMA channel\n");
- return -EINVAL;
- }
-
- ret = of_property_read_u32(np, "clock-frequency",
- &clk_freq);
- if (ret)
- clk_freq = clk_freq_default;
- } else {
- dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!dmares)
- return -EINVAL;
- devid = pdev->id_entry->driver_data;
- dma_channel = dmares->start;
+ devid = (enum mxs_ssp_id) of_id->data;
+ ret = of_property_read_u32(np, "clock-frequency",
+ &clk_freq);
+ if (ret)
clk_freq = clk_freq_default;
- }
master = spi_alloc_master(&pdev->dev, sizeof(*spi));
if (!master)
@@ -597,7 +559,6 @@ static int mxs_spi_probe(struct platform_device *pdev)
ssp->clk = clk;
ssp->base = base;
ssp->devid = devid;
- ssp->dma_channel = dma_channel;
init_completion(&spi->c);
@@ -606,10 +567,7 @@ static int mxs_spi_probe(struct platform_device *pdev)
if (ret)
goto out_master_free;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- ssp->dma_data.chan_irq = irq_dma;
- ssp->dmach = dma_request_channel(mask, mxs_ssp_dma_filter, ssp);
+ ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
if (!ssp->dmach) {
dev_err(ssp->dev, "Failed to request DMA\n");
ret = -ENODEV;
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index b0fe393..371cc66f 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -1139,6 +1139,35 @@ err_no_rxchan:
return -ENODEV;
}
+static int pl022_dma_autoprobe(struct pl022 *pl022)
+{
+ struct device *dev = &pl022->adev->dev;
+
+ /* automatically configure DMA channels from platform, normally using DT */
+ pl022->dma_rx_channel = dma_request_slave_channel(dev, "rx");
+ if (!pl022->dma_rx_channel)
+ goto err_no_rxchan;
+
+ pl022->dma_tx_channel = dma_request_slave_channel(dev, "tx");
+ if (!pl022->dma_tx_channel)
+ goto err_no_txchan;
+
+ pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!pl022->dummypage)
+ goto err_no_dummypage;
+
+ return 0;
+
+err_no_dummypage:
+ dma_release_channel(pl022->dma_tx_channel);
+ pl022->dma_tx_channel = NULL;
+err_no_txchan:
+ dma_release_channel(pl022->dma_rx_channel);
+ pl022->dma_rx_channel = NULL;
+err_no_rxchan:
+ return -ENODEV;
+}
+
static void terminate_dma(struct pl022 *pl022)
{
struct dma_chan *rxchan = pl022->dma_rx_channel;
@@ -1167,6 +1196,11 @@ static inline int configure_dma(struct pl022 *pl022)
return -ENODEV;
}
+static inline int pl022_dma_autoprobe(struct pl022 *pl022)
+{
+ return 0;
+}
+
static inline int pl022_dma_probe(struct pl022 *pl022)
{
return 0;
@@ -2226,8 +2260,13 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
goto err_no_irq;
}
- /* Get DMA channels */
- if (platform_info->enable_dma) {
+ /* Get DMA channels, try autoconfiguration first */
+ status = pl022_dma_autoprobe(pl022);
+
+ /* If that failed, use channels from platform_info */
+ if (status == 0)
+ platform_info->enable_dma = 1;
+ else if (platform_info->enable_dma) {
status = pl022_dma_probe(pl022);
if (status != 0)
platform_info->enable_dma = 0;
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
index b14a557..b040200 100644
--- a/drivers/staging/android/logger.c
+++ b/drivers/staging/android/logger.c
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
+#include <linux/aio.h>
#include "logger.h"
#include <asm/ioctls.h>
diff --git a/drivers/staging/comedi/proc.c b/drivers/staging/comedi/proc.c
index 362c214..886c202 100644
--- a/drivers/staging/comedi/proc.c
+++ b/drivers/staging/comedi/proc.c
@@ -31,17 +31,15 @@
#include "comedidev.h"
#include "comedi_internal.h"
#include <linux/proc_fs.h>
-#include <linux/string.h>
+#include <linux/seq_file.h>
-static int comedi_read(char *buf, char **start, off_t offset, int len,
- int *eof, void *data)
+static int comedi_read(struct seq_file *m, void *v)
{
int i;
int devices_q = 0;
- int l = 0;
struct comedi_driver *driv;
- l += sprintf(buf + l,
+ seq_printf(m,
"comedi version " COMEDI_RELEASE "\n"
"format string: %s\n",
"\"%2d: %-20s %-20s %4d\", i, "
@@ -49,42 +47,51 @@ static int comedi_read(char *buf, char **start, off_t offset, int len,
for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
struct comedi_device *dev = comedi_dev_from_minor(i);
-
if (!dev)
continue;
if (dev->attached) {
devices_q = 1;
- l += sprintf(buf + l, "%2d: %-20s %-20s %4d\n",
- i,
- dev->driver->driver_name,
- dev->board_name, dev->n_subdevices);
+ seq_printf(m, "%2d: %-20s %-20s %4d\n",
+ i, dev->driver->driver_name,
+ dev->board_name, dev->n_subdevices);
}
}
if (!devices_q)
- l += sprintf(buf + l, "no devices\n");
+ seq_puts(m, "no devices\n");
for (driv = comedi_drivers; driv; driv = driv->next) {
- l += sprintf(buf + l, "%s:\n", driv->driver_name);
- for (i = 0; i < driv->num_names; i++) {
- l += sprintf(buf + l, " %s\n",
- *(char **)((char *)driv->board_name +
- i * driv->offset));
- }
+ seq_printf(m, "%s:\n", driv->driver_name);
+ for (i = 0; i < driv->num_names; i++)
+ seq_printf(m, " %s\n",
+ *(char **)((char *)driv->board_name +
+ i * driv->offset));
+
if (!driv->num_names)
- l += sprintf(buf + l, " %s\n", driv->driver_name);
+ seq_printf(m, " %s\n", driv->driver_name);
}
- return l;
+ return 0;
}
-void comedi_proc_init(void)
+/*
+ * seq_file wrappers for procfile show routines.
+ */
+static int comedi_proc_open(struct inode *inode, struct file *file)
{
- struct proc_dir_entry *comedi_proc;
+ return single_open(file, comedi_read, NULL);
+}
+
+static const struct file_operations comedi_proc_fops = {
+ .open = comedi_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
- comedi_proc = create_proc_entry("comedi", S_IFREG | S_IRUGO, NULL);
- if (comedi_proc)
- comedi_proc->read_proc = comedi_read;
+void comedi_proc_init(void)
+{
+ proc_create("comedi", 0644, NULL, &comedi_proc_fops);
}
void comedi_proc_cleanup(void)
diff --git a/drivers/staging/csr/csr_wifi_hip_udi.c b/drivers/staging/csr/csr_wifi_hip_udi.c
index a65b822..a6b006b 100644
--- a/drivers/staging/csr/csr_wifi_hip_udi.c
+++ b/drivers/staging/csr/csr_wifi_hip_udi.c
@@ -24,10 +24,50 @@
*
* ---------------------------------------------------------------------------
*/
+#include <linux/seq_file.h>
#include "csr_wifi_hip_unifi.h"
#include "csr_wifi_hip_card.h"
+static void unifi_print_unsafe_sdio_status(card_t *card, struct seq_file *m)
+{
+#ifdef CSR_UNSAFE_SDIO_ACCESS
+ s32 iostate;
+ CsrResult r;
+ static const char *const states[] = {
+ "AWAKE", "DROWSY", "TORPID"
+ };
+#define SHARED_READ_RETRY_LIMIT 10
+ u8 b;
+ int i;
+
+ seq_printf(m, "Host State: %s\n", states[card->host_state]);
+
+ r = unifi_check_io_status(card, &iostate);
+ if (iostate == 1) {
+ seq_puts(m, "I/O Check: F1 disabled\n");
+ } else {
+ if (iostate == 1)
+ seq_puts(m, "I/O Check: pending interrupt\n");
+
+ seq_printf(m, "BH reason interrupt = %d\n", card->bh_reason_unifi);
+ seq_printf(m, "BH reason host = %d\n", card->bh_reason_host);
+
+ for (i = 0; i < SHARED_READ_RETRY_LIMIT; i++) {
+ r = unifi_read_8_or_16(card, card->sdio_ctrl_addr + 2, &b);
+ if (r == CSR_RESULT_SUCCESS && !(b & 0x80)) {
+ seq_printf(m, "fhsr: %u (driver thinks is %u)\n",
+ b, card->from_host_signals_r);
+ break;
+ }
+ }
+
+ iostate = unifi_read_shared_count(card, card->sdio_ctrl_addr + 4);
+ seq_printf(m, "thsw: %u (driver thinks is %u)\n",
+ iostate, card->to_host_signals_w);
+ }
+#endif
+}
+
/*
* ---------------------------------------------------------------------------
* unifi_print_status
@@ -41,228 +81,93 @@
* None.
* ---------------------------------------------------------------------------
*/
-s32 unifi_print_status(card_t *card, char *str, s32 *remain)
+s32 unifi_print_status(card_t *card, struct seq_file *m)
{
- char *p = str;
- sdio_config_data_t *cfg;
- u16 i, n;
- s32 remaining = *remain;
- s32 written;
-#ifdef CSR_UNSAFE_SDIO_ACCESS
- s32 iostate;
- CsrResult r;
- static const char *const states[] = {
- "AWAKE", "DROWSY", "TORPID"
- };
- #define SHARED_READ_RETRY_LIMIT 10
- u8 b;
-#endif
-
- if (remaining <= 0)
- {
- return 0;
- }
-
- i = n = 0;
- written = scnprintf(p, remaining, "Chip ID %u\n",
- (u16)card->chip_id);
- UNIFI_SNPRINTF_RET(p, remaining, written);
- written = scnprintf(p, remaining, "Chip Version %04X\n",
- card->chip_version);
- UNIFI_SNPRINTF_RET(p, remaining, written);
- written = scnprintf(p, remaining, "HIP v%u.%u\n",
- (card->config_data.version >> 8) & 0xFF,
- card->config_data.version & 0xFF);
- UNIFI_SNPRINTF_RET(p, remaining, written);
- written = scnprintf(p, remaining, "Build %u: %s\n",
- card->build_id, card->build_id_string);
- UNIFI_SNPRINTF_RET(p, remaining, written);
-
- cfg = &card->config_data;
-
- written = scnprintf(p, remaining, "sdio ctrl offset %u\n",
- cfg->sdio_ctrl_offset);
- UNIFI_SNPRINTF_RET(p, remaining, written);
- written = scnprintf(p, remaining, "fromhost sigbuf handle %u\n",
- cfg->fromhost_sigbuf_handle);
- UNIFI_SNPRINTF_RET(p, remaining, written);
- written = scnprintf(p, remaining, "tohost_sigbuf_handle %u\n",
- cfg->tohost_sigbuf_handle);
- UNIFI_SNPRINTF_RET(p, remaining, written);
- written = scnprintf(p, remaining, "num_fromhost_sig_frags %u\n",
- cfg->num_fromhost_sig_frags);
- UNIFI_SNPRINTF_RET(p, remaining, written);
- written = scnprintf(p, remaining, "num_tohost_sig_frags %u\n",
- cfg->num_tohost_sig_frags);
- UNIFI_SNPRINTF_RET(p, remaining, written);
- written = scnprintf(p, remaining, "num_fromhost_data_slots %u\n",
- cfg->num_fromhost_data_slots);
- UNIFI_SNPRINTF_RET(p, remaining, written);
- written = scnprintf(p, remaining, "num_tohost_data_slots %u\n",
- cfg->num_tohost_data_slots);
- UNIFI_SNPRINTF_RET(p, remaining, written);
- written = scnprintf(p, remaining, "data_slot_size %u\n",
- cfg->data_slot_size);
- UNIFI_SNPRINTF_RET(p, remaining, written);
-
- /* Added by protocol version 0x0001 */
- written = scnprintf(p, remaining, "overlay_size %u\n",
- (u16)cfg->overlay_size);
- UNIFI_SNPRINTF_RET(p, remaining, written);
-
- /* Added by protocol version 0x0300 */
- written = scnprintf(p, remaining, "data_slot_round %u\n",
- cfg->data_slot_round);
- UNIFI_SNPRINTF_RET(p, remaining, written);
- written = scnprintf(p, remaining, "sig_frag_size %u\n",
- cfg->sig_frag_size);
- UNIFI_SNPRINTF_RET(p, remaining, written);
-
- /* Added by protocol version 0x0300 */
- written = scnprintf(p, remaining, "tohost_sig_pad %u\n",
- cfg->tohost_signal_padding);
- UNIFI_SNPRINTF_RET(p, remaining, written);
-
- written = scnprintf(p, remaining, "\nInternal state:\n");
- UNIFI_SNPRINTF_RET(p, remaining, written);
-
- written = scnprintf(p, remaining, "Last PHY PANIC: %04x:%04x\n",
- card->last_phy_panic_code, card->last_phy_panic_arg);
- UNIFI_SNPRINTF_RET(p, remaining, written);
- written = scnprintf(p, remaining, "Last MAC PANIC: %04x:%04x\n",
- card->last_mac_panic_code, card->last_mac_panic_arg);
- UNIFI_SNPRINTF_RET(p, remaining, written);
-
- written = scnprintf(p, remaining, "fhsr: %u\n",
- (u16)card->from_host_signals_r);
- UNIFI_SNPRINTF_RET(p, remaining, written);
- written = scnprintf(p, remaining, "fhsw: %u\n",
- (u16)card->from_host_signals_w);
- UNIFI_SNPRINTF_RET(p, remaining, written);
- written = scnprintf(p, remaining, "thsr: %u\n",
- (u16)card->to_host_signals_r);
- UNIFI_SNPRINTF_RET(p, remaining, written);
- written = scnprintf(p, remaining, "thsw: %u\n",
- (u16)card->to_host_signals_w);
- UNIFI_SNPRINTF_RET(p, remaining, written);
- written = scnprintf(p, remaining,
- "fh buffer contains: %d signals, %td bytes\n",
- card->fh_buffer.count,
- card->fh_buffer.ptr - card->fh_buffer.buf);
- UNIFI_SNPRINTF_RET(p, remaining, written);
-
- written = scnprintf(p, remaining, "paused: ");
- UNIFI_SNPRINTF_RET(p, remaining, written);
- for (i = 0; i < sizeof(card->tx_q_paused_flag) / sizeof(card->tx_q_paused_flag[0]); i++)
- {
- written = scnprintf(p, remaining, card->tx_q_paused_flag[i]?"1" : "0");
- UNIFI_SNPRINTF_RET(p, remaining, written);
- }
- written = scnprintf(p, remaining, "\n");
- UNIFI_SNPRINTF_RET(p, remaining, written);
-
- written = scnprintf(p, remaining,
- "fh command q: %u waiting, %u free of %u:\n",
- CSR_WIFI_HIP_Q_SLOTS_USED(&card->fh_command_queue),
- CSR_WIFI_HIP_Q_SLOTS_FREE(&card->fh_command_queue),
- UNIFI_SOFT_COMMAND_Q_LENGTH);
- UNIFI_SNPRINTF_RET(p, remaining, written);
- for (i = 0; i < UNIFI_NO_OF_TX_QS; i++)
- {
- written = scnprintf(p, remaining,
- "fh traffic q[%u]: %u waiting, %u free of %u:\n",
- i,
- CSR_WIFI_HIP_Q_SLOTS_USED(&card->fh_traffic_queue[i]),
- CSR_WIFI_HIP_Q_SLOTS_FREE(&card->fh_traffic_queue[i]),
- UNIFI_SOFT_TRAFFIC_Q_LENGTH);
- UNIFI_SNPRINTF_RET(p, remaining, written);
- }
-
- written = scnprintf(p, remaining, "fh data slots free: %u\n",
- card->from_host_data?CardGetFreeFromHostDataSlots(card) : 0);
- UNIFI_SNPRINTF_RET(p, remaining, written);
-
-
- written = scnprintf(p, remaining, "From host data slots:");
- UNIFI_SNPRINTF_RET(p, remaining, written);
- n = card->config_data.num_fromhost_data_slots;
- for (i = 0; i < n && card->from_host_data; i++)
- {
- written = scnprintf(p, remaining, " %u",
- (u16)card->from_host_data[i].bd.data_length);
- UNIFI_SNPRINTF_RET(p, remaining, written);
- }
- written = scnprintf(p, remaining, "\n");
- UNIFI_SNPRINTF_RET(p, remaining, written);
-
- written = scnprintf(p, remaining, "To host data slots:");
- UNIFI_SNPRINTF_RET(p, remaining, written);
- n = card->config_data.num_tohost_data_slots;
- for (i = 0; i < n && card->to_host_data; i++)
- {
- written = scnprintf(p, remaining, " %u",
- (u16)card->to_host_data[i].data_length);
- UNIFI_SNPRINTF_RET(p, remaining, written);
- }
-
- written = scnprintf(p, remaining, "\n");
- UNIFI_SNPRINTF_RET(p, remaining, written);
-
-#ifdef CSR_UNSAFE_SDIO_ACCESS
- written = scnprintf(p, remaining, "Host State: %s\n", states[card->host_state]);
- UNIFI_SNPRINTF_RET(p, remaining, written);
-
- r = unifi_check_io_status(card, &iostate);
- if (iostate == 1)
- {
- written = scnprintf(p, remaining, "I/O Check: F1 disabled\n");
- UNIFI_SNPRINTF_RET(p, remaining, written);
- }
- else
- {
- if (iostate == 1)
- {
- written = scnprintf(p, remaining, "I/O Check: pending interrupt\n");
- UNIFI_SNPRINTF_RET(p, remaining, written);
- }
-
- written = scnprintf(p, remaining, "BH reason interrupt = %d\n",
- card->bh_reason_unifi);
- UNIFI_SNPRINTF_RET(p, remaining, written);
- written = scnprintf(p, remaining, "BH reason host = %d\n",
- card->bh_reason_host);
- UNIFI_SNPRINTF_RET(p, remaining, written);
-
- for (i = 0; i < SHARED_READ_RETRY_LIMIT; i++)
- {
- r = unifi_read_8_or_16(card, card->sdio_ctrl_addr + 2, &b);
- if ((r == CSR_RESULT_SUCCESS) && (!(b & 0x80)))
- {
- written = scnprintf(p, remaining, "fhsr: %u (driver thinks is %u)\n",
- b, card->from_host_signals_r);
- UNIFI_SNPRINTF_RET(p, remaining, written);
- break;
- }
- }
- iostate = unifi_read_shared_count(card, card->sdio_ctrl_addr + 4);
- written = scnprintf(p, remaining, "thsw: %u (driver thinks is %u)\n",
- iostate, card->to_host_signals_w);
- UNIFI_SNPRINTF_RET(p, remaining, written);
- }
-#endif
-
- written = scnprintf(p, remaining, "\nStats:\n");
- UNIFI_SNPRINTF_RET(p, remaining, written);
- written = scnprintf(p, remaining, "Total SDIO bytes: R=%u W=%u\n",
- card->sdio_bytes_read, card->sdio_bytes_written);
-
- UNIFI_SNPRINTF_RET(p, remaining, written);
- written = scnprintf(p, remaining, "Interrupts generated on card: %u\n",
- card->unifi_interrupt_seq);
- UNIFI_SNPRINTF_RET(p, remaining, written);
-
- *remain = remaining;
- return (p - str);
-} /* unifi_print_status() */
-
-
+ sdio_config_data_t *cfg;
+ u16 i, n;
+
+ i = n = 0;
+ seq_printf(m, "Chip ID %u\n", card->chip_id);
+ seq_printf(m, "Chip Version %04X\n", card->chip_version);
+ seq_printf(m, "HIP v%u.%u\n",
+ (card->config_data.version >> 8) & 0xFF,
+ card->config_data.version & 0xFF);
+ seq_printf(m, "Build %u: %s\n", card->build_id, card->build_id_string);
+
+ cfg = &card->config_data;
+
+ seq_printf(m, "sdio ctrl offset %u\n", cfg->sdio_ctrl_offset);
+ seq_printf(m, "fromhost sigbuf handle %u\n", cfg->fromhost_sigbuf_handle);
+ seq_printf(m, "tohost_sigbuf_handle %u\n", cfg->tohost_sigbuf_handle);
+ seq_printf(m, "num_fromhost_sig_frags %u\n", cfg->num_fromhost_sig_frags);
+ seq_printf(m, "num_tohost_sig_frags %u\n", cfg->num_tohost_sig_frags);
+ seq_printf(m, "num_fromhost_data_slots %u\n", cfg->num_fromhost_data_slots);
+ seq_printf(m, "num_tohost_data_slots %u\n", cfg->num_tohost_data_slots);
+ seq_printf(m, "data_slot_size %u\n", cfg->data_slot_size);
+
+ /* Added by protocol version 0x0001 */
+ seq_printf(m, "overlay_size %u\n", cfg->overlay_size);
+
+ /* Added by protocol version 0x0300 */
+ seq_printf(m, "data_slot_round %u\n", cfg->data_slot_round);
+ seq_printf(m, "sig_frag_size %u\n", cfg->sig_frag_size);
+
+ /* Added by protocol version 0x0300 */
+ seq_printf(m, "tohost_sig_pad %u\n", cfg->tohost_signal_padding);
+
+ seq_puts(m, "\nInternal state:\n");
+
+ seq_printf(m, "Last PHY PANIC: %04x:%04x\n",
+ card->last_phy_panic_code, card->last_phy_panic_arg);
+ seq_printf(m, "Last MAC PANIC: %04x:%04x\n",
+ card->last_mac_panic_code, card->last_mac_panic_arg);
+
+ seq_printf(m, "fhsr: %hu\n", (u16)card->from_host_signals_r);
+ seq_printf(m, "fhsw: %hu\n", (u16)card->from_host_signals_w);
+ seq_printf(m, "thsr: %hu\n", (u16)card->to_host_signals_r);
+ seq_printf(m, "thsw: %hu\n", (u16)card->to_host_signals_w);
+ seq_printf(m, "fh buffer contains: %d signals, %td bytes\n",
+ card->fh_buffer.count,
+ card->fh_buffer.ptr - card->fh_buffer.buf);
+
+ seq_puts(m, "paused: ");
+ for (i = 0; i < ARRAY_SIZE(card->tx_q_paused_flag); i++)
+ seq_printf(m, card->tx_q_paused_flag[i] ? "1" : "0");
+ seq_putc(m, '\n');
+
+ seq_printf(m, "fh command q: %u waiting, %u free of %u:\n",
+ CSR_WIFI_HIP_Q_SLOTS_USED(&card->fh_command_queue),
+ CSR_WIFI_HIP_Q_SLOTS_FREE(&card->fh_command_queue),
+ UNIFI_SOFT_COMMAND_Q_LENGTH);
+
+ for (i = 0; i < UNIFI_NO_OF_TX_QS; i++)
+ seq_printf(m, "fh traffic q[%u]: %u waiting, %u free of %u:\n",
+ i,
+ CSR_WIFI_HIP_Q_SLOTS_USED(&card->fh_traffic_queue[i]),
+ CSR_WIFI_HIP_Q_SLOTS_FREE(&card->fh_traffic_queue[i]),
+ UNIFI_SOFT_TRAFFIC_Q_LENGTH);
+
+ seq_printf(m, "fh data slots free: %u\n",
+ card->from_host_data ? CardGetFreeFromHostDataSlots(card) : 0);
+
+ seq_puts(m, "From host data slots:");
+ n = card->config_data.num_fromhost_data_slots;
+ for (i = 0; i < n && card->from_host_data; i++)
+ seq_printf(m, " %hu", (u16)card->from_host_data[i].bd.data_length);
+ seq_putc(m, '\n');
+
+ seq_puts(m, "To host data slots:");
+ n = card->config_data.num_tohost_data_slots;
+ for (i = 0; i < n && card->to_host_data; i++)
+ seq_printf(m, " %hu", (u16)card->to_host_data[i].data_length);
+ seq_putc(m, '\n');
+
+ unifi_print_unsafe_sdio_status(card, m);
+
+ seq_puts(m, "\nStats:\n");
+ seq_printf(m, "Total SDIO bytes: R=%u W=%u\n",
+ card->sdio_bytes_read, card->sdio_bytes_written);
+
+ seq_printf(m, "Interrupts generated on card: %u\n", card->unifi_interrupt_seq);
+ return 0;
+}
diff --git a/drivers/staging/csr/csr_wifi_hip_unifi_udi.h b/drivers/staging/csr/csr_wifi_hip_unifi_udi.h
index 9d85cfd..4126e85 100644
--- a/drivers/staging/csr/csr_wifi_hip_unifi_udi.h
+++ b/drivers/staging/csr/csr_wifi_hip_unifi_udi.h
@@ -47,21 +47,6 @@ CsrResult unifi_remove_udi_hook(card_t *card, udi_func_t udi_fn);
* This is used in the linux /proc interface and might be useful
* in other systems.
*/
-s32 unifi_print_status(card_t *card, char *str, s32 *remain);
-
-#define UNIFI_SNPRINTF_RET(buf_p, remain, written) \
- do { \
- if (written >= remain) { \
- if (remain >= 2) { \
- buf_p[remain - 2] = '\n'; \
- buf_p[remain - 1] = 0; \
- } \
- buf_p += remain; \
- remain = 0; \
- } else if (written > 0) { \
- buf_p += written; \
- remain -= written; \
- } \
- } while (0)
+s32 unifi_print_status(card_t *card, struct seq_file *m);
#endif /* __CSR_WIFI_HIP_UNIFI_UDI_H__ */
diff --git a/drivers/staging/csr/drv.c b/drivers/staging/csr/drv.c
index 5520d65..bdc2523 100644
--- a/drivers/staging/csr/drv.c
+++ b/drivers/staging/csr/drv.c
@@ -1941,7 +1941,7 @@ uf_sme_queue_message(unifi_priv_t *priv, u8 *buffer, int length)
*
****************************************************************************
*/
-static struct file_operations unifi_fops = {
+static const struct file_operations unifi_fops = {
.owner = THIS_MODULE,
.open = unifi_open,
.release = unifi_release,
@@ -2041,7 +2041,7 @@ void uf_destroy_device_nodes(unifi_priv_t *priv)
* ----------------------------------------------------------------
*/
static int
-uf_create_debug_device(struct file_operations *fops)
+uf_create_debug_device(const struct file_operations *fops)
{
int ret;
diff --git a/drivers/staging/csr/io.c b/drivers/staging/csr/io.c
index af9c28f..fe4a7ba 100644
--- a/drivers/staging/csr/io.c
+++ b/drivers/staging/csr/io.c
@@ -31,6 +31,7 @@
* ---------------------------------------------------------------------------
*/
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include "csr_wifi_hip_unifi.h"
#include "csr_wifi_hip_unifiversion.h"
@@ -76,9 +77,28 @@ DEFINE_SEMAPHORE(Unifi_instance_mutex);
*/
DECLARE_WAIT_QUEUE_HEAD(Unifi_cleanup_wq);
+#ifdef CONFIG_PROC_FS
+/*
+ * seq_file wrappers for procfile show routines.
+ */
+static int uf_proc_show(struct seq_file *m, void *v);
+
+#define UNIFI_DEBUG_TXT_BUFFER (8 * 1024)
+
+static int uf_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open_size(file, uf_proc_show, PDE_DATA(inode),
+ UNIFI_DEBUG_TXT_BUFFER);
+}
+
+static const struct file_operations uf_proc_fops = {
+ .open = uf_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
-static int uf_read_proc(char *page, char **start, off_t offset, int count,
- int *eof, void *data);
+#endif /* CONFIG_PROC_FS */
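
single_open_size(), used in uf_proc_open() above, behaves like single_open() but preallocates the seq_file buffer, which suits show routines known to emit several kilobytes in one pass. A stripped-down sketch of the pairing with proc_create_data()/PDE_DATA() (sample_* names are hypothetical):

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#define SAMPLE_BUF_SIZE (8 * 1024)

static int sample_show(struct seq_file *m, void *v)
{
	/* per-entry data handed over at proc_create_data() time */
	long instance = (long)m->private;

	seq_printf(m, "instance %ld status...\n", instance);
	return 0;
}

static int sample_open(struct inode *inode, struct file *file)
{
	/* one up-front allocation instead of seq_file growing its buffer */
	return single_open_size(file, sample_show, PDE_DATA(inode),
				SAMPLE_BUF_SIZE);
}

static const struct file_operations sample_fops = {
	.open    = sample_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/* in probe: proc_create_data("sample", 0, NULL, &sample_fops,
 *                            (void *)(long)instance); */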
#ifdef CSR_WIFI_RX_PATH_SPLIT
@@ -327,8 +347,8 @@ register_unifi_sdio(CsrSdioFunction *sdio_dev, int bus_id, struct device *dev)
* The following complex casting is in place in order to eliminate 64-bit compilation warning
* "cast to/from pointer from/to integer of different size"
*/
- if (!create_proc_read_entry(priv->proc_entry_name, 0, 0,
- uf_read_proc, (void *)(long)priv->instance))
+ if (!proc_create_data(priv->proc_entry_name, 0, NULL,
+ &uf_proc_fops, (void *)(long)priv->instance))
{
unifi_error(priv, "unifi: can't create /proc/driver/unifi\n");
}
@@ -827,7 +847,7 @@ uf_put_instance(int inst)
/*
* ---------------------------------------------------------------------------
- * uf_read_proc
+ * uf_proc_show
*
* Read method for driver node in /proc/driver/unifi0
*
@@ -844,107 +864,54 @@ uf_put_instance(int inst)
* ---------------------------------------------------------------------------
*/
#ifdef CONFIG_PROC_FS
-static int
-uf_read_proc(char *page, char **start, off_t offset, int count,
- int *eof, void *data)
+static int uf_proc_show(struct seq_file *m, void *v)
{
-#define UNIFI_DEBUG_TXT_BUFFER 8*1024
- unifi_priv_t *priv;
- int actual_amount_to_copy;
- char *p, *orig_p;
- s32 remain = UNIFI_DEBUG_TXT_BUFFER;
- s32 written;
- int i;
-
- /*
- * The following complex casting is in place in order to eliminate 64-bit compilation warning
- * "cast to/from pointer from/to integer of different size"
- */
- priv = uf_find_instance((int)(long)data);
- if (!priv) {
- return 0;
- }
-
- p = kmalloc( UNIFI_DEBUG_TXT_BUFFER, GFP_KERNEL );
-
- orig_p = p;
-
- written = scnprintf(p, remain, "UniFi SDIO Driver: %s %s %s\n",
- CSR_WIFI_VERSION, __DATE__, __TIME__);
- UNIFI_SNPRINTF_RET(p, remain, written);
+ unifi_priv_t *priv;
+ int i;
+
+ /*
+ * The following complex casting is in place in order to eliminate
+ * 64-bit compilation warning "cast to/from pointer from/to integer of
+ * different size"
+ */
+ priv = uf_find_instance((long)m->private);
+ if (!priv)
+ return 0;
+
+ seq_printf(m, "UniFi SDIO Driver: %s %s %s\n",
+ CSR_WIFI_VERSION, __DATE__, __TIME__);
#ifdef CSR_SME_USERSPACE
- written = scnprintf(p, remain, "SME: CSR userspace ");
- UNIFI_SNPRINTF_RET(p, remain, written);
+ seq_puts(m, "SME: CSR userspace ");
#ifdef CSR_SUPPORT_WEXT
- written = scnprintf(p, remain, "with WEXT support\n");
+ seq_puts(m, "with WEXT support\n");
#else
- written = scnprintf(p, remain, "\n");
+ seq_putc(m, '\n');
#endif /* CSR_SUPPORT_WEXT */
- UNIFI_SNPRINTF_RET(p, remain, written);
#endif /* CSR_SME_USERSPACE */
#ifdef CSR_NATIVE_LINUX
- written = scnprintf(p, remain, "SME: native\n");
- UNIFI_SNPRINTF_RET(p, remain, written);
+ seq_puts(m, "SME: native\n");
#endif
#ifdef CSR_SUPPORT_SME
- written = scnprintf(p, remain,
- "Firmware (ROM) build:%u, Patch:%u\n",
- priv->card_info.fw_build,
- priv->sme_versions.firmwarePatch);
- UNIFI_SNPRINTF_RET(p, remain, written);
+ seq_printf(m, "Firmware (ROM) build:%u, Patch:%u\n",
+ priv->card_info.fw_build,
+ priv->sme_versions.firmwarePatch);
#endif
- p += unifi_print_status(priv->card, p, &remain);
-
- written = scnprintf(p, remain, "Last dbg str: %s\n",
- priv->last_debug_string);
- UNIFI_SNPRINTF_RET(p, remain, written);
-
- written = scnprintf(p, remain, "Last dbg16:");
- UNIFI_SNPRINTF_RET(p, remain, written);
- for (i = 0; i < 8; i++) {
- written = scnprintf(p, remain, " %04X",
- priv->last_debug_word16[i]);
- UNIFI_SNPRINTF_RET(p, remain, written);
- }
- written = scnprintf(p, remain, "\n");
- UNIFI_SNPRINTF_RET(p, remain, written);
- written = scnprintf(p, remain, " ");
- UNIFI_SNPRINTF_RET(p, remain, written);
- for (; i < 16; i++) {
- written = scnprintf(p, remain, " %04X",
- priv->last_debug_word16[i]);
- UNIFI_SNPRINTF_RET(p, remain, written);
- }
- written = scnprintf(p, remain, "\n");
- UNIFI_SNPRINTF_RET(p, remain, written);
- *start = page;
-
- written = UNIFI_DEBUG_TXT_BUFFER - remain;
-
- if( offset >= written )
- {
- *eof = 1;
- kfree( orig_p );
- return(0);
- }
-
- if( offset + count > written )
- {
- actual_amount_to_copy = written - offset;
- *eof = 1;
- }
- else
- {
- actual_amount_to_copy = count;
- }
- memcpy( page, &(orig_p[offset]), actual_amount_to_copy );
+ unifi_print_status(priv->card, m);
- kfree( orig_p );
+ seq_printf(m, "Last dbg str: %s\n", priv->last_debug_string);
- return( actual_amount_to_copy );
-} /* uf_read_proc() */
+ seq_puts(m, "Last dbg16:");
+ for (i = 0; i < 8; i++)
+ seq_printf(m, " %04X", priv->last_debug_word16[i]);
+ seq_putc(m, '\n');
+ seq_puts(m, " ");
+ for (; i < 16; i++)
+ seq_printf(m, " %04X", priv->last_debug_word16[i]);
+ seq_putc(m, '\n');
+ return 0;
+}
#endif
diff --git a/drivers/staging/cxt1e1/Makefile b/drivers/staging/cxt1e1/Makefile
index e99b823..b9ccb76 100644
--- a/drivers/staging/cxt1e1/Makefile
+++ b/drivers/staging/cxt1e1/Makefile
@@ -12,8 +12,9 @@ cxt1e1-y := \
linux.o \
functions.o \
hwprobe.o \
- sbeproc.o \
pmc93x6_eeprom.o \
sbecrc.o \
comet_tables.o \
sbeid.o
+
+cxt1e1-$(CONFIG_PROC_FS) += sbeproc.o
diff --git a/drivers/staging/cxt1e1/sbeproc.c b/drivers/staging/cxt1e1/sbeproc.c
index f42531c..9361dd8 100644
--- a/drivers/staging/cxt1e1/sbeproc.c
+++ b/drivers/staging/cxt1e1/sbeproc.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/sched.h>
#include <asm/uaccess.h>
#include "pmcc4_sysdep.h"
@@ -26,332 +27,193 @@
#include "pmcc4_private.h"
#include "sbeproc.h"
-/* forwards */
-void sbecom_get_brdinfo (ci_t *, struct sbe_brd_info *, u_int8_t *);
+extern void sbecom_get_brdinfo(ci_t *, struct sbe_brd_info *, u_int8_t *);
extern struct s_hdw_info hdw_info[MAX_BOARDS];
-#ifdef CONFIG_PROC_FS
-
-/********************************************************************/
-/* procfs stuff */
-/********************************************************************/
-
-
-void
-sbecom_proc_brd_cleanup (ci_t * ci)
+void sbecom_proc_brd_cleanup(ci_t *ci)
{
- if (ci->dir_dev)
- {
- char dir[7 + SBE_IFACETMPL_SIZE + 1];
- snprintf(dir, sizeof(dir), "driver/%s", ci->devname);
- remove_proc_entry("info", ci->dir_dev);
- remove_proc_entry(dir, NULL);
- ci->dir_dev = NULL;
- }
+ if (ci->dir_dev) {
+ char dir[7 + SBE_IFACETMPL_SIZE + 1];
+ snprintf(dir, sizeof(dir), "driver/%s", ci->devname);
+ remove_proc_entry("info", ci->dir_dev);
+ remove_proc_entry(dir, NULL);
+ ci->dir_dev = NULL;
+ }
}
-
-static int
-sbecom_proc_get_sbe_info (char *buffer, char **start, off_t offset,
- int length, int *eof, void *priv)
+static void sbecom_proc_get_brdinfo(ci_t *ci, struct sbe_brd_info *bip)
{
- ci_t *ci = (ci_t *) priv;
- int len = 0;
- char *spd;
- struct sbe_brd_info *bip;
-
- if (!(bip = OS_kmalloc (sizeof (struct sbe_brd_info))))
- {
- return -ENOMEM;
- }
-#if 0
- /** RLD DEBUG **/
- pr_info(">> sbecom_proc_get_sbe_info: entered, offset %d. length %d.\n",
- (int) offset, (int) length);
-#endif
-
- {
- hdw_info_t *hi = &hdw_info[ci->brdno];
-
- u_int8_t *bsn = 0;
-
- switch (hi->promfmt)
- {
- case PROM_FORMAT_TYPE1:
- bsn = (u_int8_t *) hi->mfg_info.pft1.Serial;
- break;
- case PROM_FORMAT_TYPE2:
- bsn = (u_int8_t *) hi->mfg_info.pft2.Serial;
- break;
- }
-
- sbecom_get_brdinfo (ci, bip, bsn);
- }
-
-#if 0
- /** RLD DEBUG **/
- pr_info(">> sbecom_get_brdinfo: returned, first_if %p <%s> last_if %p <%s>\n",
- (char *) &bip->first_iname, (char *) &bip->first_iname,
- (char *) &bip->last_iname, (char *) &bip->last_iname);
-#endif
- len += sprintf (buffer + len, "Board Type: ");
- switch (bip->brd_id)
- {
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T3):
- len += sprintf (buffer + len, "wanPMC-C1T3");
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1):
- len += sprintf (buffer + len, "wanPTMC-256T3 <E1>");
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1):
- len += sprintf (buffer + len, "wanPTMC-256T3 <T1>");
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_C24TE1):
- len += sprintf (buffer + len, "wanPTMC-C24TE1");
- break;
-
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1):
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1_L):
- len += sprintf (buffer + len, "wanPMC-C4T1E1");
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1):
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1_L):
- len += sprintf (buffer + len, "wanPMC-C2T1E1");
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1):
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1_L):
- len += sprintf (buffer + len, "wanPMC-C1T1E1");
- break;
-
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1):
- len += sprintf (buffer + len, "wanPCI-C4T1E1");
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1):
- len += sprintf (buffer + len, "wanPCI-C2T1E1");
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1):
- len += sprintf (buffer + len, "wanPCI-C1T1E1");
- break;
-
- default:
- len += sprintf (buffer + len, "unknown");
- break;
- }
- len += sprintf (buffer + len, " [%08X]\n", bip->brd_id);
+ hdw_info_t *hi = &hdw_info[ci->brdno];
+ u_int8_t *bsn = 0;
+
+ switch (hi->promfmt)
+ {
+ case PROM_FORMAT_TYPE1:
+ bsn = (u_int8_t *) hi->mfg_info.pft1.Serial;
+ break;
+ case PROM_FORMAT_TYPE2:
+ bsn = (u_int8_t *) hi->mfg_info.pft2.Serial;
+ break;
+ }
+
+ sbecom_get_brdinfo (ci, bip, bsn);
+
+ pr_devel(">> sbecom_get_brdinfo: returned, first_if %p <%s> last_if %p <%s>\n",
+ bip->first_iname, bip->first_iname,
+ bip->last_iname, bip->last_iname);
+}
- len += sprintf (buffer + len, "Board Number: %d\n", bip->brdno);
- len += sprintf (buffer + len, "Hardware ID: 0x%02X\n", ci->hdw_bid);
- len += sprintf (buffer + len, "Board SN: %06X\n", bip->brd_sn);
- len += sprintf(buffer + len, "Board MAC: %pMF\n",
- bip->brd_mac_addr);
- len += sprintf (buffer + len, "Ports: %d\n", ci->max_port);
- len += sprintf (buffer + len, "Channels: %d\n", bip->brd_chan_cnt);
+/*
+ * Describe the driver state through /proc
+ */
+static int sbecom_proc_get_sbe_info(struct seq_file *m, void *v)
+{
+ ci_t *ci = m->private;
+ char *spd;
+ struct sbe_brd_info *bip;
+
+ if (!(bip = OS_kmalloc(sizeof(struct sbe_brd_info))))
+ return -ENOMEM;
+
+ pr_devel(">> sbecom_proc_get_sbe_info: entered\n");
+
+ sbecom_proc_get_brdinfo(ci, bip);
+
+ seq_puts(m, "Board Type: ");
+ switch (bip->brd_id) {
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T3):
+ seq_puts(m, "wanPMC-C1T3");
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1):
+ seq_puts(m, "wanPTMC-256T3 <E1>");
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1):
+ seq_puts(m, "wanPTMC-256T3 <T1>");
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_C24TE1):
+ seq_puts(m, "wanPTMC-C24TE1");
+ break;
+
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1):
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1_L):
+ seq_puts(m, "wanPMC-C4T1E1");
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1):
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1_L):
+ seq_puts(m, "wanPMC-C2T1E1");
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1):
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1_L):
+ seq_puts(m, "wanPMC-C1T1E1");
+ break;
+
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1):
+ seq_puts(m, "wanPCI-C4T1E1");
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1):
+ seq_puts(m, "wanPCI-C2T1E1");
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1):
+ seq_puts(m, "wanPCI-C1T1E1");
+ break;
+
+ default:
+ seq_puts(m, "unknown");
+ break;
+ }
+
+ seq_printf(m, " [%08X]\n", bip->brd_id);
+
+ seq_printf(m, "Board Number: %d\n", bip->brdno);
+ seq_printf(m, "Hardware ID: 0x%02X\n", ci->hdw_bid);
+ seq_printf(m, "Board SN: %06X\n", bip->brd_sn);
+ seq_printf(m, "Board MAC: %pMF\n", bip->brd_mac_addr);
+ seq_printf(m, "Ports: %d\n", ci->max_port);
+ seq_printf(m, "Channels: %d\n", bip->brd_chan_cnt);
#if 1
- len += sprintf (buffer + len, "Interface: %s -> %s\n",
- (char *) &bip->first_iname, (char *) &bip->last_iname);
+ seq_printf(m, "Interface: %s -> %s\n",
+ bip->first_iname, bip->last_iname);
#else
- len += sprintf (buffer + len, "Interface: <not available> 1st %p lst %p\n",
- (char *) &bip->first_iname, (char *) &bip->last_iname);
+ seq_printf(m, "Interface: <not available> 1st %p lst %p\n",
+ bip->first_iname, bip->last_iname);
#endif
- switch (bip->brd_pci_speed)
- {
- case BINFO_PCI_SPEED_33:
- spd = "33Mhz";
- break;
- case BINFO_PCI_SPEED_66:
- spd = "66Mhz";
- break;
- default:
- spd = "<not available>";
- break;
- }
- len += sprintf (buffer + len, "PCI Bus Speed: %s\n", spd);
- len += sprintf (buffer + len, "Release: %s\n", ci->release);
+ switch (bip->brd_pci_speed) {
+ case BINFO_PCI_SPEED_33:
+ spd = "33Mhz";
+ break;
+ case BINFO_PCI_SPEED_66:
+ spd = "66Mhz";
+ break;
+ default:
+ spd = "<not available>";
+ break;
+ }
+ seq_printf(m, "PCI Bus Speed: %s\n", spd);
+ seq_printf(m, "Release: %s\n", ci->release);
#ifdef SBE_PMCC4_ENABLE
- {
- extern int cxt1e1_max_mru;
-#if 0
- extern int max_chans_used;
- extern int cxt1e1_max_mtu;
-#endif
- extern int max_rxdesc_used, max_txdesc_used;
-
- len += sprintf (buffer + len, "\ncxt1e1_max_mru: %d\n", cxt1e1_max_mru);
+ {
+ extern int cxt1e1_max_mru;
#if 0
- len += sprintf (buffer + len, "\nmax_chans_used: %d\n", max_chans_used);
- len += sprintf (buffer + len, "cxt1e1_max_mtu: %d\n", cxt1e1_max_mtu);
-#endif
- len += sprintf (buffer + len, "max_rxdesc_used: %d\n", max_rxdesc_used);
- len += sprintf (buffer + len, "max_txdesc_used: %d\n", max_txdesc_used);
- }
-#endif
-
- OS_kfree (bip); /* cleanup */
-
- /***
- * How to be a proc read function
- * ------------------------------
- * Prototype:
- * int f(char *buffer, char **start, off_t offset,
- * int count, int *peof, void *dat)
- *
- * Assume that the buffer is "count" bytes in size.
- *
- * If you know you have supplied all the data you
- * have, set *peof.
- *
- * You have three ways to return data:
- * 0) Leave *start = NULL. (This is the default.)
- * Put the data of the requested offset at that
- * offset within the buffer. Return the number (n)
- * of bytes there are from the beginning of the
- * buffer up to the last byte of data. If the
- * number of supplied bytes (= n - offset) is
- * greater than zero and you didn't signal eof
- * and the reader is prepared to take more data
- * you will be called again with the requested
- * offset advanced by the number of bytes
- * absorbed. This interface is useful for files
- * no larger than the buffer.
- * 1) Set *start = an unsigned long value less than
- * the buffer address but greater than zero.
- * Put the data of the requested offset at the
- * beginning of the buffer. Return the number of
- * bytes of data placed there. If this number is
- * greater than zero and you didn't signal eof
- * and the reader is prepared to take more data
- * you will be called again with the requested
- * offset advanced by *start. This interface is
- * useful when you have a large file consisting
- * of a series of blocks which you want to count
- * and return as wholes.
- * (Hack by Paul.Russell@rustcorp.com.au)
- * 2) Set *start = an address within the buffer.
- * Put the data of the requested offset at *start.
- * Return the number of bytes of data placed there.
- * If this number is greater than zero and you
- * didn't signal eof and the reader is prepared to
- * take more data you will be called again with the
- * requested offset advanced by the number of bytes
- * absorbed.
- */
-
-#if 1
- /* #4 - interpretation of above = set EOF, return len */
- *eof = 1;
+ extern int max_chans_used;
+ extern int cxt1e1_max_mtu;
#endif
+ extern int max_rxdesc_used, max_txdesc_used;
+ seq_printf(m, "\ncxt1e1_max_mru: %d\n", cxt1e1_max_mru);
#if 0
- /*
- * #1 - from net/wireless/atmel.c RLD NOTE -there's something wrong with
- * this plagarized code which results in this routine being called TWICE.
- * The second call returns ZERO, resulting in hidden failure, but at
- * least only a single message set is being displayed.
- */
- if (len <= offset + length)
- *eof = 1;
- *start = buffer + offset;
- len -= offset;
- if (len > length)
- len = length;
- if (len < 0)
- len = 0;
-#endif
-
-#if 0 /* #2 from net/tokenring/olympic.c +
- * lanstreamer.c */
- {
- off_t begin = 0;
- int size = 0;
- off_t pos = 0;
-
- size = len;
- pos = begin + size;
- if (pos < offset)
- {
- len = 0;
- begin = pos;
- }
- *start = buffer + (offset - begin); /* Start of wanted data */
- len -= (offset - begin); /* Start slop */
- if (len > length)
- len = length; /* Ending slop */
- }
+ seq_printf(m, "\nmax_chans_used: %d\n", max_chans_used);
+ seq_printf(m, "cxt1e1_max_mtu: %d\n", cxt1e1_max_mtu);
#endif
-
-#if 0 /* #3 from
- * char/ftape/lowlevel/ftape-proc.c */
- len = strlen (buffer);
- *start = NULL;
- if (offset + length >= len)
- *eof = 1;
- else
- *eof = 0;
-#endif
-
-#if 0
- pr_info(">> proc_fs: returned len = %d., start %p\n", len, start); /* RLD DEBUG */
+ seq_printf(m, "max_rxdesc_used: %d\n", max_rxdesc_used);
+ seq_printf(m, "max_txdesc_used: %d\n", max_txdesc_used);
+ }
#endif
-/***
- using NONE: returns = 314.314.314.
- using #1 : returns = 314, 0.
- using #2 : returns = 314, 0, 0.
- using #3 : returns = 314, 314.
- using #4 : returns = 314, 314.
-***/
+ kfree(bip);
- return len;
+ pr_devel(">> proc_fs: finished\n");
+ return 0;
}
-/* initialize the /proc subsystem for the specific SBE driver */
-
-int __init
-sbecom_proc_brd_init (ci_t * ci)
+/*
+ * seq_file wrappers for procfile show routines.
+ */
+static int sbecom_proc_open(struct inode *inode, struct file *file)
{
- struct proc_dir_entry *e;
- char dir[7 + SBE_IFACETMPL_SIZE + 1];
-
- /* create a directory in the root procfs */
- snprintf(dir, sizeof(dir), "driver/%s", ci->devname);
- ci->dir_dev = proc_mkdir(dir, NULL);
- if (!ci->dir_dev)
- {
- pr_err("Unable to create directory /proc/driver/%s\n", ci->devname);
- goto fail;
- }
- e = create_proc_read_entry ("info", S_IFREG | S_IRUGO,
- ci->dir_dev, sbecom_proc_get_sbe_info, ci);
- if (!e)
- {
- pr_err("Unable to create entry /proc/driver/%s/info\n", ci->devname);
- goto fail;
- }
- return 0;
-
-fail:
- sbecom_proc_brd_cleanup (ci);
- return 1;
+ return single_open(file, sbecom_proc_get_sbe_info, PDE_DATA(inode));
}
-#else /*** ! CONFIG_PROC_FS ***/
+static const struct file_operations sbecom_proc_fops = {
+ .open = sbecom_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
-/* stubbed off dummy routines */
-
-void
-sbecom_proc_brd_cleanup (ci_t * ci)
-{
-}
-
-int __init
-sbecom_proc_brd_init (ci_t * ci)
+/*
+ * Initialize the /proc subsystem for the specific SBE driver
+ */
+int __init sbecom_proc_brd_init(ci_t *ci)
{
- return 0;
-}
-
-#endif /*** CONFIG_PROC_FS ***/
+ char dir[7 + SBE_IFACETMPL_SIZE + 1];
+ snprintf(dir, sizeof(dir), "driver/%s", ci->devname);
+ ci->dir_dev = proc_mkdir(dir, NULL);
+ if (!ci->dir_dev) {
+ pr_err("Unable to create directory /proc/driver/%s\n", ci->devname);
+ goto fail;
+ }
+
+ if (!proc_create_data("info", S_IFREG | S_IRUGO, ci->dir_dev,
+ &sbecom_proc_fops, ci)) {
+ pr_err("Unable to create entry /proc/driver/%s/info\n", ci->devname);
+ goto fail;
+ }
+ return 0;
-/*** End-of-File ***/
+fail:
+ sbecom_proc_brd_cleanup(ci);
+ return 1;
+}
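For reference, the conversion above follows the stock single_open() idiom: the old buffer-and-offset read handler becomes a show routine that only prints into a seq_file, and proc_create_data() stashes the per-board pointer that PDE_DATA() later hands back in open(). A minimal, self-contained sketch of that idiom follows; the foo_* names are illustrative only and are not taken from this patch.

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct foo_board {
	int brdno;
};

static int foo_info_show(struct seq_file *m, void *v)
{
	struct foo_board *brd = m->private;	/* set by single_open() */

	seq_printf(m, "Board Number: %d\n", brd->brdno);
	return 0;
}

static int foo_info_open(struct inode *inode, struct file *file)
{
	/* PDE_DATA() returns the pointer passed to proc_create_data() */
	return single_open(file, foo_info_show, PDE_DATA(inode));
}

static const struct file_operations foo_info_fops = {
	.owner   = THIS_MODULE,
	.open    = foo_info_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int foo_proc_init(struct foo_board *brd)
{
	if (!proc_create_data("driver/foo_info", S_IFREG | S_IRUGO, NULL,
			      &foo_info_fops, brd))
		return -ENOMEM;
	return 0;
}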
diff --git a/drivers/staging/cxt1e1/sbeproc.h b/drivers/staging/cxt1e1/sbeproc.h
index e82be6a..e5c072c 100644
--- a/drivers/staging/cxt1e1/sbeproc.h
+++ b/drivers/staging/cxt1e1/sbeproc.h
@@ -23,10 +23,20 @@
#ifdef CONFIG_PROC_FS
-#ifdef __KERNEL__
void sbecom_proc_brd_cleanup (ci_t *);
int __init sbecom_proc_brd_init (ci_t *);
-#endif /*** __KERNEL__ ***/
+#else
+
+static inline void sbecom_proc_brd_cleanup(ci_t * ci)
+{
+}
+
+static inline int __init sbecom_proc_brd_init(ci_t * ci)
+{
+ return 0;
+}
+
#endif /*** CONFIG_PROC_FS ***/
+
#endif /*** _INC_SBEPROC_H_ ***/
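Keeping the !CONFIG_PROC_FS stubs in the header as static inlines lets callers invoke the functions unconditionally while the compiler discards the empty bodies when procfs is disabled. The same pattern in generic form, with hypothetical foo_* names:

struct foo_board;

#ifdef CONFIG_PROC_FS
int foo_proc_init(struct foo_board *brd);
void foo_proc_cleanup(struct foo_board *brd);
#else
static inline int foo_proc_init(struct foo_board *brd)
{
	return 0;
}
static inline void foo_proc_cleanup(struct foo_board *brd)
{
}
#endif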
diff --git a/drivers/staging/dgrp/dgrp_common.c b/drivers/staging/dgrp/dgrp_common.c
index 3553998..9a9b456 100644
--- a/drivers/staging/dgrp/dgrp_common.c
+++ b/drivers/staging/dgrp/dgrp_common.c
@@ -167,34 +167,3 @@ void dgrp_carrier(struct ch_struct *ch)
ch->ch_flag &= ~CH_PHYS_CD;
}
-
-/**
- * dgrp_chk_perm() -- check permissions for net device
- * @inode: pointer to inode structure for the net communication device
- * @op: operation to be tested
- *
- * The file permissions and ownerships are tested to determine whether
- * the operation "op" is permitted on the file pointed to by the inode.
- * Returns 0 if the operation is permitted, -EACCESS otherwise
- */
-int dgrp_chk_perm(int mode, int op)
-{
- if (!uid_eq(GLOBAL_ROOT_UID, current_euid()))
- mode >>= 6;
- else if (in_egroup_p(GLOBAL_ROOT_GID))
- mode >>= 3;
-
- if ((mode & op & 0007) == op)
- return 0;
-
- if (capable(CAP_SYS_ADMIN))
- return 0;
-
- return -EACCES;
-}
-
-/* dgrp_chk_perm wrapper for permission call in struct inode_operations */
-int dgrp_inode_permission(struct inode *inode, int op)
-{
- return dgrp_chk_perm(inode->i_mode, op);
-}
diff --git a/drivers/staging/dgrp/dgrp_common.h b/drivers/staging/dgrp/dgrp_common.h
index 2832b8e..23aba6c 100644
--- a/drivers/staging/dgrp/dgrp_common.h
+++ b/drivers/staging/dgrp/dgrp_common.h
@@ -49,20 +49,20 @@ extern struct dgrp_poll_data dgrp_poll_data;
extern void dgrp_poll_handler(unsigned long arg);
/* from dgrp_mon_ops.c */
-extern void dgrp_register_mon_hook(struct proc_dir_entry *de);
+extern const struct file_operations dgrp_mon_ops;
/* from dgrp_tty.c */
extern int dgrp_tty_init(struct nd_struct *nd);
extern void dgrp_tty_uninit(struct nd_struct *nd);
/* from dgrp_ports_ops.c */
-extern void dgrp_register_ports_hook(struct proc_dir_entry *de);
+extern const struct file_operations dgrp_ports_ops;
/* from dgrp_net_ops.c */
-extern void dgrp_register_net_hook(struct proc_dir_entry *de);
+extern const struct file_operations dgrp_net_ops;
/* from dgrp_dpa_ops.c */
-extern void dgrp_register_dpa_hook(struct proc_dir_entry *de);
+extern const struct file_operations dgrp_dpa_ops;
extern void dgrp_dpa_data(struct nd_struct *, int, u8 *, int);
/* from dgrp_sysfs.c */
@@ -76,61 +76,6 @@ extern void dgrp_create_tty_sysfs(struct un_struct *un, struct device *c);
extern void dgrp_remove_tty_sysfs(struct device *c);
/* from dgrp_specproc.c */
-/*
- * The list of DGRP entries with r/w capabilities. These
- * magic numbers are used for identification purposes.
- */
-enum {
- DGRP_CONFIG = 1, /* Configure portservers */
- DGRP_NETDIR = 2, /* Directory for "net" devices */
- DGRP_MONDIR = 3, /* Directory for "mon" devices */
- DGRP_PORTSDIR = 4, /* Directory for "ports" devices */
- DGRP_INFO = 5, /* Get info. about the running module */
- DGRP_NODEINFO = 6, /* Get info. about the configured nodes */
- DGRP_DPADIR = 7, /* Directory for the "dpa" devices */
-};
-
-/*
- * Directions for proc handlers
- */
-enum {
- INBOUND = 1, /* Data being written to kernel */
- OUTBOUND = 2, /* Data being read from the kernel */
-};
-
-/**
- * dgrp_proc_entry: structure for dgrp proc dirs
- * @id: ID number associated with this particular entry. Should be
- * unique across all of DGRP.
- * @name: text name associated with the /proc entry
- * @mode: file access permisssions for the /proc entry
- * @child: pointer to table describing a subdirectory for this entry
- * @de: pointer to directory entry for this object once registered. Used
- * to grab the handle of the object for unregistration
- * @excl_sem: semaphore to provide exclusive to struct
- * @excl_cnt: counter of current accesses
- *
- * Each entry in a DGRP proc directory is described with a
- * dgrp_proc_entry structure. A collection of these
- * entries (in an array) represents the members associated
- * with a particular /proc directory, and is referred to
- * as a table. All tables are terminated by an entry with
- * zeros for every member.
- */
-struct dgrp_proc_entry {
- int id; /* Integer identifier */
- const char *name; /* ASCII identifier */
- mode_t mode; /* File access permissions */
- struct dgrp_proc_entry *child; /* Child pointer */
-
- /* file ops to use, pass NULL to use default */
- struct file_operations *proc_file_ops;
-
- struct proc_dir_entry *de; /* proc entry pointer */
- struct semaphore excl_sem; /* Protects exclusive access var */
- int excl_cnt; /* Counts number of curr accesses */
-};
-
extern void dgrp_unregister_proc(void);
extern void dgrp_register_proc(void);
@@ -144,8 +89,6 @@ extern void dgrp_register_proc(void);
*-----------------------------------------------------------------------*/
void dgrp_carrier(struct ch_struct *ch);
-extern int dgrp_inode_permission(struct inode *inode, int op);
-extern int dgrp_chk_perm(int mode, int op);
/*
diff --git a/drivers/staging/dgrp/dgrp_dpa_ops.c b/drivers/staging/dgrp/dgrp_dpa_ops.c
index ca10a33..114799c 100644
--- a/drivers/staging/dgrp/dgrp_dpa_ops.c
+++ b/drivers/staging/dgrp/dgrp_dpa_ops.c
@@ -40,6 +40,7 @@
#include <linux/cred.h>
#include <linux/sched.h>
#include <linux/ratelimit.h>
+#include <linux/slab.h>
#include <asm/unaligned.h>
#include "dgrp_common.h"
@@ -52,7 +53,7 @@ static long dgrp_dpa_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
static unsigned int dgrp_dpa_select(struct file *, struct poll_table_struct *);
-static const struct file_operations dpa_ops = {
+const struct file_operations dgrp_dpa_ops = {
.owner = THIS_MODULE,
.read = dgrp_dpa_read,
.poll = dgrp_dpa_select,
@@ -61,12 +62,6 @@ static const struct file_operations dpa_ops = {
.release = dgrp_dpa_release,
};
-static struct inode_operations dpa_inode_ops = {
- .permission = dgrp_inode_permission
-};
-
-
-
struct digi_node {
uint nd_state; /* Node state: 1 = up, 0 = down. */
uint nd_chan_count; /* Number of channels found */
@@ -111,17 +106,6 @@ struct digi_debug {
#define DIGI_SETDEBUG (('d'<<8) | 247) /* set debug info */
-void dgrp_register_dpa_hook(struct proc_dir_entry *de)
-{
- struct nd_struct *node = de->data;
-
- de->proc_iops = &dpa_inode_ops;
- rcu_assign_pointer(de->proc_fops, &dpa_ops);
-
- node->nd_dpa_de = de;
- spin_lock_init(&node->nd_dpa_lock);
-}
-
/*
* dgrp_dpa_open -- open the DPA device for a particular PortServer
*/
@@ -130,8 +114,6 @@ static int dgrp_dpa_open(struct inode *inode, struct file *file)
struct nd_struct *nd;
int rtn = 0;
- struct proc_dir_entry *de;
-
rtn = try_module_get(THIS_MODULE);
if (!rtn)
return -ENXIO;
@@ -154,12 +136,7 @@ static int dgrp_dpa_open(struct inode *inode, struct file *file)
/*
* Get the node pointer, and fail if it doesn't exist.
*/
- de = PDE(inode);
- if (!de) {
- rtn = -ENXIO;
- goto done;
- }
- nd = (struct nd_struct *)de->data;
+ nd = PDE_DATA(inode);
if (!nd) {
rtn = -ENXIO;
goto done;
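The open() routines no longer need to look up and validate a proc_dir_entry; PDE_DATA(inode) hands back the pointer that was registered with proc_create_data() directly. A minimal sketch of that open pattern, with hypothetical names:

#include <linux/fs.h>
#include <linux/proc_fs.h>

struct foo_node;		/* per-device state, registered at create time */

static int foo_open(struct inode *inode, struct file *file)
{
	/* the pointer handed to proc_create_data() comes straight back */
	struct foo_node *nd = PDE_DATA(inode);

	if (!nd)
		return -ENXIO;

	file->private_data = nd;	/* read()/ioctl() pick it up here */
	return 0;
}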
diff --git a/drivers/staging/dgrp/dgrp_mon_ops.c b/drivers/staging/dgrp/dgrp_mon_ops.c
index b484fcc..d18be41 100644
--- a/drivers/staging/dgrp/dgrp_mon_ops.c
+++ b/drivers/staging/dgrp/dgrp_mon_ops.c
@@ -37,6 +37,7 @@
#include <linux/tty.h>
#include <linux/sched.h>
#include <asm/unaligned.h>
+#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/uaccess.h>
@@ -49,7 +50,7 @@ static ssize_t dgrp_mon_read(struct file *, char __user *, size_t, loff_t *);
static long dgrp_mon_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
-static const struct file_operations mon_ops = {
+const struct file_operations dgrp_mon_ops = {
.owner = THIS_MODULE,
.read = dgrp_mon_read,
.unlocked_ioctl = dgrp_mon_ioctl,
@@ -57,20 +58,6 @@ static const struct file_operations mon_ops = {
.release = dgrp_mon_release,
};
-static struct inode_operations mon_inode_ops = {
- .permission = dgrp_inode_permission
-};
-
-void dgrp_register_mon_hook(struct proc_dir_entry *de)
-{
- struct nd_struct *node = de->data;
-
- de->proc_iops = &mon_inode_ops;
- rcu_assign_pointer(de->proc_fops, &mon_ops);
- node->nd_mon_de = de;
- sema_init(&node->nd_mon_semaphore, 1);
-}
-
/**
* dgrp_mon_open() -- open /proc/dgrp/ports device for a PortServer
* @inode: struct inode *
@@ -81,7 +68,6 @@ void dgrp_register_mon_hook(struct proc_dir_entry *de)
static int dgrp_mon_open(struct inode *inode, struct file *file)
{
struct nd_struct *nd;
- struct proc_dir_entry *de;
struct timeval tv;
uint32_t time;
u8 *buf;
@@ -109,13 +95,7 @@ static int dgrp_mon_open(struct inode *inode, struct file *file)
/*
* Get the node pointer, and fail if it doesn't exist.
*/
- de = PDE(inode);
- if (!de) {
- rtn = -ENXIO;
- goto done;
- }
-
- nd = (struct nd_struct *)de->data;
+ nd = PDE_DATA(inode);
if (!nd) {
rtn = -ENXIO;
goto done;
diff --git a/drivers/staging/dgrp/dgrp_net_ops.c b/drivers/staging/dgrp/dgrp_net_ops.c
index 64f48ff..5b7833f 100644
--- a/drivers/staging/dgrp/dgrp_net_ops.c
+++ b/drivers/staging/dgrp/dgrp_net_ops.c
@@ -35,7 +35,7 @@
#include <linux/module.h>
#include <linux/proc_fs.h>
-#include <linux/types.h>
+#include <linux/slab.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/tty.h>
@@ -72,7 +72,7 @@ static long dgrp_net_ioctl(struct file *file, unsigned int cmd,
static unsigned int dgrp_net_select(struct file *file,
struct poll_table_struct *table);
-static const struct file_operations net_ops = {
+const struct file_operations dgrp_net_ops = {
.owner = THIS_MODULE,
.read = dgrp_net_read,
.write = dgrp_net_write,
@@ -82,23 +82,6 @@ static const struct file_operations net_ops = {
.release = dgrp_net_release,
};
-static struct inode_operations net_inode_ops = {
- .permission = dgrp_inode_permission
-};
-
-void dgrp_register_net_hook(struct proc_dir_entry *de)
-{
- struct nd_struct *node = de->data;
-
- de->proc_iops = &net_inode_ops;
- rcu_assign_pointer(de->proc_fops, &net_ops);
- node->nd_net_de = de;
- sema_init(&node->nd_net_semaphore, 1);
- node->nd_state = NS_CLOSED;
- dgrp_create_node_class_sysfs_files(node);
-}
-
-
/**
* dgrp_dump() -- prints memory for debugging purposes.
* @mem: Memory location which should be printed to the console
@@ -801,7 +784,6 @@ out_err:
static int dgrp_net_open(struct inode *inode, struct file *file)
{
struct nd_struct *nd;
- struct proc_dir_entry *de;
ulong lock_flags;
int rtn;
@@ -825,13 +807,7 @@ static int dgrp_net_open(struct inode *inode, struct file *file)
/*
* Get the node pointer, and fail if it doesn't exist.
*/
- de = PDE(inode);
- if (!de) {
- rtn = -ENXIO;
- goto done;
- }
-
- nd = (struct nd_struct *) de->data;
+ nd = PDE_DATA(inode);
if (!nd) {
rtn = -ENXIO;
goto done;
diff --git a/drivers/staging/dgrp/dgrp_ports_ops.c b/drivers/staging/dgrp/dgrp_ports_ops.c
index f93dc1f..4ce0308 100644
--- a/drivers/staging/dgrp/dgrp_ports_ops.c
+++ b/drivers/staging/dgrp/dgrp_ports_ops.c
@@ -47,7 +47,7 @@
/* File operation declarations */
static int dgrp_ports_open(struct inode *, struct file *);
-static const struct file_operations ports_ops = {
+const struct file_operations dgrp_ports_ops = {
.owner = THIS_MODULE,
.open = dgrp_ports_open,
.read = seq_read,
@@ -55,20 +55,6 @@ static const struct file_operations ports_ops = {
.release = seq_release
};
-static struct inode_operations ports_inode_ops = {
- .permission = dgrp_inode_permission
-};
-
-
-void dgrp_register_ports_hook(struct proc_dir_entry *de)
-{
- struct nd_struct *node = de->data;
-
- de->proc_iops = &ports_inode_ops;
- rcu_assign_pointer(de->proc_fops, &ports_ops);
- node->nd_ports_de = de;
-}
-
static void *dgrp_ports_seq_start(struct seq_file *seq, loff_t *pos)
{
if (*pos == 0)
@@ -163,7 +149,7 @@ static int dgrp_ports_open(struct inode *inode, struct file *file)
rtn = seq_open(file, &ports_seq_ops);
if (!rtn) {
seq = file->private_data;
- seq->private = PDE(inode)->data;
+ seq->private = PDE_DATA(inode);
}
return rtn;
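For full iterator-style files opened with seq_open(), the private pointer is attached to the seq_file after the open succeeds, as done above for the ports file. A compact sketch of the same arrangement (foo_* names are illustrative):

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static void *foo_seq_start(struct seq_file *m, loff_t *pos)
{
	return *pos == 0 ? SEQ_START_TOKEN : NULL;
}

static void *foo_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;			/* one record only, for brevity */
}

static void foo_seq_stop(struct seq_file *m, void *v)
{
}

static int foo_seq_show(struct seq_file *m, void *v)
{
	seq_printf(m, "private data at %p\n", m->private);
	return 0;
}

static const struct seq_operations foo_seq_ops = {
	.start = foo_seq_start,
	.next  = foo_seq_next,
	.stop  = foo_seq_stop,
	.show  = foo_seq_show,
};

static int foo_seq_open(struct inode *inode, struct file *file)
{
	int rtn = seq_open(file, &foo_seq_ops);

	if (!rtn) {
		struct seq_file *seq = file->private_data;

		seq->private = PDE_DATA(inode);
	}
	return rtn;
}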
diff --git a/drivers/staging/dgrp/dgrp_specproc.c b/drivers/staging/dgrp/dgrp_specproc.c
index d66712c..205d80e 100644
--- a/drivers/staging/dgrp/dgrp_specproc.c
+++ b/drivers/staging/dgrp/dgrp_specproc.c
@@ -37,6 +37,7 @@
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/proc_fs.h>
+#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
@@ -44,43 +45,17 @@
#include "dgrp_common.h"
-static struct dgrp_proc_entry dgrp_table[];
static struct proc_dir_entry *dgrp_proc_dir_entry;
static int dgrp_add_id(long id);
static int dgrp_remove_nd(struct nd_struct *nd);
-static void unregister_dgrp_device(struct proc_dir_entry *de);
-static void register_dgrp_device(struct nd_struct *node,
+static struct proc_dir_entry *add_proc_file(struct nd_struct *node,
struct proc_dir_entry *root,
- void (*register_hook)(struct proc_dir_entry *de));
+ const struct file_operations *fops);
/* File operation declarations */
-static int dgrp_gen_proc_open(struct inode *, struct file *);
-static int dgrp_gen_proc_close(struct inode *, struct file *);
static int parse_write_config(char *);
-
-static const struct file_operations dgrp_proc_file_ops = {
- .owner = THIS_MODULE,
- .open = dgrp_gen_proc_open,
- .release = dgrp_gen_proc_close,
-};
-
-static struct inode_operations proc_inode_ops = {
- .permission = dgrp_inode_permission
-};
-
-
-static void register_proc_table(struct dgrp_proc_entry *,
- struct proc_dir_entry *);
-static void unregister_proc_table(struct dgrp_proc_entry *,
- struct proc_dir_entry *);
-
-static struct dgrp_proc_entry dgrp_net_table[];
-static struct dgrp_proc_entry dgrp_mon_table[];
-static struct dgrp_proc_entry dgrp_ports_table[];
-static struct dgrp_proc_entry dgrp_dpa_table[];
-
static ssize_t dgrp_config_proc_write(struct file *file,
const char __user *buffer,
size_t count, loff_t *pos);
@@ -89,7 +64,7 @@ static int dgrp_nodeinfo_proc_open(struct inode *inode, struct file *file);
static int dgrp_info_proc_open(struct inode *inode, struct file *file);
static int dgrp_config_proc_open(struct inode *inode, struct file *file);
-static struct file_operations config_proc_file_ops = {
+static const struct file_operations config_proc_file_ops = {
.owner = THIS_MODULE,
.open = dgrp_config_proc_open,
.read = seq_read,
@@ -98,7 +73,7 @@ static struct file_operations config_proc_file_ops = {
.write = dgrp_config_proc_write,
};
-static struct file_operations info_proc_file_ops = {
+static const struct file_operations info_proc_file_ops = {
.owner = THIS_MODULE,
.open = dgrp_info_proc_open,
.read = seq_read,
@@ -106,7 +81,7 @@ static struct file_operations info_proc_file_ops = {
.release = single_release,
};
-static struct file_operations nodeinfo_proc_file_ops = {
+static const struct file_operations nodeinfo_proc_file_ops = {
.owner = THIS_MODULE,
.open = dgrp_nodeinfo_proc_open,
.read = seq_read,
@@ -114,71 +89,25 @@ static struct file_operations nodeinfo_proc_file_ops = {
.release = seq_release,
};
-static struct dgrp_proc_entry dgrp_table[] = {
- {
- .id = DGRP_CONFIG,
- .name = "config",
- .mode = 0644,
- .proc_file_ops = &config_proc_file_ops,
- },
- {
- .id = DGRP_INFO,
- .name = "info",
- .mode = 0644,
- .proc_file_ops = &info_proc_file_ops,
- },
- {
- .id = DGRP_NODEINFO,
- .name = "nodeinfo",
- .mode = 0644,
- .proc_file_ops = &nodeinfo_proc_file_ops,
- },
- {
- .id = DGRP_NETDIR,
- .name = "net",
- .mode = 0500,
- .child = dgrp_net_table
- },
- {
- .id = DGRP_MONDIR,
- .name = "mon",
- .mode = 0500,
- .child = dgrp_mon_table
- },
- {
- .id = DGRP_PORTSDIR,
- .name = "ports",
- .mode = 0500,
- .child = dgrp_ports_table
- },
- {
- .id = DGRP_DPADIR,
- .name = "dpa",
- .mode = 0500,
- .child = dgrp_dpa_table
- }
-};
-
static struct proc_dir_entry *net_entry_pointer;
static struct proc_dir_entry *mon_entry_pointer;
static struct proc_dir_entry *dpa_entry_pointer;
static struct proc_dir_entry *ports_entry_pointer;
-static struct dgrp_proc_entry dgrp_net_table[] = {
- {0}
-};
-
-static struct dgrp_proc_entry dgrp_mon_table[] = {
- {0}
-};
-
-static struct dgrp_proc_entry dgrp_ports_table[] = {
- {0}
-};
-
-static struct dgrp_proc_entry dgrp_dpa_table[] = {
- {0}
-};
+static void remove_files(struct nd_struct *nd)
+{
+ char buf[3];
+ ID_TO_CHAR(nd->nd_ID, buf);
+ dgrp_remove_node_class_sysfs_files(nd);
+ if (nd->nd_net_de)
+ remove_proc_entry(buf, net_entry_pointer);
+ if (nd->nd_mon_de)
+ remove_proc_entry(buf, mon_entry_pointer);
+ if (nd->nd_dpa_de)
+ remove_proc_entry(buf, dpa_entry_pointer);
+ if (nd->nd_ports_de)
+ remove_proc_entry(buf, ports_entry_pointer);
+}
void dgrp_unregister_proc(void)
{
@@ -188,12 +117,19 @@ void dgrp_unregister_proc(void)
ports_entry_pointer = NULL;
if (dgrp_proc_dir_entry) {
- unregister_proc_table(dgrp_table, dgrp_proc_dir_entry);
- remove_proc_entry(dgrp_proc_dir_entry->name,
- dgrp_proc_dir_entry->parent);
+ struct nd_struct *nd;
+ list_for_each_entry(nd, &nd_struct_list, list)
+ remove_files(nd);
+ remove_proc_entry("dgrp/config", NULL);
+ remove_proc_entry("dgrp/info", NULL);
+ remove_proc_entry("dgrp/nodeinfo", NULL);
+ remove_proc_entry("dgrp/net", NULL);
+ remove_proc_entry("dgrp/mon", NULL);
+ remove_proc_entry("dgrp/dpa", NULL);
+ remove_proc_entry("dgrp/ports", NULL);
+ remove_proc_entry("dgrp", NULL);
dgrp_proc_dir_entry = NULL;
}
-
}
void dgrp_register_proc(void)
@@ -201,211 +137,16 @@ void dgrp_register_proc(void)
/*
* Register /proc/dgrp
*/
- dgrp_proc_dir_entry = proc_create("dgrp", S_IFDIR, NULL,
- &dgrp_proc_file_ops);
- register_proc_table(dgrp_table, dgrp_proc_dir_entry);
-}
-
-/*
- * /proc/sys support
- */
-static int dgrp_proc_match(int len, const char *name, struct proc_dir_entry *de)
-{
- if (!de || !de->low_ino)
- return 0;
- if (de->namelen != len)
- return 0;
- return !memcmp(name, de->name, len);
-}
-
-
-/*
- * Scan the entries in table and add them all to /proc at the position
- * referred to by "root"
- */
-static void register_proc_table(struct dgrp_proc_entry *table,
- struct proc_dir_entry *root)
-{
- struct proc_dir_entry *de;
- int len;
- mode_t mode;
-
- if (table == NULL)
+ dgrp_proc_dir_entry = proc_mkdir("dgrp", NULL);
+ if (!dgrp_proc_dir_entry)
return;
- if (root == NULL)
- return;
-
- for (; table->id; table++) {
- /* Can't do anything without a proc name. */
- if (!table->name)
- continue;
-
- /* Maybe we can't do anything with it... */
- if (!table->proc_file_ops &&
- !table->child) {
- pr_warn("dgrp: Can't register %s\n",
- table->name);
- continue;
- }
-
- len = strlen(table->name);
- mode = table->mode;
-
- de = NULL;
- if (!table->child)
- mode |= S_IFREG;
- else {
- mode |= S_IFDIR;
- for (de = root->subdir; de; de = de->next) {
- if (dgrp_proc_match(len, table->name, de))
- break;
- }
- /* If the subdir exists already, de is non-NULL */
- }
-
- if (!de) {
- de = create_proc_entry(table->name, mode, root);
- if (!de)
- continue;
- de->data = (void *) table;
- if (!table->child) {
- de->proc_iops = &proc_inode_ops;
- if (table->proc_file_ops)
- rcu_assign_pointer(de->proc_fops,
- table->proc_file_ops);
- else
- rcu_assign_pointer(de->proc_fops,
- &dgrp_proc_file_ops);
- }
- }
- table->de = de;
- if (de->mode & S_IFDIR)
- register_proc_table(table->child, de);
-
- if (table->id == DGRP_NETDIR)
- net_entry_pointer = de;
-
- if (table->id == DGRP_MONDIR)
- mon_entry_pointer = de;
-
- if (table->id == DGRP_DPADIR)
- dpa_entry_pointer = de;
-
- if (table->id == DGRP_PORTSDIR)
- ports_entry_pointer = de;
- }
-}
-
-/*
- * Unregister a /proc sysctl table and any subdirectories.
- */
-static void unregister_proc_table(struct dgrp_proc_entry *table,
- struct proc_dir_entry *root)
-{
- struct proc_dir_entry *de;
- struct nd_struct *tmp;
-
- if (table == NULL)
- return;
-
- list_for_each_entry(tmp, &nd_struct_list, list) {
- if ((table == dgrp_net_table) && (tmp->nd_net_de)) {
- unregister_dgrp_device(tmp->nd_net_de);
- dgrp_remove_node_class_sysfs_files(tmp);
- }
-
- if ((table == dgrp_mon_table) && (tmp->nd_mon_de))
- unregister_dgrp_device(tmp->nd_mon_de);
-
- if ((table == dgrp_dpa_table) && (tmp->nd_dpa_de))
- unregister_dgrp_device(tmp->nd_dpa_de);
-
- if ((table == dgrp_ports_table) && (tmp->nd_ports_de))
- unregister_dgrp_device(tmp->nd_ports_de);
- }
-
- for (; table->id; table++) {
- de = table->de;
-
- if (!de)
- continue;
- if (de->mode & S_IFDIR) {
- if (!table->child) {
- pr_alert("dgrp: malformed sysctl tree on free\n");
- continue;
- }
- unregister_proc_table(table->child, de);
-
- /* Don't unregister directories which still have entries */
- if (de->subdir)
- continue;
- }
-
- /* Don't unregister proc entries that are still being used.. */
- if ((atomic_read(&de->count)) != 1) {
- pr_alert("proc entry %s in use, not removing\n",
- de->name);
- continue;
- }
-
- remove_proc_entry(de->name, de->parent);
- table->de = NULL;
- }
-}
-
-static int dgrp_gen_proc_open(struct inode *inode, struct file *file)
-{
- struct proc_dir_entry *de;
- struct dgrp_proc_entry *entry;
- int ret = 0;
-
- de = (struct proc_dir_entry *) PDE(file_inode(file));
- if (!de || !de->data) {
- ret = -ENXIO;
- goto done;
- }
-
- entry = (struct dgrp_proc_entry *) de->data;
- if (!entry) {
- ret = -ENXIO;
- goto done;
- }
-
- down(&entry->excl_sem);
-
- if (entry->excl_cnt)
- ret = -EBUSY;
- else
- entry->excl_cnt++;
-
- up(&entry->excl_sem);
-
-done:
- return ret;
-}
-
-static int dgrp_gen_proc_close(struct inode *inode, struct file *file)
-{
- struct proc_dir_entry *de;
- struct dgrp_proc_entry *entry;
-
- de = (struct proc_dir_entry *) PDE(file_inode(file));
- if (!de || !de->data)
- goto done;
-
- entry = (struct dgrp_proc_entry *) de->data;
- if (!entry)
- goto done;
-
- down(&entry->excl_sem);
-
- if (entry->excl_cnt)
- entry->excl_cnt = 0;
-
- up(&entry->excl_sem);
-
-done:
- return 0;
+ proc_create("dgrp/config", 0644, NULL, &config_proc_file_ops);
+ proc_create("dgrp/info", 0644, NULL, &info_proc_file_ops);
+ proc_create("dgrp/nodeinfo", 0644, NULL, &nodeinfo_proc_file_ops);
+ net_entry_pointer = proc_mkdir_mode("dgrp/net", 0500, NULL);
+ mon_entry_pointer = proc_mkdir_mode("dgrp/mon", 0500, NULL);
+ dpa_entry_pointer = proc_mkdir_mode("dgrp/dpa", 0500, NULL);
+ ports_entry_pointer = proc_mkdir_mode("dgrp/ports", 0500, NULL);
}
static void *dgrp_config_proc_start(struct seq_file *m, loff_t *pos)
@@ -736,6 +477,10 @@ static int dgrp_add_id(long id)
init_waitqueue_head(&nd->nd_tx_waitq);
init_waitqueue_head(&nd->nd_mon_wqueue);
init_waitqueue_head(&nd->nd_dpa_wqueue);
+ sema_init(&nd->nd_mon_semaphore, 1);
+ sema_init(&nd->nd_net_semaphore, 1);
+ spin_lock_init(&nd->nd_dpa_lock);
+ nd->nd_state = NS_CLOSED;
for (i = 0; i < SEQ_MAX; i++)
init_waitqueue_head(&nd->nd_seq_wque[i]);
@@ -750,12 +495,12 @@ static int dgrp_add_id(long id)
if (ret)
goto error_out;
- register_dgrp_device(nd, net_entry_pointer, dgrp_register_net_hook);
- register_dgrp_device(nd, mon_entry_pointer, dgrp_register_mon_hook);
- register_dgrp_device(nd, dpa_entry_pointer, dgrp_register_dpa_hook);
- register_dgrp_device(nd, ports_entry_pointer,
- dgrp_register_ports_hook);
-
+ dgrp_create_node_class_sysfs_files(nd);
+ nd->nd_net_de = add_proc_file(nd, net_entry_pointer, &dgrp_net_ops);
+ nd->nd_mon_de = add_proc_file(nd, mon_entry_pointer, &dgrp_mon_ops);
+ nd->nd_dpa_de = add_proc_file(nd, dpa_entry_pointer, &dgrp_dpa_ops);
+ nd->nd_ports_de = add_proc_file(nd, ports_entry_pointer,
+ &dgrp_ports_ops);
return 0;
/* FIXME this guy should free the tty driver stored in nd and destroy
@@ -774,16 +519,7 @@ static int dgrp_remove_nd(struct nd_struct *nd)
if (nd->nd_tty_ref_cnt)
return -EBUSY;
- if (nd->nd_net_de) {
- unregister_dgrp_device(nd->nd_net_de);
- dgrp_remove_node_class_sysfs_files(nd);
- }
-
- unregister_dgrp_device(nd->nd_mon_de);
-
- unregister_dgrp_device(nd->nd_ports_de);
-
- unregister_dgrp_device(nd->nd_dpa_de);
+ remove_files(nd);
dgrp_tty_uninit(nd);
@@ -795,38 +531,11 @@ static int dgrp_remove_nd(struct nd_struct *nd)
return 0;
}
-static void register_dgrp_device(struct nd_struct *node,
+static struct proc_dir_entry *add_proc_file(struct nd_struct *node,
struct proc_dir_entry *root,
- void (*register_hook)(struct proc_dir_entry *de))
+ const struct file_operations *fops)
{
char buf[3];
- struct proc_dir_entry *de;
-
ID_TO_CHAR(node->nd_ID, buf);
-
- de = create_proc_entry(buf, 0600 | S_IFREG, root);
- if (!de)
- return;
-
- de->data = (void *) node;
-
- if (register_hook)
- register_hook(de);
-
-}
-
-static void unregister_dgrp_device(struct proc_dir_entry *de)
-{
- if (!de)
- return;
-
- /* Don't unregister proc entries that are still being used.. */
- if ((atomic_read(&de->count)) != 1) {
- pr_alert("%s - proc entry %s in use. Not removing.\n",
- __func__, de->name);
- return;
- }
-
- remove_proc_entry(de->name, de->parent);
- de = NULL;
+ return proc_create_data(buf, 0600, root, fops, node);
}
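The rewritten registration leans on proc_create() accepting a path relative to /proc and on proc_mkdir_mode() for directories with restricted permissions, so no private table walker is needed. A stripped-down sketch of that layout under hypothetical foo_* names:

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static struct proc_dir_entry *foo_root;
static struct proc_dir_entry *foo_net_dir;

static int foo_config_show(struct seq_file *m, void *v)
{
	seq_puts(m, "example\n");
	return 0;
}

static int foo_config_open(struct inode *inode, struct file *file)
{
	return single_open(file, foo_config_show, NULL);
}

static const struct file_operations foo_config_fops = {
	.open    = foo_config_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int foo_register_proc(void)
{
	foo_root = proc_mkdir("foo", NULL);
	if (!foo_root)
		return -ENOMEM;

	/* proc_create() and friends accept a path relative to /proc */
	proc_create("foo/config", 0644, NULL, &foo_config_fops);
	foo_net_dir = proc_mkdir_mode("foo/net", 0500, NULL);
	return 0;
}

static void foo_unregister_proc(void)
{
	remove_proc_entry("foo/config", NULL);
	remove_proc_entry("foo/net", NULL);
	remove_proc_entry("foo", NULL);
}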
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
index 5337b41..94e426e 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
@@ -29,70 +30,55 @@
#define FT1000_PROC "ft1000"
#define MAX_FILE_LEN 255
-#define PUTM_TO_PAGE(len, page, args...) \
- len += snprintf(page+len, PAGE_SIZE - len, args)
-
-#define PUTX_TO_PAGE(len, page, message, size, var) \
- len += snprintf(page+len, PAGE_SIZE - len, message); \
+#define seq_putx(m, message, size, var) \
+ seq_printf(m, message); \
for(i = 0; i < (size - 1); i++) { \
- len += snprintf(page+len, PAGE_SIZE - len, "%02x:", var[i]); \
+ seq_printf(m, "%02x:", var[i]); \
} \
- len += snprintf(page+len, PAGE_SIZE - len, "%02x\n", var[i])
+ seq_printf(m, "%02x\n", var[i])
-#define PUTD_TO_PAGE(len, page, message, size, var) \
- len += snprintf(page+len, PAGE_SIZE - len, message); \
+#define seq_putd(m, message, size, var) \
+ seq_printf(m, message); \
for(i = 0; i < (size - 1); i++) { \
- len += snprintf(page+len, PAGE_SIZE - len, "%d.", var[i]); \
+ seq_printf(m, "%d.", var[i]); \
} \
- len += snprintf(page+len, PAGE_SIZE - len, "%d\n", var[i])
+ seq_printf(m, "%d\n", var[i])
-static int ft1000ReadProc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int ft1000ReadProc(struct seq_file *m, void *v)
{
- struct net_device *dev;
- int len;
- int i;
- struct ft1000_info *info;
- char *status[] = {
+ static const char *status[] = {
"Idle (Disconnect)", "Searching", "Active (Connected)",
"Waiting for L2", "Sleep", "No Coverage", "", ""
};
- char *signal[] = { "", "*", "**", "***", "****" };
+ static const char *signal[] = { "", "*", "**", "***", "****" };
+
+ struct net_device *dev = m->private;
+ struct ft1000_info *info = netdev_priv(dev);
+ int i;
int strength;
int quality;
struct timeval tv;
time_t delta;
- dev = (struct net_device *)data;
- info = netdev_priv(dev);
-
- if (off > 0) {
- *eof = 1;
- return 0;
- }
-
- /* Wrap-around */
-
if (info->AsicID == ELECTRABUZZ_ID) {
if (info->ProgConStat != 0xFF) {
info->LedStat =
ft1000_read_dpram(dev, FT1000_DSP_LED);
info->ConStat =
- ft1000_read_dpram(dev,
- FT1000_DSP_CON_STATE);
+ ft1000_read_dpram(dev, FT1000_DSP_CON_STATE);
} else {
info->ConStat = 0xf;
}
} else {
if (info->ProgConStat != 0xFF) {
info->LedStat =
- ntohs(ft1000_read_dpram_mag_16
- (dev, FT1000_MAG_DSP_LED,
- FT1000_MAG_DSP_LED_INDX));
+ ntohs(ft1000_read_dpram_mag_16(
+ dev, FT1000_MAG_DSP_LED,
+ FT1000_MAG_DSP_LED_INDX));
info->ConStat =
- ntohs(ft1000_read_dpram_mag_16
- (dev, FT1000_MAG_DSP_CON_STATE,
- FT1000_MAG_DSP_CON_STATE_INDX));
+ ntohs(ft1000_read_dpram_mag_16(
+ dev, FT1000_MAG_DSP_CON_STATE,
+ FT1000_MAG_DSP_CON_STATE_INDX));
} else {
info->ConStat = 0xf;
}
@@ -135,36 +121,46 @@ static int ft1000ReadProc(char *page, char **start, off_t off,
}
do_gettimeofday(&tv);
- delta = (tv.tv_sec - info->ConTm);
- len = 0;
- PUTM_TO_PAGE(len, page, "Connection Time: %02ld:%02ld:%02ld\n",
+ delta = tv.tv_sec - info->ConTm;
+ seq_printf(m, "Connection Time: %02ld:%02ld:%02ld\n",
((delta / 3600) % 24), ((delta / 60) % 60), (delta % 60));
- PUTM_TO_PAGE(len, page, "Connection Time[s]: %ld\n", delta);
- PUTM_TO_PAGE(len, page, "Asic ID: %s\n",
- (info->AsicID) ==
+ seq_printf(m, "Connection Time[s]: %ld\n", delta);
+ seq_printf(m, "Asic ID: %s\n",
+ info->AsicID ==
ELECTRABUZZ_ID ? "ELECTRABUZZ ASIC" : "MAGNEMITE ASIC");
- PUTX_TO_PAGE(len, page, "SKU: ", SKUSZ, info->Sku);
- PUTX_TO_PAGE(len, page, "EUI64: ", EUISZ, info->eui64);
- PUTD_TO_PAGE(len, page, "DSP version number: ", DSPVERSZ, info->DspVer);
- PUTX_TO_PAGE(len, page, "Hardware Serial Number: ", HWSERNUMSZ,
- info->HwSerNum);
- PUTX_TO_PAGE(len, page, "Caliberation Version: ", CALVERSZ,
- info->RfCalVer);
- PUTD_TO_PAGE(len, page, "Caliberation Date: ", CALDATESZ,
- info->RfCalDate);
- PUTM_TO_PAGE(len, page, "Media State: %s\n",
+ seq_putx(m, "SKU: ", SKUSZ, info->Sku);
+ seq_putx(m, "EUI64: ", EUISZ, info->eui64);
+ seq_putd(m, "DSP version number: ", DSPVERSZ, info->DspVer);
+ seq_putx(m, "Hardware Serial Number: ", HWSERNUMSZ, info->HwSerNum);
+ seq_putx(m, "Caliberation Version: ", CALVERSZ, info->RfCalVer);
+ seq_putd(m, "Caliberation Date: ", CALDATESZ, info->RfCalDate);
+ seq_printf(m, "Media State: %s\n",
(info->mediastate) ? "link" : "no link");
- PUTM_TO_PAGE(len, page, "Connection Status: %s\n",
- status[((info->ConStat) & 0x7)]);
- PUTM_TO_PAGE(len, page, "RX packets: %ld\n", info->stats.rx_packets);
- PUTM_TO_PAGE(len, page, "TX packets: %ld\n", info->stats.tx_packets);
- PUTM_TO_PAGE(len, page, "RX bytes: %ld\n", info->stats.rx_bytes);
- PUTM_TO_PAGE(len, page, "TX bytes: %ld\n", info->stats.tx_bytes);
- PUTM_TO_PAGE(len, page, "Signal Strength: %s\n", signal[strength]);
- PUTM_TO_PAGE(len, page, "Signal Quality: %s\n", signal[quality]);
- return len;
+ seq_printf(m, "Connection Status: %s\n", status[info->ConStat & 0x7]);
+ seq_printf(m, "RX packets: %ld\n", info->stats.rx_packets);
+ seq_printf(m, "TX packets: %ld\n", info->stats.tx_packets);
+ seq_printf(m, "RX bytes: %ld\n", info->stats.rx_bytes);
+ seq_printf(m, "TX bytes: %ld\n", info->stats.tx_bytes);
+ seq_printf(m, "Signal Strength: %s\n", signal[strength]);
+ seq_printf(m, "Signal Quality: %s\n", signal[quality]);
+ return 0;
+}
+
+/*
+ * seq_file wrappers for procfile show routines.
+ */
+static int ft1000_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ft1000ReadProc, PDE_DATA(inode));
}
+static const struct file_operations ft1000_proc_fops = {
+ .open = ft1000_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int ft1000NotifyProc(struct notifier_block *this, unsigned long event,
void *ptr)
{
@@ -176,8 +172,8 @@ static int ft1000NotifyProc(struct notifier_block *this, unsigned long event,
switch (event) {
case NETDEV_CHANGENAME:
remove_proc_entry(info->netdevname, info->ft1000_proc_dir);
- create_proc_read_entry(dev->name, 0644, info->ft1000_proc_dir,
- ft1000ReadProc, dev);
+ proc_create_data(dev->name, 0644, info->ft1000_proc_dir,
+ &ft1000_proc_fops, dev);
snprintf(info->netdevname, IFNAMSIZ, "%s", dev->name);
break;
}
@@ -195,8 +191,10 @@ void ft1000InitProc(struct net_device *dev)
info = netdev_priv(dev);
info->ft1000_proc_dir = proc_mkdir(FT1000_PROC, init_net.proc_net);
- create_proc_read_entry(dev->name, 0644, info->ft1000_proc_dir,
- ft1000ReadProc, dev);
+
+ proc_create_data(dev->name, 0644, info->ft1000_proc_dir,
+ &ft1000_proc_fops, dev);
+
snprintf(info->netdevname, IFNAMSIZ, "%s", dev->name);
register_netdevice_notifier(&ft1000_netdev_notifier);
}
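The seq_putx/seq_putd macros above expand to several statements; the same hex dump can also be written as a small helper. The sketch below is an illustrative alternative form only, not part of the patch:

#include <linux/types.h>
#include <linux/seq_file.h>

/* print "<label> aa:bb:...:ff\n" for a byte array */
static void foo_seq_put_hex(struct seq_file *m, const char *label,
			    const u8 *buf, int len)
{
	int i;

	seq_puts(m, label);
	for (i = 0; i < len; i++)
		seq_printf(m, i == len - 1 ? "%02x\n" : "%02x:", buf[i]);
}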
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
index 297389e..3251d2e 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
@@ -55,7 +55,7 @@ int numofmsgbuf = 0;
//
// Table of entry-point routines for char device
//
-static struct file_operations ft1000fops =
+static const struct file_operations ft1000fops =
{
.unlocked_ioctl = ft1000_ioctl,
.poll = ft1000_poll_dev,
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c b/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c
index b996406..eca6f02 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/netdevice.h>
@@ -30,22 +31,17 @@
#define FT1000_PROC_DIR "ft1000"
-#define PUTM_TO_PAGE(len,page,args...) \
- len += snprintf(page+len, PAGE_SIZE - len, args)
+#define seq_putx(m, message, size, var) \
+ seq_printf(m, message); \
+ for(i = 0; i < (size - 1); i++) \
+ seq_printf(m, "%02x:", var[i]); \
+ seq_printf(m, "%02x\n", var[i])
-#define PUTX_TO_PAGE(len,page,message,size,var) \
- len += snprintf(page+len, PAGE_SIZE - len, message); \
- for (i = 0; i < (size - 1); i++) {\
- len += snprintf(page+len, PAGE_SIZE - len, "%02x:", var[i]); \
- } \
- len += snprintf(page+len, PAGE_SIZE - len, "%02x\n", var[i])
-
-#define PUTD_TO_PAGE(len,page,message,size,var) \
- len += snprintf(page+len, PAGE_SIZE - len, message); \
- for (i = 0; i < (size - 1); i++) {\
- len += snprintf(page+len, PAGE_SIZE - len, "%d.", var[i]); \
- } \
- len += snprintf(page+len, PAGE_SIZE - len, "%d\n", var[i])
+#define seq_putd(m, message, size, var) \
+ seq_printf(m, message); \
+ for(i = 0; i < (size - 1); i++) \
+ seq_printf(m, "%d.", var[i]); \
+ seq_printf(m, "%d\n", var[i])
#define FTNET_PROC init_net.proc_net
@@ -55,19 +51,9 @@ int ft1000_read_dpram16 (struct ft1000_usb *ft1000dev, u16 indx,
u8 *buffer, u8 highlow);
-static int
-ft1000ReadProc(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int ft1000ReadProc(struct seq_file *m, void *v)
{
- struct net_device *dev;
- int len;
- int i;
- unsigned short ledStat;
- unsigned short conStat;
-
- struct ft1000_info *info;
-
- char *status[] = {
+ static const char *status[] = {
"Idle (Disconnect)",
"Searching",
"Active (Connected)",
@@ -77,22 +63,18 @@ ft1000ReadProc(char *page, char **start, off_t off, int count, int *eof,
"",
"",
};
+ static const char *signal[] = { "", "*", "**", "***", "****" };
- char *signal[] = { "", "*", "**", "***", "****" };
+ struct net_device *dev = m->private;
+ struct ft1000_info *info = netdev_priv(dev);
+ int i;
+ unsigned short ledStat;
+ unsigned short conStat;
int strength;
int quality;
struct timeval tv;
time_t delta;
- dev = (struct net_device *) data;
- info = netdev_priv(dev);
-
- if (off > 0) {
- *eof = 1;
- return 0;
- }
-
-
if (info->ProgConStat != 0xFF) {
ft1000_read_dpram16(info->priv, FT1000_MAG_DSP_LED,
(u8 *)&ledStat, FT1000_MAG_DSP_LED_INDX);
@@ -144,36 +126,43 @@ ft1000ReadProc(char *page, char **start, off_t off, int count, int *eof,
quality = 0;
}
- len = 0;
- PUTM_TO_PAGE(len, page, "Connection Time: %02ld:%02ld:%02ld\n",
+ seq_printf(m, "Connection Time: %02ld:%02ld:%02ld\n",
((delta / 3600) % 24), ((delta / 60) % 60), (delta % 60));
- PUTM_TO_PAGE(len, page, "Connection Time[s]: %ld\n", delta);
- PUTM_TO_PAGE(len, page, "Asic ID: %s\n",
- (info->AsicID) ==
- ELECTRABUZZ_ID ? "ELECTRABUZZ ASIC" : "MAGNEMITE ASIC");
- PUTX_TO_PAGE(len, page, "SKU: ", SKUSZ, info->Sku);
- PUTX_TO_PAGE(len, page, "EUI64: ", EUISZ, info->eui64);
- PUTD_TO_PAGE(len, page, "DSP version number: ", DSPVERSZ, info->DspVer);
- PUTX_TO_PAGE(len, page, "Hardware Serial Number: ", HWSERNUMSZ,
- info->HwSerNum);
- PUTX_TO_PAGE(len, page, "Caliberation Version: ", CALVERSZ,
- info->RfCalVer);
- PUTD_TO_PAGE(len, page, "Caliberation Date: ", CALDATESZ,
- info->RfCalDate);
- PUTM_TO_PAGE(len, page, "Media State: %s\n",
- (info->mediastate) ? "link" : "no link");
- PUTM_TO_PAGE(len, page, "Connection Status: %s\n",
- status[((info->ConStat) & 0x7)]);
- PUTM_TO_PAGE(len, page, "RX packets: %ld\n", info->stats.rx_packets);
- PUTM_TO_PAGE(len, page, "TX packets: %ld\n", info->stats.tx_packets);
- PUTM_TO_PAGE(len, page, "RX bytes: %ld\n", info->stats.rx_bytes);
- PUTM_TO_PAGE(len, page, "TX bytes: %ld\n", info->stats.tx_bytes);
- PUTM_TO_PAGE(len, page, "Signal Strength: %s\n", signal[strength]);
- PUTM_TO_PAGE(len, page, "Signal Quality: %s\n", signal[quality]);
-
- return len;
+ seq_printf(m, "Connection Time[s]: %ld\n", delta);
+ seq_printf(m, "Asic ID: %s\n",
+ (info->AsicID) == ELECTRABUZZ_ID ? "ELECTRABUZZ ASIC" : "MAGNEMITE ASIC");
+ seq_putx(m, "SKU: ", SKUSZ, info->Sku);
+ seq_putx(m, "EUI64: ", EUISZ, info->eui64);
+ seq_putd(m, "DSP version number: ", DSPVERSZ, info->DspVer);
+ seq_putx(m, "Hardware Serial Number: ", HWSERNUMSZ, info->HwSerNum);
+ seq_putx(m, "Caliberation Version: ", CALVERSZ, info->RfCalVer);
+ seq_putd(m, "Caliberation Date: ", CALDATESZ, info->RfCalDate);
+ seq_printf(m, "Media State: %s\n", (info->mediastate) ? "link" : "no link");
+ seq_printf(m, "Connection Status: %s\n", status[info->ConStat & 0x7]);
+ seq_printf(m, "RX packets: %ld\n", info->stats.rx_packets);
+ seq_printf(m, "TX packets: %ld\n", info->stats.tx_packets);
+ seq_printf(m, "RX bytes: %ld\n", info->stats.rx_bytes);
+ seq_printf(m, "TX bytes: %ld\n", info->stats.tx_bytes);
+ seq_printf(m, "Signal Strength: %s\n", signal[strength]);
+ seq_printf(m, "Signal Quality: %s\n", signal[quality]);
+ return 0;
+}
+
+/*
+ * seq_file wrappers for procfile show routines.
+ */
+static int ft1000_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ft1000ReadProc, PDE_DATA(inode));
}
+static const struct file_operations ft1000_proc_fops = {
+ .open = ft1000_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int
ft1000NotifyProc(struct notifier_block *this, unsigned long event, void *ptr)
{
@@ -186,9 +175,9 @@ ft1000NotifyProc(struct notifier_block *this, unsigned long event, void *ptr)
switch (event) {
case NETDEV_CHANGENAME:
remove_proc_entry(info->netdevname, info->ft1000_proc_dir);
- ft1000_proc_file = create_proc_read_entry(dev->name, 0644,
- info->ft1000_proc_dir,
- ft1000ReadProc, dev);
+ ft1000_proc_file =
+ proc_create_data(dev->name, 0644, info->ft1000_proc_dir,
+ &ft1000_proc_fops, dev);
snprintf(info->netdevname, IFNAMSIZ, "%s", dev->name);
break;
}
@@ -217,10 +206,10 @@ int ft1000_init_proc(struct net_device *dev)
}
ft1000_proc_file =
- create_proc_read_entry(dev->name, 0644,
- info->ft1000_proc_dir, ft1000ReadProc, dev);
+ proc_create_data(dev->name, 0644, info->ft1000_proc_dir,
+ &ft1000_proc_fops, dev);
- if (ft1000_proc_file == NULL) {
+ if (!ft1000_proc_file) {
printk(KERN_WARNING "Unable to create /proc entry.\n");
goto fail_entry;
}
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index 6044e17..2856b8f 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -36,9 +36,6 @@
#include <linux/delay.h>
#include <linux/input.h>
-#include <mach/mxs.h>
-#include <mach/common.h>
-
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger.h>
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
index 055b99d..0127601 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
@@ -16,6 +16,7 @@
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
+#include <linux/reset.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/spinlock.h>
@@ -25,8 +26,8 @@
#include <linux/clk.h>
#include <linux/list.h>
#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/of_device.h>
-#include <asm/mach/irq.h>
#include "imx-ipu-v3.h"
#include "ipu-prv.h"
@@ -661,7 +662,7 @@ int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
}
EXPORT_SYMBOL_GPL(ipu_idmac_disable_channel);
-static int ipu_reset(struct ipu_soc *ipu)
+static int ipu_memory_reset(struct ipu_soc *ipu)
{
unsigned long timeout;
@@ -1105,7 +1106,12 @@ static int ipu_probe(struct platform_device *pdev)
if (ret)
goto out_failed_irq;
- ret = ipu_reset(ipu);
+ ret = device_reset(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to reset: %d\n", ret);
+ goto out_failed_reset;
+ }
+ ret = ipu_memory_reset(ipu);
if (ret)
goto out_failed_reset;
@@ -1131,8 +1137,8 @@ static int ipu_probe(struct platform_device *pdev)
failed_add_clients:
ipu_submodules_exit(ipu);
failed_submodules_init:
- ipu_irq_exit(ipu);
out_failed_reset:
+ ipu_irq_exit(ipu);
out_failed_irq:
clk_disable_unprepare(ipu->clk);
failed_clk_get:
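device_reset() asks the reset controller framework to assert and deassert the reset line declared for the device in the device tree; the controller-internal ipu_memory_reset() still runs afterwards. A minimal probe-time sketch of the call, under a hypothetical driver:

#include <linux/platform_device.h>
#include <linux/reset.h>

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* pulse the reset line described by the "resets" DT property */
	ret = device_reset(&pdev->dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to reset: %d\n", ret);
		return ret;
	}

	/* ... continue with controller-specific initialisation ... */
	return 0;
}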
diff --git a/drivers/staging/keucr/scsiglue.c b/drivers/staging/keucr/scsiglue.c
index 083b20e..48e1005 100644
--- a/drivers/staging/keucr/scsiglue.c
+++ b/drivers/staging/keucr/scsiglue.c
@@ -229,26 +229,18 @@ void usb_stor_report_bus_reset(struct us_data *us)
/* we use this macro to help us write into the buffer */
#undef SPRINTF
-#define SPRINTF(args...) \
- do { \
- if (pos < buffer+length) \
- pos += sprintf(pos, ## args); \
- } while (0)
+#define SPRINTF(args...) seq_printf(m, ##args)
-/*
- * proc_info()
- */
-static int proc_info(struct Scsi_Host *host, char *buffer, char **start,
- off_t offset, int length, int inout)
+static int write_info(struct Scsi_Host *host, char *buffer, int length)
+{
+ return length;
+}
+
+static int show_info(struct seq_file *m, struct Scsi_Host *host)
{
struct us_data *us = host_to_us(host);
- char *pos = buffer;
const char *string;
- /* pr_info("scsiglue --- proc_info\n"); */
- if (inout)
- return length;
-
/* print the controller name */
SPRINTF(" Host scsi%d: usb-storage\n", host->host_no);
@@ -278,29 +270,17 @@ static int proc_info(struct Scsi_Host *host, char *buffer, char **start,
SPRINTF(" Transport: %s\n", us->transport_name);
/* show the device flags */
- if (pos < buffer + length) {
- pos += sprintf(pos, " Quirks:");
+ SPRINTF(" Quirks:");
#define US_FLAG(name, value) \
do { \
if (us->fflags & value) \
- pos += sprintf(pos, " " #name); \
+ SPRINTF(" " #name); \
} while (0);
US_DO_ALL_FLAGS
#undef US_FLAG
-
- *(pos++) = '\n';
- }
-
- /* Calculate start of next buffer, and return value. */
- *start = buffer + offset;
-
- if ((pos - buffer) < offset)
- return 0;
- else if ((pos - buffer - offset) < length)
- return pos - buffer - offset;
- else
- return length;
+ seq_putc(m, '\n');
+ return 0;
}
/***********************************************************************
@@ -351,7 +331,8 @@ struct scsi_host_template usb_stor_host_template = {
/* basic userland interface stuff */
.name = "eucr-storage",
.proc_name = "eucr-storage",
- .proc_info = proc_info,
+ .write_info = write_info,
+ .show_info = show_info,
.info = host_info,
/* command interface -- queued only */
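With .proc_info gone, the SCSI midlayer expects a .show_info hook that prints into a seq_file (offsets and buffering are handled by the core) plus an optional .write_info hook for writes. A bare-bones sketch with illustrative names:

#include <linux/seq_file.h>
#include <scsi/scsi_host.h>

static int foo_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	seq_printf(m, " Host scsi%u: foo-storage\n", host->host_no);
	return 0;
}

static int foo_write_info(struct Scsi_Host *host, char *buffer, int length)
{
	return length;		/* accept and ignore writes */
}

static struct scsi_host_template foo_host_template = {
	.name       = "foo-storage",
	.proc_name  = "foo-storage",
	.show_info  = foo_show_info,
	.write_info = foo_write_info,
};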
diff --git a/drivers/staging/media/go7007/go7007-driver.c b/drivers/staging/media/go7007/go7007-driver.c
index a5ca99d..3640df0 100644
--- a/drivers/staging/media/go7007/go7007-driver.c
+++ b/drivers/staging/media/go7007/go7007-driver.c
@@ -242,11 +242,8 @@ static void go7007_remove(struct v4l2_device *v4l2_dev)
if (go->hpi_ops->release)
go->hpi_ops->release(go);
if (go->i2c_adapter_online) {
- if (i2c_del_adapter(&go->i2c_adapter) == 0)
- go->i2c_adapter_online = 0;
- else
- v4l2_err(&go->v4l2_dev,
- "error removing I2C adapter!\n");
+ i2c_del_adapter(&go->i2c_adapter);
+ go->i2c_adapter_online = 0;
}
kfree(go->boot_fw);
diff --git a/drivers/staging/rtl8187se/r8180.h b/drivers/staging/rtl8187se/r8180.h
index 70ea414..edacc80 100644
--- a/drivers/staging/rtl8187se/r8180.h
+++ b/drivers/staging/rtl8187se/r8180.h
@@ -372,7 +372,6 @@ typedef struct r8180_priv
struct Stats stats;
struct _link_detect_t link_detect; //YJ,add,080828
struct iw_statistics wstats;
- struct proc_dir_entry *dir_dev;
/*RX stuff*/
u32 *rxring;
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index d10d75e..ca69155 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -36,6 +36,8 @@
#include <linux/syscalls.h>
#include <linux/eeprom_93cx6.h>
#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include "r8180_hw.h"
#include "r8180.h"
@@ -204,51 +206,35 @@ void rtl8180_start_tx_beacon(struct net_device *dev);
static struct proc_dir_entry *rtl8180_proc;
-static int proc_get_registers(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
+static int proc_get_registers(struct seq_file *m, void *v)
{
- struct net_device *dev = data;
- int len = 0;
- int i, n;
- int max = 0xff;
+ struct net_device *dev = m->private;
+ int i, n, max = 0xff;
/* This dump the current register page */
for (n = 0; n <= max;) {
- len += snprintf(page + len, count - len, "\nD: %2x > ", n);
+ seq_printf(m, "\nD: %2x > ", n);
for (i = 0; i < 16 && n <= max; i++, n++)
- len += snprintf(page + len, count - len, "%2x ",
- read_nic_byte(dev, n));
+ seq_printf(m, "%2x ", read_nic_byte(dev, n));
}
- len += snprintf(page + len, count - len, "\n");
-
- *eof = 1;
- return len;
+ seq_putc(m, '\n');
+ return 0;
}
int get_curr_tx_free_desc(struct net_device *dev, int priority);
-static int proc_get_stats_hw(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
+static int proc_get_stats_hw(struct seq_file *m, void *v)
{
- int len = 0;
-
- *eof = 1;
- return len;
+ return 0;
}
-static int proc_get_stats_rx(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
+static int proc_get_stats_rx(struct seq_file *m, void *v)
{
- struct net_device *dev = data;
+ struct net_device *dev = m->private;
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- int len = 0;
-
- len += snprintf(page + len, count - len,
+ seq_printf(m,
"RX OK: %lu\n"
"RX Retry: %lu\n"
"RX CRC Error(0-500): %lu\n"
@@ -263,22 +249,17 @@ static int proc_get_stats_rx(char *page, char **start,
priv->stats.rxicverr
);
- *eof = 1;
- return len;
+ return 0;
}
-static int proc_get_stats_tx(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
+static int proc_get_stats_tx(struct seq_file *m, void *v)
{
- struct net_device *dev = data;
+ struct net_device *dev = m->private;
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
-
- int len = 0;
unsigned long totalOK;
totalOK = priv->stats.txnpokint+priv->stats.txhpokint+priv->stats.txlpokint;
- len += snprintf(page + len, count - len,
+ seq_printf(m,
"TX OK: %lu\n"
"TX Error: %lu\n"
"TX Retry: %lu\n"
@@ -291,8 +272,7 @@ static int proc_get_stats_tx(char *page, char **start,
priv->stats.txbeaconerr
);
- *eof = 1;
- return len;
+ return 0;
}
void rtl8180_proc_module_init(void)
@@ -308,59 +288,61 @@ void rtl8180_proc_module_remove(void)
void rtl8180_proc_remove_one(struct net_device *dev)
{
- struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- if (priv->dir_dev) {
- remove_proc_entry("stats-hw", priv->dir_dev);
- remove_proc_entry("stats-tx", priv->dir_dev);
- remove_proc_entry("stats-rx", priv->dir_dev);
- remove_proc_entry("registers", priv->dir_dev);
- priv->dir_dev = NULL;
- }
+ remove_proc_subtree(dev->name, rtl8180_proc);
}
-void rtl8180_proc_init_one(struct net_device *dev)
+/*
+ * seq_file wrappers for procfile show routines.
+ */
+static int rtl8180_proc_open(struct inode *inode, struct file *file)
{
- struct proc_dir_entry *e;
- struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
+ struct net_device *dev = proc_get_parent_data(inode);
+ int (*show)(struct seq_file *, void *) = PDE_DATA(inode);
- priv->dir_dev = rtl8180_proc;
- if (!priv->dir_dev) {
- DMESGE("Unable to initialize /proc/net/r8180/%s\n",
- dev->name);
- return;
- }
+ return single_open(file, show, dev);
+}
- e = create_proc_read_entry("stats-hw", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_stats_hw, dev);
- if (!e) {
- DMESGE("Unable to initialize "
- "/proc/net/r8180/%s/stats-hw\n",
- dev->name);
- }
+static const struct file_operations rtl8180_proc_fops = {
+ .open = rtl8180_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
- e = create_proc_read_entry("stats-rx", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_stats_rx, dev);
- if (!e) {
- DMESGE("Unable to initialize "
- "/proc/net/r8180/%s/stats-rx\n",
- dev->name);
- }
+/*
+ * Table of proc files we need to create.
+ */
+struct rtl8180_proc_file {
+ char name[12];
+ int (*show)(struct seq_file *, void *);
+};
+static const struct rtl8180_proc_file rtl8180_proc_files[] = {
+ { "stats-hw", &proc_get_stats_hw },
+ { "stats-rx", &proc_get_stats_rx },
+ { "stats-tx", &proc_get_stats_tx },
+ { "registers", &proc_get_registers },
+ { "" }
+};
+
+void rtl8180_proc_init_one(struct net_device *dev)
+{
+ const struct rtl8180_proc_file *f;
+ struct proc_dir_entry *dir;
- e = create_proc_read_entry("stats-tx", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_stats_tx, dev);
- if (!e) {
- DMESGE("Unable to initialize "
- "/proc/net/r8180/%s/stats-tx\n",
- dev->name);
+ dir = proc_mkdir_data(dev->name, 0, rtl8180_proc, dev);
+ if (!dir) {
+ DMESGE("Unable to initialize /proc/net/r8180/%s\n", dev->name);
+ return;
}
- e = create_proc_read_entry("registers", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_registers, dev);
- if (!e) {
- DMESGE("Unable to initialize "
- "/proc/net/r8180/%s/registers\n",
- dev->name);
+ for (f = rtl8180_proc_files; f->name[0]; f++) {
+ if (!proc_create_data(f->name, S_IFREG | S_IRUGO, dir,
+ &rtl8180_proc_fops, f->show)) {
+ DMESGE("Unable to initialize /proc/net/r8180/%s/%s\n",
+ dev->name, f->name);
+ return;
+ }
}
}
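The rtl8180 conversion above stores each show routine as the proc entry's data and the net_device on the parent directory via proc_mkdir_data(), so a single open() wrapper serves every file and remove_proc_subtree() tears the whole directory down. The sketch below shows the core of that arrangement with one file, using hypothetical foo_* names:

#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int foo_stats_show(struct seq_file *m, void *v)
{
	struct net_device *dev = m->private;

	seq_printf(m, "dev %s\n", dev->name);
	return 0;
}

static int foo_proc_open(struct inode *inode, struct file *file)
{
	/* the net_device hangs off the directory, the show routine off the file */
	struct net_device *dev = proc_get_parent_data(inode);
	int (*show)(struct seq_file *, void *) = PDE_DATA(inode);

	return single_open(file, show, dev);
}

static const struct file_operations foo_proc_fops = {
	.open    = foo_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static void foo_proc_init(struct net_device *dev, struct proc_dir_entry *root)
{
	struct proc_dir_entry *dir;

	dir = proc_mkdir_data(dev->name, 0, root, dev);
	if (!dir)
		return;

	proc_create_data("stats", S_IFREG | S_IRUGO, dir,
			 &foo_proc_fops, foo_stats_show);
}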
diff --git a/drivers/staging/rtl8192e/rtl8192e/Makefile b/drivers/staging/rtl8192e/rtl8192e/Makefile
index 313a92e..a2c4fb4 100644
--- a/drivers/staging/rtl8192e/rtl8192e/Makefile
+++ b/drivers/staging/rtl8192e/rtl8192e/Makefile
@@ -7,7 +7,6 @@ r8192e_pci-objs := \
r8190P_rtl8256.o \
rtl_cam.o \
rtl_core.o \
- rtl_debug.o \
rtl_dm.o \
rtl_eeprom.o \
rtl_ethtool.o \
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 4ebf99b..2b6c61c 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -2966,8 +2966,6 @@ static int rtl8192_pci_probe(struct pci_dev *pdev,
goto err_free_irq;
RT_TRACE(COMP_INIT, "dev name: %s\n", dev->name);
- rtl8192_proc_init_one(dev);
-
if (priv->polling_timer_on == 0)
check_rfctrl_gpio_timer((unsigned long)dev);
@@ -3003,7 +3001,6 @@ static void rtl8192_pci_disconnect(struct pci_dev *pdev)
del_timer_sync(&priv->gpio_polling_timer);
cancel_delayed_work(&priv->gpio_change_rf_wq);
priv->polling_timer_on = 0;
- rtl8192_proc_remove_one(dev);
rtl8192_down(dev, true);
deinit_hal_dm(dev);
if (priv->pFirmware) {
@@ -3093,7 +3090,6 @@ static int __init rtl8192_pci_module_init(void)
printk(KERN_INFO "\nLinux kernel driver for RTL8192E WLAN cards\n");
printk(KERN_INFO "Copyright (c) 2007-2008, Realsil Wlan Driver\n");
- rtl8192_proc_module_init();
if (0 != pci_register_driver(&rtl8192_pci_driver)) {
DMESG("No device found");
/*pci_unregister_driver (&rtl8192_pci_driver);*/
@@ -3107,7 +3103,6 @@ static void __exit rtl8192_pci_module_exit(void)
pci_unregister_driver(&rtl8192_pci_driver);
RT_TRACE(COMP_DOWN, "Exiting");
- rtl8192_proc_module_remove();
}
void check_rfctrl_gpio_timer(unsigned long data)
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index 320d5fc..87d4d34 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -1085,10 +1085,4 @@ void ActUpdateChannelAccessSetting(struct net_device *dev,
enum wireless_mode WirelessMode,
struct channel_access_setting *ChnlAccessSetting);
-/* proc stuff from rtl_debug.c */
-void rtl8192_proc_init_one(struct net_device *dev);
-void rtl8192_proc_remove_one(struct net_device *dev);
-void rtl8192_proc_module_init(void);
-void rtl8192_proc_module_remove(void);
-
#endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_debug.c b/drivers/staging/rtl8192e/rtl8192e/rtl_debug.c
deleted file mode 100644
index c19b14c..0000000
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_debug.c
+++ /dev/null
@@ -1,1029 +0,0 @@
-/******************************************************************************
- * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
- *
- * Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
-******************************************************************************/
-#include "rtl_core.h"
-#include "r8192E_phy.h"
-#include "r8192E_phyreg.h"
-#include "r8190P_rtl8256.h" /* RTL8225 Radio frontend */
-#include "r8192E_cmdpkt.h"
-
-/****************************************************************************
- -----------------------------PROCFS STUFF-------------------------
-*****************************************************************************/
-/*This part is related to PROC, which will record some statistics. */
-static struct proc_dir_entry *rtl8192_proc;
-
-static int proc_get_stats_ap(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
- struct rtllib_device *ieee = priv->rtllib;
- struct rtllib_network *target;
- int len = 0;
-
- list_for_each_entry(target, &ieee->network_list, list) {
-
- len += snprintf(page + len, count - len,
- "%s ", target->ssid);
-
- if (target->wpa_ie_len > 0 || target->rsn_ie_len > 0)
- len += snprintf(page + len, count - len,
- "WPA\n");
- else
- len += snprintf(page + len, count - len,
- "non_WPA\n");
-
- }
-
- *eof = 1;
- return len;
-}
-
-static int proc_get_registers_0(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
-
- int len = 0;
- int i, n, page0;
-
- int max = 0xff;
- page0 = 0x000;
-
- len += snprintf(page + len, count - len,
- "\n####################page %x##################\n ",
- (page0>>8));
- len += snprintf(page + len, count - len,
- "\nD: OF > 00 01 02 03 04 05 06 07 08 09 0A 0B "
- "0C 0D 0E 0F");
- for (n = 0; n <= max;) {
- len += snprintf(page + len, count - len, "\nD: %2x > ", n);
- for (i = 0; i < 16 && n <= max; n++, i++)
- len += snprintf(page + len, count - len,
- "%2.2x ", read_nic_byte(dev,
- (page0 | n)));
- }
- len += snprintf(page + len, count - len, "\n");
- *eof = 1;
- return len;
-
-}
-static int proc_get_registers_1(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
-
- int len = 0;
- int i, n, page0;
-
- int max = 0xff;
- page0 = 0x100;
-
- /* This dump the current register page */
- len += snprintf(page + len, count - len,
- "\n####################page %x##################\n ",
- (page0>>8));
- len += snprintf(page + len, count - len,
- "\nD: OF > 00 01 02 03 04 05 06 07 08 09 0A 0B "
- "0C 0D 0E 0F");
- for (n = 0; n <= max;) {
- len += snprintf(page + len, count - len,
- "\nD: %2x > ", n);
- for (i = 0; i < 16 && n <= max; i++, n++)
- len += snprintf(page + len, count - len,
- "%2.2x ", read_nic_byte(dev,
- (page0 | n)));
- }
- len += snprintf(page + len, count - len, "\n");
- *eof = 1;
- return len;
-
-}
-static int proc_get_registers_2(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
-
- int len = 0;
- int i, n, page0;
-
- int max = 0xff;
- page0 = 0x200;
-
- /* This dump the current register page */
- len += snprintf(page + len, count - len,
- "\n####################page %x##################\n ",
- (page0 >> 8));
- len += snprintf(page + len, count - len,
- "\nD: OF > 00 01 02 03 04 05 06 07 08 09 0A 0B 0C "
- "0D 0E 0F");
- for (n = 0; n <= max;) {
- len += snprintf(page + len, count - len,
- "\nD: %2x > ", n);
- for (i = 0; i < 16 && n <= max; i++, n++)
- len += snprintf(page + len, count - len,
- "%2.2x ", read_nic_byte(dev,
- (page0 | n)));
- }
- len += snprintf(page + len, count - len, "\n");
- *eof = 1;
- return len;
-
-}
-static int proc_get_registers_3(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
-
- int len = 0;
- int i, n, page0;
-
- int max = 0xff;
- page0 = 0x300;
-
- /* This dump the current register page */
- len += snprintf(page + len, count - len,
- "\n####################page %x##################\n ",
- (page0>>8));
- len += snprintf(page + len, count - len,
- "\nD: OF > 00 01 02 03 04 05 06 07 08 09 0A 0B "
- "0C 0D 0E 0F");
- for (n = 0; n <= max;) {
- len += snprintf(page + len, count - len,
- "\nD: %2x > ", n);
- for (i = 0; i < 16 && n <= max; i++, n++)
- len += snprintf(page + len, count - len,
- "%2.2x ", read_nic_byte(dev,
- (page0 | n)));
- }
- len += snprintf(page + len, count - len, "\n");
- *eof = 1;
- return len;
-
-}
-static int proc_get_registers_4(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
-
- int len = 0;
- int i, n, page0;
-
- int max = 0xff;
- page0 = 0x400;
-
- /* This dump the current register page */
- len += snprintf(page + len, count - len,
- "\n####################page %x##################\n ",
- (page0>>8));
- len += snprintf(page + len, count - len,
- "\nD: OF > 00 01 02 03 04 05 06 07 08 09 0A 0B "
- "0C 0D 0E 0F");
- for (n = 0; n <= max;) {
- len += snprintf(page + len, count - len,
- "\nD: %2x > ", n);
- for (i = 0; i < 16 && n <= max; i++, n++)
- len += snprintf(page + len, count - len,
- "%2.2x ", read_nic_byte(dev,
- (page0 | n)));
- }
- len += snprintf(page + len, count - len, "\n");
- *eof = 1;
- return len;
-
-}
-static int proc_get_registers_5(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
-
- int len = 0;
- int i, n, page0;
-
- int max = 0xff;
- page0 = 0x500;
-
- /* This dump the current register page */
- len += snprintf(page + len, count - len,
- "\n####################page %x##################\n ",
- (page0 >> 8));
- len += snprintf(page + len, count - len,
- "\nD: OF > 00 01 02 03 04 05 06 07 08 09 0A 0B "
- "0C 0D 0E 0F");
- for (n = 0; n <= max;) {
- len += snprintf(page + len, count - len,
- "\nD: %2x > ", n);
- for (i = 0; i < 16 && n <= max; i++, n++)
- len += snprintf(page + len, count - len,
- "%2.2x ", read_nic_byte(dev,
- (page0 | n)));
- }
- len += snprintf(page + len, count - len, "\n");
- *eof = 1;
- return len;
-
-}
-static int proc_get_registers_6(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
-
- int len = 0;
- int i, n, page0;
-
- int max = 0xff;
- page0 = 0x600;
-
- /* This dump the current register page */
- len += snprintf(page + len, count - len,
- "\n####################page %x##################\n ",
- (page0>>8));
- len += snprintf(page + len, count - len,
- "\nD: OF > 00 01 02 03 04 05 06 07 08 09 0A 0B "
- "0C 0D 0E 0F");
- for (n = 0; n <= max;) {
- len += snprintf(page + len, count - len,
- "\nD: %2x > ", n);
- for (i = 0; i < 16 && n <= max; i++, n++)
- len += snprintf(page + len, count - len,
- "%2.2x ", read_nic_byte(dev,
- (page0 | n)));
- }
- len += snprintf(page + len, count - len, "\n");
- *eof = 1;
- return len;
-
-}
-static int proc_get_registers_7(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
-
- int len = 0;
- int i, n, page0;
-
- int max = 0xff;
- page0 = 0x700;
-
- /* This dump the current register page */
- len += snprintf(page + len, count - len,
- "\n####################page %x##################\n ",
- (page0 >> 8));
- len += snprintf(page + len, count - len,
- "\nD: OF > 00 01 02 03 04 05 06 07 08 09 0A 0B 0C "
- "0D 0E 0F");
- for (n = 0; n <= max;) {
- len += snprintf(page + len, count - len,
- "\nD: %2x > ", n);
- for (i = 0; i < 16 && n <= max; i++, n++)
- len += snprintf(page + len, count - len,
- "%2.2x ", read_nic_byte(dev,
- (page0 | n)));
- }
- len += snprintf(page + len, count - len, "\n");
- *eof = 1;
- return len;
-
-}
-static int proc_get_registers_8(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
-
- int len = 0;
- int i, n, page0;
-
- int max = 0xff;
- page0 = 0x800;
-
- /* This dump the current register page */
- len += snprintf(page + len, count - len,
- "\n####################page %x##################\n",
- (page0 >> 8));
- for (n = 0; n <= max;) {
- len += snprintf(page + len, count - len, "\nD: %2x > ", n);
- for (i = 0; i < 4 && n <= max; n += 4, i++)
- len += snprintf(page + len, count - len,
- "%8.8x ", rtl8192_QueryBBReg(dev,
- (page0 | n), bMaskDWord));
- }
- len += snprintf(page + len, count - len, "\n");
- *eof = 1;
- return len;
-
-}
-static int proc_get_registers_9(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
-
- int len = 0;
- int i, n, page0;
-
- int max = 0xff;
- page0 = 0x900;
-
- /* This dump the current register page */
- len += snprintf(page + len, count - len,
- "\n####################page %x##################\n",
- (page0>>8));
- for (n = 0; n <= max;) {
- len += snprintf(page + len, count - len, "\nD: %2x > ", n);
- for (i = 0; i < 4 && n <= max; n += 4, i++)
- len += snprintf(page + len, count - len,
- "%8.8x ", rtl8192_QueryBBReg(dev,
- (page0 | n), bMaskDWord));
- }
- len += snprintf(page + len, count - len, "\n");
- *eof = 1;
- return len;
-}
-static int proc_get_registers_a(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
-
- int len = 0;
- int i, n, page0;
-
- int max = 0xff;
- page0 = 0xa00;
-
- /* This dump the current register page */
- len += snprintf(page + len, count - len,
- "\n####################page %x##################\n",
- (page0>>8));
- for (n = 0; n <= max;) {
- len += snprintf(page + len, count - len, "\nD: %2x > ", n);
- for (i = 0; i < 4 && n <= max; n += 4, i++)
- len += snprintf(page + len, count - len,
- "%8.8x ", rtl8192_QueryBBReg(dev,
- (page0 | n), bMaskDWord));
- }
- len += snprintf(page + len, count - len, "\n");
- *eof = 1;
- return len;
-}
-static int proc_get_registers_b(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
-
- int len = 0;
- int i, n, page0;
-
- int max = 0xff;
- page0 = 0xb00;
-
- /* This dump the current register page */
- len += snprintf(page + len, count - len,
- "\n####################page %x##################\n",
- (page0 >> 8));
- for (n = 0; n <= max;) {
- len += snprintf(page + len, count - len, "\nD: %2x > ", n);
- for (i = 0; i < 4 && n <= max; n += 4, i++)
- len += snprintf(page + len, count - len,
- "%8.8x ", rtl8192_QueryBBReg(dev,
- (page0 | n), bMaskDWord));
- }
- len += snprintf(page + len, count - len, "\n");
- *eof = 1;
- return len;
-}
-static int proc_get_registers_c(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
-
- int len = 0;
- int i, n, page0;
-
- int max = 0xff;
- page0 = 0xc00;
-
- /* This dump the current register page */
- len += snprintf(page + len, count - len,
- "\n####################page %x##################\n",
- (page0>>8));
- for (n = 0; n <= max;) {
- len += snprintf(page + len, count - len, "\nD: %2x > ", n);
- for (i = 0; i < 4 && n <= max; n += 4, i++)
- len += snprintf(page + len, count - len,
- "%8.8x ", rtl8192_QueryBBReg(dev,
- (page0 | n), bMaskDWord));
- }
- len += snprintf(page + len, count - len, "\n");
- *eof = 1;
- return len;
-}
-static int proc_get_registers_d(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
-
- int len = 0;
- int i, n, page0;
-
- int max = 0xff;
- page0 = 0xd00;
-
- /* This dump the current register page */
- len += snprintf(page + len, count - len,
- "\n####################page %x##################\n",
- (page0>>8));
- for (n = 0; n <= max;) {
- len += snprintf(page + len, count - len, "\nD: %2x > ", n);
- for (i = 0; i < 4 && n <= max; n += 4, i++)
- len += snprintf(page + len, count - len,
- "%8.8x ", rtl8192_QueryBBReg(dev,
- (page0 | n), bMaskDWord));
- }
- len += snprintf(page + len, count - len, "\n");
- *eof = 1;
- return len;
-}
-static int proc_get_registers_e(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
-
- int len = 0;
- int i, n, page0;
-
- int max = 0xff;
- page0 = 0xe00;
-
- /* This dump the current register page */
- len += snprintf(page + len, count - len,
- "\n####################page %x##################\n",
- (page0>>8));
- for (n = 0; n <= max;) {
- len += snprintf(page + len, count - len, "\nD: %2x > ", n);
- for (i = 0; i < 4 && n <= max; n += 4, i++)
- len += snprintf(page + len, count - len,
- "%8.8x ", rtl8192_QueryBBReg(dev,
- (page0 | n), bMaskDWord));
- }
- len += snprintf(page + len, count - len, "\n");
- *eof = 1;
- return len;
-}
-
-static int proc_get_reg_rf_a(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
-
- int len = 0;
- int i, n;
-
- int max = 0xff;
-
- /* This dump the current register page */
- len += snprintf(page + len, count - len,
- "\n#################### RF-A ##################\n ");
- for (n = 0; n <= max;) {
- len += snprintf(page + len, count - len, "\nD: %2x > ", n);
- for (i = 0; i < 4 && n <= max; n += 4, i++)
- len += snprintf(page + len, count - len,
- "%8.8x ", rtl8192_phy_QueryRFReg(dev,
- (enum rf90_radio_path)RF90_PATH_A, n,
- bMaskDWord));
- }
- len += snprintf(page + len, count - len, "\n");
- *eof = 1;
- return len;
-}
-
-static int proc_get_reg_rf_b(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
-
- int len = 0;
- int i, n;
-
- int max = 0xff;
-
- /* This dump the current register page */
- len += snprintf(page + len, count - len,
- "\n#################### RF-B ##################\n ");
- for (n = 0; n <= max;) {
- len += snprintf(page + len, count - len, "\nD: %2x > ", n);
- for (i = 0; i < 4 && n <= max; n += 4, i++)
- len += snprintf(page + len, count - len,
- "%8.8x ", rtl8192_phy_QueryRFReg(dev,
- (enum rf90_radio_path)RF90_PATH_B, n,
- bMaskDWord));
- }
- len += snprintf(page + len, count - len, "\n");
- *eof = 1;
- return len;
-}
-
-static int proc_get_reg_rf_c(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
-
- int len = 0;
- int i, n;
-
- int max = 0xff;
-
- /* This dump the current register page */
- len += snprintf(page + len, count - len,
- "\n#################### RF-C ##################\n");
- for (n = 0; n <= max;) {
- len += snprintf(page + len, count - len, "\nD: %2x > ", n);
- for (i = 0; i < 4 && n <= max; n += 4, i++)
- len += snprintf(page + len, count - len,
- "%8.8x ", rtl8192_phy_QueryRFReg(dev,
- (enum rf90_radio_path)RF90_PATH_C, n,
- bMaskDWord));
- }
- len += snprintf(page + len, count - len, "\n");
- *eof = 1;
- return len;
-}
-
-static int proc_get_reg_rf_d(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
-
- int len = 0;
- int i, n;
-
- int max = 0xff;
-
- /* This dump the current register page */
- len += snprintf(page + len, count - len,
- "\n#################### RF-D ##################\n ");
- for (n = 0; n <= max;) {
- len += snprintf(page + len, count - len, "\nD: %2x > ", n);
- for (i = 0; i < 4 && n <= max; n += 4, i++)
- len += snprintf(page + len, count - len,
- "%8.8x ", rtl8192_phy_QueryRFReg(dev,
- (enum rf90_radio_path)RF90_PATH_D, n,
- bMaskDWord));
- }
- len += snprintf(page + len, count - len, "\n");
- *eof = 1;
- return len;
-}
-
-static int proc_get_cam_register_1(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- u32 target_command = 0;
- u32 target_content = 0;
- u8 entry_i = 0;
- u32 ulStatus;
- int len = 0;
- int i = 100, j = 0;
-
- /* This dump the current register page */
- len += snprintf(page + len, count - len,
- "\n#################### SECURITY CAM (0-10) ######"
- "############\n ");
- for (j = 0; j < 11; j++) {
- len += snprintf(page + len, count - len, "\nD: %2x > ", j);
- for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
- target_command = entry_i+CAM_CONTENT_COUNT*j;
- target_command = target_command | BIT31;
-
- while ((i--) >= 0) {
- ulStatus = read_nic_dword(dev, RWCAM);
- if (ulStatus & BIT31)
- continue;
- else
- break;
- }
- write_nic_dword(dev, RWCAM, target_command);
- target_content = read_nic_dword(dev, RCAMO);
- len += snprintf(page + len, count - len, "%8.8x ",
- target_content);
- }
- }
-
- len += snprintf(page + len, count - len, "\n");
- *eof = 1;
- return len;
-}
-
-static int proc_get_cam_register_2(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- u32 target_command = 0;
- u32 target_content = 0;
- u8 entry_i = 0;
- u32 ulStatus;
- int len = 0;
- int i = 100, j = 0;
-
- /* This dump the current register page */
- len += snprintf(page + len, count - len,
- "\n#################### SECURITY CAM (11-21) "
- "##################\n ");
- for (j = 11; j < 22; j++) {
- len += snprintf(page + len, count - len, "\nD: %2x > ", j);
- for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
- target_command = entry_i + CAM_CONTENT_COUNT * j;
- target_command = target_command | BIT31;
-
- while ((i--) >= 0) {
- ulStatus = read_nic_dword(dev, RWCAM);
- if (ulStatus & BIT31)
- continue;
- else
- break;
- }
- write_nic_dword(dev, RWCAM, target_command);
- target_content = read_nic_dword(dev, RCAMO);
- len += snprintf(page + len, count - len, "%8.8x ",
- target_content);
- }
- }
-
- len += snprintf(page + len, count - len, "\n");
- *eof = 1;
- return len;
-}
-
-static int proc_get_cam_register_3(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- u32 target_command = 0;
- u32 target_content = 0;
- u8 entry_i = 0;
- u32 ulStatus;
- int len = 0;
- int i = 100, j = 0;
-
- /* This dump the current register page */
- len += snprintf(page + len, count - len,
- "\n#################### SECURITY CAM (22-31) ######"
- "############\n ");
- for (j = 22; j < TOTAL_CAM_ENTRY; j++) {
- len += snprintf(page + len, count - len, "\nD: %2x > ", j);
- for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
- target_command = entry_i + CAM_CONTENT_COUNT * j;
- target_command = target_command | BIT31;
-
- while ((i--) >= 0) {
- ulStatus = read_nic_dword(dev, RWCAM);
- if (ulStatus & BIT31)
- continue;
- else
- break;
- }
- write_nic_dword(dev, RWCAM, target_command);
- target_content = read_nic_dword(dev, RCAMO);
- len += snprintf(page + len, count - len, "%8.8x ",
- target_content);
- }
- }
-
- len += snprintf(page + len, count - len, "\n");
- *eof = 1;
- return len;
-}
-static int proc_get_stats_tx(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
-
- int len = 0;
-
- len += snprintf(page + len, count - len,
- "TX VI priority ok int: %lu\n"
- "TX VO priority ok int: %lu\n"
- "TX BE priority ok int: %lu\n"
- "TX BK priority ok int: %lu\n"
- "TX MANAGE priority ok int: %lu\n"
- "TX BEACON priority ok int: %lu\n"
- "TX BEACON priority error int: %lu\n"
- "TX CMDPKT priority ok int: %lu\n"
- "TX queue stopped?: %d\n"
- "TX fifo overflow: %lu\n"
- "TX total data packets %lu\n"
- "TX total data bytes :%lu\n",
- priv->stats.txviokint,
- priv->stats.txvookint,
- priv->stats.txbeokint,
- priv->stats.txbkokint,
- priv->stats.txmanageokint,
- priv->stats.txbeaconokint,
- priv->stats.txbeaconerr,
- priv->stats.txcmdpktokint,
- netif_queue_stopped(dev),
- priv->stats.txoverflow,
- priv->rtllib->stats.tx_packets,
- priv->rtllib->stats.tx_bytes
-
-
- );
-
- *eof = 1;
- return len;
-}
-
-
-
-static int proc_get_stats_rx(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
-
- int len = 0;
-
- len += snprintf(page + len, count - len,
- "RX packets: %lu\n"
- "RX data crc err: %lu\n"
- "RX mgmt crc err: %lu\n"
- "RX desc err: %lu\n"
- "RX rx overflow error: %lu\n",
- priv->stats.rxint,
- priv->stats.rxdatacrcerr,
- priv->stats.rxmgmtcrcerr,
- priv->stats.rxrdu,
- priv->stats.rxoverflow);
-
- *eof = 1;
- return len;
-}
-
-void rtl8192_proc_module_init(void)
-{
- RT_TRACE(COMP_INIT, "Initializing proc filesystem");
- rtl8192_proc = create_proc_entry(DRV_NAME, S_IFDIR, init_net.proc_net);
-}
-
-
-void rtl8192_proc_module_remove(void)
-{
- remove_proc_entry(DRV_NAME, init_net.proc_net);
-}
-
-
-void rtl8192_proc_remove_one(struct net_device *dev)
-{
- struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
-
- printk(KERN_INFO "dev name %s\n", dev->name);
-
- if (priv->dir_dev) {
- remove_proc_entry("stats-tx", priv->dir_dev);
- remove_proc_entry("stats-rx", priv->dir_dev);
- remove_proc_entry("stats-ap", priv->dir_dev);
- remove_proc_entry("registers-0", priv->dir_dev);
- remove_proc_entry("registers-1", priv->dir_dev);
- remove_proc_entry("registers-2", priv->dir_dev);
- remove_proc_entry("registers-3", priv->dir_dev);
- remove_proc_entry("registers-4", priv->dir_dev);
- remove_proc_entry("registers-5", priv->dir_dev);
- remove_proc_entry("registers-6", priv->dir_dev);
- remove_proc_entry("registers-7", priv->dir_dev);
- remove_proc_entry("registers-8", priv->dir_dev);
- remove_proc_entry("registers-9", priv->dir_dev);
- remove_proc_entry("registers-a", priv->dir_dev);
- remove_proc_entry("registers-b", priv->dir_dev);
- remove_proc_entry("registers-c", priv->dir_dev);
- remove_proc_entry("registers-d", priv->dir_dev);
- remove_proc_entry("registers-e", priv->dir_dev);
- remove_proc_entry("RF-A", priv->dir_dev);
- remove_proc_entry("RF-B", priv->dir_dev);
- remove_proc_entry("RF-C", priv->dir_dev);
- remove_proc_entry("RF-D", priv->dir_dev);
- remove_proc_entry("SEC-CAM-1", priv->dir_dev);
- remove_proc_entry("SEC-CAM-2", priv->dir_dev);
- remove_proc_entry("SEC-CAM-3", priv->dir_dev);
- remove_proc_entry("wlan0", rtl8192_proc);
- priv->dir_dev = NULL;
- }
-}
-
-
-void rtl8192_proc_init_one(struct net_device *dev)
-{
- struct proc_dir_entry *e;
- struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
-
- priv->dir_dev = create_proc_entry(dev->name,
- S_IFDIR | S_IRUGO | S_IXUGO,
- rtl8192_proc);
- if (!priv->dir_dev) {
- RT_TRACE(COMP_ERR, "Unable to initialize /proc/net/rtl8192"
- "/%s\n", dev->name);
- return;
- }
- e = create_proc_read_entry("stats-rx", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_stats_rx, dev);
-
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/stats-rx\n",
- dev->name);
-
- e = create_proc_read_entry("stats-tx", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_stats_tx, dev);
-
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/stats-tx\n",
- dev->name);
-
- e = create_proc_read_entry("stats-ap", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_stats_ap, dev);
-
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/stats-ap\n",
- dev->name);
-
- e = create_proc_read_entry("registers-0", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_registers_0, dev);
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/registers-0\n",
- dev->name);
- e = create_proc_read_entry("registers-1", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_registers_1, dev);
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/registers-1\n",
- dev->name);
- e = create_proc_read_entry("registers-2", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_registers_2, dev);
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/registers-2\n",
- dev->name);
- e = create_proc_read_entry("registers-3", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_registers_3, dev);
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/registers-3\n",
- dev->name);
- e = create_proc_read_entry("registers-4", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_registers_4, dev);
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/registers-4\n",
- dev->name);
- e = create_proc_read_entry("registers-5", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_registers_5, dev);
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/registers-5\n",
- dev->name);
- e = create_proc_read_entry("registers-6", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_registers_6, dev);
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/registers-6\n",
- dev->name);
- e = create_proc_read_entry("registers-7", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_registers_7, dev);
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/registers-7\n",
- dev->name);
- e = create_proc_read_entry("registers-8", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_registers_8, dev);
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/registers-8\n",
- dev->name);
- e = create_proc_read_entry("registers-9", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_registers_9, dev);
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/registers-9\n",
- dev->name);
- e = create_proc_read_entry("registers-a", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_registers_a, dev);
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/registers-a\n",
- dev->name);
- e = create_proc_read_entry("registers-b", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_registers_b, dev);
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/registers-b\n",
- dev->name);
- e = create_proc_read_entry("registers-c", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_registers_c, dev);
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/registers-c\n",
- dev->name);
- e = create_proc_read_entry("registers-d", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_registers_d, dev);
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/registers-d\n",
- dev->name);
- e = create_proc_read_entry("registers-e", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_registers_e, dev);
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/registers-e\n",
- dev->name);
- e = create_proc_read_entry("RF-A", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_reg_rf_a, dev);
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/RF-A\n",
- dev->name);
- e = create_proc_read_entry("RF-B", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_reg_rf_b, dev);
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/RF-B\n",
- dev->name);
- e = create_proc_read_entry("RF-C", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_reg_rf_c, dev);
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/RF-C\n",
- dev->name);
- e = create_proc_read_entry("RF-D", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_reg_rf_d, dev);
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/RF-D\n",
- dev->name);
- e = create_proc_read_entry("SEC-CAM-1", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_cam_register_1, dev);
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/SEC-CAM-1\n",
- dev->name);
- e = create_proc_read_entry("SEC-CAM-2", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_cam_register_2, dev);
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/SEC-CAM-2\n",
- dev->name);
- e = create_proc_read_entry("SEC-CAM-3", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_cam_register_3, dev);
- if (!e)
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/SEC-CAM-3\n",
- dev->name);
-}
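
Unlike the other drivers touched by this patch, rtl8192e (the PCI variant) deletes its /proc debug interface outright instead of converting it, so the register and CAM dumps above simply go away. Purely as an aside and not part of the patch: if such a dump were still wanted, it would map onto a seq_file show routine in the same style, e.g. a hypothetical single register-page dump reusing the driver's read_nic_byte():

        /* Illustrative only; not in the patch.  Assumes <linux/seq_file.h>. */
        static int example_page0_show(struct seq_file *m, void *v)
        {
                struct net_device *dev = m->private;
                int n;

                seq_puts(m, "page 0:\n");
                for (n = 0; n <= 0xff; n++)
                        seq_printf(m, "%2.2x%c", read_nic_byte(dev, n),
                                   (n & 0xf) == 0xf ? '\n' : ' ');
                return 0;
        }
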
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
index 4217b88..e51cb49 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
@@ -412,19 +412,18 @@ static int rtllib_ccmp_get_key(void *key, int len, u8 *seq, void *priv)
}
-static char *rtllib_ccmp_print_stats(char *p, void *priv)
+static void rtllib_ccmp_print_stats(struct seq_file *m, void *priv)
{
struct rtllib_ccmp_data *ccmp = priv;
- p += sprintf(p, "key[%d] alg=CCMP key_set=%d "
- "tx_pn=%pM rx_pn=%pM "
- "format_errors=%d replays=%d decrypt_errors=%d\n",
- ccmp->key_idx, ccmp->key_set,
- ccmp->tx_pn, ccmp->rx_pn,
- ccmp->dot11RSNAStatsCCMPFormatErrors,
- ccmp->dot11RSNAStatsCCMPReplays,
- ccmp->dot11RSNAStatsCCMPDecryptErrors);
-
- return p;
+ seq_printf(m,
+ "key[%d] alg=CCMP key_set=%d "
+ "tx_pn=%pM rx_pn=%pM "
+ "format_errors=%d replays=%d decrypt_errors=%d\n",
+ ccmp->key_idx, ccmp->key_set,
+ ccmp->tx_pn, ccmp->rx_pn,
+ ccmp->dot11RSNAStatsCCMPFormatErrors,
+ ccmp->dot11RSNAStatsCCMPReplays,
+ ccmp->dot11RSNAStatsCCMPDecryptErrors);
}
static struct lib80211_crypto_ops rtllib_crypt_ccmp = {
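
The print_stats conversion here (and in the TKIP and WEP files below) changes the callback from returning an advanced char pointer to writing into a caller-supplied seq_file, so whatever emits the stats now just forwards its own seq_file. A minimal sketch of such a caller, assuming the lib80211_crypto_ops table carries the seq_file signature and the usual struct lib80211_crypt_data layout (the wiring of m->private below is hypothetical):

        static int crypt_stats_show(struct seq_file *m, void *v)
        {
                struct lib80211_crypt_data *crypt = m->private;   /* hypothetical wiring */

                if (crypt && crypt->ops && crypt->ops->print_stats)
                        crypt->ops->print_stats(m, crypt->priv);
                return 0;
        }
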
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index 8009250..5cfd73b 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -708,30 +708,30 @@ static int rtllib_tkip_get_key(void *key, int len, u8 *seq, void *priv)
}
-static char *rtllib_tkip_print_stats(char *p, void *priv)
+static void rtllib_tkip_print_stats(struct seq_file *m, void *priv)
{
struct rtllib_tkip_data *tkip = priv;
- p += sprintf(p, "key[%d] alg=TKIP key_set=%d "
- "tx_pn=%02x%02x%02x%02x%02x%02x "
- "rx_pn=%02x%02x%02x%02x%02x%02x "
- "replays=%d icv_errors=%d local_mic_failures=%d\n",
- tkip->key_idx, tkip->key_set,
- (tkip->tx_iv32 >> 24) & 0xff,
- (tkip->tx_iv32 >> 16) & 0xff,
- (tkip->tx_iv32 >> 8) & 0xff,
- tkip->tx_iv32 & 0xff,
- (tkip->tx_iv16 >> 8) & 0xff,
- tkip->tx_iv16 & 0xff,
- (tkip->rx_iv32 >> 24) & 0xff,
- (tkip->rx_iv32 >> 16) & 0xff,
- (tkip->rx_iv32 >> 8) & 0xff,
- tkip->rx_iv32 & 0xff,
- (tkip->rx_iv16 >> 8) & 0xff,
- tkip->rx_iv16 & 0xff,
- tkip->dot11RSNAStatsTKIPReplays,
- tkip->dot11RSNAStatsTKIPICVErrors,
- tkip->dot11RSNAStatsTKIPLocalMICFailures);
- return p;
+ seq_printf(m,
+ "key[%d] alg=TKIP key_set=%d "
+ "tx_pn=%02x%02x%02x%02x%02x%02x "
+ "rx_pn=%02x%02x%02x%02x%02x%02x "
+ "replays=%d icv_errors=%d local_mic_failures=%d\n",
+ tkip->key_idx, tkip->key_set,
+ (tkip->tx_iv32 >> 24) & 0xff,
+ (tkip->tx_iv32 >> 16) & 0xff,
+ (tkip->tx_iv32 >> 8) & 0xff,
+ tkip->tx_iv32 & 0xff,
+ (tkip->tx_iv16 >> 8) & 0xff,
+ tkip->tx_iv16 & 0xff,
+ (tkip->rx_iv32 >> 24) & 0xff,
+ (tkip->rx_iv32 >> 16) & 0xff,
+ (tkip->rx_iv32 >> 8) & 0xff,
+ tkip->rx_iv32 & 0xff,
+ (tkip->rx_iv16 >> 8) & 0xff,
+ tkip->rx_iv16 & 0xff,
+ tkip->dot11RSNAStatsTKIPReplays,
+ tkip->dot11RSNAStatsTKIPICVErrors,
+ tkip->dot11RSNAStatsTKIPLocalMICFailures);
}
static struct lib80211_crypto_ops rtllib_crypt_tkip = {
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_wep.c b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
index 8cdf389..c4df6e0 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_wep.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
@@ -247,12 +247,10 @@ static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
}
-static char *prism2_wep_print_stats(char *p, void *priv)
+static void prism2_wep_print_stats(struct seq_file *m, void *priv)
{
struct prism2_wep_data *wep = priv;
- p += sprintf(p, "key[%d] alg=WEP len=%d\n",
- wep->key_idx, wep->key_len);
- return p;
+ seq_printf(m, "key[%d] alg=WEP len=%d\n", wep->key_idx, wep->key_len);
}
static struct lib80211_crypto_ops rtllib_crypt_wep = {
diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
index f9dae95..84ea721 100644
--- a/drivers/staging/rtl8192e/rtllib_module.c
+++ b/drivers/staging/rtl8192e/rtllib_module.c
@@ -208,61 +208,51 @@ static int debug = \
;
static struct proc_dir_entry *rtllib_proc;
-static int show_debug_level(char *page, char **start, off_t offset,
- int count, int *eof, void *data)
+static int show_debug_level(struct seq_file *m, void *v)
{
- return snprintf(page, count, "0x%08X\n", rtllib_debug_level);
+ return seq_printf(m, "0x%08X\n", rtllib_debug_level);
}
-static int store_debug_level(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static ssize_t write_debug_level(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
{
- char buf[] = "0x00000000";
- unsigned long len = min((unsigned long)sizeof(buf) - 1, count);
- char *p = (char *)buf;
unsigned long val;
+ int err = kstrtoul_from_user(buffer, count, 0, &val);
+ if (err)
+ return err;
+ rtllib_debug_level = val;
+ return count;
+}
- if (copy_from_user(buf, buffer, len))
- return count;
- buf[len] = 0;
- if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
- p++;
- if (p[0] == 'x' || p[0] == 'X')
- p++;
- val = simple_strtoul(p, &p, 16);
- } else
- val = simple_strtoul(p, &p, 10);
- if (p == buf)
- printk(KERN_INFO DRV_NAME
- ": %s is not in hex or decimal form.\n", buf);
- else
- rtllib_debug_level = val;
-
- return strnlen(buf, count);
+static int open_debug_level(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_debug_level, NULL);
}
+static const struct file_operations fops = {
+ .open = open_debug_level,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = write_debug_level
+};
+
int __init rtllib_init(void)
{
struct proc_dir_entry *e;
rtllib_debug_level = debug;
- rtllib_proc = create_proc_entry(DRV_NAME, S_IFDIR, init_net.proc_net);
+ rtllib_proc = proc_mkdir(DRV_NAME, init_net.proc_net);
if (rtllib_proc == NULL) {
RTLLIB_ERROR("Unable to create " DRV_NAME
" proc directory\n");
return -EIO;
}
- e = create_proc_entry("debug_level", S_IFREG | S_IRUGO | S_IWUSR,
- rtllib_proc);
+ e = proc_create("debug_level", S_IRUGO | S_IWUSR, rtllib_proc, &fops);
if (!e) {
remove_proc_entry(DRV_NAME, init_net.proc_net);
rtllib_proc = NULL;
return -EIO;
}
- e->read_proc = show_debug_level;
- e->write_proc = store_debug_level;
- e->data = NULL;
-
return 0;
}
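
Two small notes on the hunk above. kstrtoul_from_user() with base 0 accepts both the old "0x..." hex spelling and plain decimal, so the hand-rolled parser can be dropped without changing the accepted input. Also, show_debug_level() here (and its ieee80211 twin below) returns seq_printf()'s value, which only works while seq_printf() still returns int; the conventional single_open() show shape is the following (a sketch, not a change to the patch):

        static int show_debug_level(struct seq_file *m, void *v)
        {
                seq_printf(m, "0x%08X\n", rtllib_debug_level);
                return 0;       /* 0 = success; seq_file handles buffer sizing itself */
        }
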
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
index 76c56e5..e0870c0 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
@@ -243,39 +243,34 @@ static int debug = \
;
struct proc_dir_entry *ieee80211_proc;
-static int show_debug_level(char *page, char **start, off_t offset,
- int count, int *eof, void *data)
+static int show_debug_level(struct seq_file *m, void *v)
{
- return snprintf(page, count, "0x%08X\n", ieee80211_debug_level);
+ return seq_printf(m, "0x%08X\n", ieee80211_debug_level);
}
-static int store_debug_level(struct file *file, const char *buffer,
- unsigned long count, void *data)
+static ssize_t write_debug_level(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
{
- char buf[] = "0x00000000";
- unsigned long len = min_t(unsigned long, sizeof(buf) - 1, count);
- char *p = (char *)buf;
unsigned long val;
+ int err = kstrtoul_from_user(buffer, count, 0, &val);
+ if (err)
+ return err;
+ ieee80211_debug_level = val;
+ return count;
+}
- if (copy_from_user(buf, buffer, len))
- return count;
- buf[len] = 0;
- if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
- p++;
- if (p[0] == 'x' || p[0] == 'X')
- p++;
- val = simple_strtoul(p, &p, 16);
- } else
- val = simple_strtoul(p, &p, 10);
- if (p == buf)
- printk(KERN_INFO DRV_NAME
- ": %s is not in hex or decimal form.\n", buf);
- else
- ieee80211_debug_level = val;
-
- return strnlen(buf, count);
+static int open_debug_level(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_debug_level, NULL);
}
+static const struct file_operations fops = {
+ .open = open_debug_level,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = write_debug_level
+};
+
int __init ieee80211_debug_init(void)
{
struct proc_dir_entry *e;
@@ -288,17 +283,13 @@ int __init ieee80211_debug_init(void)
" proc directory\n");
return -EIO;
}
- e = create_proc_entry("debug_level", S_IFREG | S_IRUGO | S_IWUSR,
- ieee80211_proc);
+ e = proc_create("debug_level", S_IRUGO | S_IWUSR,
+ ieee80211_proc, &fops);
if (!e) {
remove_proc_entry(DRV_NAME, init_net.proc_net);
ieee80211_proc = NULL;
return -EIO;
}
- e->read_proc = show_debug_level;
- e->write_proc = store_debug_level;
- e->data = NULL;
-
return 0;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/proc.c b/drivers/staging/rtl8192u/ieee80211/proc.c
index 6eda928..c426dfd 100644
--- a/drivers/staging/rtl8192u/ieee80211/proc.c
+++ b/drivers/staging/rtl8192u/ieee80211/proc.c
@@ -99,7 +99,7 @@ static int crypto_info_open(struct inode *inode, struct file *file)
return seq_open(file, &crypto_seq_ops);
}
-static struct file_operations proc_crypto_ops = {
+static const struct file_operations proc_crypto_ops = {
.open = crypto_info_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -108,9 +108,5 @@ static struct file_operations proc_crypto_ops = {
void __init crypto_init_proc(void)
{
- struct proc_dir_entry *proc;
-
- proc = create_proc_entry("crypto", 0, NULL);
- if (proc)
- proc->proc_fops = &proc_crypto_ops;
+ proc_create("crypto", 0, NULL, &proc_crypto_ops);
}
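
proc_create() allocates the entry and wires proc_fops in one step, which is why the two-stage create_proc_entry() plus manual ->proc_fops assignment disappears. The return value is simply ignored here; if failure mattered, it could still be checked, for example (illustrative only, not part of the patch):

        void __init crypto_init_proc(void)
        {
                if (!proc_create("crypto", 0, NULL, &proc_crypto_ops))
                        pr_warn("ieee80211: unable to create crypto proc entry\n");
        }
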
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
index e538e02..bedeb33 100644
--- a/drivers/staging/rtl8192u/r8192U.h
+++ b/drivers/staging/rtl8192u/r8192U.h
@@ -946,7 +946,6 @@ typedef struct r8192_priv {
/*stats*/
struct Stats stats;
struct iw_statistics wstats;
- struct proc_dir_entry *dir_dev;
/*RX stuff*/
// u32 *rxring;
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index f7de2f6..71f5cde 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -71,6 +71,8 @@ double __extendsfdf2(float a) {return a;}
//#include "r8192xU_phyreg.h"
#include <linux/usb.h>
#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
// FIXME: check if 2.6.7 is ok
#ifdef CONFIG_RTL8192_PM
@@ -472,103 +474,73 @@ void watch_dog_timer_callback(unsigned long data);
static struct proc_dir_entry *rtl8192_proc;
-static int proc_get_stats_ap(char *page, char **start, off_t offset, int count,
- int *eof, void *data)
+static int proc_get_stats_ap(struct seq_file *m, void *v)
{
- struct net_device *dev = data;
+ struct net_device *dev = m->private;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct ieee80211_device *ieee = priv->ieee80211;
struct ieee80211_network *target;
- int len = 0;
-
list_for_each_entry(target, &ieee->network_list, list) {
-
- len += snprintf(page + len, count - len, "%s ", target->ssid);
-
+ const char *wpa = "non_WPA";
if (target->wpa_ie_len > 0 || target->rsn_ie_len > 0)
- len += snprintf(page + len, count - len, "WPA\n");
- else
- len += snprintf(page + len, count - len, "non_WPA\n");
+ wpa = "WPA";
+
+ seq_printf(m, "%s %s\n", target->ssid, wpa);
}
- *eof = 1;
- return len;
+ return 0;
}
-static int proc_get_registers(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
+static int proc_get_registers(struct seq_file *m, void *v)
{
- struct net_device *dev = data;
-// struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
+ struct net_device *dev = m->private;
+ int i,n, max = 0xff;
+ int i, n, max = 0xff;
- int len = 0;
- int i,n;
-
- int max=0xff;
-
- /* This dump the current register page */
- len += snprintf(page + len, count - len,
- "\n####################page 0##################\n ");
+ seq_puts(m, "\n####################page 0##################\n ");
for (n=0;n<=max;) {
//printk( "\nD: %2x> ", n);
- len += snprintf(page + len, count - len,
- "\nD: %2x > ",n);
+ seq_printf(m, "\nD: %2x > ",n);
for (i=0;i<16 && n<=max;i++,n++)
- len += snprintf(page + len, count - len,
- "%2x ",read_nic_byte(dev,0x000|n));
+ seq_printf(m, "%2x ",read_nic_byte(dev,0x000|n));
// printk("%2x ",read_nic_byte(dev,n));
}
- len += snprintf(page + len, count - len,
- "\n####################page 1##################\n ");
+
+ seq_puts(m, "\n####################page 1##################\n ");
for (n=0;n<=max;) {
//printk( "\nD: %2x> ", n);
- len += snprintf(page + len, count - len,
- "\nD: %2x > ",n);
+ seq_printf(m, "\nD: %2x > ",n);
for (i=0;i<16 && n<=max;i++,n++)
- len += snprintf(page + len, count - len,
- "%2x ",read_nic_byte(dev,0x100|n));
+ seq_printf(m, "%2x ",read_nic_byte(dev,0x100|n));
// printk("%2x ",read_nic_byte(dev,n));
}
- len += snprintf(page + len, count - len,
- "\n####################page 3##################\n ");
+
+ seq_puts(m, "\n####################page 3##################\n ");
for (n=0;n<=max;) {
//printk( "\nD: %2x> ", n);
- len += snprintf(page + len, count - len,
- "\nD: %2x > ",n);
+ seq_printf(m, "\nD: %2x > ",n);
for(i=0;i<16 && n<=max;i++,n++)
- len += snprintf(page + len, count - len,
- "%2x ",read_nic_byte(dev,0x300|n));
+ seq_printf(m, "%2x ",read_nic_byte(dev,0x300|n));
// printk("%2x ",read_nic_byte(dev,n));
}
- len += snprintf(page + len, count - len,"\n");
- *eof = 1;
- return len;
+ seq_putc(m, '\n');
+ return 0;
}
-
-
-
-
-static int proc_get_stats_tx(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
+static int proc_get_stats_tx(struct seq_file *m, void *v)
{
- struct net_device *dev = data;
+ struct net_device *dev = m->private;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- int len = 0;
-
- len += snprintf(page + len, count - len,
+ seq_printf(m,
"TX VI priority ok int: %lu\n"
"TX VI priority error int: %lu\n"
"TX VO priority ok int: %lu\n"
@@ -629,22 +601,15 @@ static int proc_get_stats_tx(char *page, char **start,
// priv->stats.txbeaconerr
);
- *eof = 1;
- return len;
+ return 0;
}
-
-
-static int proc_get_stats_rx(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
+static int proc_get_stats_rx(struct seq_file *m, void *v)
{
- struct net_device *dev = data;
+ struct net_device *dev = m->private;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- int len = 0;
-
- len += snprintf(page + len, count - len,
+ seq_printf(m,
"RX packets: %lu\n"
"RX urb status error: %lu\n"
"RX invalid urb error: %lu\n",
@@ -652,9 +617,9 @@ static int proc_get_stats_rx(char *page, char **start,
priv->stats.rxstaterr,
priv->stats.rxurberr);
- *eof = 1;
- return len;
+ return 0;
}
+
void rtl8192_proc_module_init(void)
{
RT_TRACE(COMP_INIT, "Initializing proc filesystem");
@@ -667,74 +632,70 @@ void rtl8192_proc_module_remove(void)
remove_proc_entry(RTL819xU_MODULE_NAME, init_net.proc_net);
}
-
-void rtl8192_proc_remove_one(struct net_device *dev)
+/*
+ * seq_file wrappers for procfile show routines.
+ */
+static int rtl8192_proc_open(struct inode *inode, struct file *file)
{
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
+ struct net_device *dev = proc_get_parent_data(inode);
+ int (*show)(struct seq_file *, void *) = PDE_DATA(inode);
-
- if (priv->dir_dev) {
- // remove_proc_entry("stats-hw", priv->dir_dev);
- remove_proc_entry("stats-tx", priv->dir_dev);
- remove_proc_entry("stats-rx", priv->dir_dev);
- // remove_proc_entry("stats-ieee", priv->dir_dev);
- remove_proc_entry("stats-ap", priv->dir_dev);
- remove_proc_entry("registers", priv->dir_dev);
- // remove_proc_entry("cck-registers",priv->dir_dev);
- // remove_proc_entry("ofdm-registers",priv->dir_dev);
- //remove_proc_entry(dev->name, rtl8192_proc);
- remove_proc_entry("wlan0", rtl8192_proc);
- priv->dir_dev = NULL;
- }
+ return single_open(file, show, dev);
}
+static const struct file_operations rtl8192_proc_fops = {
+ .open = rtl8192_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
-void rtl8192_proc_init_one(struct net_device *dev)
-{
- struct proc_dir_entry *e;
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- priv->dir_dev = proc_mkdir(dev->name, rtl8192_proc);
- if (!priv->dir_dev) {
- RT_TRACE(COMP_ERR, "Unable to initialize /proc/net/rtl8192/%s\n",
- dev->name);
- return;
- }
- e = create_proc_read_entry("stats-rx", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_stats_rx, dev);
-
- if (!e) {
- RT_TRACE(COMP_ERR,"Unable to initialize "
- "/proc/net/rtl8192/%s/stats-rx\n",
- dev->name);
- }
-
+/*
+ * Table of proc files we need to create.
+ */
+struct rtl8192_proc_file {
+ char name[12];
+ int (*show)(struct seq_file *, void *);
+};
- e = create_proc_read_entry("stats-tx", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_stats_tx, dev);
+static const struct rtl8192_proc_file rtl8192_proc_files[] = {
+ { "stats-rx", &proc_get_stats_rx },
+ { "stats-tx", &proc_get_stats_tx },
+ { "stats-ap", &proc_get_stats_ap },
+ { "registers", &proc_get_registers },
+ { "" }
+};
- if (!e) {
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/stats-tx\n",
- dev->name);
- }
+void rtl8192_proc_init_one(struct net_device *dev)
+{
+ const struct rtl8192_proc_file *f;
+ struct proc_dir_entry *dir;
- e = create_proc_read_entry("stats-ap", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_stats_ap, dev);
+ if (rtl8192_proc) {
+ dir = proc_mkdir_data(dev->name, 0, rtl8192_proc, dev);
+ if (!dir) {
+ RT_TRACE(COMP_ERR, "Unable to initialize /proc/net/rtl8192/%s\n",
+ dev->name);
+ return;
+ }
- if (!e) {
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/stats-ap\n",
- dev->name);
+ for (f = rtl8192_proc_files; f->name[0]; f++) {
+ if (!proc_create_data(f->name, S_IFREG | S_IRUGO, dir,
+ &rtl8192_proc_fops, f->show)) {
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/%s\n",
+ dev->name, f->name);
+ return;
+ }
+ }
}
+}
- e = create_proc_read_entry("registers", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_registers, dev);
- if (!e) {
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/registers\n",
- dev->name);
- }
+void rtl8192_proc_remove_one(struct net_device *dev)
+{
+ remove_proc_subtree(dev->name, rtl8192_proc);
}
+
/****************************************************************************
-----------------------------MISC STUFF-------------------------
*****************************************************************************/
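
The rtl8180 and rtl8192U conversions in this patch end up with the same shape: a sentinel-terminated name/show table, one shared file_operations whose open routine pulls the show pointer out of PDE_DATA(), and a per-device directory whose data is the net_device, torn down in one remove_proc_subtree() call. A generic helper factoring the creation loop could look like this (illustrative only; the patch keeps the two copies separate):

        struct proc_show_file {
                const char *name;
                int (*show)(struct seq_file *, void *);
        };

        /* Create one read-only proc file per table entry under @dir; the table
         * ends with a NULL name.  Assumes @fops->open recovers the show routine
         * via PDE_DATA(), as in the hunks above. */
        static int create_show_files(struct proc_dir_entry *dir,
                                     const struct file_operations *fops,
                                     const struct proc_show_file *files)
        {
                const struct proc_show_file *f;

                for (f = files; f->name; f++)
                        if (!proc_create_data(f->name, S_IFREG | S_IRUGO, dir,
                                              fops, f->show))
                                return -ENOMEM;
                return 0;
        }
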
diff --git a/drivers/staging/rts5139/rts51x_scsi.c b/drivers/staging/rts5139/rts51x_scsi.c
index b58f1df..6108705 100644
--- a/drivers/staging/rts5139/rts51x_scsi.c
+++ b/drivers/staging/rts5139/rts51x_scsi.c
@@ -1968,18 +1968,16 @@ int slave_configure(struct scsi_device *sdev)
/* we use this macro to help us write into the buffer */
#undef SPRINTF
-#define SPRINTF(args...) \
- do { if (pos < buffer+length) pos += sprintf(pos, ## args); } while (0)
+#define SPRINTF(args...) seq_printf(m, ##args)
-int proc_info(struct Scsi_Host *host, char *buffer,
- char **start, off_t offset, int length, int inout)
+static int write_info(struct Scsi_Host *host, char *buffer, int length)
{
- char *pos = buffer;
-
/* if someone is sending us data, just throw it away */
- if (inout)
- return length;
+ return length;
+}
+static int show_info(struct seq_file *m, struct Scsi_Host *host)
+{
/* print the controller name */
SPRINTF(" Host scsi%d: %s\n", host->host_no, RTS51X_NAME);
@@ -1988,18 +1986,7 @@ int proc_info(struct Scsi_Host *host, char *buffer,
SPRINTF(" Product: RTS51xx USB Card Reader\n");
SPRINTF(" Version: %s\n", DRIVER_VERSION);
SPRINTF(" Build: %s\n", __TIME__);
-
- /*
- * Calculate start of next buffer, and return value.
- */
- *start = buffer + offset;
-
- if ((pos - buffer) < offset)
- return 0;
- else if ((pos - buffer - offset) < length)
- return pos - buffer - offset;
- else
- return length;
+ return 0;
}
/* queue a command */
@@ -2100,7 +2087,8 @@ struct scsi_host_template rts51x_host_template = {
/* basic userland interface stuff */
.name = RTS51X_NAME,
.proc_name = RTS51X_NAME,
- .proc_info = proc_info,
+ .show_info = show_info,
+ .write_info = write_info,
.info = rts5139_info,
/* command interface -- queued only */
diff --git a/drivers/staging/rts5139/rts51x_scsi.h b/drivers/staging/rts5139/rts51x_scsi.h
index 3a52136..1a0d705 100644
--- a/drivers/staging/rts5139/rts51x_scsi.h
+++ b/drivers/staging/rts5139/rts51x_scsi.h
@@ -147,8 +147,6 @@ struct scsi_cmnd;
int slave_alloc(struct scsi_device *sdev);
int slave_configure(struct scsi_device *sdev);
-int proc_info(struct Scsi_Host *host, char *buffer,
- char **start, off_t offset, int length, int inout);
int queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
int command_abort(struct scsi_cmnd *srb);
int bus_reset(struct scsi_cmnd *srb);
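
For reference, after this conversion the SCSI midlayer creates and owns /proc/scsi/<proc_name>/<host_no> itself and calls the template hooks with a ready seq_file: show_info takes (struct seq_file *, struct Scsi_Host *) and write_info takes (struct Scsi_Host *, char *, int). A minimal sketch of a template wired this way (names below are placeholders, not this driver's):

        static int example_show_info(struct seq_file *m, struct Scsi_Host *host)
        {
                seq_printf(m, " Host scsi%d\n", host->host_no);
                return 0;
        }

        static int example_write_info(struct Scsi_Host *host, char *buffer, int length)
        {
                return length;          /* accept and discard writes */
        }

        static struct scsi_host_template example_template = {
                .name           = "example",
                .proc_name      = "example",
                .show_info      = example_show_info,
                .write_info     = example_write_info,
        };
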
diff --git a/drivers/staging/silicom/Makefile b/drivers/staging/silicom/Makefile
index 80e6d12..ca83594 100644
--- a/drivers/staging/silicom/Makefile
+++ b/drivers/staging/silicom/Makefile
@@ -4,6 +4,3 @@
obj-$(CONFIG_BPCTL) += bpctl_mod.o
obj-$(CONFIG_SBYPASS) += bypasslib/
-
-
-bpctl_mod-objs := bp_mod.o bp_proc.o
diff --git a/drivers/staging/silicom/bp_proc.c b/drivers/staging/silicom/bp_proc.c
deleted file mode 100644
index a01ca97..0000000
--- a/drivers/staging/silicom/bp_proc.c
+++ /dev/null
@@ -1,1327 +0,0 @@
-/******************************************************************************/
-/* */
-/* Copyright (c) 2004-2006 Silicom, Ltd */
-/* All rights reserved. */
-/* */
-/* This program is free software; you can redistribute it and/or modify */
-/* it under the terms of the GNU General Public License as published by */
-/* the Free Software Foundation, located in the file LICENSE. */
-/* */
-/* */
-/******************************************************************************/
-
-#if defined(CONFIG_SMP) && !defined(__SMP__)
-#define __SMP__
-#endif
-
-#include <linux/proc_fs.h>
-#include <linux/netdevice.h>
-#include <asm/uaccess.h>
-/* #include <linux/smp_lock.h> */
-#include "bp_mod.h"
-
-#define BP_PROC_DIR "bypass"
-/* #define BYPASS_SUPPORT "bypass" */
-
-#ifdef BYPASS_SUPPORT
-
-#define GPIO6_SET_ENTRY_SD "gpio6_set"
-#define GPIO6_CLEAR_ENTRY_SD "gpio6_clear"
-
-#define GPIO7_SET_ENTRY_SD "gpio7_set"
-#define GPIO7_CLEAR_ENTRY_SD "gpio7_clear"
-
-#define PULSE_SET_ENTRY_SD "pulse_set"
-#define ZERO_SET_ENTRY_SD "zero_set"
-#define PULSE_GET1_ENTRY_SD "pulse_get1"
-#define PULSE_GET2_ENTRY_SD "pulse_get2"
-
-#define CMND_ON_ENTRY_SD "cmnd_on"
-#define CMND_OFF_ENTRY_SD "cmnd_off"
-#define RESET_CONT_ENTRY_SD "reset_cont"
-
- /*COMMANDS*/
-#define BYPASS_INFO_ENTRY_SD "bypass_info"
-#define BYPASS_SLAVE_ENTRY_SD "bypass_slave"
-#define BYPASS_CAPS_ENTRY_SD "bypass_caps"
-#define WD_SET_CAPS_ENTRY_SD "wd_set_caps"
-#define BYPASS_ENTRY_SD "bypass"
-#define BYPASS_CHANGE_ENTRY_SD "bypass_change"
-#define BYPASS_WD_ENTRY_SD "bypass_wd"
-#define WD_EXPIRE_TIME_ENTRY_SD "wd_expire_time"
-#define RESET_BYPASS_WD_ENTRY_SD "reset_bypass_wd"
-#define DIS_BYPASS_ENTRY_SD "dis_bypass"
-#define BYPASS_PWUP_ENTRY_SD "bypass_pwup"
-#define BYPASS_PWOFF_ENTRY_SD "bypass_pwoff"
-#define STD_NIC_ENTRY_SD "std_nic"
-#define STD_NIC_ENTRY_SD "std_nic"
-#define TAP_ENTRY_SD "tap"
-#define TAP_CHANGE_ENTRY_SD "tap_change"
-#define DIS_TAP_ENTRY_SD "dis_tap"
-#define TAP_PWUP_ENTRY_SD "tap_pwup"
-#define TWO_PORT_LINK_ENTRY_SD "two_port_link"
-#define WD_EXP_MODE_ENTRY_SD "wd_exp_mode"
-#define WD_AUTORESET_ENTRY_SD "wd_autoreset"
-#define TPL_ENTRY_SD "tpl"
-#define WAIT_AT_PWUP_ENTRY_SD "wait_at_pwup"
-#define HW_RESET_ENTRY_SD "hw_reset"
-#define DISC_ENTRY_SD "disc"
-#define DISC_CHANGE_ENTRY_SD "disc_change"
-#define DIS_DISC_ENTRY_SD "dis_disc"
-#define DISC_PWUP_ENTRY_SD "disc_pwup"
-
-static struct proc_dir_entry *bp_procfs_dir;
-
-static struct proc_dir_entry *proc_getdir(char *name,
- struct proc_dir_entry *proc_dir)
-{
- struct proc_dir_entry *pde = proc_dir;
- for (pde = pde->subdir; pde; pde = pde->next) {
- if (pde->namelen && (strcmp(name, pde->name) == 0)) {
- /* directory exists */
- break;
- }
- }
- if (pde == (struct proc_dir_entry *)0) {
- /* create the directory */
- pde = create_proc_entry(name, S_IFDIR, proc_dir);
- if (pde == (struct proc_dir_entry *)0)
- return pde;
- }
- return pde;
-}
-
-int
-bypass_proc_create_entry_sd(struct pfs_unit *pfs_unit_curr,
- char *proc_name,
- write_proc_t *write_proc,
- read_proc_t *read_proc,
- struct proc_dir_entry *parent_pfs, void *data)
-{
- strcpy(pfs_unit_curr->proc_name, proc_name);
- pfs_unit_curr->proc_entry = create_proc_entry(pfs_unit_curr->proc_name,
- S_IFREG | S_IRUSR |
- S_IWUSR | S_IRGRP |
- S_IROTH, parent_pfs);
- if (pfs_unit_curr->proc_entry == 0)
- return -1;
-
- pfs_unit_curr->proc_entry->read_proc = read_proc;
- pfs_unit_curr->proc_entry->write_proc = write_proc;
- pfs_unit_curr->proc_entry->data = data;
-
- return 0;
-
-}
-
-int
-get_bypass_info_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
- int len = 0;
-
- len += sprintf(page, "Name\t\t\t%s\n", pbp_device_block->bp_name);
- len +=
- sprintf(page + len, "Firmware version\t0x%x\n",
- pbp_device_block->bp_fw_ver);
-
- *eof = 1;
- return len;
-}
-
-int
-get_bypass_slave_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- struct pci_dev *pci_slave_dev = pbp_device_block->bp_slave;
- struct net_device *net_slave_dev;
- int len = 0;
-
- if (is_bypass_fn(pbp_device_block)) {
- net_slave_dev = pci_get_drvdata(pci_slave_dev);
- if (net_slave_dev)
- len = sprintf(page, "%s\n", net_slave_dev->name);
- else
- len = sprintf(page, "fail\n");
- } else
- len = sprintf(page, "fail\n");
-
- *eof = 1;
- return len;
-}
-
-int
-get_bypass_caps_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_bypass_caps_fn(pbp_device_block);
- if (ret == BP_NOT_CAP)
- len = sprintf(page, "-1\n");
- else
- len = sprintf(page, "0x%x\n", ret);
- *eof = 1;
- return len;
-
-}
-
-int
-get_wd_set_caps_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_wd_set_caps_fn(pbp_device_block);
- if (ret == BP_NOT_CAP)
- len = sprintf(page, "-1\n");
- else
- len = sprintf(page, "0x%x\n", ret);
- *eof = 1;
- return len;
-}
-
-int
-set_bypass_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int bypass_param = 0, length = 0;
-
- if (count > (sizeof(kbuf) - 1))
- return -1;
-
- if (copy_from_user(&kbuf, buffer, count))
- return -1;
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- bypass_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- bypass_param = 0;
-
- set_bypass_fn(pbp_device_block, bypass_param);
-
- return count;
-}
-
-int
-set_tap_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int tap_param = 0, length = 0;
-
- if (count > (sizeof(kbuf) - 1))
- return -1;
-
- if (copy_from_user(&kbuf, buffer, count))
- return -1;
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- tap_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- tap_param = 0;
-
- set_tap_fn(pbp_device_block, tap_param);
-
- return count;
-}
-
-int
-set_disc_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int tap_param = 0, length = 0;
-
- if (count > (sizeof(kbuf) - 1))
- return -1;
-
- if (copy_from_user(&kbuf, buffer, count))
- return -1;
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- tap_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- tap_param = 0;
-
- set_disc_fn(pbp_device_block, tap_param);
-
- return count;
-}
-
-int
-get_bypass_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_bypass_fn(pbp_device_block);
- if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
- else if (ret == 1)
- len = sprintf(page, "on\n");
- else if (ret == 0)
- len = sprintf(page, "off\n");
-
- *eof = 1;
- return len;
-}
-
-int
-get_tap_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_tap_fn(pbp_device_block);
- if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
- else if (ret == 1)
- len = sprintf(page, "on\n");
- else if (ret == 0)
- len = sprintf(page, "off\n");
-
- *eof = 1;
- return len;
-}
-
-int
-get_disc_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_disc_fn(pbp_device_block);
- if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
- else if (ret == 1)
- len = sprintf(page, "on\n");
- else if (ret == 0)
- len = sprintf(page, "off\n");
-
- *eof = 1;
- return len;
-}
-
-int
-get_bypass_change_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_bypass_change_fn(pbp_device_block);
- if (ret == 1)
- len = sprintf(page, "on\n");
- else if (ret == 0)
- len = sprintf(page, "off\n");
- else
- len = sprintf(page, "fail\n");
-
- *eof = 1;
- return len;
-}
-
-int
-get_tap_change_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_tap_change_fn(pbp_device_block);
- if (ret == 1)
- len = sprintf(page, "on\n");
- else if (ret == 0)
- len = sprintf(page, "off\n");
- else
- len = sprintf(page, "fail\n");
-
- *eof = 1;
- return len;
-}
-
-int
-get_disc_change_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_disc_change_fn(pbp_device_block);
- if (ret == 1)
- len = sprintf(page, "on\n");
- else if (ret == 0)
- len = sprintf(page, "off\n");
- else
- len = sprintf(page, "fail\n");
-
- *eof = 1;
- return len;
-}
-
-int
-set_bypass_wd_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- unsigned int timeout = 0;
- char *timeout_ptr = kbuf;
-
- if (copy_from_user(&kbuf, buffer, count))
- return -1;
-
- timeout_ptr = kbuf;
- timeout = atoi(&timeout_ptr);
-
- set_bypass_wd_fn(pbp_device_block, timeout);
-
- return count;
-}
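Note that this writer (and set_wd_autoreset_pfs below) copies 'count' bytes into a 256-byte stack buffer with no size check and parses it with a hand-rolled atoi() over a buffer that is never NUL-terminated. The rewrite later in the patch switches these to kstrtoint_from_user(); roughly (placeholder handler):

static ssize_t example_timeout_write(struct file *file,
				     const char __user *buffer,
				     size_t count, loff_t *pos)
{
	int timeout;
	int ret = kstrtoint_from_user(buffer, count, 10, &timeout);

	if (ret)			/* bad digits, overflow, copy fault */
		return ret;
	/* program 'timeout' into the watchdog here */
	return count;
}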
-
-int
-get_bypass_wd_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0, timeout = 0;
-
- ret = get_bypass_wd_fn(pbp_device_block, &timeout);
- if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
- else if (timeout == -1)
- len = sprintf(page, "unknown\n");
- else if (timeout == 0)
- len = sprintf(page, "disable\n");
- else
- len = sprintf(page, "%d\n", timeout);
-
- *eof = 1;
- return len;
-}
-
-int
-get_wd_expire_time_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0, timeout = 0;
-
- ret = get_wd_expire_time_fn(pbp_device_block, &timeout);
- if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
- else if (timeout == -1)
- len = sprintf(page, "expire\n");
- else if (timeout == 0)
- len = sprintf(page, "disable\n");
-
- else
- len = sprintf(page, "%d\n", timeout);
- *eof = 1;
- return len;
-}
-
-int
-get_tpl_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_tpl_fn(pbp_device_block);
- if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
- else if (ret == 1)
- len = sprintf(page, "on\n");
- else if (ret == 0)
- len = sprintf(page, "off\n");
-
- *eof = 1;
- return len;
-}
-
-#ifdef PMC_FIX_FLAG
-int
-get_wait_at_pwup_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_bp_wait_at_pwup_fn(pbp_device_block);
- if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
- else if (ret == 1)
- len = sprintf(page, "on\n");
- else if (ret == 0)
- len = sprintf(page, "off\n");
-
- *eof = 1;
- return len;
-}
-
-int
-get_hw_reset_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_bp_hw_reset_fn(pbp_device_block);
- if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
- else if (ret == 1)
- len = sprintf(page, "on\n");
- else if (ret == 0)
- len = sprintf(page, "off\n");
-
- *eof = 1;
- return len;
-}
-
-#endif /*PMC_WAIT_FLAG */
-
-int
-reset_bypass_wd_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = reset_bypass_wd_timer_fn(pbp_device_block);
- if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
- else if (ret == 0)
- len = sprintf(page, "disable\n");
- else if (ret == 1)
- len = sprintf(page, "success\n");
-
- *eof = 1;
- return len;
-}
-
-int
-set_dis_bypass_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int bypass_param = 0, length = 0;
-
- if (copy_from_user(&kbuf, buffer, count))
- return -1;
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- bypass_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- bypass_param = 0;
-
- set_dis_bypass_fn(pbp_device_block, bypass_param);
-
- return count;
-}
-
-int
-set_dis_tap_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int tap_param = 0, length = 0;
-
- if (copy_from_user(&kbuf, buffer, count))
- return -1;
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- tap_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- tap_param = 0;
-
- set_dis_tap_fn(pbp_device_block, tap_param);
-
- return count;
-}
-
-int
-set_dis_disc_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int tap_param = 0, length = 0;
-
- if (copy_from_user(&kbuf, buffer, count))
- return -1;
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- tap_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- tap_param = 0;
-
- set_dis_disc_fn(pbp_device_block, tap_param);
-
- return count;
-}
-
-int
-get_dis_bypass_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_dis_bypass_fn(pbp_device_block);
- if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
- else if (ret == 0)
- len = sprintf(page, "off\n");
- else
- len = sprintf(page, "on\n");
-
- *eof = 1;
- return len;
-}
-
-int
-get_dis_tap_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_dis_tap_fn(pbp_device_block);
- if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
- else if (ret == 0)
- len = sprintf(page, "off\n");
- else
- len = sprintf(page, "on\n");
-
- *eof = 1;
- return len;
-}
-
-int
-get_dis_disc_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_dis_disc_fn(pbp_device_block);
- if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
- else if (ret == 0)
- len = sprintf(page, "off\n");
- else
- len = sprintf(page, "on\n");
-
- *eof = 1;
- return len;
-}
-
-int
-set_bypass_pwup_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int bypass_param = 0, length = 0;
-
- if (copy_from_user(&kbuf, buffer, count))
- return -1;
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- bypass_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- bypass_param = 0;
-
- set_bypass_pwup_fn(pbp_device_block, bypass_param);
-
- return count;
-}
-
-int
-set_bypass_pwoff_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int bypass_param = 0, length = 0;
-
- if (copy_from_user(&kbuf, buffer, count))
- return -1;
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- bypass_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- bypass_param = 0;
-
- set_bypass_pwoff_fn(pbp_device_block, bypass_param);
-
- return count;
-}
-
-int
-set_tap_pwup_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int tap_param = 0, length = 0;
-
- if (copy_from_user(&kbuf, buffer, count))
- return -1;
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- tap_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- tap_param = 0;
-
- set_tap_pwup_fn(pbp_device_block, tap_param);
-
- return count;
-}
-
-int
-set_disc_pwup_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int tap_param = 0, length = 0;
-
- if (copy_from_user(&kbuf, buffer, count))
- return -1;
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- tap_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- tap_param = 0;
-
- set_disc_pwup_fn(pbp_device_block, tap_param);
-
- return count;
-}
-
-int
-get_bypass_pwup_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_bypass_pwup_fn(pbp_device_block);
- if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
- else if (ret == 0)
- len = sprintf(page, "off\n");
- else
- len = sprintf(page, "on\n");
-
- *eof = 1;
- return len;
-}
-
-int
-get_bypass_pwoff_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_bypass_pwoff_fn(pbp_device_block);
- if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
- else if (ret == 0)
- len = sprintf(page, "off\n");
- else
- len = sprintf(page, "on\n");
-
- *eof = 1;
- return len;
-}
-
-int
-get_tap_pwup_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_tap_pwup_fn(pbp_device_block);
- if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
- else if (ret == 0)
- len = sprintf(page, "off\n");
- else
- len = sprintf(page, "on\n");
-
- *eof = 1;
- return len;
-}
-
-int
-get_disc_pwup_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_disc_pwup_fn(pbp_device_block);
- if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
- else if (ret == 0)
- len = sprintf(page, "off\n");
- else
- len = sprintf(page, "on\n");
-
- *eof = 1;
- return len;
-}
-
-int
-set_std_nic_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int bypass_param = 0, length = 0;
-
- if (copy_from_user(&kbuf, buffer, count))
- return -1;
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- bypass_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- bypass_param = 0;
-
- set_std_nic_fn(pbp_device_block, bypass_param);
-
- return count;
-}
-
-int
-get_std_nic_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_std_nic_fn(pbp_device_block);
- if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
- else if (ret == 0)
- len = sprintf(page, "off\n");
- else
- len = sprintf(page, "on\n");
-
- *eof = 1;
- return len;
-}
-
-int
-get_wd_exp_mode_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_wd_exp_mode_fn(pbp_device_block);
- if (ret == 1)
- len = sprintf(page, "tap\n");
- else if (ret == 0)
- len = sprintf(page, "bypass\n");
- else if (ret == 2)
- len = sprintf(page, "disc\n");
-
- else
- len = sprintf(page, "fail\n");
-
- *eof = 1;
- return len;
-}
-
-int
-set_wd_exp_mode_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int bypass_param = 0, length = 0;
-
- if (count > (sizeof(kbuf) - 1))
- return -1;
-
- if (copy_from_user(&kbuf, buffer, count))
- return -1;
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "tap") == 0)
- bypass_param = 1;
- else if (strcmp(kbuf, "bypass") == 0)
- bypass_param = 0;
- else if (strcmp(kbuf, "disc") == 0)
- bypass_param = 2;
-
- set_wd_exp_mode_fn(pbp_device_block, bypass_param);
-
- return count;
-}
-
-int
-get_wd_autoreset_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_wd_autoreset_fn(pbp_device_block);
- if (ret >= 0)
- len = sprintf(page, "%d\n", ret);
- else
- len = sprintf(page, "fail\n");
-
- *eof = 1;
- return len;
-}
-
-int
-set_wd_autoreset_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
- u32 timeout = 0;
- char *timeout_ptr = kbuf;
-
- if (copy_from_user(&kbuf, buffer, count))
- return -1;
-
- timeout_ptr = kbuf;
- timeout = atoi(&timeout_ptr);
-
- set_wd_autoreset_fn(pbp_device_block, timeout);
-
- return count;
-}
-
-int
-set_tpl_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int tpl_param = 0, length = 0;
-
- if (count > (sizeof(kbuf) - 1))
- return -1;
-
- if (copy_from_user(&kbuf, buffer, count))
- return -1;
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- tpl_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- tpl_param = 0;
-
- set_tpl_fn(pbp_device_block, tpl_param);
-
- return count;
-}
-
-#ifdef PMC_FIX_FLAG
-int
-set_wait_at_pwup_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int tpl_param = 0, length = 0;
-
- if (count > (sizeof(kbuf) - 1))
- return -1;
-
- if (copy_from_user(&kbuf, buffer, count))
- return -1;
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- tpl_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- tpl_param = 0;
-
- set_bp_wait_at_pwup_fn(pbp_device_block, tpl_param);
-
- return count;
-}
-
-int
-set_hw_reset_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int tpl_param = 0, length = 0;
-
- if (count > (sizeof(kbuf) - 1))
- return -1;
-
- if (copy_from_user(&kbuf, buffer, count))
- return -1;
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- tpl_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- tpl_param = 0;
-
- set_bp_hw_reset_fn(pbp_device_block, tpl_param);
-
- return count;
-}
-
-#endif /*PMC_FIX_FLAG */
-
-int bypass_proc_create_dev_sd(bpctl_dev_t *pbp_device_block)
-{
- struct bypass_pfs_sd *current_pfs = &(pbp_device_block->bypass_pfs_set);
- static struct proc_dir_entry *procfs_dir;
- int ret = 0;
-
- sprintf(current_pfs->dir_name, "bypass_%s", dev->name);
-
- if (!bp_procfs_dir)
- return -1;
-
- /* create device proc dir */
- procfs_dir = proc_getdir(current_pfs->dir_name, bp_procfs_dir);
- if (procfs_dir == 0) {
- printk(KERN_DEBUG "Could not create procfs directory %s\n",
- current_pfs->dir_name);
- return -1;
- }
- current_pfs->bypass_entry = procfs_dir;
-
- if (bypass_proc_create_entry(&(current_pfs->bypass_info), BYPASS_INFO_ENTRY_SD, NULL, /* write */
- get_bypass_info_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
-
- if (pbp_device_block->bp_caps & SW_CTL_CAP) {
-
- /* Create set param proc's */
- if (bypass_proc_create_entry_sd(&(current_pfs->bypass_slave), BYPASS_SLAVE_ENTRY_SD, NULL, /* write */
- get_bypass_slave_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->bypass_caps), BYPASS_CAPS_ENTRY_SD, NULL, /* write */
- get_bypass_caps_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->wd_set_caps), WD_SET_CAPS_ENTRY_SD, NULL, /* write */
- get_wd_set_caps_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
- if (bypass_proc_create_entry_sd(&(current_pfs->bypass_wd), BYPASS_WD_ENTRY_SD, set_bypass_wd_pfs, /* write */
- get_bypass_wd_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->wd_expire_time), WD_EXPIRE_TIME_ENTRY_SD, NULL, /* write */
- get_wd_expire_time_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->reset_bypass_wd), RESET_BYPASS_WD_ENTRY_SD, NULL, /* write */
- reset_bypass_wd_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->std_nic), STD_NIC_ENTRY_SD, set_std_nic_pfs, /* write */
- get_std_nic_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
-
- if (pbp_device_block->bp_caps & BP_CAP) {
- if (bypass_proc_create_entry_sd(&(current_pfs->bypass), BYPASS_ENTRY_SD, set_bypass_pfs, /* write */
- get_bypass_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->dis_bypass), DIS_BYPASS_ENTRY_SD, set_dis_bypass_pfs, /* write */
- get_dis_bypass_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->bypass_pwup), BYPASS_PWUP_ENTRY_SD, set_bypass_pwup_pfs, /* write */
- get_bypass_pwup_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
- if (bypass_proc_create_entry_sd(&(current_pfs->bypass_pwoff), BYPASS_PWOFF_ENTRY_SD, set_bypass_pwoff_pfs, /* write */
- get_bypass_pwoff_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->bypass_change), BYPASS_CHANGE_ENTRY_SD, NULL, /* write */
- get_bypass_change_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
- }
-
- if (pbp_device_block->bp_caps & TAP_CAP) {
-
- if (bypass_proc_create_entry_sd(&(current_pfs->tap), TAP_ENTRY_SD, set_tap_pfs, /* write */
- get_tap_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->dis_tap), DIS_TAP_ENTRY_SD, set_dis_tap_pfs, /* write */
- get_dis_tap_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->tap_pwup), TAP_PWUP_ENTRY_SD, set_tap_pwup_pfs, /* write */
- get_tap_pwup_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->tap_change), TAP_CHANGE_ENTRY_SD, NULL, /* write */
- get_tap_change_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
- }
- if (pbp_device_block->bp_caps & DISC_CAP) {
-
- if (bypass_proc_create_entry_sd(&(current_pfs->tap), DISC_ENTRY_SD, set_disc_pfs, /* write */
- get_disc_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
-#if 1
-
- if (bypass_proc_create_entry_sd(&(current_pfs->dis_tap), DIS_DISC_ENTRY_SD, set_dis_disc_pfs, /* write */
- get_dis_disc_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
-#endif
-
- if (bypass_proc_create_entry_sd(&(current_pfs->tap_pwup), DISC_PWUP_ENTRY_SD, set_disc_pwup_pfs, /* write */
- get_disc_pwup_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->tap_change), DISC_CHANGE_ENTRY_SD, NULL, /* write */
- get_disc_change_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
- }
-
- if (bypass_proc_create_entry_sd(&(current_pfs->wd_exp_mode), WD_EXP_MODE_ENTRY_SD, set_wd_exp_mode_pfs, /* write */
- get_wd_exp_mode_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->wd_autoreset), WD_AUTORESET_ENTRY_SD, set_wd_autoreset_pfs, /* write */
- get_wd_autoreset_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
- if (bypass_proc_create_entry_sd(&(current_pfs->tpl), TPL_ENTRY_SD, set_tpl_pfs, /* write */
- get_tpl_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
-#ifdef PMC_FIX_FLAG
- if (bypass_proc_create_entry_sd(&(current_pfs->tpl), WAIT_AT_PWUP_ENTRY_SD, set_wait_at_pwup_pfs, /* write */
- get_wait_at_pwup_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
- if (bypass_proc_create_entry_sd(&(current_pfs->tpl), HW_RESET_ENTRY_SD, set_hw_reset_pfs, /* write */
- get_hw_reset_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
-
-#endif
-
- }
- if (ret < 0)
- printk(KERN_DEBUG "Create proc entry failed\n");
-
- return ret;
-}
-
-int bypass_proc_remove_dev_sd(bpctl_dev_t *pbp_device_block)
-{
-
- struct bypass_pfs_sd *current_pfs = &pbp_device_block->bypass_pfs_set;
- struct proc_dir_entry *pde = current_pfs->bypass_entry, *pde_curr =
- NULL;
- char name[256];
-
- for (pde = pde->subdir; pde;) {
- strcpy(name, pde->name);
- pde_curr = pde;
- pde = pde->next;
- remove_proc_entry(name, current_pfs->bypass_entry);
- }
- if (!pde)
- remove_proc_entry(current_pfs->dir_name, bp_procfs_dir);
-
- return 0;
-}
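The teardown above walks the directory's ->subdir list by hand, which stops working once struct proc_dir_entry becomes opaque to modules. A sketch of the one-call alternative on kernels that provide remove_proc_subtree() (an assumption about the target kernel, not something this patch does):

static void example_remove_dev(struct bypass_pfs_sd *pfs)
{
	/* drops the per-device directory and every entry under it */
	remove_proc_subtree(pfs->dir_name, bp_procfs_dir);
	pfs->bypass_entry = NULL;
}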
-
-#endif /* BYPASS_SUPPORT */
diff --git a/drivers/staging/silicom/bp_mod.c b/drivers/staging/silicom/bpctl_mod.c
index 45a2227..b7e570c 100644
--- a/drivers/staging/silicom/bp_mod.c
+++ b/drivers/staging/silicom/bpctl_mod.c
@@ -35,7 +35,6 @@
#define BP_MOD_DESCR "Silicom Bypass-SD Control driver"
#define BP_SYNC_FLAG 1
-static int Device_Open = 0;
static int major_num = 0;
MODULE_AUTHOR("Anna Lukin, annal@silicom.co.il");
@@ -60,35 +59,9 @@ typedef enum {
bp_none,
} bp_media_type;
-struct pfs_unit_sd {
- struct proc_dir_entry *proc_entry;
- char proc_name[32];
-};
-
struct bypass_pfs_sd {
char dir_name[32];
struct proc_dir_entry *bypass_entry;
- struct pfs_unit_sd bypass_info;
- struct pfs_unit_sd bypass_slave;
- struct pfs_unit_sd bypass_caps;
- struct pfs_unit_sd wd_set_caps;
- struct pfs_unit_sd bypass;
- struct pfs_unit_sd bypass_change;
- struct pfs_unit_sd bypass_wd;
- struct pfs_unit_sd wd_expire_time;
- struct pfs_unit_sd reset_bypass_wd;
- struct pfs_unit_sd dis_bypass;
- struct pfs_unit_sd bypass_pwup;
- struct pfs_unit_sd bypass_pwoff;
- struct pfs_unit_sd std_nic;
- struct pfs_unit_sd tap;
- struct pfs_unit_sd dis_tap;
- struct pfs_unit_sd tap_pwup;
- struct pfs_unit_sd tap_change;
- struct pfs_unit_sd wd_exp_mode;
- struct pfs_unit_sd wd_autoreset;
- struct pfs_unit_sd tpl;
-
};
typedef struct _bpctl_dev {
@@ -315,27 +288,6 @@ static struct notifier_block bp_notifier_block = {
.notifier_call = bp_device_event,
};
-static int device_open(struct inode *inode, struct file *file)
-{
-#ifdef DEBUG
- printk("device_open(%p)\n", file);
-#endif
- Device_Open++;
-/*
-* Initialize the message
-*/
- return SUCCESS;
-}
-
-static int device_release(struct inode *inode, struct file *file)
-{
-#ifdef DEBUG
- printk("device_release(%p,%p)\n", inode, file);
-#endif
- Device_Open--;
- return SUCCESS;
-}
-
int is_bypass_fn(bpctl_dev_t *pbpctl_dev);
int wdt_time_left(bpctl_dev_t *pbpctl_dev);
@@ -1728,62 +1680,33 @@ int gpio6_clear_fn(bpctl_dev_t *pbpctl_dev)
}
#endif /*BYPASS_DEBUG */
-static bpctl_dev_t *get_status_port_fn(bpctl_dev_t *pbpctl_dev)
+static bpctl_dev_t *lookup_port(bpctl_dev_t *dev)
{
- int idx_dev = 0;
-
- if (pbpctl_dev == NULL)
- return NULL;
-
- if ((pbpctl_dev->func == 0) || (pbpctl_dev->func == 2)) {
- for (idx_dev = 0;
- ((bpctl_dev_arr[idx_dev].pdev != NULL)
- && (idx_dev < device_num)); idx_dev++) {
- if ((bpctl_dev_arr[idx_dev].bus == pbpctl_dev->bus)
- && (bpctl_dev_arr[idx_dev].slot == pbpctl_dev->slot)
- && ((bpctl_dev_arr[idx_dev].func == 1)
- && (pbpctl_dev->func == 0))) {
-
- return &(bpctl_dev_arr[idx_dev]);
- }
- if ((bpctl_dev_arr[idx_dev].bus == pbpctl_dev->bus) &&
- (bpctl_dev_arr[idx_dev].slot == pbpctl_dev->slot) &&
- ((bpctl_dev_arr[idx_dev].func == 3)
- && (pbpctl_dev->func == 2))) {
+ bpctl_dev_t *p;
+ int n;
+ for (n = 0, p = bpctl_dev_arr; n < device_num && p->pdev; n++, p++) {
+ if (p->bus == dev->bus
+ && p->slot == dev->slot
+ && p->func == (dev->func ^ 1))
+ return p;
+ }
+ return NULL;
+}
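lookup_port() collapses the four hand-written bus/slot/function comparisons of the two old helpers into one: on these adapters the PCI functions come in pairs 0/1 and 2/3, and XOR with 1 maps each function to its partner in either direction, so the same walk serves both the status-port and master-port lookups. A stand-alone check of that identity (illustration only, user-space):

#include <assert.h>

int main(void)
{
	assert((0 ^ 1) == 1 && (1 ^ 1) == 0);	/* master 0 <-> status 1 */
	assert((2 ^ 1) == 3 && (3 ^ 1) == 2);	/* master 2 <-> status 3 */
	return 0;
}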
- return &(bpctl_dev_arr[idx_dev]);
- }
- }
+static bpctl_dev_t *get_status_port_fn(bpctl_dev_t *pbpctl_dev)
+{
+ if (pbpctl_dev) {
+ if (pbpctl_dev->func == 0 || pbpctl_dev->func == 2)
+ return lookup_port(pbpctl_dev);
}
return NULL;
}
static bpctl_dev_t *get_master_port_fn(bpctl_dev_t *pbpctl_dev)
{
- int idx_dev = 0;
-
- if (pbpctl_dev == NULL)
- return NULL;
-
- if ((pbpctl_dev->func == 1) || (pbpctl_dev->func == 3)) {
- for (idx_dev = 0;
- ((bpctl_dev_arr[idx_dev].pdev != NULL)
- && (idx_dev < device_num)); idx_dev++) {
- if ((bpctl_dev_arr[idx_dev].bus == pbpctl_dev->bus)
- && (bpctl_dev_arr[idx_dev].slot == pbpctl_dev->slot)
- && ((bpctl_dev_arr[idx_dev].func == 0)
- && (pbpctl_dev->func == 1))) {
-
- return &(bpctl_dev_arr[idx_dev]);
- }
- if ((bpctl_dev_arr[idx_dev].bus == pbpctl_dev->bus) &&
- (bpctl_dev_arr[idx_dev].slot == pbpctl_dev->slot) &&
- ((bpctl_dev_arr[idx_dev].func == 2)
- && (pbpctl_dev->func == 3))) {
-
- return &(bpctl_dev_arr[idx_dev]);
- }
- }
+ if (pbpctl_dev) {
+ if (pbpctl_dev->func == 1 || pbpctl_dev->func == 3)
+ return lookup_port(pbpctl_dev);
}
return NULL;
}
@@ -5858,11 +5781,9 @@ static long device_ioctl(struct file *file, /* see include/linux/fs.h */
return ret;
}
-struct file_operations Fops = {
+static const struct file_operations Fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = device_ioctl,
- .open = device_open,
- .release = device_release, /* a.k.a. close */
};
#ifndef PCI_DEVICE
@@ -6686,6 +6607,118 @@ static bpmod_info_t tx_ctl_pci_tbl[] = {
{0,}
};
+static void find_fw(bpctl_dev_t *dev)
+{
+ unsigned long mmio_start, mmio_len;
+ struct pci_dev *pdev1 = dev->pdev;
+
+ if ((OLD_IF_SERIES(dev->subdevice)) ||
+ (INTEL_IF_SERIES(dev->subdevice)))
+ dev->bp_fw_ver = 0xff;
+ else
+ dev->bp_fw_ver = bypass_fw_ver(dev);
+
+ if (dev->bp_10gb == 1 && dev->bp_fw_ver == 0xff) {
+ int cnt = 100;
+ while (cnt--) {
+ iounmap((void *)dev->mem_map);
+ mmio_start = pci_resource_start(pdev1, 0);
+ mmio_len = pci_resource_len(pdev1, 0);
+
+ dev->mem_map = (unsigned long)
+ ioremap(mmio_start, mmio_len);
+
+ dev->bp_fw_ver = bypass_fw_ver(dev);
+ if (dev->bp_fw_ver == 0xa8)
+ break;
+ }
+ }
+ /* dev->bp_fw_ver=0xa8; */
+ printk("firmware version: 0x%x\n", dev->bp_fw_ver);
+}
+
+static int init_one(bpctl_dev_t *dev, bpmod_info_t *info, struct pci_dev *pdev1)
+{
+ unsigned long mmio_start, mmio_len;
+
+ dev->pdev = pdev1;
+ mmio_start = pci_resource_start(pdev1, 0);
+ mmio_len = pci_resource_len(pdev1, 0);
+
+ dev->desc = dev_desc[info->index].name;
+ dev->name = info->bp_name;
+ dev->device = info->device;
+ dev->vendor = info->vendor;
+ dev->subdevice = info->subdevice;
+ dev->subvendor = info->subvendor;
+ dev->func = PCI_FUNC(pdev1->devfn);
+ dev->slot = PCI_SLOT(pdev1->devfn);
+ dev->bus = pdev1->bus->number;
+ dev->mem_map = (unsigned long)ioremap(mmio_start, mmio_len);
+#ifdef BP_SYNC_FLAG
+ spin_lock_init(&dev->bypass_wr_lock);
+#endif
+ if (BP10G9_IF_SERIES(dev->subdevice))
+ dev->bp_10g9 = 1;
+ if (BP10G_IF_SERIES(dev->subdevice))
+ dev->bp_10g = 1;
+ if (PEG540_IF_SERIES(dev->subdevice))
+ dev->bp_540 = 1;
+ if (PEGF5_IF_SERIES(dev->subdevice))
+ dev->bp_fiber5 = 1;
+ if (PEG80_IF_SERIES(dev->subdevice))
+ dev->bp_i80 = 1;
+ if (PEGF80_IF_SERIES(dev->subdevice))
+ dev->bp_i80 = 1;
+ if ((dev->subdevice & 0xa00) == 0xa00)
+ dev->bp_i80 = 1;
+ if (BP10GB_IF_SERIES(dev->subdevice)) {
+ if (dev->ifindex == 0) {
+ unregister_chrdev(major_num, DEVICE_NAME);
+ printk("Please load network driver for %s adapter!\n",
+ dev->name);
+ return -1;
+ }
+
+ if (dev->ndev && !(dev->ndev->flags & IFF_UP)) {
+ unregister_chrdev(major_num, DEVICE_NAME);
+ printk("Please bring up network interfaces for %s adapter!\n",
+ dev->name);
+ return -1;
+ }
+ dev->bp_10gb = 1;
+ }
+
+ if (!dev->bp_10g9) {
+ if (is_bypass_fn(dev)) {
+ printk(KERN_INFO "%s found, ",
+ dev->name);
+ find_fw(dev);
+ }
+ dev->wdt_status = WDT_STATUS_UNKNOWN;
+ dev->reset_time = 0;
+ atomic_set(&dev->wdt_busy, 0);
+ dev->bp_status_un = 1;
+
+ bypass_caps_init(dev);
+
+ init_bypass_wd_auto(dev);
+ init_bypass_tpl_auto(dev);
+ if (NOKIA_SERIES(dev->subdevice))
+ reset_cont(dev);
+ }
+#ifdef BP_SELF_TEST
+ if ((dev->bp_tx_data = kzalloc(BPTEST_DATA_LEN, GFP_KERNEL))) {
+ memset(dev->bp_tx_data, 0xff, 6);
+ memset(dev->bp_tx_data + 6, 0x0, 1);
+ memset(dev->bp_tx_data + 7, 0xaa, 5);
+ *(__be16 *)(dev->bp_tx_data + 12) = htons(ETH_P_BPTEST);
+ } else
+ printk("bp_ctl: Memory allocation error!\n");
+#endif
+ return 0;
+}
+
/*
* Initialize the module - Register the character device
*/
@@ -6694,7 +6727,7 @@ static int __init bypass_init_module(void)
{
int ret_val, idx, idx_dev = 0;
struct pci_dev *pdev1 = NULL;
- unsigned long mmio_start, mmio_len;
+ bpctl_dev_t *dev;
printk(BP_MOD_DESCR " v" BP_MOD_VER "\n");
ret_val = register_chrdev(major_num, DEVICE_NAME, &Fops);
@@ -6729,181 +6762,16 @@ static int __init bypass_init_module(void)
memset(bpctl_dev_arr, 0, ((device_num) * sizeof(bpctl_dev_t)));
pdev1 = NULL;
+ dev = bpctl_dev_arr;
for (idx = 0; tx_ctl_pci_tbl[idx].vendor; idx++) {
while ((pdev1 = pci_get_subsys(tx_ctl_pci_tbl[idx].vendor,
tx_ctl_pci_tbl[idx].device,
tx_ctl_pci_tbl[idx].subvendor,
tx_ctl_pci_tbl[idx].subdevice,
pdev1))) {
- bpctl_dev_arr[idx_dev].pdev = pdev1;
-
- mmio_start = pci_resource_start(pdev1, 0);
- mmio_len = pci_resource_len(pdev1, 0);
-
- bpctl_dev_arr[idx_dev].desc =
- dev_desc[tx_ctl_pci_tbl[idx].index].name;
- bpctl_dev_arr[idx_dev].name =
- tx_ctl_pci_tbl[idx].bp_name;
- bpctl_dev_arr[idx_dev].device =
- tx_ctl_pci_tbl[idx].device;
- bpctl_dev_arr[idx_dev].vendor =
- tx_ctl_pci_tbl[idx].vendor;
- bpctl_dev_arr[idx_dev].subdevice =
- tx_ctl_pci_tbl[idx].subdevice;
- bpctl_dev_arr[idx_dev].subvendor =
- tx_ctl_pci_tbl[idx].subvendor;
- /* bpctl_dev_arr[idx_dev].pdev=pdev1; */
- bpctl_dev_arr[idx_dev].func = PCI_FUNC(pdev1->devfn);
- bpctl_dev_arr[idx_dev].slot = PCI_SLOT(pdev1->devfn);
- bpctl_dev_arr[idx_dev].bus = pdev1->bus->number;
- bpctl_dev_arr[idx_dev].mem_map =
- (unsigned long)ioremap(mmio_start, mmio_len);
-#ifdef BP_SYNC_FLAG
- spin_lock_init(&bpctl_dev_arr[idx_dev].bypass_wr_lock);
-#endif
- if (BP10G9_IF_SERIES(bpctl_dev_arr[idx_dev].subdevice))
- bpctl_dev_arr[idx_dev].bp_10g9 = 1;
- if (BP10G_IF_SERIES(bpctl_dev_arr[idx_dev].subdevice))
- bpctl_dev_arr[idx_dev].bp_10g = 1;
- if (PEG540_IF_SERIES(bpctl_dev_arr[idx_dev].subdevice)) {
-
- bpctl_dev_arr[idx_dev].bp_540 = 1;
- }
- if (PEGF5_IF_SERIES(bpctl_dev_arr[idx_dev].subdevice))
- bpctl_dev_arr[idx_dev].bp_fiber5 = 1;
- if (PEG80_IF_SERIES(bpctl_dev_arr[idx_dev].subdevice))
- bpctl_dev_arr[idx_dev].bp_i80 = 1;
- if (PEGF80_IF_SERIES(bpctl_dev_arr[idx_dev].subdevice))
- bpctl_dev_arr[idx_dev].bp_i80 = 1;
- if ((bpctl_dev_arr[idx_dev].subdevice & 0xa00) == 0xa00)
- bpctl_dev_arr[idx_dev].bp_i80 = 1;
- if (BP10GB_IF_SERIES(bpctl_dev_arr[idx_dev].subdevice)) {
- if (bpctl_dev_arr[idx_dev].ifindex == 0) {
- unregister_chrdev(major_num,
- DEVICE_NAME);
- printk
- ("Please load network driver for %s adapter!\n",
- bpctl_dev_arr[idx_dev].name);
- return -1;
- }
-
- if (bpctl_dev_arr[idx_dev].ndev) {
- if (!
- (bpctl_dev_arr[idx_dev].ndev->
- flags & IFF_UP)) {
- if (!
- (bpctl_dev_arr[idx_dev].
- ndev->flags & IFF_UP)) {
- unregister_chrdev
- (major_num,
- DEVICE_NAME);
- printk
- ("Please bring up network interfaces for %s adapter!\n",
- bpctl_dev_arr
- [idx_dev].name);
- return -1;
- }
-
- }
- }
- bpctl_dev_arr[idx_dev].bp_10gb = 1;
- }
-
- if (!bpctl_dev_arr[idx_dev].bp_10g9) {
-
- if (is_bypass_fn(&bpctl_dev_arr[idx_dev])) {
- printk(KERN_INFO "%s found, ",
- bpctl_dev_arr[idx_dev].name);
- if ((OLD_IF_SERIES
- (bpctl_dev_arr[idx_dev].subdevice))
- ||
- (INTEL_IF_SERIES
- (bpctl_dev_arr[idx_dev].
- subdevice)))
- bpctl_dev_arr[idx_dev].
- bp_fw_ver = 0xff;
- else
- bpctl_dev_arr[idx_dev].
- bp_fw_ver =
- bypass_fw_ver(&bpctl_dev_arr
- [idx_dev]);
- if ((bpctl_dev_arr[idx_dev].bp_10gb ==
- 1)
- && (bpctl_dev_arr[idx_dev].
- bp_fw_ver == 0xff)) {
- int cnt = 100;
- while (cnt--) {
- iounmap((void
- *)
- (bpctl_dev_arr
- [idx_dev].
- mem_map));
- mmio_start =
- pci_resource_start
- (pdev1, 0);
- mmio_len =
- pci_resource_len
- (pdev1, 0);
-
- bpctl_dev_arr[idx_dev].
- mem_map =
- (unsigned long)
- ioremap(mmio_start,
- mmio_len);
-
- bpctl_dev_arr[idx_dev].
- bp_fw_ver =
- bypass_fw_ver
- (&bpctl_dev_arr
- [idx_dev]);
- if (bpctl_dev_arr
- [idx_dev].
- bp_fw_ver == 0xa8)
- break;
-
- }
- }
- /* bpctl_dev_arr[idx_dev].bp_fw_ver=0xa8; */
- printk("firmware version: 0x%x\n",
- bpctl_dev_arr[idx_dev].
- bp_fw_ver);
- }
- bpctl_dev_arr[idx_dev].wdt_status =
- WDT_STATUS_UNKNOWN;
- bpctl_dev_arr[idx_dev].reset_time = 0;
- atomic_set(&bpctl_dev_arr[idx_dev].wdt_busy, 0);
- bpctl_dev_arr[idx_dev].bp_status_un = 1;
-
- bypass_caps_init(&bpctl_dev_arr[idx_dev]);
-
- init_bypass_wd_auto(&bpctl_dev_arr[idx_dev]);
- init_bypass_tpl_auto(&bpctl_dev_arr[idx_dev]);
- if (NOKIA_SERIES
- (bpctl_dev_arr[idx_dev].subdevice))
- reset_cont(&bpctl_dev_arr[idx_dev]);
- }
-#ifdef BP_SELF_TEST
- if ((bpctl_dev_arr[idx_dev].bp_tx_data =
- kmalloc(BPTEST_DATA_LEN, GFP_KERNEL))) {
-
- memset(bpctl_dev_arr[idx_dev].bp_tx_data, 0x0,
- BPTEST_DATA_LEN);
-
- memset(bpctl_dev_arr[idx_dev].bp_tx_data, 0xff,
- 6);
- memset(bpctl_dev_arr[idx_dev].bp_tx_data + 6,
- 0x0, 1);
- memset(bpctl_dev_arr[idx_dev].bp_tx_data + 7,
- 0xaa, 5);
-
- *(__be16 *) (bpctl_dev_arr[idx_dev].bp_tx_data +
- 12) = htons(ETH_P_BPTEST);
-
- } else
- printk("bp_ctl: Memory allocation error!\n");
-#endif
- idx_dev++;
-
+ if (init_one(dev, &tx_ctl_pci_tbl[idx], pdev1) < 0)
+ return -1;
+ dev++;
}
}
if_scan_init();
@@ -6913,33 +6781,27 @@ static int __init bypass_init_module(void)
{
bpctl_dev_t *pbpctl_dev_c = NULL;
- for (idx_dev = 0;
- ((bpctl_dev_arr[idx_dev].pdev != NULL)
- && (idx_dev < device_num)); idx_dev++) {
- if (bpctl_dev_arr[idx_dev].bp_10g9) {
- pbpctl_dev_c =
- get_status_port_fn(&bpctl_dev_arr[idx_dev]);
- if (is_bypass_fn(&bpctl_dev_arr[idx_dev])) {
+ for (idx_dev = 0, dev = bpctl_dev_arr;
+ idx_dev < device_num && dev->pdev;
+ idx_dev++, dev++) {
+ if (dev->bp_10g9) {
+ pbpctl_dev_c = get_status_port_fn(dev);
+ if (is_bypass_fn(dev)) {
printk(KERN_INFO "%s found, ",
- bpctl_dev_arr[idx_dev].name);
- bpctl_dev_arr[idx_dev].bp_fw_ver =
- bypass_fw_ver(&bpctl_dev_arr
- [idx_dev]);
+ dev->name);
+ dev->bp_fw_ver = bypass_fw_ver(dev);
printk("firmware version: 0x%x\n",
- bpctl_dev_arr[idx_dev].
- bp_fw_ver);
-
+ dev->bp_fw_ver);
}
- bpctl_dev_arr[idx_dev].wdt_status =
- WDT_STATUS_UNKNOWN;
- bpctl_dev_arr[idx_dev].reset_time = 0;
- atomic_set(&bpctl_dev_arr[idx_dev].wdt_busy, 0);
- bpctl_dev_arr[idx_dev].bp_status_un = 1;
+ dev->wdt_status = WDT_STATUS_UNKNOWN;
+ dev->reset_time = 0;
+ atomic_set(&dev->wdt_busy, 0);
+ dev->bp_status_un = 1;
- bypass_caps_init(&bpctl_dev_arr[idx_dev]);
+ bypass_caps_init(dev);
- init_bypass_wd_auto(&bpctl_dev_arr[idx_dev]);
- init_bypass_tpl_auto(&bpctl_dev_arr[idx_dev]);
+ init_bypass_wd_auto(dev);
+ init_bypass_tpl_auto(dev);
}
@@ -7336,78 +7198,11 @@ EXPORT_SYMBOL_NOVERS(bp_if_scan_sd);
#define BP_PROC_DIR "bypass"
-#define GPIO6_SET_ENTRY_SD "gpio6_set"
-#define GPIO6_CLEAR_ENTRY_SD "gpio6_clear"
-
-#define GPIO7_SET_ENTRY_SD "gpio7_set"
-#define GPIO7_CLEAR_ENTRY_SD "gpio7_clear"
-
-#define PULSE_SET_ENTRY_SD "pulse_set"
-#define ZERO_SET_ENTRY_SD "zero_set"
-#define PULSE_GET1_ENTRY_SD "pulse_get1"
-#define PULSE_GET2_ENTRY_SD "pulse_get2"
-
-#define CMND_ON_ENTRY_SD "cmnd_on"
-#define CMND_OFF_ENTRY_SD "cmnd_off"
-#define RESET_CONT_ENTRY_SD "reset_cont"
-
- /*COMMANDS*/
-#define BYPASS_INFO_ENTRY_SD "bypass_info"
-#define BYPASS_SLAVE_ENTRY_SD "bypass_slave"
-#define BYPASS_CAPS_ENTRY_SD "bypass_caps"
-#define WD_SET_CAPS_ENTRY_SD "wd_set_caps"
-#define BYPASS_ENTRY_SD "bypass"
-#define BYPASS_CHANGE_ENTRY_SD "bypass_change"
-#define BYPASS_WD_ENTRY_SD "bypass_wd"
-#define WD_EXPIRE_TIME_ENTRY_SD "wd_expire_time"
-#define RESET_BYPASS_WD_ENTRY_SD "reset_bypass_wd"
-#define DIS_BYPASS_ENTRY_SD "dis_bypass"
-#define BYPASS_PWUP_ENTRY_SD "bypass_pwup"
-#define BYPASS_PWOFF_ENTRY_SD "bypass_pwoff"
-#define STD_NIC_ENTRY_SD "std_nic"
-#define STD_NIC_ENTRY_SD "std_nic"
-#define TAP_ENTRY_SD "tap"
-#define TAP_CHANGE_ENTRY_SD "tap_change"
-#define DIS_TAP_ENTRY_SD "dis_tap"
-#define TAP_PWUP_ENTRY_SD "tap_pwup"
-#define TWO_PORT_LINK_ENTRY_SD "two_port_link"
-#define WD_EXP_MODE_ENTRY_SD "wd_exp_mode"
-#define WD_AUTORESET_ENTRY_SD "wd_autoreset"
-#define TPL_ENTRY_SD "tpl"
-#define WAIT_AT_PWUP_ENTRY_SD "wait_at_pwup"
-#define HW_RESET_ENTRY_SD "hw_reset"
-#define DISC_ENTRY_SD "disc"
-#define DISC_CHANGE_ENTRY_SD "disc_change"
-#define DIS_DISC_ENTRY_SD "dis_disc"
-#define DISC_PWUP_ENTRY_SD "disc_pwup"
static struct proc_dir_entry *bp_procfs_dir;
-static struct proc_dir_entry *proc_getdir(char *name,
- struct proc_dir_entry *proc_dir)
-{
- struct proc_dir_entry *pde = proc_dir;
-
- for (pde = pde->subdir; pde; pde = pde->next) {
- if (pde->namelen && (strcmp(name, pde->name) == 0)) {
- /* directory exists */
- break;
- }
- }
- if (pde == (struct proc_dir_entry *)0) {
- /* create the directory */
- pde = proc_mkdir(name, proc_dir);
- if (pde == (struct proc_dir_entry *)0) {
-
- return pde;
- }
- }
-
- return pde;
-}
-
int bp_proc_create(void)
{
- bp_procfs_dir = proc_getdir(BP_PROC_DIR, init_net.proc_net);
+ bp_procfs_dir = proc_mkdir(BP_PROC_DIR, init_net.proc_net);
if (bp_procfs_dir == (struct proc_dir_entry *)0) {
printk(KERN_DEBUG
"Could not create procfs nicinfo directory %s\n",
@@ -7417,144 +7212,99 @@ int bp_proc_create(void)
return 0;
}
-int
-bypass_proc_create_entry_sd(struct pfs_unit_sd *pfs_unit_curr,
- char *proc_name,
- write_proc_t *write_proc,
- read_proc_t *read_proc,
- struct proc_dir_entry *parent_pfs, void *data)
+static int procfs_add(char *proc_name, const struct file_operations *fops,
+ bpctl_dev_t *dev)
{
- strcpy(pfs_unit_curr->proc_name, proc_name);
- pfs_unit_curr->proc_entry = create_proc_entry(pfs_unit_curr->proc_name,
- S_IFREG | S_IRUSR |
- S_IWUSR | S_IRGRP |
- S_IROTH, parent_pfs);
- if (pfs_unit_curr->proc_entry == NULL)
+ struct bypass_pfs_sd *pfs = &dev->bypass_pfs_set;
+ if (!proc_create_data(proc_name, 0644, pfs->bypass_entry, fops, dev))
return -1;
-
- pfs_unit_curr->proc_entry->read_proc = read_proc;
- pfs_unit_curr->proc_entry->write_proc = write_proc;
- pfs_unit_curr->proc_entry->data = data;
-
return 0;
-
}
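procfs_add() stores the bpctl_dev_t with proc_create_data(), so the handlers no longer receive it as an explicit argument: the seq_file show routines pick it up from m->private (set by single_open below), and the write routines re-derive it from the inode. The write-side shape, as a placeholder:

static ssize_t example_write(struct file *file, const char __user *buffer,
			     size_t count, loff_t *pos)
{
	bpctl_dev_t *dev = PDE_DATA(file_inode(file));

	/* parse 'buffer', program 'dev', then claim the write */
	return count;
}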
-int
-get_bypass_info_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
- int len = 0;
-
- len += sprintf(page, "Name\t\t\t%s\n", pbp_device_block->name);
- len +=
- sprintf(page + len, "Firmware version\t0x%x\n",
- pbp_device_block->bp_fw_ver);
+#define RO_FOPS(name) \
+static int name##_open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, show_##name, PDE_DATA(inode));\
+} \
+static const struct file_operations name##_ops = { \
+ .open = name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+};
- *eof = 1;
- return len;
-}
+#define RW_FOPS(name) \
+static int name##_open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, show_##name, PDE_DATA(inode));\
+} \
+static const struct file_operations name##_ops = { \
+ .open = name##_open, \
+ .read = seq_read, \
+ .write = name##_write, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+};
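For orientation, RW_FOPS(tap) pastes together the following open routine and file_operations, pairing show_tap() with tap_write(); RO_FOPS is identical minus the .write hook. This is just the macro expanded by hand, not additional code in the patch:

static int tap_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_tap, PDE_DATA(inode));
}

static const struct file_operations tap_ops = {
	.open    = tap_open,
	.read    = seq_read,
	.write   = tap_write,
	.llseek  = seq_lseek,
	.release = single_release,
};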
-int
-get_bypass_slave_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int show_bypass_info(struct seq_file *m, void *v)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
+ bpctl_dev_t *dev = m->private;
- int len = 0;
- bpctl_dev_t *pbp_device_block_slave = NULL;
- int idx_dev = 0;
- struct net_device *net_slave_dev = NULL;
-
- if ((pbp_device_block->func == 0) || (pbp_device_block->func == 2)) {
- for (idx_dev = 0;
- ((bpctl_dev_arr[idx_dev].pdev != NULL)
- && (idx_dev < device_num)); idx_dev++) {
- if ((bpctl_dev_arr[idx_dev].bus ==
- pbp_device_block->bus)
- && (bpctl_dev_arr[idx_dev].slot ==
- pbp_device_block->slot)) {
- if ((pbp_device_block->func == 0)
- && (bpctl_dev_arr[idx_dev].func == 1)) {
- pbp_device_block_slave =
- &bpctl_dev_arr[idx_dev];
- break;
- }
- if ((pbp_device_block->func == 2) &&
- (bpctl_dev_arr[idx_dev].func == 3)) {
- pbp_device_block_slave =
- &bpctl_dev_arr[idx_dev];
- break;
- }
- }
- }
- } else
- pbp_device_block_slave = pbp_device_block;
- if (!pbp_device_block_slave) {
- len = sprintf(page, "fail\n");
- *eof = 1;
- return len;
- }
- net_slave_dev = pbp_device_block_slave->ndev;
- if (net_slave_dev)
- len = sprintf(page, "%s\n", net_slave_dev->name);
-
- *eof = 1;
- return len;
+ seq_printf(m, "Name\t\t\t%s\n", dev->name);
+ seq_printf(m, "Firmware version\t0x%x\n", dev->bp_fw_ver);
+ return 0;
}
+RO_FOPS(bypass_info)
-int
-get_bypass_caps_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int show_bypass_slave(struct seq_file *m, void *v)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
+ bpctl_dev_t *dev = m->private;
+ bpctl_dev_t *slave = get_status_port_fn(dev);
+ if (!slave)
+ slave = dev;
+ if (!slave)
+ seq_printf(m, "fail\n");
+ else if (slave->ndev)
+ seq_printf(m, "%s\n", slave->ndev->name);
+ return 0;
+}
+RO_FOPS(bypass_slave)
- ret = get_bypass_caps_fn(pbp_device_block);
+static int show_bypass_caps(struct seq_file *m, void *v)
+{
+ bpctl_dev_t *dev = m->private;
+ int ret = get_bypass_caps_fn(dev);
if (ret == BP_NOT_CAP)
- len = sprintf(page, "-1\n");
+ seq_printf(m, "-1\n");
else
- len = sprintf(page, "0x%x\n", ret);
- *eof = 1;
- return len;
-
+ seq_printf(m, "0x%x\n", ret);
+ return 0;
}
+RO_FOPS(bypass_caps)
-int
-get_wd_set_caps_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int show_wd_set_caps(struct seq_file *m, void *v)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_wd_set_caps_fn(pbp_device_block);
+ bpctl_dev_t *dev = m->private;
+ int ret = get_wd_set_caps_fn(dev);
if (ret == BP_NOT_CAP)
- len = sprintf(page, "-1\n");
+ seq_printf(m, "-1\n");
else
- len = sprintf(page, "0x%x\n", ret);
- *eof = 1;
- return len;
+ seq_printf(m, "0x%x\n", ret);
+ return 0;
}
+RO_FOPS(wd_set_caps)
-int
-set_bypass_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
+static int user_on_off(const void __user *buffer, size_t count)
{
char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int bypass_param = 0, length = 0;
+ int length = 0;
if (count > (sizeof(kbuf) - 1))
return -1;
- if (copy_from_user(&kbuf, buffer, count)) {
+ if (copy_from_user(&kbuf, buffer, count))
return -1;
- }
kbuf[count] = '\0';
length = strlen(kbuf);
@@ -7562,806 +7312,467 @@ set_bypass_pfs(struct file *file, const char *buffer,
kbuf[--length] = '\0';
if (strcmp(kbuf, "on") == 0)
- bypass_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- bypass_param = 0;
-
- set_bypass_fn(pbp_device_block, bypass_param);
-
- return count;
+ return 1;
+ if (strcmp(kbuf, "off") == 0)
+ return 0;
+ return 0;
}
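One behavioural note on user_on_off(): -1 comes back only for an oversized count or a failed copy; any string other than "on" (including typos) falls through to the final return 0 and is treated as "off", which matches what the deleted writers did. A caller that wanted to reject unknown input could use a stricter variant along these lines (an alternative sketch, not what the patch does):

static int user_on_off_strict(const char __user *buffer, size_t count)
{
	char kbuf[8];

	if (count > sizeof(kbuf) - 1)
		return -EINVAL;
	if (copy_from_user(kbuf, buffer, count))
		return -EFAULT;
	kbuf[count] = '\0';
	strim(kbuf);			/* drop the trailing newline */
	if (!strcmp(kbuf, "on"))
		return 1;
	if (!strcmp(kbuf, "off"))
		return 0;
	return -EINVAL;			/* reject anything else */
}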
-int
-set_tap_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
+static ssize_t bypass_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int tap_param = 0, length = 0;
-
- if (count > (sizeof(kbuf) - 1))
+ int bypass_param = user_on_off(buffer, count);
+ if (bypass_param < 0)
return -1;
- if (copy_from_user(&kbuf, buffer, count)) {
- return -1;
- }
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- tap_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- tap_param = 0;
-
- set_tap_fn(pbp_device_block, tap_param);
-
+ set_bypass_fn(PDE_DATA(file_inode(file)), bypass_param);
return count;
}
-
-int
-set_disc_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
+static int show_bypass(struct seq_file *m, void *v)
{
+ bpctl_dev_t *dev = m->private;
+ int ret = get_bypass_fn(dev);
+ if (ret == BP_NOT_CAP)
+ seq_printf(m, "fail\n");
+ else if (ret == 1)
+ seq_printf(m, "on\n");
+ else if (ret == 0)
+ seq_printf(m, "off\n");
+ return 0;
+}
+RW_FOPS(bypass)
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int tap_param = 0, length = 0;
-
- if (count > (sizeof(kbuf) - 1))
- return -1;
-
- if (copy_from_user(&kbuf, buffer, count)) {
+static ssize_t tap_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ int tap_param = user_on_off(buffer, count);
+ if (tap_param < 0)
return -1;
- }
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- tap_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- tap_param = 0;
-
- set_disc_fn(pbp_device_block, tap_param);
+ set_tap_fn(PDE_DATA(file_inode(file)), tap_param);
return count;
}
-
-int
-get_bypass_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int show_tap(struct seq_file *m, void *v)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_bypass_fn(pbp_device_block);
+ bpctl_dev_t *dev = m->private;
+ int ret = get_tap_fn(dev);
if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
+ seq_printf(m, "fail\n");
else if (ret == 1)
- len = sprintf(page, "on\n");
+ seq_printf(m, "on\n");
else if (ret == 0)
- len = sprintf(page, "off\n");
-
- *eof = 1;
- return len;
+ seq_printf(m, "off\n");
+ return 0;
}
+RW_FOPS(tap)
-int
-get_tap_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static ssize_t disc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_tap_fn(pbp_device_block);
- if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
- else if (ret == 1)
- len = sprintf(page, "on\n");
- else if (ret == 0)
- len = sprintf(page, "off\n");
+ int tap_param = user_on_off(buffer, count);
+ if (tap_param < 0)
+ return -1;
- *eof = 1;
- return len;
+ set_disc_fn(PDE_DATA(file_inode(file)), tap_param);
+ return count;
}
-
-int
-get_disc_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int show_disc(struct seq_file *m, void *v)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_disc_fn(pbp_device_block);
+ bpctl_dev_t *dev = m->private;
+ int ret = get_disc_fn(dev);
if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
+ seq_printf(m, "fail\n");
else if (ret == 1)
- len = sprintf(page, "on\n");
+ seq_printf(m, "on\n");
else if (ret == 0)
- len = sprintf(page, "off\n");
-
- *eof = 1;
- return len;
+ seq_printf(m, "off\n");
+ return 0;
}
+RW_FOPS(disc)
-int
-get_bypass_change_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int show_bypass_change(struct seq_file *m, void *v)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_bypass_change_fn(pbp_device_block);
+ bpctl_dev_t *dev = m->private;
+ int ret = get_bypass_change_fn(dev);
if (ret == 1)
- len = sprintf(page, "on\n");
+ seq_printf(m, "on\n");
else if (ret == 0)
- len = sprintf(page, "off\n");
+ seq_printf(m, "off\n");
else
- len = sprintf(page, "fail\n");
-
- *eof = 1;
- return len;
+ seq_printf(m, "fail\n");
+ return 0;
}
+RO_FOPS(bypass_change)
-int
-get_tap_change_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int show_tap_change(struct seq_file *m, void *v)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_tap_change_fn(pbp_device_block);
+ bpctl_dev_t *dev = m->private;
+ int ret = get_tap_change_fn(dev);
if (ret == 1)
- len = sprintf(page, "on\n");
+ seq_printf(m, "on\n");
else if (ret == 0)
- len = sprintf(page, "off\n");
+ seq_printf(m, "off\n");
else
- len = sprintf(page, "fail\n");
-
- *eof = 1;
- return len;
+ seq_printf(m, "fail\n");
+ return 0;
}
+RO_FOPS(tap_change)
-int
-get_disc_change_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int show_disc_change(struct seq_file *m, void *v)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_disc_change_fn(pbp_device_block);
+ bpctl_dev_t *dev = m->private;
+ int ret = get_disc_change_fn(dev);
if (ret == 1)
- len = sprintf(page, "on\n");
+ seq_printf(m, "on\n");
else if (ret == 0)
- len = sprintf(page, "off\n");
+ seq_printf(m, "off\n");
else
- len = sprintf(page, "fail\n");
-
- *eof = 1;
- return len;
-}
-
-#define isdigit(c) (c >= '0' && c <= '9')
-__inline static int atoi(char **s)
-{
- int i = 0;
- while (isdigit(**s))
- i = i * 10 + *((*s)++) - '0';
- return i;
+ seq_printf(m, "fail\n");
+ return 0;
}
+RO_FOPS(disc_change)
-int
-set_bypass_wd_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
+static ssize_t bypass_wd_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
+ bpctl_dev_t *dev = PDE_DATA(file_inode(file));
int timeout;
- int ret;
-
- ret = kstrtoint_from_user(buffer, count, 10, &timeout);
+ int ret = kstrtoint_from_user(buffer, count, 10, &timeout);
if (ret)
return ret;
- set_bypass_wd_fn(pbp_device_block, timeout);
-
+ set_bypass_wd_fn(dev, timeout);
return count;
}
-
-int
-get_bypass_wd_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int show_bypass_wd(struct seq_file *m, void *v)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
+ bpctl_dev_t *dev = m->private;
+ int ret = 0, timeout = 0;
- int len = 0, ret = 0, timeout = 0;
-
- ret = get_bypass_wd_fn(pbp_device_block, &timeout);
+ ret = get_bypass_wd_fn(dev, &timeout);
if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
+ seq_printf(m, "fail\n");
else if (timeout == -1)
- len = sprintf(page, "unknown\n");
+ seq_printf(m, "unknown\n");
else if (timeout == 0)
- len = sprintf(page, "disable\n");
+ seq_printf(m, "disable\n");
else
- len = sprintf(page, "%d\n", timeout);
-
- *eof = 1;
- return len;
+ seq_printf(m, "%d\n", timeout);
+ return 0;
}
+RW_FOPS(bypass_wd)
-int
-get_wd_expire_time_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int show_wd_expire_time(struct seq_file *m, void *v)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0, timeout = 0;
-
- ret = get_wd_expire_time_fn(pbp_device_block, &timeout);
+ bpctl_dev_t *dev = m->private;
+ int ret = 0, timeout = 0;
+ ret = get_wd_expire_time_fn(dev, &timeout);
if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
+ seq_printf(m, "fail\n");
else if (timeout == -1)
- len = sprintf(page, "expire\n");
+ seq_printf(m, "expire\n");
else if (timeout == 0)
- len = sprintf(page, "disable\n");
-
+ seq_printf(m, "disable\n");
else
- len = sprintf(page, "%d\n", timeout);
- *eof = 1;
- return len;
+ seq_printf(m, "%d\n", timeout);
+ return 0;
}
+RO_FOPS(wd_expire_time)
-int
-get_tpl_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static ssize_t tpl_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
+ bpctl_dev_t *dev = PDE_DATA(file_inode(file));
+ int tpl_param = user_on_off(buffer, count);
+ if (tpl_param < 0)
+ return -1;
- ret = get_tpl_fn(pbp_device_block);
+ set_tpl_fn(dev, tpl_param);
+ return count;
+}
+static int show_tpl(struct seq_file *m, void *v)
+{
+ bpctl_dev_t *dev = m->private;
+ int ret = get_tpl_fn(dev);
if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
+ seq_printf(m, "fail\n");
else if (ret == 1)
- len = sprintf(page, "on\n");
+ seq_printf(m, "on\n");
else if (ret == 0)
- len = sprintf(page, "off\n");
-
- *eof = 1;
- return len;
+ seq_printf(m, "off\n");
+ return 0;
}
+RW_FOPS(tpl)
#ifdef PMC_FIX_FLAG
-int
-get_wait_at_pwup_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static ssize_t wait_at_pwup_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
+ bpctl_dev_t *dev = PDE_DATA(file_inode(file));
+ int tpl_param = user_on_off(buffer, count);
+ if (tpl_param < 0)
+ return -1;
- ret = get_bp_wait_at_pwup_fn(pbp_device_block);
+ set_bp_wait_at_pwup_fn(dev, tpl_param);
+ return count;
+}
+static int show_wait_at_pwup(struct seq_file *m, void *v)
+{
+ bpctl_dev_t *dev = m->private;
+ int ret = get_bp_wait_at_pwup_fn(dev);
if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
+ seq_printf(m, "fail\n");
else if (ret == 1)
- len = sprintf(page, "on\n");
+ seq_printf(m, "on\n");
else if (ret == 0)
- len = sprintf(page, "off\n");
-
- *eof = 1;
- return len;
+ seq_printf(m, "off\n");
+ return 0;
}
+RW_FOPS(wait_at_pwup)
-int
-get_hw_reset_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static ssize_t hw_reset_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
+ bpctl_dev_t *dev = PDE_DATA(file_inode(file));
+ int tpl_param = user_on_off(buffer, count);
+ if (tpl_param < 0)
+ return -1;
- ret = get_bp_hw_reset_fn(pbp_device_block);
+ set_bp_hw_reset_fn(dev, tpl_param);
+ return count;
+}
+static int show_hw_reset(struct seq_file *m, void *v)
+{
+ bpctl_dev_t *dev = m->private;
+ int ret = get_bp_hw_reset_fn(dev);
if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
+ seq_printf(m, "fail\n");
else if (ret == 1)
- len = sprintf(page, "on\n");
+ seq_printf(m, "on\n");
else if (ret == 0)
- len = sprintf(page, "off\n");
-
- *eof = 1;
- return len;
+ seq_printf(m, "off\n");
+ return 0;
}
+RW_FOPS(hw_reset)
#endif /*PMC_WAIT_FLAG */
-int
-reset_bypass_wd_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int show_reset_bypass_wd(struct seq_file *m, void *v)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = reset_bypass_wd_timer_fn(pbp_device_block);
+ bpctl_dev_t *dev = m->private;
+ int ret = reset_bypass_wd_timer_fn(dev);
if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
+ seq_printf(m, "fail\n");
else if (ret == 0)
- len = sprintf(page, "disable\n");
+ seq_printf(m, "disable\n");
else if (ret == 1)
- len = sprintf(page, "success\n");
-
- *eof = 1;
- return len;
+ seq_printf(m, "success\n");
+ return 0;
}
+RO_FOPS(reset_bypass_wd)
-int
-set_dis_bypass_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
+static ssize_t dis_bypass_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int bypass_param = 0, length = 0;
-
- if (count >= sizeof(kbuf))
+ int bypass_param = user_on_off(buffer, count);
+ if (bypass_param < 0)
return -EINVAL;
- if (copy_from_user(&kbuf, buffer, count)) {
- return -1;
- }
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- bypass_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- bypass_param = 0;
-
- set_dis_bypass_fn(pbp_device_block, bypass_param);
-
+ set_dis_bypass_fn(PDE_DATA(file_inode(file)), bypass_param);
return count;
}
-
-int
-set_dis_tap_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
+static int show_dis_bypass(struct seq_file *m, void *v)
{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int tap_param = 0, length = 0;
-
- if (count >= sizeof(kbuf))
- return -EINVAL;
-
- if (copy_from_user(&kbuf, buffer, count)) {
- return -1;
- }
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- tap_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- tap_param = 0;
-
- set_dis_tap_fn(pbp_device_block, tap_param);
-
- return count;
+ bpctl_dev_t *dev = m->private;
+ int ret = get_dis_bypass_fn(dev);
+ if (ret == BP_NOT_CAP)
+ seq_printf(m, "fail\n");
+ else if (ret == 0)
+ seq_printf(m, "off\n");
+ else
+ seq_printf(m, "on\n");
+ return 0;
}
+RW_FOPS(dis_bypass)
-int
-set_dis_disc_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
+static ssize_t dis_tap_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int tap_param = 0, length = 0;
-
- if (count >= sizeof(kbuf))
+ int tap_param = user_on_off(buffer, count);
+ if (tap_param < 0)
return -EINVAL;
- if (copy_from_user(&kbuf, buffer, count)) {
- return -1;
- }
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- tap_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- tap_param = 0;
-
- set_dis_disc_fn(pbp_device_block, tap_param);
-
+ set_dis_tap_fn(PDE_DATA(file_inode(file)), tap_param);
return count;
}
-
-int
-get_dis_bypass_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int show_dis_tap(struct seq_file *m, void *v)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_dis_bypass_fn(pbp_device_block);
+ bpctl_dev_t *dev = m->private;
+ int ret = get_dis_tap_fn(dev);
if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
+ seq_printf(m, "fail\n");
else if (ret == 0)
- len = sprintf(page, "off\n");
+ seq_printf(m, "off\n");
else
- len = sprintf(page, "on\n");
-
- *eof = 1;
- return len;
+ seq_printf(m, "on\n");
+ return 0;
}
+RW_FOPS(dis_tap)
-int
-get_dis_tap_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static ssize_t dis_disc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_dis_tap_fn(pbp_device_block);
- if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
- else if (ret == 0)
- len = sprintf(page, "off\n");
- else
- len = sprintf(page, "on\n");
+ int tap_param = user_on_off(buffer, count);
+ if (tap_param < 0)
+ return -EINVAL;
- *eof = 1;
- return len;
+ set_dis_disc_fn(PDE_DATA(file_inode(file)), tap_param);
+ return count;
}
-
-int
-get_dis_disc_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int show_dis_disc(struct seq_file *m, void *v)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_dis_disc_fn(pbp_device_block);
+ bpctl_dev_t *dev = m->private;
+ int ret = get_dis_disc_fn(dev);
if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
+ seq_printf(m, "fail\n");
else if (ret == 0)
- len = sprintf(page, "off\n");
+ seq_printf(m, "off\n");
else
- len = sprintf(page, "on\n");
-
- *eof = 1;
- return len;
-}
-
-int
-set_bypass_pwup_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int bypass_param = 0, length = 0;
-
- if (count >= sizeof(kbuf))
- return -EINVAL;
-
- if (copy_from_user(&kbuf, buffer, count)) {
- return -1;
- }
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- bypass_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- bypass_param = 0;
-
- set_bypass_pwup_fn(pbp_device_block, bypass_param);
-
- return count;
+ seq_printf(m, "on\n");
+ return 0;
}
+RW_FOPS(dis_disc)
-int
-set_bypass_pwoff_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
+static ssize_t bypass_pwup_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int bypass_param = 0, length = 0;
-
- if (count >= sizeof(kbuf))
+ int bypass_param = user_on_off(buffer, count);
+ if (bypass_param < 0)
return -EINVAL;
- if (copy_from_user(&kbuf, buffer, count)) {
- return -1;
- }
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- bypass_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- bypass_param = 0;
-
- set_bypass_pwoff_fn(pbp_device_block, bypass_param);
-
+ set_bypass_pwup_fn(PDE_DATA(file_inode(file)), bypass_param);
return count;
}
-
-int
-set_tap_pwup_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
+static int show_bypass_pwup(struct seq_file *m, void *v)
{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int tap_param = 0, length = 0;
-
- if (count >= sizeof(kbuf))
- return -EINVAL;
-
- if (copy_from_user(&kbuf, buffer, count)) {
- return -1;
- }
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- tap_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- tap_param = 0;
-
- set_tap_pwup_fn(pbp_device_block, tap_param);
-
- return count;
+ bpctl_dev_t *dev = m->private;
+ int ret = get_bypass_pwup_fn(dev);
+ if (ret == BP_NOT_CAP)
+ seq_printf(m, "fail\n");
+ else if (ret == 0)
+ seq_printf(m, "off\n");
+ else
+ seq_printf(m, "on\n");
+ return 0;
}
+RW_FOPS(bypass_pwup)
-int
-set_disc_pwup_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
+static ssize_t bypass_pwoff_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int tap_param = 0, length = 0;
-
- if (count >= sizeof(kbuf))
+ int bypass_param = user_on_off(buffer, count);
+ if (bypass_param < 0)
return -EINVAL;
- if (copy_from_user(&kbuf, buffer, count)) {
- return -1;
- }
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- tap_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- tap_param = 0;
-
- set_disc_pwup_fn(pbp_device_block, tap_param);
-
+ set_bypass_pwoff_fn(PDE_DATA(file_inode(file)), bypass_param);
return count;
}
-
-int
-get_bypass_pwup_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int show_bypass_pwoff(struct seq_file *m, void *v)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_bypass_pwup_fn(pbp_device_block);
+ bpctl_dev_t *dev = m->private;
+ int ret = get_bypass_pwoff_fn(dev);
if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
+ seq_printf(m, "fail\n");
else if (ret == 0)
- len = sprintf(page, "off\n");
+ seq_printf(m, "off\n");
else
- len = sprintf(page, "on\n");
-
- *eof = 1;
- return len;
+ seq_printf(m, "on\n");
+ return 0;
}
+RW_FOPS(bypass_pwoff)
-int
-get_bypass_pwoff_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static ssize_t tap_pwup_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_bypass_pwoff_fn(pbp_device_block);
- if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
- else if (ret == 0)
- len = sprintf(page, "off\n");
- else
- len = sprintf(page, "on\n");
+ int tap_param = user_on_off(buffer, count);
+ if (tap_param < 0)
+ return -EINVAL;
- *eof = 1;
- return len;
+ set_tap_pwup_fn(PDE_DATA(file_inode(file)), tap_param);
+ return count;
}
-
-int
-get_tap_pwup_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int show_tap_pwup(struct seq_file *m, void *v)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_tap_pwup_fn(pbp_device_block);
+ bpctl_dev_t *dev = m->private;
+ int ret = get_tap_pwup_fn(dev);
if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
+ seq_printf(m, "fail\n");
else if (ret == 0)
- len = sprintf(page, "off\n");
+ seq_printf(m, "off\n");
else
- len = sprintf(page, "on\n");
-
- *eof = 1;
- return len;
+ seq_printf(m, "on\n");
+ return 0;
}
+RW_FOPS(tap_pwup)
-int
-get_disc_pwup_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static ssize_t disc_pwup_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
+ int tap_param = user_on_off(buffer, count);
+ if (tap_param < 0)
+ return -EINVAL;
- ret = get_disc_pwup_fn(pbp_device_block);
+ set_disc_pwup_fn(PDE_DATA(file_inode(file)), tap_param);
+ return count;
+}
+static int show_disc_pwup(struct seq_file *m, void *v)
+{
+ bpctl_dev_t *dev = m->private;
+ int ret = get_disc_pwup_fn(dev);
if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
+ seq_printf(m, "fail\n");
else if (ret == 0)
- len = sprintf(page, "off\n");
+ seq_printf(m, "off\n");
else
- len = sprintf(page, "on\n");
-
- *eof = 1;
- return len;
+ seq_printf(m, "on\n");
+ return 0;
}
+RW_FOPS(disc_pwup)
-int
-set_std_nic_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
+static ssize_t std_nic_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int bypass_param = 0, length = 0;
-
- if (count >= sizeof(kbuf))
+ int bypass_param = user_on_off(buffer, count);
+ if (bypass_param < 0)
return -EINVAL;
- if (copy_from_user(&kbuf, buffer, count)) {
- return -1;
- }
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- bypass_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- bypass_param = 0;
-
- set_std_nic_fn(pbp_device_block, bypass_param);
-
+ set_std_nic_fn(PDE_DATA(file_inode(file)), bypass_param);
return count;
}
-
-int
-get_std_nic_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int show_std_nic(struct seq_file *m, void *v)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_std_nic_fn(pbp_device_block);
+ bpctl_dev_t *dev = m->private;
+ int ret = get_std_nic_fn(dev);
if (ret == BP_NOT_CAP)
- len = sprintf(page, "fail\n");
+ seq_printf(m, "fail\n");
else if (ret == 0)
- len = sprintf(page, "off\n");
+ seq_printf(m, "off\n");
else
- len = sprintf(page, "on\n");
-
- *eof = 1;
- return len;
-}
-
-int
-get_wd_exp_mode_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_wd_exp_mode_fn(pbp_device_block);
- if (ret == 1)
- len = sprintf(page, "tap\n");
- else if (ret == 0)
- len = sprintf(page, "bypass\n");
- else if (ret == 2)
- len = sprintf(page, "disc\n");
-
- else
- len = sprintf(page, "fail\n");
-
- *eof = 1;
- return len;
+ seq_printf(m, "on\n");
+ return 0;
}
+RW_FOPS(std_nic)
-int
-set_wd_exp_mode_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
+static ssize_t wd_exp_mode_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
-
char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
int bypass_param = 0, length = 0;
if (count > (sizeof(kbuf) - 1))
return -1;
- if (copy_from_user(&kbuf, buffer, count)) {
+ if (copy_from_user(&kbuf, buffer, count))
return -1;
- }
kbuf[count] = '\0';
length = strlen(kbuf);
@@ -8375,143 +7786,47 @@ set_wd_exp_mode_pfs(struct file *file, const char *buffer,
else if (strcmp(kbuf, "disc") == 0)
bypass_param = 2;
- set_wd_exp_mode_fn(pbp_device_block, bypass_param);
+ set_wd_exp_mode_fn(PDE_DATA(file_inode(file)), bypass_param);
return count;
}
-
-int
-get_wd_autoreset_pfs(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int show_wd_exp_mode(struct seq_file *m, void *v)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int len = 0, ret = 0;
-
- ret = get_wd_autoreset_fn(pbp_device_block);
- if (ret >= 0)
- len = sprintf(page, "%d\n", ret);
+ bpctl_dev_t *dev = m->private;
+ int ret = get_wd_exp_mode_fn(dev);
+ if (ret == 1)
+ seq_printf(m, "tap\n");
+ else if (ret == 0)
+ seq_printf(m, "bypass\n");
+ else if (ret == 2)
+ seq_printf(m, "disc\n");
else
- len = sprintf(page, "fail\n");
-
- *eof = 1;
- return len;
+ seq_printf(m, "fail\n");
+ return 0;
}
+RW_FOPS(wd_exp_mode)
-int
-set_wd_autoreset_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
+static ssize_t wd_autoreset_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
int timeout;
- int ret;
-
- ret = kstrtoint_from_user(buffer, count, 10, &timeout);
+ int ret = kstrtoint_from_user(buffer, count, 10, &timeout);
if (ret)
return ret;
- set_wd_autoreset_fn(pbp_device_block, timeout);
-
- return count;
-}
-
-int
-set_tpl_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int tpl_param = 0, length = 0;
-
- if (count > (sizeof(kbuf) - 1))
- return -1;
-
- if (copy_from_user(&kbuf, buffer, count)) {
- return -1;
- }
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- tpl_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- tpl_param = 0;
-
- set_tpl_fn(pbp_device_block, tpl_param);
-
+ set_wd_autoreset_fn(PDE_DATA(file_inode(file)), timeout);
return count;
}
-
-#ifdef PMC_FIX_FLAG
-int
-set_wait_at_pwup_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int tpl_param = 0, length = 0;
-
- if (count > (sizeof(kbuf) - 1))
- return -1;
-
- if (copy_from_user(&kbuf, buffer, count)) {
- return -1;
- }
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- tpl_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- tpl_param = 0;
-
- set_bp_wait_at_pwup_fn(pbp_device_block, tpl_param);
-
- return count;
-}
-
-int
-set_hw_reset_pfs(struct file *file, const char *buffer,
- unsigned long count, void *data)
+static int show_wd_autoreset(struct seq_file *m, void *v)
{
-
- char kbuf[256];
- bpctl_dev_t *pbp_device_block = (bpctl_dev_t *) data;
-
- int tpl_param = 0, length = 0;
-
- if (count > (sizeof(kbuf) - 1))
- return -1;
-
- if (copy_from_user(&kbuf, buffer, count)) {
- return -1;
- }
-
- kbuf[count] = '\0';
- length = strlen(kbuf);
- if (kbuf[length - 1] == '\n')
- kbuf[--length] = '\0';
-
- if (strcmp(kbuf, "on") == 0)
- tpl_param = 1;
- else if (strcmp(kbuf, "off") == 0)
- tpl_param = 0;
-
- set_bp_hw_reset_fn(pbp_device_block, tpl_param);
-
- return count;
+ bpctl_dev_t *dev = m->private;
+ int ret = get_wd_autoreset_fn(dev);
+ if (ret >= 0)
+ seq_printf(m, "%d\n", ret);
+ else
+ seq_printf(m, "fail\n");
+ return 0;
}
-
-#endif /*PMC_FIX_FLAG */
+RW_FOPS(wd_autoreset)
int bypass_proc_create_dev_sd(bpctl_dev_t *pbp_device_block)
{
@@ -8528,168 +7843,54 @@ int bypass_proc_create_dev_sd(bpctl_dev_t *pbp_device_block)
return -1;
/* create device proc dir */
- procfs_dir = proc_getdir(current_pfs->dir_name, bp_procfs_dir);
- if (procfs_dir == 0) {
+ procfs_dir = proc_mkdir(current_pfs->dir_name, bp_procfs_dir);
+ if (!procfs_dir) {
printk(KERN_DEBUG "Could not create procfs directory %s\n",
current_pfs->dir_name);
return -1;
}
current_pfs->bypass_entry = procfs_dir;
- if (bypass_proc_create_entry_sd(&(current_pfs->bypass_info), BYPASS_INFO_ENTRY_SD, NULL, /* write */
- get_bypass_info_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
-
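+/* ENTRY(x) registers a proc file named after x, backed by x##_ops, for this device; failures accumulate in ret */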
+#define ENTRY(x) ret |= procfs_add(#x, &x##_ops, pbp_device_block)
+ ENTRY(bypass_info);
if (pbp_device_block->bp_caps & SW_CTL_CAP) {
-
/* Create set param proc's */
- if (bypass_proc_create_entry_sd(&(current_pfs->bypass_slave), BYPASS_SLAVE_ENTRY_SD, NULL, /* write */
- get_bypass_slave_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->bypass_caps), BYPASS_CAPS_ENTRY_SD, NULL, /* write */
- get_bypass_caps_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->wd_set_caps), WD_SET_CAPS_ENTRY_SD, NULL, /* write */
- get_wd_set_caps_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
- if (bypass_proc_create_entry_sd(&(current_pfs->bypass_wd), BYPASS_WD_ENTRY_SD, set_bypass_wd_pfs, /* write */
- get_bypass_wd_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->wd_expire_time), WD_EXPIRE_TIME_ENTRY_SD, NULL, /* write */
- get_wd_expire_time_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->reset_bypass_wd), RESET_BYPASS_WD_ENTRY_SD, NULL, /* write */
- reset_bypass_wd_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->std_nic), STD_NIC_ENTRY_SD, set_std_nic_pfs, /* write */
- get_std_nic_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
-
+ ENTRY(bypass_slave);
+ ENTRY(bypass_caps);
+ ENTRY(wd_set_caps);
+ ENTRY(bypass_wd);
+ ENTRY(wd_expire_time);
+ ENTRY(reset_bypass_wd);
+ ENTRY(std_nic);
if (pbp_device_block->bp_caps & BP_CAP) {
- if (bypass_proc_create_entry_sd(&(current_pfs->bypass), BYPASS_ENTRY_SD, set_bypass_pfs, /* write */
- get_bypass_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->dis_bypass), DIS_BYPASS_ENTRY_SD, set_dis_bypass_pfs, /* write */
- get_dis_bypass_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->bypass_pwup), BYPASS_PWUP_ENTRY_SD, set_bypass_pwup_pfs, /* write */
- get_bypass_pwup_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
- if (bypass_proc_create_entry_sd(&(current_pfs->bypass_pwoff), BYPASS_PWOFF_ENTRY_SD, set_bypass_pwoff_pfs, /* write */
- get_bypass_pwoff_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->bypass_change), BYPASS_CHANGE_ENTRY_SD, NULL, /* write */
- get_bypass_change_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
+ ENTRY(bypass);
+ ENTRY(dis_bypass);
+ ENTRY(bypass_pwup);
+ ENTRY(bypass_pwoff);
+ ENTRY(bypass_change);
}
-
if (pbp_device_block->bp_caps & TAP_CAP) {
-
- if (bypass_proc_create_entry_sd(&(current_pfs->tap), TAP_ENTRY_SD, set_tap_pfs, /* write */
- get_tap_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->dis_tap), DIS_TAP_ENTRY_SD, set_dis_tap_pfs, /* write */
- get_dis_tap_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->tap_pwup), TAP_PWUP_ENTRY_SD, set_tap_pwup_pfs, /* write */
- get_tap_pwup_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->tap_change), TAP_CHANGE_ENTRY_SD, NULL, /* write */
- get_tap_change_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
+ ENTRY(tap);
+ ENTRY(dis_tap);
+ ENTRY(tap_pwup);
+ ENTRY(tap_change);
}
if (pbp_device_block->bp_caps & DISC_CAP) {
-
- if (bypass_proc_create_entry_sd(&(current_pfs->tap), DISC_ENTRY_SD, set_disc_pfs, /* write */
- get_disc_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
-#if 1
-
- if (bypass_proc_create_entry_sd(&(current_pfs->dis_tap), DIS_DISC_ENTRY_SD, set_dis_disc_pfs, /* write */
- get_dis_disc_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
-#endif
-
- if (bypass_proc_create_entry_sd(&(current_pfs->tap_pwup), DISC_PWUP_ENTRY_SD, set_disc_pwup_pfs, /* write */
- get_disc_pwup_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->tap_change), DISC_CHANGE_ENTRY_SD, NULL, /* write */
- get_disc_change_pfs, /* read */
- procfs_dir,
- pbp_device_block))
- ret = -1;
+ ENTRY(disc);
+ ENTRY(dis_disc);
+ ENTRY(disc_pwup);
+ ENTRY(disc_change);
}
- if (bypass_proc_create_entry_sd(&(current_pfs->wd_exp_mode), WD_EXP_MODE_ENTRY_SD, set_wd_exp_mode_pfs, /* write */
- get_wd_exp_mode_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
-
- if (bypass_proc_create_entry_sd(&(current_pfs->wd_autoreset), WD_AUTORESET_ENTRY_SD, set_wd_autoreset_pfs, /* write */
- get_wd_autoreset_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
- if (bypass_proc_create_entry_sd(&(current_pfs->tpl), TPL_ENTRY_SD, set_tpl_pfs, /* write */
- get_tpl_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
+ ENTRY(wd_exp_mode);
+ ENTRY(wd_autoreset);
+ ENTRY(tpl);
#ifdef PMC_FIX_FLAG
- if (bypass_proc_create_entry_sd(&(current_pfs->tpl), WAIT_AT_PWUP_ENTRY_SD, set_wait_at_pwup_pfs, /* write */
- get_wait_at_pwup_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
- if (bypass_proc_create_entry_sd(&(current_pfs->tpl), HW_RESET_ENTRY_SD, set_hw_reset_pfs, /* write */
- get_hw_reset_pfs, /* read */
- procfs_dir, pbp_device_block))
- ret = -1;
-
+ ENTRY(wait_at_pwup);
+ ENTRY(hw_reset);
#endif
-
}
+#undef ENTRY
if (ret < 0)
printk(KERN_DEBUG "Create proc entry failed\n");
@@ -8700,21 +7901,7 @@ int bypass_proc_remove_dev_sd(bpctl_dev_t *pbp_device_block)
{
struct bypass_pfs_sd *current_pfs = &pbp_device_block->bypass_pfs_set;
- struct proc_dir_entry *pde = current_pfs->bypass_entry, *pde_curr =
- NULL;
- char name[256];
-
- if (!pde)
- return 0;
- for (pde = pde->subdir; pde;) {
- strcpy(name, pde->name);
- pde_curr = pde;
- pde = pde->next;
- remove_proc_entry(name, current_pfs->bypass_entry);
- }
- if (!pde)
- remove_proc_entry(current_pfs->dir_name, bp_procfs_dir);
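+	/* remove_proc_subtree() drops the per-device directory and everything beneath it in one call */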
+ remove_proc_subtree(current_pfs->dir_name, bp_procfs_dir);
current_pfs->bypass_entry = NULL;
-
return 0;
}
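
The RO_FOPS()/RW_FOPS() helpers and procfs_add() used above are introduced earlier in this patch and are not visible in these hunks. Assuming the conventional single_open()/seq_file wiring that the show_*() and *_write() handlers imply, they would expand roughly as follows; this is a sketch of the pattern, not the literal definitions, and the BP_PROC_FOPS name and the 0644 mode are placeholders.

	/* Sketch only: one open() wrapper plus a file_operations per entry. */
	#define BP_PROC_FOPS(name, writer)					\
	static int name##_open(struct inode *inode, struct file *file)		\
	{									\
		return single_open(file, show_##name, PDE_DATA(inode));	\
	}									\
	static const struct file_operations name##_ops = {			\
		.owner	 = THIS_MODULE,						\
		.open	 = name##_open,						\
		.read	 = seq_read,						\
		.llseek	 = seq_lseek,						\
		.release = single_release,					\
		.write	 = writer,						\
	};

	#define RO_FOPS(name)	BP_PROC_FOPS(name, NULL)
	#define RW_FOPS(name)	BP_PROC_FOPS(name, name##_write)

	/* procfs_add() presumably wraps proc_create_data() with the device as PDE data. */
	static int procfs_add(const char *name, const struct file_operations *fops,
			      bpctl_dev_t *dev)
	{
		struct bypass_pfs_sd *pfs = &dev->bypass_pfs_set;

		if (!proc_create_data(name, 0644, pfs->bypass_entry, fops, dev))
			return -1;
		return 0;
	}
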
diff --git a/drivers/staging/ste_rmi4/Makefile b/drivers/staging/ste_rmi4/Makefile
index e4c0335..6cce2ed 100644
--- a/drivers/staging/ste_rmi4/Makefile
+++ b/drivers/staging/ste_rmi4/Makefile
@@ -2,4 +2,3 @@
# Makefile for the RMI4 touchscreen driver.
#
obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += synaptics_i2c_rmi4.o
-obj-$(CONFIG_MACH_MOP500) += board-mop500-u8500uib-rmi4.o
diff --git a/drivers/staging/ste_rmi4/board-mop500-u8500uib-rmi4.c b/drivers/staging/ste_rmi4/board-mop500-u8500uib-rmi4.c
deleted file mode 100644
index 47439c3..0000000
--- a/drivers/staging/ste_rmi4/board-mop500-u8500uib-rmi4.c
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Some platform data for the RMI4 touchscreen that will override the __weak
- * platform data in the Ux500 machine if this driver is activated.
- */
-#include <linux/i2c.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <mach/irqs.h>
-#include "synaptics_i2c_rmi4.h"
-
-/*
- * Synaptics RMI4 touchscreen interface on the U8500 UIB
- */
-
-/*
- * Descriptor structure.
- * Describes the number of i2c devices on the bus that speak RMI.
- */
-static struct synaptics_rmi4_platform_data rmi4_i2c_dev_platformdata = {
- .irq_number = NOMADIK_GPIO_TO_IRQ(84),
- .irq_type = (IRQF_TRIGGER_FALLING | IRQF_SHARED),
- .x_flip = false,
- .y_flip = true,
-};
-
-struct i2c_board_info __initdata mop500_i2c3_devices_u8500[] = {
- {
- I2C_BOARD_INFO("synaptics_rmi4_i2c", 0x4B),
- .platform_data = &rmi4_i2c_dev_platformdata,
- },
-};
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
index a126b25..fe667dd 100644
--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
@@ -864,6 +864,16 @@ static int synaptics_rmi4_i2c_query_device(struct synaptics_rmi4_data *pdata)
return 0;
}
+/*
+ * Default platform data, used when the board supplies none
+ * (synaptics_rmi4_probe() falls back to it below).
+ */
+static struct synaptics_rmi4_platform_data synaptics_rmi4_platformdata = {
+ .irq_type = (IRQF_TRIGGER_FALLING | IRQF_SHARED),
+ .x_flip = false,
+ .y_flip = true,
+};
+
/**
 * synaptics_rmi4_probe() - Initialize the i2c-client touchscreen driver
* @i2c: i2c client structure pointer
@@ -890,10 +900,8 @@ static int synaptics_rmi4_probe
return -EIO;
}
- if (!platformdata) {
- dev_err(&client->dev, "%s: no platform data\n", __func__);
- return -EINVAL;
- }
+ if (!platformdata)
+ platformdata = &synaptics_rmi4_platformdata;
/* Allocate and initialize the instance data for this client */
rmi4_data = kcalloc(2, sizeof(struct synaptics_rmi4_data),
@@ -977,13 +985,13 @@ static int synaptics_rmi4_probe
synaptics_rmi4_i2c_block_read(rmi4_data,
rmi4_data->fn01_data_base_addr + 1, intr_status,
rmi4_data->number_of_interrupt_register);
- retval = request_threaded_irq(platformdata->irq_number, NULL,
+ retval = request_threaded_irq(client->irq, NULL,
synaptics_rmi4_irq,
platformdata->irq_type,
DRIVER_NAME, rmi4_data);
if (retval) {
dev_err(&client->dev, "%s:Unable to get attn irq %d\n",
- __func__, platformdata->irq_number);
+ __func__, client->irq);
goto err_query_dev;
}
@@ -996,7 +1004,7 @@ static int synaptics_rmi4_probe
return retval;
err_free_irq:
- free_irq(platformdata->irq_number, rmi4_data);
+ free_irq(client->irq, rmi4_data);
err_query_dev:
regulator_disable(rmi4_data->regulator);
err_regulator_enable:
@@ -1019,11 +1027,10 @@ err_input:
static int synaptics_rmi4_remove(struct i2c_client *client)
{
struct synaptics_rmi4_data *rmi4_data = i2c_get_clientdata(client);
- const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;
rmi4_data->touch_stopped = true;
wake_up(&rmi4_data->wait);
- free_irq(pdata->irq_number, rmi4_data);
+ free_irq(client->irq, rmi4_data);
input_unregister_device(rmi4_data->input_dev);
regulator_disable(rmi4_data->regulator);
regulator_put(rmi4_data->regulator);
@@ -1046,10 +1053,9 @@ static int synaptics_rmi4_suspend(struct device *dev)
int retval;
unsigned char intr_status;
struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
- const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;
rmi4_data->touch_stopped = true;
- disable_irq(pdata->irq_number);
+ disable_irq(rmi4_data->i2c_client->irq);
retval = synaptics_rmi4_i2c_block_read(rmi4_data,
rmi4_data->fn01_data_base_addr + 1,
@@ -1080,11 +1086,10 @@ static int synaptics_rmi4_resume(struct device *dev)
int retval;
unsigned char intr_status;
struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
- const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;
regulator_enable(rmi4_data->regulator);
- enable_irq(pdata->irq_number);
+ enable_irq(rmi4_data->i2c_client->irq);
rmi4_data->touch_stopped = false;
retval = synaptics_rmi4_i2c_block_read(rmi4_data,
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h
index 384436e..8c9166b 100644
--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h
@@ -38,7 +38,6 @@
* This structure gives platform data for rmi4.
*/
struct synaptics_rmi4_platform_data {
- int irq_number;
int irq_type;
bool x_flip;
bool y_flip;
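
With irq_number gone from struct synaptics_rmi4_platform_data, the driver now takes its interrupt from client->irq, which the I2C core fills in from board info or the device tree. A board that still registers the touchscreen by hand would pass the IRQ alongside the slave address instead; an illustrative sketch reusing values from the deleted mop500 board file, where the bus number and identifiers are examples only:

	/* Example registration; NOMADIK_GPIO_TO_IRQ(84) is taken from the removed board file. */
	static struct synaptics_rmi4_platform_data rmi4_pdata = {
		.irq_type = IRQF_TRIGGER_FALLING | IRQF_SHARED,
		.x_flip   = false,
		.y_flip   = true,
	};

	static struct i2c_board_info mop500_rmi4_device __initdata = {
		I2C_BOARD_INFO("synaptics_rmi4_i2c", 0x4B),
		.irq		= NOMADIK_GPIO_TO_IRQ(84),	/* was platform_data->irq_number */
		.platform_data	= &rmi4_pdata,
	};

	/* ...and from the board's init code: i2c_register_board_info(3, &mop500_rmi4_device, 1); */
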
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index be4f6c2..08b250f 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -60,6 +60,7 @@
*/
#undef __NO_VERSION__
+#include <linux/file.h>
#include "device.h"
#include "card.h"
#include "channel.h"
@@ -2737,83 +2738,50 @@ static int Config_FileGetParameter(unsigned char *string,
return true;
}
-int Config_FileOperation(PSDevice pDevice, bool fwrite, unsigned char *Parameter) {
- unsigned char *config_path = CONFIG_PATH;
- unsigned char *buffer = NULL;
+int Config_FileOperation(PSDevice pDevice, bool fwrite, unsigned char *Parameter)
+{
+ unsigned char *buffer = kmalloc(1024, GFP_KERNEL);
unsigned char tmpbuffer[20];
- struct file *filp = NULL;
- mm_segment_t old_fs = get_fs();
- //int oldfsuid=0,oldfsgid=0;
- int result = 0;
-
- set_fs(KERNEL_DS);
-
- /* Can't do this anymore, so we rely on correct filesystem permissions:
- //Make sure a caller can read or write power as root
- oldfsuid=current->cred->fsuid;
- oldfsgid=current->cred->fsgid;
- current->cred->fsuid = 0;
- current->cred->fsgid = 0;
- */
-
- //open file
- filp = filp_open(config_path, O_RDWR, 0);
- if (IS_ERR(filp)) {
- printk("Config_FileOperation:open file fail?\n");
- result = -1;
- goto error2;
- }
+ struct file *file;
+	int result = 0;
- if (!(filp->f_op) || !(filp->f_op->read) || !(filp->f_op->write)) {
- printk("file %s cann't readable or writable?\n", config_path);
- result = -1;
- goto error1;
- }
-
- buffer = kmalloc(1024, GFP_KERNEL);
- if (buffer == NULL) {
+ if (!buffer) {
printk("allocate mem for file fail?\n");
- result = -1;
- goto error1;
+ return -1;
+ }
+ file = filp_open(CONFIG_PATH, O_RDONLY, 0);
+ if (IS_ERR(file)) {
+ kfree(buffer);
+ printk("Config_FileOperation:open file fail?\n");
+ return -1;
}
- if (filp->f_op->read(filp, buffer, 1024, &filp->f_pos) < 0) {
+ if (kernel_read(file, 0, buffer, 1024) < 0) {
printk("read file error?\n");
result = -1;
goto error1;
}
- if (Config_FileGetParameter("ZONETYPE", tmpbuffer, buffer) != true) {
+	if (Config_FileGetParameter("ZONETYPE", tmpbuffer, buffer) != true) {
printk("get parameter error?\n");
result = -1;
goto error1;
}
- if (memcmp(tmpbuffer, "USA", 3) == 0) {
+	if (memcmp(tmpbuffer, "USA", 3) == 0) {
result = ZoneType_USA;
- } else if (memcmp(tmpbuffer, "JAPAN", 5) == 0) {
+	} else if (memcmp(tmpbuffer, "JAPAN", 5) == 0) {
result = ZoneType_Japan;
- } else if (memcmp(tmpbuffer, "EUROPE", 5) == 0) {
+	} else if (memcmp(tmpbuffer, "EUROPE", 5) == 0) {
result = ZoneType_Europe;
} else {
result = -1;
- printk("Unknown Zonetype[%s]?\n", tmpbuffer);
+		printk("Unknown Zonetype[%s]?\n", tmpbuffer);
}
error1:
kfree(buffer);
-
- if (filp_close(filp, NULL))
- printk("Config_FileOperation:close file fail\n");
-
-error2:
- set_fs(old_fs);
-
- /*
- current->cred->fsuid=oldfsuid;
- current->cred->fsgid=oldfsgid;
- */
-
+ fput(file);
return result;
}
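
The vt6656 driver below carries a near-identical copy of this routine; both now reduce to the same kernel-space file read, so a shared helper would be the natural follow-up. A sketch against the pre-4.14 four-argument kernel_read(), capping the read at 1023 bytes into a zeroed buffer so the string parser always sees a NUL terminator (the helper name is made up):

	/* Read up to 1023 bytes of CONFIG_PATH into a fresh buffer; caller kfree()s it. */
	static unsigned char *vnt_read_config_file(void)
	{
		unsigned char *buffer = kzalloc(1024, GFP_KERNEL);
		struct file *file;

		if (!buffer)
			return NULL;
		file = filp_open(CONFIG_PATH, O_RDONLY, 0);
		if (IS_ERR(file)) {
			kfree(buffer);
			return NULL;
		}
		if (kernel_read(file, 0, buffer, 1023) < 0) {
			kfree(buffer);
			buffer = NULL;
		}
		fput(file);
		return buffer;
	}
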
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 2161af8..3a3fdc5 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -46,6 +46,7 @@
*/
#undef __NO_VERSION__
+#include <linux/file.h>
#include "device.h"
#include "card.h"
#include "baseband.h"
@@ -1273,53 +1274,29 @@ static int Config_FileGetParameter(unsigned char *string,
/* if read fails, return NULL, or return data pointer */
static unsigned char *Config_FileOperation(struct vnt_private *pDevice)
{
- unsigned char *config_path = CONFIG_PATH;
- unsigned char *buffer = NULL;
- struct file *filp=NULL;
- mm_segment_t old_fs = get_fs();
+ unsigned char *buffer = kmalloc(1024, GFP_KERNEL);
+ struct file *file;
- int result = 0;
-
- set_fs (KERNEL_DS);
-
- /* open file */
- filp = filp_open(config_path, O_RDWR, 0);
- if (IS_ERR(filp)) {
- printk("Config_FileOperation file Not exist\n");
- result=-1;
- goto error2;
- }
-
- if(!(filp->f_op) || !(filp->f_op->read) ||!(filp->f_op->write)) {
- printk("file %s is not read or writeable?\n",config_path);
- result = -1;
- goto error1;
- }
-
- buffer = kmalloc(1024, GFP_KERNEL);
- if(buffer==NULL) {
- printk("allocate mem for file fail?\n");
- result = -1;
- goto error1;
- }
-
- if(filp->f_op->read(filp, buffer, 1024, &filp->f_pos)<0) {
- printk("read file error?\n");
- result = -1;
- }
+ if (!buffer) {
+ printk("allocate mem for file fail?\n");
+ return NULL;
+ }
-error1:
- if(filp_close(filp,NULL))
- printk("Config_FileOperation:close file fail\n");
+ file = filp_open(CONFIG_PATH, O_RDONLY, 0);
+ if (IS_ERR(file)) {
+ kfree(buffer);
+ printk("Config_FileOperation file Not exist\n");
+ return NULL;
+ }
-error2:
- set_fs (old_fs);
+ if (kernel_read(file, 0, buffer, 1024) < 0) {
+ printk("read file error?\n");
+ kfree(buffer);
+ buffer = NULL;
+ }
-if(result!=0) {
- kfree(buffer);
- buffer=NULL;
-}
- return buffer;
+ fput(file);
+ return buffer;
}
/* return --->-1:fail; >=0:successful */
diff --git a/drivers/staging/wlags49_h2/wl_main.c b/drivers/staging/wlags49_h2/wl_main.c
index f5f120a..f28f15b 100644
--- a/drivers/staging/wlags49_h2/wl_main.c
+++ b/drivers/staging/wlags49_h2/wl_main.c
@@ -73,6 +73,7 @@
#include <linux/module.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/types.h>
#include <linux/kernel.h>
// #include <linux/sched.h>
@@ -144,10 +145,23 @@
void wl_isr_handler( unsigned long p );
#if 0 //SCULL_USE_PROC /* don't waste space if unused */
-//int scull_read_procmem(char *buf, char **start, off_t offset, int len, int unused);
-int scull_read_procmem(char *buf, char **start, off_t offset, int len, int *eof, void *data );
+static int scull_read_procmem(struct seq_file *m, void *v);
static int write_int(struct file *file, const char *buffer, unsigned long count, void *data);
-static void proc_write(const char *name, write_proc_t *w, void *data);
+
+/*
+ * seq_file wrappers for procfile show routines.
+ */
+static int scull_read_procmem_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, scull_read_procmem, PDE_DATA(inode));
+}
+
+static const struct file_operations scull_read_procmem_fops = {
+ .open = scull_read_procmem_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#endif /* SCULL_USE_PROC */
@@ -908,9 +922,8 @@ int wl_insert( struct net_device *dev )
}
#if 0 //SCULL_USE_PROC /* don't waste space if unused */
- create_proc_read_entry( "wlags", 0, NULL, scull_read_procmem, dev );
+ proc_create_data( "wlags", 0, NULL, &scull_read_procmem_fops, dev );
proc_mkdir("driver/wlags49", 0);
- proc_write("driver/wlags49/wlags49_type", write_int, &lp->wlags49_type);
#endif /* SCULL_USE_PROC */
DBG_LEAVE( DbgInfo );
@@ -2097,7 +2110,7 @@ static void __exit wl_module_exit( void )
wl_adapter_cleanup_module( );
#if 0 //SCULL_USE_PROC /* don't waste space if unused */
- remove_proc_entry( "wlags", NULL ); //;?why so a-symmetric compared to location of create_proc_read_entry
+ remove_proc_entry( "wlags", NULL ); //;?why so a-symmetric compared to location of proc_create_data
#endif
DBG_LEAVE( DbgInfo );
@@ -3531,229 +3544,215 @@ void wl_wds_netdev_deregister( struct wl_private *lp )
/*
* The proc filesystem: function to read and entry
*/
-int printf_hcf_16( char *s, char *buf, hcf_16* p, int n );
-int printf_hcf_16( char *s, char *buf, hcf_16* p, int n ) {
+static void printf_hcf_16(struct seq_file *m, const char *s, hcf_16 *p, int n)
+{
+ int i, len;
-int i, len;
+ seq_printf(m, "%-20.20s: ", s);
+ len = 22;
- len = sprintf(buf, "%s", s );
- while ( len < 20 ) len += sprintf(buf+len, " " );
- len += sprintf(buf+len,": " );
- for ( i = 0; i < n; i++ ) {
- if ( len % 80 > 75 ) {
- len += sprintf(buf+len,"\n" );
- }
- len += sprintf(buf+len,"%04X ", p[i] );
+ for (i = 0; i < n; i++) {
+		if (len % 80 > 75) {
+			seq_putc(m, '\n');
+			len++;
+		}
+		seq_printf(m, "%04X ", p[i]);
+		len += 5;
}
- len += sprintf(buf+len,"\n" );
- return len;
-} // printf_hcf_16
+ seq_putc(m, '\n');
+}
-int printf_hcf_8( char *s, char *buf, hcf_8* p, int n );
-int printf_hcf_8( char *s, char *buf, hcf_8* p, int n ) {
+static void printf_hcf_8(struct seq_file *m, const char *s, hcf_8 *p, int n)
+{
+ int i, len;
-int i, len;
+ seq_printf(m, "%-20.20s: ", s);
+ len = 22;
- len = sprintf(buf, "%s", s );
- while ( len < 20 ) len += sprintf(buf+len, " " );
- len += sprintf(buf+len,": " );
- for ( i = 0; i <= n; i++ ) {
- if ( len % 80 > 77 ) {
- len += sprintf(buf+len,"\n" );
- }
- len += sprintf(buf+len,"%02X ", p[i] );
+ for (i = 0; i <= n; i++) {
+		if (len % 80 > 77) {
+			seq_putc(m, '\n');
+			len++;
+		}
+		seq_printf(m, "%02X ", p[i]);
+		len += 3;
}
- len += sprintf(buf+len,"\n" );
- return len;
-} // printf_hcf8
+ seq_putc(m, '\n');
+}
-int printf_strct( char *s, char *buf, hcf_16* p );
-int printf_strct( char *s, char *buf, hcf_16* p ) {
+static void printf_strct(struct seq_file *m, const char *s, hcf_16 *p)
+{
+ int i, len;
-int i, len;
+ seq_printf(m, "%-20.20s: ", s);
+ len = 22;
- len = sprintf(buf, "%s", s );
- while ( len < 20 ) len += sprintf(buf+len, " " );
- len += sprintf(buf+len,": " );
for ( i = 0; i <= *p; i++ ) {
- if ( len % 80 > 75 ) {
- len += sprintf(buf+len,"\n" );
- }
- len += sprintf(buf+len,"%04X ", p[i] );
+		if (len % 80 > 75) {
+			seq_putc(m, '\n');
+			len++;
+		}
+		seq_printf(m, "%04X ", p[i]);
+		len += 5;
}
- len += sprintf(buf+len,"\n" );
- return len;
-} // printf_strct
+ seq_putc(m, '\n');
+}
-int scull_read_procmem(char *buf, char **start, off_t offset, int len, int *eof, void *data )
+static int scull_read_procmem(struct seq_file *m, void *v)
{
- struct wl_private *lp = NULL;
+ struct wl_private *lp = m->private;
IFBP ifbp;
CFG_HERMES_TALLIES_STRCT *p;
- #define LIMIT (PAGE_SIZE-80) /* don't print any more after this size */
-
- len=0;
-
- lp = ((struct net_device *)data)->priv;
if (lp == NULL) {
- len += sprintf(buf+len,"No wl_private in scull_read_procmem\n" );
+ seq_puts(m, "No wl_private in scull_read_procmem\n" );
} else if ( lp->wlags49_type == 0 ){
- ifbp = &lp->hcfCtx;
- len += sprintf(buf+len,"Magic: 0x%04X\n", ifbp->IFB_Magic );
- len += sprintf(buf+len,"IOBase: 0x%04X\n", ifbp->IFB_IOBase );
- len += sprintf(buf+len,"LinkStat: 0x%04X\n", ifbp->IFB_LinkStat );
- len += sprintf(buf+len,"DSLinkStat: 0x%04X\n", ifbp->IFB_DSLinkStat );
- len += sprintf(buf+len,"TickIni: 0x%08lX\n", ifbp->IFB_TickIni );
- len += sprintf(buf+len,"TickCnt: 0x%04X\n", ifbp->IFB_TickCnt );
- len += sprintf(buf+len,"IntOffCnt: 0x%04X\n", ifbp->IFB_IntOffCnt );
- len += printf_hcf_16( "IFB_FWIdentity", &buf[len],
- &ifbp->IFB_FWIdentity.len, ifbp->IFB_FWIdentity.len + 1 );
+ ifbp = &lp->hcfCtx;
+ seq_printf(m, "Magic: 0x%04X\n", ifbp->IFB_Magic );
+ seq_printf(m, "IOBase: 0x%04X\n", ifbp->IFB_IOBase );
+ seq_printf(m, "LinkStat: 0x%04X\n", ifbp->IFB_LinkStat );
+ seq_printf(m, "DSLinkStat: 0x%04X\n", ifbp->IFB_DSLinkStat );
+ seq_printf(m, "TickIni: 0x%08lX\n", ifbp->IFB_TickIni );
+ seq_printf(m, "TickCnt: 0x%04X\n", ifbp->IFB_TickCnt );
+ seq_printf(m, "IntOffCnt: 0x%04X\n", ifbp->IFB_IntOffCnt );
+ printf_hcf_16(m, "IFB_FWIdentity",
+ &ifbp->IFB_FWIdentity.len, ifbp->IFB_FWIdentity.len + 1 );
} else if ( lp->wlags49_type == 1 ) {
- len += sprintf(buf+len,"Channel: 0x%04X\n", lp->Channel );
-/****** len += sprintf(buf+len,"slock: %d\n", lp->slock ); */
+ seq_printf(m, "Channel: 0x%04X\n", lp->Channel );
+/****** seq_printf(m, "slock: %d\n", lp->slock ); */
//x struct tq_struct "task: 0x%04X\n", lp->task );
//x struct net_device_stats "stats: 0x%04X\n", lp->stats );
#ifdef WIRELESS_EXT
//x struct iw_statistics "wstats: 0x%04X\n", lp->wstats );
-//x len += sprintf(buf+len,"spy_number: 0x%04X\n", lp->spy_number );
+//x seq_printf(m, "spy_number: 0x%04X\n", lp->spy_number );
//x u_char spy_address[IW_MAX_SPY][ETH_ALEN];
//x struct iw_quality spy_stat[IW_MAX_SPY];
#endif // WIRELESS_EXT
- len += sprintf(buf+len,"IFB: 0x%p\n", &lp->hcfCtx );
- len += sprintf(buf+len,"flags: %#.8lX\n", lp->flags ); //;?use this format from now on
- len += sprintf(buf+len,"DebugFlag(wl_private) 0x%04X\n", lp->DebugFlag );
+ seq_printf(m, "IFB: 0x%p\n", &lp->hcfCtx );
+ seq_printf(m, "flags: %#.8lX\n", lp->flags ); //;?use this format from now on
+ seq_printf(m, "DebugFlag(wl_private) 0x%04X\n", lp->DebugFlag );
#if DBG
- len += sprintf(buf+len,"DebugFlag (DbgInfo): 0x%08lX\n", DbgInfo->DebugFlag );
+ seq_printf(m, "DebugFlag (DbgInfo): 0x%08lX\n", DbgInfo->DebugFlag );
#endif // DBG
- len += sprintf(buf+len,"is_registered: 0x%04X\n", lp->is_registered );
+ seq_printf(m, "is_registered: 0x%04X\n", lp->is_registered );
//x CFG_DRV_INFO_STRCT "driverInfo: 0x%04X\n", lp->driverInfo );
- len += printf_strct( "driverInfo", &buf[len], (hcf_16*)&lp->driverInfo );
+ printf_strct( m, "driverInfo", (hcf_16*)&lp->driverInfo );
//x CFG_IDENTITY_STRCT "driverIdentity: 0x%04X\n", lp->driverIdentity );
- len += printf_strct( "driverIdentity", &buf[len], (hcf_16*)&lp->driverIdentity );
+ printf_strct( m, "driverIdentity", (hcf_16*)&lp->driverIdentity );
//x CFG_FW_IDENTITY_STRCT "StationIdentity: 0x%04X\n", lp->StationIdentity );
- len += printf_strct( "StationIdentity", &buf[len], (hcf_16*)&lp->StationIdentity );
+ printf_strct( m, "StationIdentity", (hcf_16*)&lp->StationIdentity );
//x CFG_PRI_IDENTITY_STRCT "PrimaryIdentity: 0x%04X\n", lp->PrimaryIdentity );
- len += printf_strct( "PrimaryIdentity", &buf[len], (hcf_16*)&lp->hcfCtx.IFB_PRIIdentity );
- len += printf_strct( "PrimarySupplier", &buf[len], (hcf_16*)&lp->hcfCtx.IFB_PRISup );
+ printf_strct( m, "PrimaryIdentity", (hcf_16*)&lp->hcfCtx.IFB_PRIIdentity );
+ printf_strct( m, "PrimarySupplier", (hcf_16*)&lp->hcfCtx.IFB_PRISup );
//x CFG_PRI_IDENTITY_STRCT "NICIdentity: 0x%04X\n", lp->NICIdentity );
- len += printf_strct( "NICIdentity", &buf[len], (hcf_16*)&lp->NICIdentity );
+ printf_strct( m, "NICIdentity", (hcf_16*)&lp->NICIdentity );
//x ltv_t "ltvRecord: 0x%04X\n", lp->ltvRecord );
- len += sprintf(buf+len,"txBytes: 0x%08lX\n", lp->txBytes );
- len += sprintf(buf+len,"maxPort: 0x%04X\n", lp->maxPort ); /* 0 for STA, 6 for AP */
- /* Elements used for async notification from hardware */
+ seq_printf(m, "txBytes: 0x%08lX\n", lp->txBytes );
+ seq_printf(m, "maxPort: 0x%04X\n", lp->maxPort ); /* 0 for STA, 6 for AP */
+ /* Elements used for async notification from hardware */
//x RID_LOG_STRCT RidList[10];
//x ltv_t "updatedRecord: 0x%04X\n", lp->updatedRecord );
//x PROBE_RESP "ProbeResp: 0x%04X\n", lp->ProbeResp );
//x ASSOC_STATUS_STRCT "assoc_stat: 0x%04X\n", lp->assoc_stat );
//x SECURITY_STATUS_STRCT "sec_stat: 0x%04X\n", lp->sec_stat );
//x u_char lookAheadBuf[WVLAN_MAX_LOOKAHEAD];
- len += sprintf(buf+len,"PortType: 0x%04X\n", lp->PortType ); // 1 - 3 (1 [Normal] | 3 [AdHoc])
- len += sprintf(buf+len,"Channel: 0x%04X\n", lp->Channel ); // 0 - 14 (0)
+ seq_printf(m, "PortType: 0x%04X\n", lp->PortType ); // 1 - 3 (1 [Normal] | 3 [AdHoc])
+ seq_printf(m, "Channel: 0x%04X\n", lp->Channel ); // 0 - 14 (0)
//x hcf_16 TxRateControl[2];
- len += sprintf(buf+len,"TxRateControl[2]: 0x%04X 0x%04X\n",
- lp->TxRateControl[0], lp->TxRateControl[1] );
- len += sprintf(buf+len,"DistanceBetweenAPs: 0x%04X\n", lp->DistanceBetweenAPs ); // 1 - 3 (1)
- len += sprintf(buf+len,"RTSThreshold: 0x%04X\n", lp->RTSThreshold ); // 0 - 2347 (2347)
- len += sprintf(buf+len,"PMEnabled: 0x%04X\n", lp->PMEnabled ); // 0 - 2, 8001 - 8002 (0)
- len += sprintf(buf+len,"MicrowaveRobustness: 0x%04X\n", lp->MicrowaveRobustness );// 0 - 1 (0)
- len += sprintf(buf+len,"CreateIBSS: 0x%04X\n", lp->CreateIBSS ); // 0 - 1 (0)
- len += sprintf(buf+len,"MulticastReceive: 0x%04X\n", lp->MulticastReceive ); // 0 - 1 (1)
- len += sprintf(buf+len,"MaxSleepDuration: 0x%04X\n", lp->MaxSleepDuration ); // 0 - 65535 (100)
+ seq_printf(m, "TxRateControl[2]: 0x%04X 0x%04X\n",
+ lp->TxRateControl[0], lp->TxRateControl[1] );
+ seq_printf(m, "DistanceBetweenAPs: 0x%04X\n", lp->DistanceBetweenAPs ); // 1 - 3 (1)
+ seq_printf(m, "RTSThreshold: 0x%04X\n", lp->RTSThreshold ); // 0 - 2347 (2347)
+ seq_printf(m, "PMEnabled: 0x%04X\n", lp->PMEnabled ); // 0 - 2, 8001 - 8002 (0)
+ seq_printf(m, "MicrowaveRobustness: 0x%04X\n", lp->MicrowaveRobustness );// 0 - 1 (0)
+ seq_printf(m, "CreateIBSS: 0x%04X\n", lp->CreateIBSS ); // 0 - 1 (0)
+ seq_printf(m, "MulticastReceive: 0x%04X\n", lp->MulticastReceive ); // 0 - 1 (1)
+ seq_printf(m, "MaxSleepDuration: 0x%04X\n", lp->MaxSleepDuration ); // 0 - 65535 (100)
//x hcf_8 MACAddress[ETH_ALEN];
- len += printf_hcf_8( "MACAddress", &buf[len], lp->MACAddress, ETH_ALEN );
+ printf_hcf_8(m, "MACAddress", lp->MACAddress, ETH_ALEN );
//x char NetworkName[HCF_MAX_NAME_LEN+1];
- len += sprintf(buf+len,"NetworkName: %.32s\n", lp->NetworkName );
+ seq_printf(m, "NetworkName: %.32s\n", lp->NetworkName );
//x char StationName[HCF_MAX_NAME_LEN+1];
- len += sprintf(buf+len,"EnableEncryption: 0x%04X\n", lp->EnableEncryption ); // 0 - 1 (0)
+ seq_printf(m, "EnableEncryption: 0x%04X\n", lp->EnableEncryption ); // 0 - 1 (0)
//x char Key1[MAX_KEY_LEN+1];
- len += printf_hcf_8( "Key1", &buf[len], lp->Key1, MAX_KEY_LEN );
+ printf_hcf_8( m, "Key1", lp->Key1, MAX_KEY_LEN );
//x char Key2[MAX_KEY_LEN+1];
//x char Key3[MAX_KEY_LEN+1];
//x char Key4[MAX_KEY_LEN+1];
- len += sprintf(buf+len,"TransmitKeyID: 0x%04X\n", lp->TransmitKeyID ); // 1 - 4 (1)
+ seq_printf(m, "TransmitKeyID: 0x%04X\n", lp->TransmitKeyID ); // 1 - 4 (1)
//x CFG_DEFAULT_KEYS_STRCT "DefaultKeys: 0x%04X\n", lp->DefaultKeys );
//x u_char mailbox[MB_SIZE];
//x char szEncryption[MAX_ENC_LEN];
- len += sprintf(buf+len,"driverEnable: 0x%04X\n", lp->driverEnable );
- len += sprintf(buf+len,"wolasEnable: 0x%04X\n", lp->wolasEnable );
- len += sprintf(buf+len,"atimWindow: 0x%04X\n", lp->atimWindow );
- len += sprintf(buf+len,"holdoverDuration: 0x%04X\n", lp->holdoverDuration );
+ seq_printf(m, "driverEnable: 0x%04X\n", lp->driverEnable );
+ seq_printf(m, "wolasEnable: 0x%04X\n", lp->wolasEnable );
+ seq_printf(m, "atimWindow: 0x%04X\n", lp->atimWindow );
+ seq_printf(m, "holdoverDuration: 0x%04X\n", lp->holdoverDuration );
//x hcf_16 MulticastRate[2];
- len += sprintf(buf+len,"authentication: 0x%04X\n", lp->authentication ); // is this AP specific?
- len += sprintf(buf+len,"promiscuousMode: 0x%04X\n", lp->promiscuousMode );
- len += sprintf(buf+len,"DownloadFirmware: 0x%04X\n", lp->DownloadFirmware ); // 0 - 2 (0 [None] | 1 [STA] | 2 [AP])
- len += sprintf(buf+len,"AuthKeyMgmtSuite: 0x%04X\n", lp->AuthKeyMgmtSuite );
- len += sprintf(buf+len,"loadBalancing: 0x%04X\n", lp->loadBalancing );
- len += sprintf(buf+len,"mediumDistribution: 0x%04X\n", lp->mediumDistribution );
- len += sprintf(buf+len,"txPowLevel: 0x%04X\n", lp->txPowLevel );
-// len += sprintf(buf+len,"shortRetryLimit: 0x%04X\n", lp->shortRetryLimit );
-// len += sprintf(buf+len,"longRetryLimit: 0x%04X\n", lp->longRetryLimit );
+ seq_printf(m, "authentication: 0x%04X\n", lp->authentication ); // is this AP specific?
+ seq_printf(m, "promiscuousMode: 0x%04X\n", lp->promiscuousMode );
+ seq_printf(m, "DownloadFirmware: 0x%04X\n", lp->DownloadFirmware ); // 0 - 2 (0 [None] | 1 [STA] | 2 [AP])
+ seq_printf(m, "AuthKeyMgmtSuite: 0x%04X\n", lp->AuthKeyMgmtSuite );
+ seq_printf(m, "loadBalancing: 0x%04X\n", lp->loadBalancing );
+ seq_printf(m, "mediumDistribution: 0x%04X\n", lp->mediumDistribution );
+ seq_printf(m, "txPowLevel: 0x%04X\n", lp->txPowLevel );
+// seq_printf(m, "shortRetryLimit: 0x%04X\n", lp->shortRetryLimit );
+// seq_printf(m, "longRetryLimit: 0x%04X\n", lp->longRetryLimit );
//x hcf_16 srsc[2];
//x hcf_16 brsc[2];
- len += sprintf(buf+len,"connectionControl: 0x%04X\n", lp->connectionControl );
+ seq_printf(m, "connectionControl: 0x%04X\n", lp->connectionControl );
//x //hcf_16 probeDataRates[2];
- len += sprintf(buf+len,"ownBeaconInterval: 0x%04X\n", lp->ownBeaconInterval );
- len += sprintf(buf+len,"coexistence: 0x%04X\n", lp->coexistence );
+ seq_printf(m, "ownBeaconInterval: 0x%04X\n", lp->ownBeaconInterval );
+ seq_printf(m, "coexistence: 0x%04X\n", lp->coexistence );
//x WVLAN_FRAME "txF: 0x%04X\n", lp->txF );
//x WVLAN_LFRAME txList[DEFAULT_NUM_TX_FRAMES];
//x struct list_head "txFree: 0x%04X\n", lp->txFree );
//x struct list_head txQ[WVLAN_MAX_TX_QUEUES];
- len += sprintf(buf+len,"netif_queue_on: 0x%04X\n", lp->netif_queue_on );
- len += sprintf(buf+len,"txQ_count: 0x%04X\n", lp->txQ_count );
+ seq_printf(m, "netif_queue_on: 0x%04X\n", lp->netif_queue_on );
+ seq_printf(m, "txQ_count: 0x%04X\n", lp->txQ_count );
//x DESC_STRCT "desc_rx: 0x%04X\n", lp->desc_rx );
//x DESC_STRCT "desc_tx: 0x%04X\n", lp->desc_tx );
//x WVLAN_PORT_STATE "portState: 0x%04X\n", lp->portState );
//x ScanResult "scan_results: 0x%04X\n", lp->scan_results );
//x ProbeResult "probe_results: 0x%04X\n", lp->probe_results );
- len += sprintf(buf+len,"probe_num_aps: 0x%04X\n", lp->probe_num_aps );
- len += sprintf(buf+len,"use_dma: 0x%04X\n", lp->use_dma );
+ seq_printf(m, "probe_num_aps: 0x%04X\n", lp->probe_num_aps );
+ seq_printf(m, "use_dma: 0x%04X\n", lp->use_dma );
//x DMA_STRCT "dma: 0x%04X\n", lp->dma );
#ifdef USE_RTS
- len += sprintf(buf+len,"useRTS: 0x%04X\n", lp->useRTS );
+ seq_printf(m, "useRTS: 0x%04X\n", lp->useRTS );
#endif // USE_RTS
#if 1 //;? (HCF_TYPE) & HCF_TYPE_AP
//;?should we restore this to allow smaller memory footprint
//;?I guess not. This should be brought under Debug mode only
- len += sprintf(buf+len,"DTIMPeriod: 0x%04X\n", lp->DTIMPeriod ); // 1 - 255 (1)
- len += sprintf(buf+len,"multicastPMBuffering: 0x%04X\n", lp->multicastPMBuffering );
- len += sprintf(buf+len,"RejectAny: 0x%04X\n", lp->RejectAny ); // 0 - 1 (0)
- len += sprintf(buf+len,"ExcludeUnencrypted: 0x%04X\n", lp->ExcludeUnencrypted ); // 0 - 1 (1)
- len += sprintf(buf+len,"intraBSSRelay: 0x%04X\n", lp->intraBSSRelay );
- len += sprintf(buf+len,"wlags49_type: 0x%08lX\n", lp->wlags49_type );
+ seq_printf(m, "DTIMPeriod: 0x%04X\n", lp->DTIMPeriod ); // 1 - 255 (1)
+ seq_printf(m, "multicastPMBuffering: 0x%04X\n", lp->multicastPMBuffering );
+ seq_printf(m, "RejectAny: 0x%04X\n", lp->RejectAny ); // 0 - 1 (0)
+ seq_printf(m, "ExcludeUnencrypted: 0x%04X\n", lp->ExcludeUnencrypted ); // 0 - 1 (1)
+ seq_printf(m, "intraBSSRelay: 0x%04X\n", lp->intraBSSRelay );
+ seq_printf(m, "wlags49_type: 0x%08lX\n", lp->wlags49_type );
#ifdef USE_WDS
//x WVLAN_WDS_IF wds_port[NUM_WDS_PORTS];
#endif // USE_WDS
#endif // HCF_AP
} else if ( lp->wlags49_type == 2 ){
- len += sprintf(buf+len,"tallies to be added\n" );
+ seq_printf(m, "tallies to be added\n" );
//Hermes Tallies (IFB substructure) {
- p = &lp->hcfCtx.IFB_NIC_Tallies;
- len += sprintf(buf+len,"TxUnicastFrames: %08lX\n", p->TxUnicastFrames );
- len += sprintf(buf+len,"TxMulticastFrames: %08lX\n", p->TxMulticastFrames );
- len += sprintf(buf+len,"TxFragments: %08lX\n", p->TxFragments );
- len += sprintf(buf+len,"TxUnicastOctets: %08lX\n", p->TxUnicastOctets );
- len += sprintf(buf+len,"TxMulticastOctets: %08lX\n", p->TxMulticastOctets );
- len += sprintf(buf+len,"TxDeferredTransmissions: %08lX\n", p->TxDeferredTransmissions );
- len += sprintf(buf+len,"TxSingleRetryFrames: %08lX\n", p->TxSingleRetryFrames );
- len += sprintf(buf+len,"TxMultipleRetryFrames: %08lX\n", p->TxMultipleRetryFrames );
- len += sprintf(buf+len,"TxRetryLimitExceeded: %08lX\n", p->TxRetryLimitExceeded );
- len += sprintf(buf+len,"TxDiscards: %08lX\n", p->TxDiscards );
- len += sprintf(buf+len,"RxUnicastFrames: %08lX\n", p->RxUnicastFrames );
- len += sprintf(buf+len,"RxMulticastFrames: %08lX\n", p->RxMulticastFrames );
- len += sprintf(buf+len,"RxFragments: %08lX\n", p->RxFragments );
- len += sprintf(buf+len,"RxUnicastOctets: %08lX\n", p->RxUnicastOctets );
- len += sprintf(buf+len,"RxMulticastOctets: %08lX\n", p->RxMulticastOctets );
- len += sprintf(buf+len,"RxFCSErrors: %08lX\n", p->RxFCSErrors );
- len += sprintf(buf+len,"RxDiscardsNoBuffer: %08lX\n", p->RxDiscardsNoBuffer );
- len += sprintf(buf+len,"TxDiscardsWrongSA: %08lX\n", p->TxDiscardsWrongSA );
- len += sprintf(buf+len,"RxWEPUndecryptable: %08lX\n", p->RxWEPUndecryptable );
- len += sprintf(buf+len,"RxMsgInMsgFragments: %08lX\n", p->RxMsgInMsgFragments );
- len += sprintf(buf+len,"RxMsgInBadMsgFragments: %08lX\n", p->RxMsgInBadMsgFragments );
- len += sprintf(buf+len,"RxDiscardsWEPICVError: %08lX\n", p->RxDiscardsWEPICVError );
- len += sprintf(buf+len,"RxDiscardsWEPExcluded: %08lX\n", p->RxDiscardsWEPExcluded );
+ p = &lp->hcfCtx.IFB_NIC_Tallies;
+ seq_printf(m, "TxUnicastFrames: %08lX\n", p->TxUnicastFrames );
+ seq_printf(m, "TxMulticastFrames: %08lX\n", p->TxMulticastFrames );
+ seq_printf(m, "TxFragments: %08lX\n", p->TxFragments );
+ seq_printf(m, "TxUnicastOctets: %08lX\n", p->TxUnicastOctets );
+ seq_printf(m, "TxMulticastOctets: %08lX\n", p->TxMulticastOctets );
+ seq_printf(m, "TxDeferredTransmissions: %08lX\n", p->TxDeferredTransmissions );
+ seq_printf(m, "TxSingleRetryFrames: %08lX\n", p->TxSingleRetryFrames );
+ seq_printf(m, "TxMultipleRetryFrames: %08lX\n", p->TxMultipleRetryFrames );
+ seq_printf(m, "TxRetryLimitExceeded: %08lX\n", p->TxRetryLimitExceeded );
+ seq_printf(m, "TxDiscards: %08lX\n", p->TxDiscards );
+ seq_printf(m, "RxUnicastFrames: %08lX\n", p->RxUnicastFrames );
+ seq_printf(m, "RxMulticastFrames: %08lX\n", p->RxMulticastFrames );
+ seq_printf(m, "RxFragments: %08lX\n", p->RxFragments );
+ seq_printf(m, "RxUnicastOctets: %08lX\n", p->RxUnicastOctets );
+ seq_printf(m, "RxMulticastOctets: %08lX\n", p->RxMulticastOctets );
+ seq_printf(m, "RxFCSErrors: %08lX\n", p->RxFCSErrors );
+ seq_printf(m, "RxDiscardsNoBuffer: %08lX\n", p->RxDiscardsNoBuffer );
+ seq_printf(m, "TxDiscardsWrongSA: %08lX\n", p->TxDiscardsWrongSA );
+ seq_printf(m, "RxWEPUndecryptable: %08lX\n", p->RxWEPUndecryptable );
+ seq_printf(m, "RxMsgInMsgFragments: %08lX\n", p->RxMsgInMsgFragments );
+ seq_printf(m, "RxMsgInBadMsgFragments: %08lX\n", p->RxMsgInBadMsgFragments );
+ seq_printf(m, "RxDiscardsWEPICVError: %08lX\n", p->RxDiscardsWEPICVError );
+ seq_printf(m, "RxDiscardsWEPExcluded: %08lX\n", p->RxDiscardsWEPExcluded );
#if (HCF_EXT) & HCF_EXT_TALLIES_FW
- //to be added ;?
+ //to be added ;?
#endif // HCF_EXT_TALLIES_FW
} else if ( lp->wlags49_type & 0x8000 ) { //;?kludgy but it is unclear to me were else to place this
#if DBG
@@ -3761,27 +3760,19 @@ int scull_read_procmem(char *buf, char **start, off_t offset, int len, int *eof,
#endif // DBG
lp->wlags49_type = 0; //default to IFB again ;?
} else {
- len += sprintf(buf+len,"unknown value for wlags49_type: 0x%08lX\n", lp->wlags49_type );
- len += sprintf(buf+len,"0x0000 - IFB\n" );
- len += sprintf(buf+len,"0x0001 - wl_private\n" );
- len += sprintf(buf+len,"0x0002 - Tallies\n" );
- len += sprintf(buf+len,"0x8xxx - Change debufflag\n" );
- len += sprintf(buf+len,"ERROR 0001\nWARNING 0002\nNOTICE 0004\nTRACE 0008\n" );
- len += sprintf(buf+len,"VERBOSE 0010\nPARAM 0020\nBREAK 0040\nRX 0100\n" );
- len += sprintf(buf+len,"TX 0200\nDS 0400\n" );
- }
- return len;
+ seq_printf(m, "unknown value for wlags49_type: 0x%08lX\n", lp->wlags49_type );
+ seq_puts(m,
+ "0x0000 - IFB\n"
+ "0x0001 - wl_private\n"
+ "0x0002 - Tallies\n"
+			 "0x8xxx - Change debugflag\n"
+ "ERROR 0001\nWARNING 0002\nNOTICE 0004\nTRACE 0008\n"
+ "VERBOSE 0010\nPARAM 0020\nBREAK 0040\nRX 0100\n"
+ "TX 0200\nDS 0400\n");
+ }
+ return 0;
} // scull_read_procmem
-static void proc_write(const char *name, write_proc_t *w, void *data)
-{
- struct proc_dir_entry * entry = create_proc_entry(name, S_IFREG | S_IWUSR, NULL);
- if (entry) {
- entry->write_proc = w;
- entry->data = data;
- }
-} // proc_write
-
static int write_int(struct file *file, const char *buffer, unsigned long count, void *data)
{
static char proc_number[11];
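The hunk above replaces a legacy read_proc-style handler (manual sprintf() into a caller-supplied page with offset/length/*eof bookkeeping) with a seq_file show routine: output goes through seq_printf()/seq_puts() and the function returns 0 rather than a byte count. A minimal sketch of the target pattern, using hypothetical names, looks like this:

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Minimal sketch (hypothetical names): a /proc show routine in the
 * seq_file style.  seq_printf() does all buffer and offset bookkeeping,
 * so the routine only emits text and returns 0 on success.
 */
static int example_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m, "counter: %d\n", 42);
        return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, example_proc_show, NULL);
}

static const struct file_operations example_proc_fops = {
        .open    = example_proc_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

The entry itself is then registered with proc_create("example", 0, NULL, &example_proc_fops) instead of create_proc_entry()/create_proc_read_entry().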
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 2d444b1..7c90814 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -79,11 +79,10 @@ static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
}
-static int tcm_loop_proc_info(struct Scsi_Host *host, char *buffer,
- char **start, off_t offset,
- int length, int inout)
+static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
{
- return sprintf(buffer, "tcm_loop_proc_info()\n");
+ seq_printf(m, "tcm_loop_proc_info()\n");
+ return 0;
}
static int tcm_loop_driver_probe(struct device *);
@@ -336,7 +335,7 @@ static int tcm_loop_slave_configure(struct scsi_device *sd)
}
static struct scsi_host_template tcm_loop_driver_template = {
- .proc_info = tcm_loop_proc_info,
+ .show_info = tcm_loop_show_info,
.proc_name = "tcm_loopback",
.name = "TCM_Loopback",
.queuecommand = tcm_loop_queuecommand,
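The tcm_loop change is the host-template side of the same conversion: the legacy .proc_info hook (one function handling both read and write through a raw buffer) is split into .show_info, which emits through a seq_file, and an optional .write_info for data written to /proc/scsi/<proc_name>/<host_no>. A hedged sketch for a hypothetical driver:

#include <linux/seq_file.h>
#include <scsi/scsi_host.h>

static int mydrv_show_info(struct seq_file *m, struct Scsi_Host *host)
{
        /* Emit whatever the driver wants to expose for this host. */
        seq_printf(m, "mydrv attached to scsi%d\n", host->host_no);
        return 0;
}

static int mydrv_write_info(struct Scsi_Host *host, char *buffer, int length)
{
        /* Accept and ignore writes; return the length consumed. */
        return length;
}

static struct scsi_host_template mydrv_template = {
        .name       = "mydrv",
        .proc_name  = "mydrv",
        .show_info  = mydrv_show_info,
        .write_info = mydrv_write_info,
        /* remaining mandatory template fields omitted in this sketch */
};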
diff --git a/drivers/thermal/exynos_thermal.c b/drivers/thermal/exynos_thermal.c
index 46568c0..b777ae6 100644
--- a/drivers/thermal/exynos_thermal.c
+++ b/drivers/thermal/exynos_thermal.c
@@ -39,8 +39,6 @@
#include <linux/cpu_cooling.h>
#include <linux/of.h>
-#include <plat/cpu.h>
-
/* Exynos generic registers */
#define EXYNOS_TMU_REG_TRIMINFO 0x0
#define EXYNOS_TMU_REG_CONTROL 0x20
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index b2e9e17..8ab70a6 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -267,7 +267,7 @@ static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
}
}
-static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
+static void pl011_dma_probe_initcall(struct device *dev, struct uart_amba_port *uap)
{
/* DMA is the sole user of the platform data right now */
struct amba_pl011_data *plat = uap->port.dev->platform_data;
@@ -281,20 +281,25 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
struct dma_chan *chan;
dma_cap_mask_t mask;
- /* We need platform data */
- if (!plat || !plat->dma_filter) {
- dev_info(uap->port.dev, "no DMA platform data\n");
- return;
- }
+ chan = dma_request_slave_channel(dev, "tx");
- /* Try to acquire a generic DMA engine slave TX channel */
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- chan = dma_request_channel(mask, plat->dma_filter, plat->dma_tx_param);
if (!chan) {
- dev_err(uap->port.dev, "no TX DMA channel!\n");
- return;
+ /* We need platform data */
+ if (!plat || !plat->dma_filter) {
+ dev_info(uap->port.dev, "no DMA platform data\n");
+ return;
+ }
+
+ /* Try to acquire a generic DMA engine slave TX channel */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ chan = dma_request_channel(mask, plat->dma_filter,
+ plat->dma_tx_param);
+ if (!chan) {
+ dev_err(uap->port.dev, "no TX DMA channel!\n");
+ return;
+ }
}
dmaengine_slave_config(chan, &tx_conf);
@@ -304,7 +309,18 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
dma_chan_name(uap->dmatx.chan));
/* Optionally make use of an RX channel as well */
- if (plat->dma_rx_param) {
+ chan = dma_request_slave_channel(dev, "rx");
+
+ if (!chan && plat->dma_rx_param) {
+ chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
+
+ if (!chan) {
+ dev_err(uap->port.dev, "no RX DMA channel!\n");
+ return;
+ }
+ }
+
+ if (chan) {
struct dma_slave_config rx_conf = {
.src_addr = uap->port.mapbase + UART01x_DR,
.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
@@ -313,12 +329,6 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
.device_fc = false,
};
- chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
- if (!chan) {
- dev_err(uap->port.dev, "no RX DMA channel!\n");
- return;
- }
-
dmaengine_slave_config(chan, &rx_conf);
uap->dmarx.chan = chan;
@@ -360,6 +370,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
struct dma_uap {
struct list_head node;
struct uart_amba_port *uap;
+ struct device *dev;
};
static LIST_HEAD(pl011_dma_uarts);
@@ -370,7 +381,7 @@ static int __init pl011_dma_initcall(void)
list_for_each_safe(node, tmp, &pl011_dma_uarts) {
struct dma_uap *dmau = list_entry(node, struct dma_uap, node);
- pl011_dma_probe_initcall(dmau->uap);
+ pl011_dma_probe_initcall(dmau->dev, dmau->uap);
list_del(node);
kfree(dmau);
}
@@ -379,18 +390,19 @@ static int __init pl011_dma_initcall(void)
device_initcall(pl011_dma_initcall);
-static void pl011_dma_probe(struct uart_amba_port *uap)
+static void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap)
{
struct dma_uap *dmau = kzalloc(sizeof(struct dma_uap), GFP_KERNEL);
if (dmau) {
dmau->uap = uap;
+ dmau->dev = dev;
list_add_tail(&dmau->node, &pl011_dma_uarts);
}
}
#else
-static void pl011_dma_probe(struct uart_amba_port *uap)
+static void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap)
{
- pl011_dma_probe_initcall(uap);
+ pl011_dma_probe_initcall(dev, uap);
}
#endif
@@ -1096,7 +1108,7 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
#else
/* Blank functions if the DMA engine is not available */
-static inline void pl011_dma_probe(struct uart_amba_port *uap)
+static inline void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap)
{
}
@@ -2155,7 +2167,7 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
uap->port.ops = &amba_pl011_pops;
uap->port.flags = UPF_BOOT_AUTOCONF;
uap->port.line = i;
- pl011_dma_probe(uap);
+ pl011_dma_probe(&dev->dev, uap);
/* Ensure interrupts from this UART are masked and cleared */
writew(0, uap->port.membase + UART011_IMSC);
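The pl011 hunks above switch the driver to dma_request_slave_channel(), which resolves the channel from the device's DT/ACPI description ("dmas"/"dma-names"), and keep the old filter-based dma_request_channel() path only as a fallback for boards that still pass platform data. The try-DT-first shape, sketched for a hypothetical driver with a hypothetical my_platform_data:

#include <linux/dmaengine.h>

static struct dma_chan *mydrv_get_tx_chan(struct device *dev,
                                           struct my_platform_data *plat)
{
        dma_cap_mask_t mask;
        struct dma_chan *chan;

        /* Preferred path: channel named "tx" in the device's DT node. */
        chan = dma_request_slave_channel(dev, "tx");
        if (chan)
                return chan;

        /* Legacy path: board file supplied a filter and channel cookie. */
        if (!plat || !plat->dma_filter)
                return NULL;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        return dma_request_channel(mask, plat->dma_filter, plat->dma_tx_param);
}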
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 62e7d3b..4f5f161 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -35,7 +35,7 @@
#include <linux/pinctrl/consumer.h>
#include <linux/of_device.h>
#include <linux/dma-mapping.h>
-#include <linux/fsl/mxs-dma.h>
+#include <linux/dmaengine.h>
#include <asm/cacheflush.h>
@@ -148,11 +148,6 @@ struct mxs_auart_port {
struct device *dev;
/* for DMA */
- struct mxs_dma_data dma_data;
- int dma_channel_rx, dma_channel_tx;
- int dma_irq_rx, dma_irq_tx;
- int dma_channel;
-
struct scatterlist tx_sgl;
struct dma_chan *tx_dma_chan;
void *tx_dma_buf;
@@ -440,20 +435,6 @@ static u32 mxs_auart_get_mctrl(struct uart_port *u)
return mctrl;
}
-static bool mxs_auart_dma_filter(struct dma_chan *chan, void *param)
-{
- struct mxs_auart_port *s = param;
-
- if (!mxs_dma_is_apbx(chan))
- return false;
-
- if (s->dma_channel == chan->chan_id) {
- chan->private = &s->dma_data;
- return true;
- }
- return false;
-}
-
static int mxs_auart_dma_prep_rx(struct mxs_auart_port *s);
static void dma_rx_callback(void *arg)
{
@@ -545,21 +526,11 @@ static void mxs_auart_dma_exit(struct mxs_auart_port *s)
static int mxs_auart_dma_init(struct mxs_auart_port *s)
{
- dma_cap_mask_t mask;
-
if (auart_dma_enabled(s))
return 0;
- /* We do not get the right DMA channels. */
- if (s->dma_channel_rx == -1 || s->dma_channel_tx == -1)
- return -EINVAL;
-
/* init for RX */
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- s->dma_channel = s->dma_channel_rx;
- s->dma_data.chan_irq = s->dma_irq_rx;
- s->rx_dma_chan = dma_request_channel(mask, mxs_auart_dma_filter, s);
+ s->rx_dma_chan = dma_request_slave_channel(s->dev, "rx");
if (!s->rx_dma_chan)
goto err_out;
s->rx_dma_buf = kzalloc(UART_XMIT_SIZE, GFP_KERNEL | GFP_DMA);
@@ -567,9 +538,7 @@ static int mxs_auart_dma_init(struct mxs_auart_port *s)
goto err_out;
/* init for TX */
- s->dma_channel = s->dma_channel_tx;
- s->dma_data.chan_irq = s->dma_irq_tx;
- s->tx_dma_chan = dma_request_channel(mask, mxs_auart_dma_filter, s);
+ s->tx_dma_chan = dma_request_slave_channel(s->dev, "tx");
if (!s->tx_dma_chan)
goto err_out;
s->tx_dma_buf = kzalloc(UART_XMIT_SIZE, GFP_KERNEL | GFP_DMA);
@@ -1020,7 +989,6 @@ static int serial_mxs_probe_dt(struct mxs_auart_port *s,
struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- u32 dma_channel[2];
int ret;
if (!np)
@@ -1034,20 +1002,8 @@ static int serial_mxs_probe_dt(struct mxs_auart_port *s,
}
s->port.line = ret;
- s->dma_irq_rx = platform_get_irq(pdev, 1);
- s->dma_irq_tx = platform_get_irq(pdev, 2);
+ s->flags |= MXS_AUART_DMA_CONFIG;
- ret = of_property_read_u32_array(np, "fsl,auart-dma-channel",
- dma_channel, 2);
- if (ret == 0) {
- s->dma_channel_rx = dma_channel[0];
- s->dma_channel_tx = dma_channel[1];
-
- s->flags |= MXS_AUART_DMA_CONFIG;
- } else {
- s->dma_channel_rx = -1;
- s->dma_channel_tx = -1;
- }
return 0;
}
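mxs-auart makes the same switch and drops its private filter plus the fsl,auart-dma-channel/IRQ plumbing entirely: dma_request_slave_channel() simply returns NULL when the device tree describes no channels, and the driver then stays on PIO. A hedged sketch of that optional-DMA pattern (hypothetical names):

#include <linux/device.h>
#include <linux/dmaengine.h>

struct mydrv_port {
        struct dma_chan *rx_chan;
        struct dma_chan *tx_chan;
};

static int mydrv_dma_init(struct device *dev, struct mydrv_port *s)
{
        s->rx_chan = dma_request_slave_channel(dev, "rx");
        s->tx_chan = dma_request_slave_channel(dev, "tx");

        if (!s->rx_chan || !s->tx_chan) {
                /* No (or incomplete) "dmas" description: fall back to PIO. */
                if (s->rx_chan)
                        dma_release_channel(s->rx_chan);
                if (s->tx_chan)
                        dma_release_channel(s->tx_chan);
                s->rx_chan = s->tx_chan = NULL;
                dev_info(dev, "DMA channels not available, using PIO\n");
                return -ENODEV;
        }
        return 0;
}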
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 8fbb6d2..f87dbfd 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1711,7 +1711,7 @@ static int uart_proc_show(struct seq_file *m, void *v)
static int uart_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, uart_proc_show, PDE(inode)->data);
+ return single_open(file, uart_proc_show, PDE_DATA(inode));
}
static const struct file_operations uart_proc_fops = {
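Several of the remaining hunks are the same mechanical substitution: PDE(inode)->data becomes PDE_DATA(inode), which returns the ->data pointer registered with proc_create_data() without poking at struct proc_dir_entry internals. The pairing looks like this (hypothetical mydev type):

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct mydev {
        unsigned long irq_count;
};

static int mydev_proc_show(struct seq_file *m, void *v)
{
        struct mydev *dev = m->private;         /* set by single_open() */

        seq_printf(m, "irqs: %lu\n", dev->irq_count);
        return 0;
}

static int mydev_proc_open(struct inode *inode, struct file *file)
{
        /* PDE_DATA() recovers the pointer passed to proc_create_data(). */
        return single_open(file, mydev_proc_show, PDE_DATA(inode));
}

Registration passes the device as the data argument, e.g. proc_create_data("driver/mydev", 0, NULL, &fops, dev), so PDE_DATA(inode) hands the same dev back at open time.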
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 451687c..0d84657 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -1592,6 +1592,7 @@ static int __init sunsu_init(void)
static void __exit sunsu_exit(void)
{
+ platform_driver_unregister(&su_driver);
if (sunsu_reg.nr)
sunserial_unregister_minors(&sunsu_reg, sunsu_reg.nr);
}
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index a690d64..073b938 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -221,7 +221,7 @@ static int proc_udc_show(struct seq_file *s, void *unused)
static int proc_udc_open(struct inode *inode, struct file *file)
{
- return single_open(file, proc_udc_show, PDE(inode)->data);
+ return single_open(file, proc_udc_show, PDE_DATA(inode));
}
static const struct file_operations proc_ops = {
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index c377ff8..f394f29 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -727,7 +727,6 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
}
static const struct file_operations ffs_ep0_operations = {
- .owner = THIS_MODULE,
.llseek = no_llseek,
.open = ffs_ep0_open,
@@ -947,7 +946,6 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
}
static const struct file_operations ffs_epfile_operations = {
- .owner = THIS_MODULE,
.llseek = no_llseek,
.open = ffs_epfile_open,
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index 7c2a101..a766a4c 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -2010,47 +2010,37 @@ static int fsl_udc_stop(struct usb_gadget *g,
static const char proc_filename[] = "driver/fsl_usb2_udc";
-static int fsl_proc_read(char *page, char **start, off_t off, int count,
- int *eof, void *_dev)
+static int fsl_proc_read(struct seq_file *m, void *v)
{
- char *buf = page;
- char *next = buf;
- unsigned size = count;
unsigned long flags;
- int t, i;
+ int i;
u32 tmp_reg;
struct fsl_ep *ep = NULL;
struct fsl_req *req;
struct fsl_udc *udc = udc_controller;
- if (off != 0)
- return 0;
spin_lock_irqsave(&udc->lock, flags);
/* ------basic driver information ---- */
- t = scnprintf(next, size,
+ seq_printf(m,
DRIVER_DESC "\n"
"%s version: %s\n"
"Gadget driver: %s\n\n",
driver_name, DRIVER_VERSION,
udc->driver ? udc->driver->driver.name : "(none)");
- size -= t;
- next += t;
/* ------ DR Registers ----- */
tmp_reg = fsl_readl(&dr_regs->usbcmd);
- t = scnprintf(next, size,
+ seq_printf(m,
"USBCMD reg:\n"
"SetupTW: %d\n"
"Run/Stop: %s\n\n",
(tmp_reg & USB_CMD_SUTW) ? 1 : 0,
(tmp_reg & USB_CMD_RUN_STOP) ? "Run" : "Stop");
- size -= t;
- next += t;
tmp_reg = fsl_readl(&dr_regs->usbsts);
- t = scnprintf(next, size,
+ seq_printf(m,
"USB Status Reg:\n"
"Dr Suspend: %d Reset Received: %d System Error: %s "
"USB Error Interrupt: %s\n\n",
@@ -2058,11 +2048,9 @@ static int fsl_proc_read(char *page, char **start, off_t off, int count,
(tmp_reg & USB_STS_RESET) ? 1 : 0,
(tmp_reg & USB_STS_SYS_ERR) ? "Err" : "Normal",
(tmp_reg & USB_STS_ERR) ? "Err detected" : "No err");
- size -= t;
- next += t;
tmp_reg = fsl_readl(&dr_regs->usbintr);
- t = scnprintf(next, size,
+ seq_printf(m,
"USB Interrupt Enable Reg:\n"
"Sleep Enable: %d SOF Received Enable: %d "
"Reset Enable: %d\n"
@@ -2076,33 +2064,25 @@ static int fsl_proc_read(char *page, char **start, off_t off, int count,
(tmp_reg & USB_INTR_PTC_DETECT_EN) ? 1 : 0,
(tmp_reg & USB_INTR_ERR_INT_EN) ? 1 : 0,
(tmp_reg & USB_INTR_INT_EN) ? 1 : 0);
- size -= t;
- next += t;
tmp_reg = fsl_readl(&dr_regs->frindex);
- t = scnprintf(next, size,
+ seq_printf(m,
"USB Frame Index Reg: Frame Number is 0x%x\n\n",
(tmp_reg & USB_FRINDEX_MASKS));
- size -= t;
- next += t;
tmp_reg = fsl_readl(&dr_regs->deviceaddr);
- t = scnprintf(next, size,
+ seq_printf(m,
"USB Device Address Reg: Device Addr is 0x%x\n\n",
(tmp_reg & USB_DEVICE_ADDRESS_MASK));
- size -= t;
- next += t;
tmp_reg = fsl_readl(&dr_regs->endpointlistaddr);
- t = scnprintf(next, size,
+ seq_printf(m,
"USB Endpoint List Address Reg: "
"Device Addr is 0x%x\n\n",
(tmp_reg & USB_EP_LIST_ADDRESS_MASK));
- size -= t;
- next += t;
tmp_reg = fsl_readl(&dr_regs->portsc1);
- t = scnprintf(next, size,
+ seq_printf(m,
"USB Port Status&Control Reg:\n"
"Port Transceiver Type : %s Port Speed: %s\n"
"PHY Low Power Suspend: %s Port Reset: %s "
@@ -2111,7 +2091,7 @@ static int fsl_proc_read(char *page, char **start, off_t off, int count,
"Port Enable/Disable Change: %s\n"
"Port Enabled/Disabled: %s "
"Current Connect Status: %s\n\n", ( {
- char *s;
+ const char *s;
switch (tmp_reg & PORTSCX_PTS_FSLS) {
case PORTSCX_PTS_UTMI:
s = "UTMI"; break;
@@ -2137,13 +2117,11 @@ static int fsl_proc_read(char *page, char **start, off_t off, int count,
"Not correct",
(tmp_reg & PORTSCX_CURRENT_CONNECT_STATUS) ?
"Attached" : "Not-Att");
- size -= t;
- next += t;
tmp_reg = fsl_readl(&dr_regs->usbmode);
- t = scnprintf(next, size,
+ seq_printf(m,
"USB Mode Reg: Controller Mode is: %s\n\n", ( {
- char *s;
+ const char *s;
switch (tmp_reg & USB_MODE_CTRL_MODE_HOST) {
case USB_MODE_CTRL_MODE_IDLE:
s = "Idle"; break;
@@ -2156,103 +2134,87 @@ static int fsl_proc_read(char *page, char **start, off_t off, int count,
}
s;
} ));
- size -= t;
- next += t;
tmp_reg = fsl_readl(&dr_regs->endptsetupstat);
- t = scnprintf(next, size,
+ seq_printf(m,
"Endpoint Setup Status Reg: SETUP on ep 0x%x\n\n",
(tmp_reg & EP_SETUP_STATUS_MASK));
- size -= t;
- next += t;
for (i = 0; i < udc->max_ep / 2; i++) {
tmp_reg = fsl_readl(&dr_regs->endptctrl[i]);
- t = scnprintf(next, size, "EP Ctrl Reg [0x%x]: = [0x%x]\n",
- i, tmp_reg);
- size -= t;
- next += t;
+ seq_printf(m, "EP Ctrl Reg [0x%x]: = [0x%x]\n", i, tmp_reg);
}
tmp_reg = fsl_readl(&dr_regs->endpointprime);
- t = scnprintf(next, size, "EP Prime Reg = [0x%x]\n\n", tmp_reg);
- size -= t;
- next += t;
+ seq_printf(m, "EP Prime Reg = [0x%x]\n\n", tmp_reg);
#ifndef CONFIG_ARCH_MXC
if (udc->pdata->have_sysif_regs) {
tmp_reg = usb_sys_regs->snoop1;
- t = scnprintf(next, size, "Snoop1 Reg : = [0x%x]\n\n", tmp_reg);
- size -= t;
- next += t;
+ seq_printf(m, "Snoop1 Reg : = [0x%x]\n\n", tmp_reg);
tmp_reg = usb_sys_regs->control;
- t = scnprintf(next, size, "General Control Reg : = [0x%x]\n\n",
- tmp_reg);
- size -= t;
- next += t;
+ seq_printf(m, "General Control Reg : = [0x%x]\n\n", tmp_reg);
}
#endif
/* ------fsl_udc, fsl_ep, fsl_request structure information ----- */
ep = &udc->eps[0];
- t = scnprintf(next, size, "For %s Maxpkt is 0x%x index is 0x%x\n",
+ seq_printf(m, "For %s Maxpkt is 0x%x index is 0x%x\n",
ep->ep.name, ep_maxpacket(ep), ep_index(ep));
- size -= t;
- next += t;
if (list_empty(&ep->queue)) {
- t = scnprintf(next, size, "its req queue is empty\n\n");
- size -= t;
- next += t;
+ seq_puts(m, "its req queue is empty\n\n");
} else {
list_for_each_entry(req, &ep->queue, queue) {
- t = scnprintf(next, size,
+ seq_printf(m,
"req %p actual 0x%x length 0x%x buf %p\n",
&req->req, req->req.actual,
req->req.length, req->req.buf);
- size -= t;
- next += t;
}
}
/* other gadget->eplist ep */
list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
if (ep->ep.desc) {
- t = scnprintf(next, size,
+ seq_printf(m,
"\nFor %s Maxpkt is 0x%x "
"index is 0x%x\n",
ep->ep.name, ep_maxpacket(ep),
ep_index(ep));
- size -= t;
- next += t;
if (list_empty(&ep->queue)) {
- t = scnprintf(next, size,
- "its req queue is empty\n\n");
- size -= t;
- next += t;
+ seq_puts(m, "its req queue is empty\n\n");
} else {
list_for_each_entry(req, &ep->queue, queue) {
- t = scnprintf(next, size,
+ seq_printf(m,
"req %p actual 0x%x length "
"0x%x buf %p\n",
&req->req, req->req.actual,
req->req.length, req->req.buf);
- size -= t;
- next += t;
- } /* end for each_entry of ep req */
- } /* end for else */
- } /* end for if(ep->queue) */
- } /* end (ep->desc) */
+ } /* end for each_entry of ep req */
+ } /* end for else */
+ } /* end for if(ep->queue) */
+ } /* end (ep->desc) */
spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+}
- *eof = 1;
- return count - size;
+/*
+ * seq_file wrappers for procfile show routines.
+ */
+static int fsl_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, fsl_proc_read, NULL);
}
-#define create_proc_file() create_proc_read_entry(proc_filename, \
- 0, NULL, fsl_proc_read, NULL)
+static const struct file_operations fsl_proc_fops = {
+ .open = fsl_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#define create_proc_file() proc_create(proc_filename, 0, NULL, &fsl_proc_fops)
#define remove_proc_file() remove_proc_entry(proc_filename, NULL)
#else /* !CONFIG_USB_GADGET_DEBUG_FILES */
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index 991aba3..52dd6cc 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -35,6 +35,7 @@
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
@@ -1005,7 +1006,7 @@ static const struct usb_gadget_ops goku_ops = {
/*-------------------------------------------------------------------------*/
-static inline char *dmastr(void)
+static inline const char *dmastr(void)
{
if (use_dma == 0)
return "(dma disabled)";
@@ -1022,13 +1023,10 @@ static const char proc_node_name [] = "driver/udc";
#define FOURBITS "%s%s%s%s"
#define EIGHTBITS FOURBITS FOURBITS
-static void
-dump_intmask(const char *label, u32 mask, char **next, unsigned *size)
+static void dump_intmask(struct seq_file *m, const char *label, u32 mask)
{
- int t;
-
/* int_status is the same format ... */
- t = scnprintf(*next, *size,
+ seq_printf(m,
"%s %05X =" FOURBITS EIGHTBITS EIGHTBITS "\n",
label, mask,
(mask & INT_PWRDETECT) ? " power" : "",
@@ -1055,33 +1053,23 @@ dump_intmask(const char *label, u32 mask, char **next, unsigned *size)
(mask & INT_ENDPOINT0) ? " ep0" : "",
(mask & INT_USBRESET) ? " reset" : "",
(mask & INT_SUSPEND) ? " suspend" : "");
- *size -= t;
- *next += t;
}
-static int
-udc_proc_read(char *buffer, char **start, off_t off, int count,
- int *eof, void *_dev)
+static int udc_proc_read(struct seq_file *m, void *v)
{
- char *buf = buffer;
- struct goku_udc *dev = _dev;
+ struct goku_udc *dev = m->private;
struct goku_udc_regs __iomem *regs = dev->regs;
- char *next = buf;
- unsigned size = count;
unsigned long flags;
- int i, t, is_usb_connected;
+ int i, is_usb_connected;
u32 tmp;
- if (off != 0)
- return 0;
-
local_irq_save(flags);
/* basic device status */
tmp = readl(&regs->power_detect);
is_usb_connected = tmp & PW_DETECT;
- t = scnprintf(next, size,
+ seq_printf(m,
"%s - %s\n"
"%s version: %s %s\n"
"Gadget driver: %s\n"
@@ -1093,7 +1081,7 @@ udc_proc_read(char *buffer, char **start, off_t off, int count,
is_usb_connected
? ((tmp & PW_PULLUP) ? "full speed" : "powered")
: "disconnected",
- ({char *state;
+ ({const char *state;
switch(dev->ep0state){
case EP0_DISCONNECT: state = "ep0_disconnect"; break;
case EP0_IDLE: state = "ep0_idle"; break;
@@ -1105,27 +1093,24 @@ udc_proc_read(char *buffer, char **start, off_t off, int count,
default: state = "ep0_?"; break;
} state; })
);
- size -= t;
- next += t;
- dump_intmask("int_status", readl(&regs->int_status), &next, &size);
- dump_intmask("int_enable", readl(&regs->int_enable), &next, &size);
+ dump_intmask(m, "int_status", readl(&regs->int_status));
+ dump_intmask(m, "int_enable", readl(&regs->int_enable));
if (!is_usb_connected || !dev->driver || (tmp & PW_PULLUP) == 0)
goto done;
/* registers for (active) device and ep0 */
- t = scnprintf(next, size, "\nirqs %lu\ndataset %02x "
+ if (seq_printf(m, "\nirqs %lu\ndataset %02x "
"single.bcs %02x.%02x state %x addr %u\n",
dev->irqs, readl(&regs->DataSet),
readl(&regs->EPxSingle), readl(&regs->EPxBCS),
readl(&regs->UsbState),
- readl(&regs->address));
- size -= t;
- next += t;
+ readl(&regs->address)) < 0)
+ goto done;
tmp = readl(&regs->dma_master);
- t = scnprintf(next, size,
+ if (seq_printf(m,
"dma %03X =" EIGHTBITS "%s %s\n", tmp,
(tmp & MST_EOPB_DIS) ? " eopb-" : "",
(tmp & MST_EOPB_ENA) ? " eopb+" : "",
@@ -1140,9 +1125,8 @@ udc_proc_read(char *buffer, char **start, off_t off, int count,
(tmp & MST_WR_ENA) ? " OUT" : "",
(tmp & MST_CONNECTION)
? "ep1in/ep2out"
- : "ep1out/ep2in");
- size -= t;
- next += t;
+ : "ep1out/ep2in") < 0)
+ goto done;
/* dump endpoint queues */
for (i = 0; i < 4; i++) {
@@ -1153,7 +1137,7 @@ udc_proc_read(char *buffer, char **start, off_t off, int count,
continue;
tmp = readl(ep->reg_status);
- t = scnprintf(next, size,
+ if (seq_printf(m,
"%s %s max %u %s, irqs %lu, "
"status %02x (%s) " FOURBITS "\n",
ep->ep.name,
@@ -1186,18 +1170,12 @@ udc_proc_read(char *buffer, char **start, off_t off, int count,
(tmp & EPxSTATUS_SUSPEND) ? " suspend" : "",
(tmp & EPxSTATUS_FIFO_DISABLE) ? " disable" : "",
(tmp & EPxSTATUS_STAGE_ERROR) ? " ep0stat" : ""
- );
- if (t <= 0 || t > size)
+ ) < 0)
goto done;
- size -= t;
- next += t;
if (list_empty(&ep->queue)) {
- t = scnprintf(next, size, "\t(nothing queued)\n");
- if (t <= 0 || t > size)
+ if (seq_puts(m, "\t(nothing queued)\n") < 0)
goto done;
- size -= t;
- next += t;
continue;
}
list_for_each_entry(req, &ep->queue, queue) {
@@ -1211,23 +1189,34 @@ udc_proc_read(char *buffer, char **start, off_t off, int count,
} else
tmp = req->req.actual;
- t = scnprintf(next, size,
+ if (seq_printf(m,
"\treq %p len %u/%u buf %p\n",
&req->req, tmp, req->req.length,
- req->req.buf);
- if (t <= 0 || t > size)
+ req->req.buf) < 0)
goto done;
- size -= t;
- next += t;
}
}
done:
local_irq_restore(flags);
- *eof = 1;
- return count - size;
+ return 0;
+}
+
+/*
+ * seq_file wrappers for procfile show routines.
+ */
+static int udc_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, udc_proc_read, PDE_DATA(file_inode(file)));
}
+static const struct file_operations udc_proc_fops = {
+ .open = udc_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
/*-------------------------------------------------------------------------*/
@@ -1796,7 +1785,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
- create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev);
+ proc_create_data(proc_node_name, 0, NULL, &udc_proc_fops, dev);
#endif
retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index e2b2e9c..570c005 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -24,6 +24,8 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
+#include <linux/mmu_context.h>
+#include <linux/aio.h>
#include <linux/device.h>
#include <linux/moduleparam.h>
@@ -513,6 +515,9 @@ static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
struct kiocb_priv {
struct usb_request *req;
struct ep_data *epdata;
+ struct kiocb *iocb;
+ struct mm_struct *mm;
+ struct work_struct work;
void *buf;
const struct iovec *iv;
unsigned long nr_segs;
@@ -528,7 +533,6 @@ static int ep_aio_cancel(struct kiocb *iocb, struct io_event *e)
local_irq_disable();
epdata = priv->epdata;
// spin_lock(&epdata->dev->lock);
- kiocbSetCancelled(iocb);
if (likely(epdata && epdata->ep && priv->req))
value = usb_ep_dequeue (epdata->ep, priv->req);
else
@@ -540,15 +544,12 @@ static int ep_aio_cancel(struct kiocb *iocb, struct io_event *e)
return value;
}
-static ssize_t ep_aio_read_retry(struct kiocb *iocb)
+static ssize_t ep_copy_to_user(struct kiocb_priv *priv)
{
- struct kiocb_priv *priv = iocb->private;
ssize_t len, total;
void *to_copy;
int i;
- /* we "retry" to get the right mm context for this: */
-
/* copy stuff into user buffers */
total = priv->actual;
len = 0;
@@ -568,9 +569,26 @@ static ssize_t ep_aio_read_retry(struct kiocb *iocb)
if (total == 0)
break;
}
+
+ return len;
+}
+
+static void ep_user_copy_worker(struct work_struct *work)
+{
+ struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
+ struct mm_struct *mm = priv->mm;
+ struct kiocb *iocb = priv->iocb;
+ size_t ret;
+
+ use_mm(mm);
+ ret = ep_copy_to_user(priv);
+ unuse_mm(mm);
+
+ /* completing the iocb can drop the ctx and mm, don't touch mm after */
+ aio_complete(iocb, ret, ret);
+
kfree(priv->buf);
kfree(priv);
- return len;
}
static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
@@ -596,14 +614,14 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
aio_complete(iocb, req->actual ? req->actual : req->status,
req->status);
} else {
- /* retry() won't report both; so we hide some faults */
+ /* ep_copy_to_user() won't report both; we hide some faults */
if (unlikely(0 != req->status))
DBG(epdata->dev, "%s fault %d len %d\n",
ep->name, req->status, req->actual);
priv->buf = req->buf;
priv->actual = req->actual;
- kick_iocb(iocb);
+ schedule_work(&priv->work);
}
spin_unlock(&epdata->dev->lock);
@@ -633,8 +651,10 @@ fail:
return value;
}
iocb->private = priv;
+ priv->iocb = iocb;
priv->iv = iv;
priv->nr_segs = nr_segs;
+ INIT_WORK(&priv->work, ep_user_copy_worker);
value = get_ready_ep(iocb->ki_filp->f_flags, epdata);
if (unlikely(value < 0)) {
@@ -642,10 +662,11 @@ fail:
goto fail;
}
- iocb->ki_cancel = ep_aio_cancel;
+ kiocb_set_cancel_fn(iocb, ep_aio_cancel);
get_ep(epdata);
priv->epdata = epdata;
priv->actual = 0;
+ priv->mm = current->mm; /* mm teardown waits for iocbs in exit_aio() */
/* each kiocb is coupled to one usb_request, but we can't
* allocate or submit those if the host disconnected.
@@ -674,7 +695,7 @@ fail:
kfree(priv);
put_ep(epdata);
} else
- value = (iv ? -EIOCBRETRY : -EIOCBQUEUED);
+ value = -EIOCBQUEUED;
return value;
}
@@ -692,7 +713,6 @@ ep_aio_read(struct kiocb *iocb, const struct iovec *iov,
if (unlikely(!buf))
return -ENOMEM;
- iocb->ki_retry = ep_aio_read_retry;
return ep_aio_rwtail(iocb, buf, iocb->ki_left, epdata, iov, nr_segs);
}
@@ -887,7 +907,6 @@ ep_open (struct inode *inode, struct file *fd)
/* used before endpoint configuration */
static const struct file_operations ep_config_operations = {
- .owner = THIS_MODULE,
.llseek = no_llseek,
.open = ep_open,
@@ -1940,7 +1959,6 @@ dev_open (struct inode *inode, struct file *fd)
}
static const struct file_operations dev_init_operations = {
- .owner = THIS_MODULE,
.llseek = no_llseek,
.open = dev_open,
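With the AIO retry infrastructure gone (-EIOCBRETRY, ki_retry, kick_iocb), the gadgetfs read path above completes the iocb from a workqueue instead: the USB request's completion handler schedules a work item, the worker temporarily adopts the submitting task's mm so copy_to_user() hits the right address space, then calls aio_complete(). A reduced sketch of that shape, with a hypothetical my_aio_work type:

#include <linux/aio.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>

struct my_aio_work {
        struct work_struct  work;
        struct kiocb       *iocb;
        struct mm_struct   *mm;         /* current->mm at submit time */
        void               *buf;        /* kernel bounce buffer */
        void __user        *ubuf;       /* user destination */
        size_t              len;
};

static void my_aio_copy_worker(struct work_struct *work)
{
        struct my_aio_work *w = container_of(work, struct my_aio_work, work);
        ssize_t ret = w->len;

        use_mm(w->mm);                  /* borrow the submitter's mm */
        if (copy_to_user(w->ubuf, w->buf, w->len))
                ret = -EFAULT;
        unuse_mm(w->mm);

        /* aio_complete() may drop the last reference on the AIO context
         * and mm, so nothing below may touch w->mm. */
        aio_complete(w->iocb, ret, ret);
        kfree(w->buf);
        kfree(w);
}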
diff --git a/drivers/usb/gadget/lpc32xx_udc.c b/drivers/usb/gadget/lpc32xx_udc.c
index b943d8c..67128be 100644
--- a/drivers/usb/gadget/lpc32xx_udc.c
+++ b/drivers/usb/gadget/lpc32xx_udc.c
@@ -565,7 +565,7 @@ static int proc_udc_show(struct seq_file *s, void *unused)
static int proc_udc_open(struct inode *inode, struct file *file)
{
- return single_open(file, proc_udc_show, PDE(inode)->data);
+ return single_open(file, proc_udc_show, PDE_DATA(inode));
}
static const struct file_operations proc_ops = {
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index d9297ee..1e4cfb0 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -1065,7 +1065,7 @@ static int rndis_proc_show(struct seq_file *m, void *v)
static ssize_t rndis_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
- rndis_params *p = PDE(file_inode(file))->data;
+ rndis_params *p = PDE_DATA(file_inode(file));
u32 speed = 0;
int i, fl_speed = 0;
@@ -1109,7 +1109,7 @@ static ssize_t rndis_proc_write(struct file *file, const char __user *buffer,
static int rndis_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, rndis_proc_show, PDE(inode)->data);
+ return single_open(file, rndis_proc_show, PDE_DATA(inode));
}
static const struct file_operations rndis_proc_fops = {
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 974480c..b04e8ec 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -2175,7 +2175,7 @@ static int proc_isp1362_show(struct seq_file *s, void *unused)
static int proc_isp1362_open(struct inode *inode, struct file *file)
{
- return single_open(file, proc_isp1362_show, PDE(inode)->data);
+ return single_open(file, proc_isp1362_show, PDE_DATA(inode));
}
static const struct file_operations proc_ops = {
@@ -2192,14 +2192,11 @@ static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
{
struct proc_dir_entry *pde;
- pde = create_proc_entry(proc_filename, 0, NULL);
+ pde = proc_create_data(proc_filename, 0, NULL, &proc_ops, isp1362_hcd);
if (pde == NULL) {
pr_warning("%s: Failed to create debug file '%s'\n", __func__, proc_filename);
return;
}
-
- pde->proc_fops = &proc_ops;
- pde->data = isp1362_hcd;
isp1362_hcd->pde = pde;
}
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index 114583a..07592c0 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -14,7 +14,7 @@
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/platform_data/usb-exynos.h>
+#include <linux/platform_data/usb-ohci-exynos.h>
#include <linux/usb/phy.h>
#include <linux/usb/samsung_usb_phy.h>
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 15ed7e8..ad4483e 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1494,7 +1494,7 @@ static int proc_sl811h_show(struct seq_file *s, void *unused)
static int proc_sl811h_open(struct inode *inode, struct file *file)
{
- return single_open(file, proc_sl811h_show, PDE(inode)->data);
+ return single_open(file, proc_sl811h_show, PDE_DATA(inode));
}
static const struct file_operations proc_ops = {
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 42ad2e6..b6ab515 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -407,8 +407,6 @@ static int yurex_release(struct inode *inode, struct file *file)
if (dev == NULL)
return -ENODEV;
- yurex_fasync(-1, file, 0);
-
/* decrement the count on our device */
kref_put(&dev->kref, yurex_delete);
return 0;
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 4faa982..92b05d9 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -437,22 +437,21 @@ void usb_stor_report_bus_reset(struct us_data *us)
* /proc/scsi/ functions
***********************************************************************/
+static int write_info(struct Scsi_Host *host, char *buffer, int length)
+{
+ /* if someone is sending us data, just throw it away */
+ return length;
+}
+
/* we use this macro to help us write into the buffer */
#undef SPRINTF
-#define SPRINTF(args...) \
- do { if (pos < buffer+length) pos += sprintf(pos, ## args); } while (0)
+#define SPRINTF(args...) seq_printf(m, ## args)
-static int proc_info (struct Scsi_Host *host, char *buffer,
- char **start, off_t offset, int length, int inout)
+static int show_info (struct seq_file *m, struct Scsi_Host *host)
{
struct us_data *us = host_to_us(host);
- char *pos = buffer;
const char *string;
- /* if someone is sending us data, just throw it away */
- if (inout)
- return length;
-
/* print the controller name */
SPRINTF(" Host scsi%d: usb-storage\n", host->host_no);
@@ -482,28 +481,14 @@ static int proc_info (struct Scsi_Host *host, char *buffer,
SPRINTF(" Transport: %s\n", us->transport_name);
/* show the device flags */
- if (pos < buffer + length) {
- pos += sprintf(pos, " Quirks:");
+ SPRINTF(" Quirks:");
#define US_FLAG(name, value) \
- if (us->fflags & value) pos += sprintf(pos, " " #name);
+ if (us->fflags & value) seq_printf(m, " " #name);
US_DO_ALL_FLAGS
#undef US_FLAG
-
- *(pos++) = '\n';
- }
-
- /*
- * Calculate start of next buffer, and return value.
- */
- *start = buffer + offset;
-
- if ((pos - buffer) < offset)
- return (0);
- else if ((pos - buffer - offset) < length)
- return (pos - buffer - offset);
- else
- return (length);
+ seq_putc(m, '\n');
+ return 0;
}
/***********************************************************************
@@ -548,7 +533,8 @@ struct scsi_host_template usb_stor_host_template = {
/* basic userland interface stuff */
.name = "usb-storage",
.proc_name = "usb-storage",
- .proc_info = proc_info,
+ .show_info = show_info,
+ .write_info = write_info,
.info = host_info,
/* command interface -- queued only */
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 09d2e3f..ac37254 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -201,7 +201,9 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
}
- }
+ } else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX)
+ if (pci_is_pcie(vdev->pdev))
+ return 1;
return 0;
}
@@ -317,6 +319,17 @@ static long vfio_pci_ioctl(void *device_data,
if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
return -EINVAL;
+ switch (info.index) {
+ case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
+ break;
+ case VFIO_PCI_ERR_IRQ_INDEX:
+ if (pci_is_pcie(vdev->pdev))
+ break;
+ /* pass thru to return error */
+ default:
+ return -EINVAL;
+ }
+
info.flags = VFIO_IRQ_INFO_EVENTFD;
info.count = vfio_pci_get_irq_count(vdev, info.index);
@@ -552,11 +565,40 @@ static void vfio_pci_remove(struct pci_dev *pdev)
kfree(vdev);
}
+static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct vfio_pci_device *vdev;
+ struct vfio_device *device;
+
+ device = vfio_device_get_from_dev(&pdev->dev);
+ if (device == NULL)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ vdev = vfio_device_data(device);
+ if (vdev == NULL) {
+ vfio_device_put(device);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ if (vdev->err_trigger)
+ eventfd_signal(vdev->err_trigger, 1);
+
+ vfio_device_put(device);
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static struct pci_error_handlers vfio_err_handlers = {
+ .error_detected = vfio_pci_aer_err_detected,
+};
+
static struct pci_driver vfio_pci_driver = {
.name = "vfio-pci",
.id_table = NULL, /* only dynamic ids */
.probe = vfio_pci_probe,
.remove = vfio_pci_remove,
+ .err_handler = &vfio_err_handlers,
};
static void __exit vfio_pci_cleanup(void)
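On the user side, the new VFIO_PCI_ERR_IRQ_INDEX is armed like any other VFIO interrupt: a VFIO_DEVICE_SET_IRQS ioctl with ACTION_TRIGGER/DATA_EVENTFD hands the kernel an eventfd, which vfio_pci_aer_err_detected() then signals whenever AER reports an error on the assigned PCIe function. A hedged userspace sketch (assumes 'device' is an already-open VFIO device fd):

#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int arm_err_eventfd(int device)
{
        struct { struct vfio_irq_set set; int32_t fd; } irq;
        int efd = eventfd(0, EFD_CLOEXEC);

        if (efd < 0)
                return -1;

        memset(&irq, 0, sizeof(irq));
        irq.set.argsz = sizeof(irq);
        irq.set.flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
        irq.set.index = VFIO_PCI_ERR_IRQ_INDEX;
        irq.set.start = 0;
        irq.set.count = 1;
        irq.fd        = efd;            /* eventfd lands in the data[] payload */

        if (ioctl(device, VFIO_DEVICE_SET_IRQS, &irq) < 0)
                return -1;

        return efd;     /* read() on efd blocks until an error is signalled */
}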
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index aeb00fc..affa347 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -274,9 +274,10 @@ static int vfio_direct_config_read(struct vfio_pci_device *vdev, int pos,
return count;
}
-static int vfio_direct_config_write(struct vfio_pci_device *vdev, int pos,
- int count, struct perm_bits *perm,
- int offset, __le32 val)
+/* Raw access skips any kind of virtualization */
+static int vfio_raw_config_write(struct vfio_pci_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
{
int ret;
@@ -287,13 +288,36 @@ static int vfio_direct_config_write(struct vfio_pci_device *vdev, int pos,
return count;
}
-/* Default all regions to read-only, no-virtualization */
+static int vfio_raw_config_read(struct vfio_pci_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 *val)
+{
+ int ret;
+
+ ret = vfio_user_config_read(vdev->pdev, pos, val, count);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ return count;
+}
+
+/* Default capability regions to read-only, no-virtualization */
static struct perm_bits cap_perms[PCI_CAP_ID_MAX + 1] = {
[0 ... PCI_CAP_ID_MAX] = { .readfn = vfio_direct_config_read }
};
static struct perm_bits ecap_perms[PCI_EXT_CAP_ID_MAX + 1] = {
[0 ... PCI_EXT_CAP_ID_MAX] = { .readfn = vfio_direct_config_read }
};
+/*
+ * Default unassigned regions to raw read-write access. Some devices
+ * require this to function as they hide registers between the gaps in
+ * config space (be2net). Like MMIO and I/O port registers, we have
+ * to trust the hardware isolation.
+ */
+static struct perm_bits unassigned_perms = {
+ .readfn = vfio_raw_config_read,
+ .writefn = vfio_raw_config_write
+};
static void free_perm_bits(struct perm_bits *perm)
{
@@ -779,16 +803,16 @@ int __init vfio_pci_init_perm_bits(void)
/* Capabilities */
ret |= init_pci_cap_pm_perm(&cap_perms[PCI_CAP_ID_PM]);
- cap_perms[PCI_CAP_ID_VPD].writefn = vfio_direct_config_write;
+ cap_perms[PCI_CAP_ID_VPD].writefn = vfio_raw_config_write;
ret |= init_pci_cap_pcix_perm(&cap_perms[PCI_CAP_ID_PCIX]);
- cap_perms[PCI_CAP_ID_VNDR].writefn = vfio_direct_config_write;
+ cap_perms[PCI_CAP_ID_VNDR].writefn = vfio_raw_config_write;
ret |= init_pci_cap_exp_perm(&cap_perms[PCI_CAP_ID_EXP]);
ret |= init_pci_cap_af_perm(&cap_perms[PCI_CAP_ID_AF]);
/* Extended capabilities */
ret |= init_pci_ext_cap_err_perm(&ecap_perms[PCI_EXT_CAP_ID_ERR]);
ret |= init_pci_ext_cap_pwr_perm(&ecap_perms[PCI_EXT_CAP_ID_PWR]);
- ecap_perms[PCI_EXT_CAP_ID_VNDR].writefn = vfio_direct_config_write;
+ ecap_perms[PCI_EXT_CAP_ID_VNDR].writefn = vfio_raw_config_write;
if (ret)
vfio_pci_uninit_perm_bits();
@@ -801,9 +825,6 @@ static int vfio_find_cap_start(struct vfio_pci_device *vdev, int pos)
u8 cap;
int base = (pos >= PCI_CFG_SPACE_SIZE) ? PCI_CFG_SPACE_SIZE :
PCI_STD_HEADER_SIZEOF;
- base /= 4;
- pos /= 4;
-
cap = vdev->pci_config_map[pos];
if (cap == PCI_CAP_ID_BASIC)
@@ -813,7 +834,7 @@ static int vfio_find_cap_start(struct vfio_pci_device *vdev, int pos)
while (pos - 1 >= base && vdev->pci_config_map[pos - 1] == cap)
pos--;
- return pos * 4;
+ return pos;
}
static int vfio_msi_config_read(struct vfio_pci_device *vdev, int pos,
@@ -1017,13 +1038,9 @@ static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
return byte;
case PCI_CAP_ID_EXP:
/* length based on version */
- ret = pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &word);
- if (ret)
- return pcibios_err_to_errno(ret);
-
vdev->extended_caps = true;
- if ((word & PCI_EXP_FLAGS_VERS) == 1)
+ if ((pcie_caps_reg(pdev) & PCI_EXP_FLAGS_VERS) == 1)
return PCI_CAP_EXP_ENDPOINT_SIZEOF_V1;
else
return PCI_CAP_EXP_ENDPOINT_SIZEOF_V2;
@@ -1230,8 +1247,8 @@ static int vfio_cap_init(struct vfio_pci_device *vdev)
}
/* Sanity check, do we overlap other capabilities? */
- for (i = 0; i < len; i += 4) {
- if (likely(map[(pos + i) / 4] == PCI_CAP_ID_INVALID))
+ for (i = 0; i < len; i++) {
+ if (likely(map[pos + i] == PCI_CAP_ID_INVALID))
continue;
pr_warn("%s: %s pci config conflict @0x%x, was cap 0x%x now cap 0x%x\n",
@@ -1239,7 +1256,7 @@ static int vfio_cap_init(struct vfio_pci_device *vdev)
pos + i, map[pos + i], cap);
}
- memset(map + (pos / 4), cap, len / 4);
+ memset(map + pos, cap, len);
ret = vfio_fill_vconfig_bytes(vdev, pos, len);
if (ret)
return ret;
@@ -1314,8 +1331,8 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
hidden = true;
}
- for (i = 0; i < len; i += 4) {
- if (likely(map[(epos + i) / 4] == PCI_CAP_ID_INVALID))
+ for (i = 0; i < len; i++) {
+ if (likely(map[epos + i] == PCI_CAP_ID_INVALID))
continue;
pr_warn("%s: %s pci config conflict @0x%x, was ecap 0x%x now ecap 0x%x\n",
@@ -1330,7 +1347,7 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
*/
BUILD_BUG_ON(PCI_EXT_CAP_ID_MAX >= PCI_CAP_ID_INVALID);
- memset(map + (epos / 4), ecap, len / 4);
+ memset(map + epos, ecap, len);
ret = vfio_fill_vconfig_bytes(vdev, epos, len);
if (ret)
return ret;
@@ -1377,10 +1394,12 @@ int vfio_config_init(struct vfio_pci_device *vdev)
int ret;
/*
- * Config space, caps and ecaps are all dword aligned, so we can
- * use one byte per dword to record the type.
+ * Config space, caps and ecaps are all dword aligned, so we could
+ * use one byte per dword to record the type. However, there are
+ * no requirements on the length of a capability, so the gap between
+ * capabilities needs byte granularity.
*/
- map = kmalloc(pdev->cfg_size / 4, GFP_KERNEL);
+ map = kmalloc(pdev->cfg_size, GFP_KERNEL);
if (!map)
return -ENOMEM;
@@ -1393,9 +1412,9 @@ int vfio_config_init(struct vfio_pci_device *vdev)
vdev->pci_config_map = map;
vdev->vconfig = vconfig;
- memset(map, PCI_CAP_ID_BASIC, PCI_STD_HEADER_SIZEOF / 4);
- memset(map + (PCI_STD_HEADER_SIZEOF / 4), PCI_CAP_ID_INVALID,
- (pdev->cfg_size - PCI_STD_HEADER_SIZEOF) / 4);
+ memset(map, PCI_CAP_ID_BASIC, PCI_STD_HEADER_SIZEOF);
+ memset(map + PCI_STD_HEADER_SIZEOF, PCI_CAP_ID_INVALID,
+ pdev->cfg_size - PCI_STD_HEADER_SIZEOF);
ret = vfio_fill_vconfig_bytes(vdev, 0, PCI_STD_HEADER_SIZEOF);
if (ret)
@@ -1450,6 +1469,22 @@ void vfio_config_free(struct vfio_pci_device *vdev)
vdev->msi_perm = NULL;
}
+/*
+ * Find the remaining number of bytes in a dword that match the given
+ * position. Stop at either the end of the capability or the dword boundary.
+ */
+static size_t vfio_pci_cap_remaining_dword(struct vfio_pci_device *vdev,
+ loff_t pos)
+{
+ u8 cap = vdev->pci_config_map[pos];
+ size_t i;
+
+ for (i = 1; (pos + i) % 4 && vdev->pci_config_map[pos + i] == cap; i++)
+ /* nop */;
+
+ return i;
+}
+
static ssize_t vfio_config_do_rw(struct vfio_pci_device *vdev, char __user *buf,
size_t count, loff_t *ppos, bool iswrite)
{
@@ -1458,55 +1493,48 @@ static ssize_t vfio_config_do_rw(struct vfio_pci_device *vdev, char __user *buf,
__le32 val = 0;
int cap_start = 0, offset;
u8 cap_id;
- ssize_t ret = count;
+ ssize_t ret;
- if (*ppos < 0 || *ppos + count > pdev->cfg_size)
+ if (*ppos < 0 || *ppos >= pdev->cfg_size ||
+ *ppos + count > pdev->cfg_size)
return -EFAULT;
/*
- * gcc can't seem to figure out we're a static function, only called
- * with count of 1/2/4 and hits copy_from_user_overflow without this.
+ * Chop accesses into aligned chunks containing no more than a
+ * single capability. Caller increments to the next chunk.
*/
- if (count > sizeof(val))
- return -EINVAL;
-
- cap_id = vdev->pci_config_map[*ppos / 4];
-
- if (cap_id == PCI_CAP_ID_INVALID) {
- if (iswrite)
- return ret; /* drop */
-
- /*
- * Per PCI spec 3.0, section 6.1, reads from reserved and
- * unimplemented registers return 0
- */
- if (copy_to_user(buf, &val, count))
- return -EFAULT;
-
- return ret;
- }
+ count = min(count, vfio_pci_cap_remaining_dword(vdev, *ppos));
+ if (count >= 4 && !(*ppos % 4))
+ count = 4;
+ else if (count >= 2 && !(*ppos % 2))
+ count = 2;
+ else
+ count = 1;
- /*
- * All capabilities are minimum 4 bytes and aligned on dword
- * boundaries. Since we don't support unaligned accesses, we're
- * only ever accessing a single capability.
- */
- if (*ppos >= PCI_CFG_SPACE_SIZE) {
- WARN_ON(cap_id > PCI_EXT_CAP_ID_MAX);
+ ret = count;
- perm = &ecap_perms[cap_id];
- cap_start = vfio_find_cap_start(vdev, *ppos);
+ cap_id = vdev->pci_config_map[*ppos];
+ if (cap_id == PCI_CAP_ID_INVALID) {
+ perm = &unassigned_perms;
+ cap_start = *ppos;
} else {
- WARN_ON(cap_id > PCI_CAP_ID_MAX);
+ if (*ppos >= PCI_CFG_SPACE_SIZE) {
+ WARN_ON(cap_id > PCI_EXT_CAP_ID_MAX);
- perm = &cap_perms[cap_id];
+ perm = &ecap_perms[cap_id];
+ cap_start = vfio_find_cap_start(vdev, *ppos);
+ } else {
+ WARN_ON(cap_id > PCI_CAP_ID_MAX);
- if (cap_id == PCI_CAP_ID_MSI)
- perm = vdev->msi_perm;
+ perm = &cap_perms[cap_id];
- if (cap_id > PCI_CAP_ID_BASIC)
- cap_start = vfio_find_cap_start(vdev, *ppos);
+ if (cap_id == PCI_CAP_ID_MSI)
+ perm = vdev->msi_perm;
+
+ if (cap_id > PCI_CAP_ID_BASIC)
+ cap_start = vfio_find_cap_start(vdev, *ppos);
+ }
}
WARN_ON(!cap_start && cap_id != PCI_CAP_ID_BASIC);
@@ -1546,20 +1574,8 @@ ssize_t vfio_pci_config_rw(struct vfio_pci_device *vdev, char __user *buf,
pos &= VFIO_PCI_OFFSET_MASK;
- /*
- * We want to both keep the access size the caller users as well as
- * support reading large chunks of config space in a single call.
- * PCI doesn't support unaligned accesses, so we can safely break
- * those apart.
- */
while (count) {
- if (count >= 4 && !(pos % 4))
- ret = vfio_config_do_rw(vdev, buf, 4, &pos, iswrite);
- else if (count >= 2 && !(pos % 2))
- ret = vfio_config_do_rw(vdev, buf, 2, &pos, iswrite);
- else
- ret = vfio_config_do_rw(vdev, buf, 1, &pos, iswrite);
-
+ ret = vfio_config_do_rw(vdev, buf, count, &pos, iswrite);
if (ret < 0)
return ret;
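Concretely, the chunking above means a 7-byte read starting at config offset 0x41 (entirely inside one capability) is now served as three accesses: one byte at 0x41, two bytes at 0x42, then four bytes at 0x44. Each chunk is naturally aligned, never crosses a dword boundary, and never spans two capabilities, so each one is dispatched to exactly one perm_bits handler; the outer vfio_pci_config_rw() loop simply advances by however many bytes vfio_config_do_rw() consumed.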
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index a965091..4bc704e 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -287,7 +287,8 @@ void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
* a signal is necessary, which can then be handled via a work queue
* or directly depending on the caller.
*/
-int vfio_pci_intx_unmask_handler(struct vfio_pci_device *vdev, void *unused)
+static int vfio_pci_intx_unmask_handler(struct vfio_pci_device *vdev,
+ void *unused)
{
struct pci_dev *pdev = vdev->pdev;
unsigned long flags;
@@ -746,6 +747,63 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
return 0;
}
+static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
+ unsigned index, unsigned start,
+ unsigned count, uint32_t flags, void *data)
+{
+ int32_t fd = *(int32_t *)data;
+ struct pci_dev *pdev = vdev->pdev;
+
+ if ((index != VFIO_PCI_ERR_IRQ_INDEX) ||
+ !(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
+ return -EINVAL;
+
+ /*
+ * device_lock synchronizes setting and checking of
+ * err_trigger. The vfio_pci_aer_err_detected() is also
+ * called with device_lock held.
+ */
+
+ /* DATA_NONE/DATA_BOOL enables loopback testing */
+
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+ device_lock(&pdev->dev);
+ if (vdev->err_trigger)
+ eventfd_signal(vdev->err_trigger, 1);
+ device_unlock(&pdev->dev);
+ return 0;
+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ uint8_t trigger = *(uint8_t *)data;
+ device_lock(&pdev->dev);
+ if (trigger && vdev->err_trigger)
+ eventfd_signal(vdev->err_trigger, 1);
+ device_unlock(&pdev->dev);
+ return 0;
+ }
+
+ /* Handle SET_DATA_EVENTFD */
+
+ if (fd == -1) {
+ device_lock(&pdev->dev);
+ if (vdev->err_trigger)
+ eventfd_ctx_put(vdev->err_trigger);
+ vdev->err_trigger = NULL;
+ device_unlock(&pdev->dev);
+ return 0;
+ } else if (fd >= 0) {
+ struct eventfd_ctx *efdctx;
+ efdctx = eventfd_ctx_fdget(fd);
+ if (IS_ERR(efdctx))
+ return PTR_ERR(efdctx);
+ device_lock(&pdev->dev);
+ if (vdev->err_trigger)
+ eventfd_ctx_put(vdev->err_trigger);
+ vdev->err_trigger = efdctx;
+ device_unlock(&pdev->dev);
+ return 0;
+ } else
+ return -EINVAL;
+}
int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
unsigned index, unsigned start, unsigned count,
void *data)
@@ -780,6 +838,13 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
break;
}
break;
+ case VFIO_PCI_ERR_IRQ_INDEX:
+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ case VFIO_IRQ_SET_ACTION_TRIGGER:
+ if (pci_is_pcie(vdev->pdev))
+ func = vfio_pci_set_err_trigger;
+ break;
+ }
}
if (!func)
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index d7e55d0..9c6d5d0 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -56,6 +56,7 @@ struct vfio_pci_device {
bool has_vga;
struct pci_saved_state *pci_saved_state;
atomic_t refcnt;
+ struct eventfd_ctx *err_trigger;
};
#define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index fcc12f3..acb7121 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -24,8 +24,10 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/stat.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
@@ -57,7 +59,7 @@ struct vfio_iommu_driver {
struct vfio_container {
struct kref kref;
struct list_head group_list;
- struct mutex group_lock;
+ struct rw_semaphore group_lock;
struct vfio_iommu_driver *iommu_driver;
void *iommu_data;
};
@@ -392,12 +394,13 @@ static void vfio_device_release(struct kref *kref)
}
/* Device reference always implies a group reference */
-static void vfio_device_put(struct vfio_device *device)
+void vfio_device_put(struct vfio_device *device)
{
struct vfio_group *group = device->group;
kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock);
vfio_group_put(group);
}
+EXPORT_SYMBOL_GPL(vfio_device_put);
static void vfio_device_get(struct vfio_device *device)
{
@@ -627,6 +630,33 @@ int vfio_add_group_dev(struct device *dev,
}
EXPORT_SYMBOL_GPL(vfio_add_group_dev);
+/**
+ * Get a reference to the vfio_device for a device that is known to
+ * be bound to a vfio driver. The driver implicitly holds a
+ * vfio_device reference between vfio_add_group_dev and
+ * vfio_del_group_dev. We can therefore use drvdata to increment
+ * that reference from the struct device. This additional
+ * reference must be released by calling vfio_device_put.
+ */
+struct vfio_device *vfio_device_get_from_dev(struct device *dev)
+{
+ struct vfio_device *device = dev_get_drvdata(dev);
+
+ vfio_device_get(device);
+
+ return device;
+}
+EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);
+
+/*
+ * Caller must hold a reference to the vfio_device
+ */
+void *vfio_device_data(struct vfio_device *device)
+{
+ return device->device_data;
+}
+EXPORT_SYMBOL_GPL(vfio_device_data);
+
/* Given a referenced group, check if it contains the device */
static bool vfio_dev_present(struct vfio_group *group, struct device *dev)
{
@@ -675,9 +705,13 @@ EXPORT_SYMBOL_GPL(vfio_del_group_dev);
static long vfio_ioctl_check_extension(struct vfio_container *container,
unsigned long arg)
{
- struct vfio_iommu_driver *driver = container->iommu_driver;
+ struct vfio_iommu_driver *driver;
long ret = 0;
+ down_read(&container->group_lock);
+
+ driver = container->iommu_driver;
+
switch (arg) {
/* No base extensions yet */
default:
@@ -707,10 +741,12 @@ static long vfio_ioctl_check_extension(struct vfio_container *container,
VFIO_CHECK_EXTENSION, arg);
}
+ up_read(&container->group_lock);
+
return ret;
}
-/* hold container->group_lock */
+/* hold write lock on container->group_lock */
static int __vfio_container_attach_groups(struct vfio_container *container,
struct vfio_iommu_driver *driver,
void *data)
@@ -741,7 +777,7 @@ static long vfio_ioctl_set_iommu(struct vfio_container *container,
struct vfio_iommu_driver *driver;
long ret = -ENODEV;
- mutex_lock(&container->group_lock);
+ down_write(&container->group_lock);
/*
* The container is designed to be an unprivileged interface while
@@ -752,7 +788,7 @@ static long vfio_ioctl_set_iommu(struct vfio_container *container,
* the container is deprivileged and returns to an unset state.
*/
if (list_empty(&container->group_list) || container->iommu_driver) {
- mutex_unlock(&container->group_lock);
+ up_write(&container->group_lock);
return -EINVAL;
}
@@ -799,7 +835,7 @@ static long vfio_ioctl_set_iommu(struct vfio_container *container,
mutex_unlock(&vfio.iommu_drivers_lock);
skip_drivers_unlock:
- mutex_unlock(&container->group_lock);
+ up_write(&container->group_lock);
return ret;
}
@@ -815,9 +851,6 @@ static long vfio_fops_unl_ioctl(struct file *filep,
if (!container)
return ret;
- driver = container->iommu_driver;
- data = container->iommu_data;
-
switch (cmd) {
case VFIO_GET_API_VERSION:
ret = VFIO_API_VERSION;
@@ -829,8 +862,15 @@ static long vfio_fops_unl_ioctl(struct file *filep,
ret = vfio_ioctl_set_iommu(container, arg);
break;
default:
+ down_read(&container->group_lock);
+
+ driver = container->iommu_driver;
+ data = container->iommu_data;
+
if (driver) /* passthrough all unrecognized ioctls */
ret = driver->ops->ioctl(data, cmd, arg);
+
+ up_read(&container->group_lock);
}
return ret;
@@ -854,7 +894,7 @@ static int vfio_fops_open(struct inode *inode, struct file *filep)
return -ENOMEM;
INIT_LIST_HEAD(&container->group_list);
- mutex_init(&container->group_lock);
+ init_rwsem(&container->group_lock);
kref_init(&container->kref);
filep->private_data = container;
@@ -881,35 +921,55 @@ static ssize_t vfio_fops_read(struct file *filep, char __user *buf,
size_t count, loff_t *ppos)
{
struct vfio_container *container = filep->private_data;
- struct vfio_iommu_driver *driver = container->iommu_driver;
+ struct vfio_iommu_driver *driver;
+ ssize_t ret = -EINVAL;
- if (unlikely(!driver || !driver->ops->read))
- return -EINVAL;
+ down_read(&container->group_lock);
- return driver->ops->read(container->iommu_data, buf, count, ppos);
+ driver = container->iommu_driver;
+ if (likely(driver && driver->ops->read))
+ ret = driver->ops->read(container->iommu_data,
+ buf, count, ppos);
+
+ up_read(&container->group_lock);
+
+ return ret;
}
static ssize_t vfio_fops_write(struct file *filep, const char __user *buf,
size_t count, loff_t *ppos)
{
struct vfio_container *container = filep->private_data;
- struct vfio_iommu_driver *driver = container->iommu_driver;
+ struct vfio_iommu_driver *driver;
+ ssize_t ret = -EINVAL;
- if (unlikely(!driver || !driver->ops->write))
- return -EINVAL;
+ down_read(&container->group_lock);
- return driver->ops->write(container->iommu_data, buf, count, ppos);
+ driver = container->iommu_driver;
+ if (likely(driver && driver->ops->write))
+ ret = driver->ops->write(container->iommu_data,
+ buf, count, ppos);
+
+ up_read(&container->group_lock);
+
+ return ret;
}
static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
struct vfio_container *container = filep->private_data;
- struct vfio_iommu_driver *driver = container->iommu_driver;
+ struct vfio_iommu_driver *driver;
+ int ret = -EINVAL;
- if (unlikely(!driver || !driver->ops->mmap))
- return -EINVAL;
+ down_read(&container->group_lock);
- return driver->ops->mmap(container->iommu_data, vma);
+ driver = container->iommu_driver;
+ if (likely(driver && driver->ops->mmap))
+ ret = driver->ops->mmap(container->iommu_data, vma);
+
+ up_read(&container->group_lock);
+
+ return ret;
}
static const struct file_operations vfio_fops = {
@@ -933,7 +993,7 @@ static void __vfio_group_unset_container(struct vfio_group *group)
struct vfio_container *container = group->container;
struct vfio_iommu_driver *driver;
- mutex_lock(&container->group_lock);
+ down_write(&container->group_lock);
driver = container->iommu_driver;
if (driver)
@@ -951,7 +1011,7 @@ static void __vfio_group_unset_container(struct vfio_group *group)
container->iommu_data = NULL;
}
- mutex_unlock(&container->group_lock);
+ up_write(&container->group_lock);
vfio_container_put(container);
}
@@ -1011,7 +1071,7 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
container = f.file->private_data;
WARN_ON(!container); /* fget ensures we don't race vfio_release */
- mutex_lock(&container->group_lock);
+ down_write(&container->group_lock);
driver = container->iommu_driver;
if (driver) {
@@ -1029,7 +1089,7 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
atomic_inc(&group->container_users);
unlock_out:
- mutex_unlock(&container->group_lock);
+ up_write(&container->group_lock);
fdput(f);
return ret;
}
@@ -1300,6 +1360,9 @@ static const struct file_operations vfio_device_fops = {
*/
static char *vfio_devnode(struct device *dev, umode_t *mode)
{
+ if (MINOR(dev->devt) == 0)
+ *mode = S_IRUGO | S_IWUGO;
+
return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
}
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index bf24317..8b9226d 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -1,6 +1,7 @@
config VHOST_NET
tristate "Host kernel accelerator for virtio net"
depends on NET && EVENTFD && (TUN || !TUN) && (MACVTAP || !MACVTAP)
+ select VHOST_RING
---help---
This kernel module can be loaded in host kernel to accelerate
guest networking with virtio_net. Not to be confused with virtio_net
@@ -9,6 +10,17 @@ config VHOST_NET
To compile this driver as a module, choose M here: the module will
be called vhost_net.
-if STAGING
-source "drivers/vhost/Kconfig.tcm"
-endif
+config VHOST_SCSI
+ tristate "VHOST_SCSI TCM fabric driver"
+ depends on TARGET_CORE && EVENTFD && m
+ select VHOST_RING
+ default n
+ ---help---
+ Say M here to enable the vhost_scsi TCM fabric module
+ for use with virtio-scsi guests
+
+config VHOST_RING
+ tristate
+ ---help---
+ This option is selected by any driver which needs to access
+ the host side of a virtio ring.
diff --git a/drivers/vhost/Kconfig.tcm b/drivers/vhost/Kconfig.tcm
deleted file mode 100644
index 7e3aa28..0000000
--- a/drivers/vhost/Kconfig.tcm
+++ /dev/null
@@ -1,6 +0,0 @@
-config TCM_VHOST
- tristate "TCM_VHOST fabric module"
- depends on TARGET_CORE && EVENTFD && m
- default n
- ---help---
- Say M here to enable the TCM_VHOST fabric module for use with virtio-scsi guests
diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile
index a27b053..654e9afb 100644
--- a/drivers/vhost/Makefile
+++ b/drivers/vhost/Makefile
@@ -1,4 +1,7 @@
obj-$(CONFIG_VHOST_NET) += vhost_net.o
vhost_net-y := vhost.o net.o
-obj-$(CONFIG_TCM_VHOST) += tcm_vhost.o
+obj-$(CONFIG_VHOST_SCSI) += vhost_scsi.o
+vhost_scsi-y := scsi.o
+
+obj-$(CONFIG_VHOST_RING) += vringh.o
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 87c216c..2b51e23 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -59,14 +59,46 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
#define VHOST_DMA_IS_DONE(len) ((len) >= VHOST_DMA_DONE_LEN)
enum {
+ VHOST_NET_FEATURES = VHOST_FEATURES |
+ (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
+ (1ULL << VIRTIO_NET_F_MRG_RXBUF),
+};
+
+enum {
VHOST_NET_VQ_RX = 0,
VHOST_NET_VQ_TX = 1,
VHOST_NET_VQ_MAX = 2,
};
+struct vhost_net_ubuf_ref {
+ struct kref kref;
+ wait_queue_head_t wait;
+ struct vhost_virtqueue *vq;
+};
+
+struct vhost_net_virtqueue {
+ struct vhost_virtqueue vq;
+ /* hdr is used to store the virtio header.
+ * Since each iovec has >= 1 byte length, we never need more than
+ * header length entries to store the header. */
+ struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
+ size_t vhost_hlen;
+ size_t sock_hlen;
+ /* vhost zerocopy support fields below: */
+ /* last used idx for outstanding DMA zerocopy buffers */
+ int upend_idx;
+ /* first used idx for DMA done zerocopy buffers */
+ int done_idx;
+ /* an array of userspace buffers info */
+ struct ubuf_info *ubuf_info;
+ /* Reference counting for outstanding ubufs.
+ * Protected by vq mutex. Writers must also take device mutex. */
+ struct vhost_net_ubuf_ref *ubufs;
+};
+
struct vhost_net {
struct vhost_dev dev;
- struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX];
+ struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
struct vhost_poll poll[VHOST_NET_VQ_MAX];
/* Number of TX recently submitted.
* Protected by tx vq lock. */
@@ -78,6 +110,104 @@ struct vhost_net {
bool tx_flush;
};
+static unsigned vhost_net_zcopy_mask __read_mostly;
+
+static void vhost_net_enable_zcopy(int vq)
+{
+ vhost_net_zcopy_mask |= 0x1 << vq;
+}
+
+static void vhost_net_zerocopy_done_signal(struct kref *kref)
+{
+ struct vhost_net_ubuf_ref *ubufs;
+
+ ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref);
+ wake_up(&ubufs->wait);
+}
+
+static struct vhost_net_ubuf_ref *
+vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
+{
+ struct vhost_net_ubuf_ref *ubufs;
+ /* No zero copy backend? Nothing to count. */
+ if (!zcopy)
+ return NULL;
+ ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
+ if (!ubufs)
+ return ERR_PTR(-ENOMEM);
+ kref_init(&ubufs->kref);
+ init_waitqueue_head(&ubufs->wait);
+ ubufs->vq = vq;
+ return ubufs;
+}
+
+static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
+{
+ kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
+}
+
+static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
+{
+ kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
+ wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
+ kfree(ubufs);
+}
+
+static void vhost_net_clear_ubuf_info(struct vhost_net *n)
+{
+
+ bool zcopy;
+ int i;
+
+ for (i = 0; i < n->dev.nvqs; ++i) {
+ zcopy = vhost_net_zcopy_mask & (0x1 << i);
+ if (zcopy)
+ kfree(n->vqs[i].ubuf_info);
+ }
+}
+
+int vhost_net_set_ubuf_info(struct vhost_net *n)
+{
+ bool zcopy;
+ int i;
+
+ for (i = 0; i < n->dev.nvqs; ++i) {
+ zcopy = vhost_net_zcopy_mask & (0x1 << i);
+ if (!zcopy)
+ continue;
+ n->vqs[i].ubuf_info = kmalloc(sizeof(*n->vqs[i].ubuf_info) *
+ UIO_MAXIOV, GFP_KERNEL);
+ if (!n->vqs[i].ubuf_info)
+ goto err;
+ }
+ return 0;
+
+err:
+ while (i--) {
+ zcopy = vhost_net_zcopy_mask & (0x1 << i);
+ if (!zcopy)
+ continue;
+ kfree(n->vqs[i].ubuf_info);
+ }
+ return -ENOMEM;
+}
+
+void vhost_net_vq_reset(struct vhost_net *n)
+{
+ int i;
+
+ for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
+ n->vqs[i].done_idx = 0;
+ n->vqs[i].upend_idx = 0;
+ n->vqs[i].ubufs = NULL;
+ kfree(n->vqs[i].ubuf_info);
+ n->vqs[i].ubuf_info = NULL;
+ n->vqs[i].vhost_hlen = 0;
+ n->vqs[i].sock_hlen = 0;
+ }
+
+}
+
static void vhost_net_tx_packet(struct vhost_net *net)
{
++net->tx_packets;
@@ -153,10 +283,12 @@ static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
static int vhost_zerocopy_signal_used(struct vhost_net *net,
struct vhost_virtqueue *vq)
{
+ struct vhost_net_virtqueue *nvq =
+ container_of(vq, struct vhost_net_virtqueue, vq);
int i;
int j = 0;
- for (i = vq->done_idx; i != vq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
+ for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
vhost_net_tx_err(net);
if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
@@ -168,13 +300,13 @@ static int vhost_zerocopy_signal_used(struct vhost_net *net,
break;
}
if (j)
- vq->done_idx = i;
+ nvq->done_idx = i;
return j;
}
static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
{
- struct vhost_ubuf_ref *ubufs = ubuf->ctx;
+ struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
struct vhost_virtqueue *vq = ubufs->vq;
int cnt = atomic_read(&ubufs->kref.refcount);
@@ -191,14 +323,15 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
/* set len to mark this desc buffers done DMA */
vq->heads[ubuf->desc].len = success ?
VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
- vhost_ubuf_put(ubufs);
+ vhost_net_ubuf_put(ubufs);
}
/* Expects to be always run from workqueue - which acts as
* read-size critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
- struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
+ struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+ struct vhost_virtqueue *vq = &nvq->vq;
unsigned out, in, s;
int head;
struct msghdr msg = {
@@ -213,7 +346,7 @@ static void handle_tx(struct vhost_net *net)
int err;
size_t hdr_size;
struct socket *sock;
- struct vhost_ubuf_ref *uninitialized_var(ubufs);
+ struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
bool zcopy, zcopy_used;
/* TODO: check that we are running from vhost_worker? */
@@ -224,8 +357,8 @@ static void handle_tx(struct vhost_net *net)
mutex_lock(&vq->mutex);
vhost_disable_notify(&net->dev, vq);
- hdr_size = vq->vhost_hlen;
- zcopy = vq->ubufs;
+ hdr_size = nvq->vhost_hlen;
+ zcopy = nvq->ubufs;
for (;;) {
/* Release DMAs done buffers first */
@@ -246,9 +379,10 @@ static void handle_tx(struct vhost_net *net)
/* If more outstanding DMAs, queue the work.
* Handle upend_idx wrap around
*/
- num_pends = likely(vq->upend_idx >= vq->done_idx) ?
- (vq->upend_idx - vq->done_idx) :
- (vq->upend_idx + UIO_MAXIOV - vq->done_idx);
+ num_pends = likely(nvq->upend_idx >= nvq->done_idx) ?
+ (nvq->upend_idx - nvq->done_idx) :
+ (nvq->upend_idx + UIO_MAXIOV -
+ nvq->done_idx);
if (unlikely(num_pends > VHOST_MAX_PEND))
break;
if (unlikely(vhost_enable_notify(&net->dev, vq))) {
@@ -263,54 +397,54 @@ static void handle_tx(struct vhost_net *net)
break;
}
/* Skip header. TODO: support TSO. */
- s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, out);
+ s = move_iovec_hdr(vq->iov, nvq->hdr, hdr_size, out);
msg.msg_iovlen = out;
len = iov_length(vq->iov, out);
/* Sanity check */
if (!len) {
vq_err(vq, "Unexpected header len for TX: "
"%zd expected %zd\n",
- iov_length(vq->hdr, s), hdr_size);
+ iov_length(nvq->hdr, s), hdr_size);
break;
}
zcopy_used = zcopy && (len >= VHOST_GOODCOPY_LEN ||
- vq->upend_idx != vq->done_idx);
+ nvq->upend_idx != nvq->done_idx);
/* use msg_control to pass vhost zerocopy ubuf info to skb */
if (zcopy_used) {
- vq->heads[vq->upend_idx].id = head;
+ vq->heads[nvq->upend_idx].id = head;
if (!vhost_net_tx_select_zcopy(net) ||
len < VHOST_GOODCOPY_LEN) {
/* copy don't need to wait for DMA done */
- vq->heads[vq->upend_idx].len =
+ vq->heads[nvq->upend_idx].len =
VHOST_DMA_DONE_LEN;
msg.msg_control = NULL;
msg.msg_controllen = 0;
ubufs = NULL;
} else {
struct ubuf_info *ubuf;
- ubuf = vq->ubuf_info + vq->upend_idx;
+ ubuf = nvq->ubuf_info + nvq->upend_idx;
- vq->heads[vq->upend_idx].len =
+ vq->heads[nvq->upend_idx].len =
VHOST_DMA_IN_PROGRESS;
ubuf->callback = vhost_zerocopy_callback;
- ubuf->ctx = vq->ubufs;
- ubuf->desc = vq->upend_idx;
+ ubuf->ctx = nvq->ubufs;
+ ubuf->desc = nvq->upend_idx;
msg.msg_control = ubuf;
msg.msg_controllen = sizeof(ubuf);
- ubufs = vq->ubufs;
+ ubufs = nvq->ubufs;
kref_get(&ubufs->kref);
}
- vq->upend_idx = (vq->upend_idx + 1) % UIO_MAXIOV;
+ nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
}
/* TODO: Check specific error and bomb out unless ENOBUFS? */
err = sock->ops->sendmsg(NULL, sock, &msg, len);
if (unlikely(err < 0)) {
if (zcopy_used) {
if (ubufs)
- vhost_ubuf_put(ubufs);
- vq->upend_idx = ((unsigned)vq->upend_idx - 1) %
- UIO_MAXIOV;
+ vhost_net_ubuf_put(ubufs);
+ nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
+ % UIO_MAXIOV;
}
vhost_discard_vq_desc(vq, 1);
break;
@@ -417,7 +551,8 @@ err:
* read-size critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
- struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
+ struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
+ struct vhost_virtqueue *vq = &nvq->vq;
unsigned uninitialized_var(in), log;
struct vhost_log *vq_log;
struct msghdr msg = {
@@ -445,8 +580,8 @@ static void handle_rx(struct vhost_net *net)
mutex_lock(&vq->mutex);
vhost_disable_notify(&net->dev, vq);
- vhost_hlen = vq->vhost_hlen;
- sock_hlen = vq->sock_hlen;
+ vhost_hlen = nvq->vhost_hlen;
+ sock_hlen = nvq->sock_hlen;
vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
vq->log : NULL;
@@ -476,11 +611,11 @@ static void handle_rx(struct vhost_net *net)
/* We don't need to be notified again. */
if (unlikely((vhost_hlen)))
/* Skip header. TODO: support TSO. */
- move_iovec_hdr(vq->iov, vq->hdr, vhost_hlen, in);
+ move_iovec_hdr(vq->iov, nvq->hdr, vhost_hlen, in);
else
/* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
* needed because recvmsg can modify msg_iov. */
- copy_iovec_hdr(vq->iov, vq->hdr, sock_hlen, in);
+ copy_iovec_hdr(vq->iov, nvq->hdr, sock_hlen, in);
msg.msg_iovlen = in;
err = sock->ops->recvmsg(NULL, sock, &msg,
sock_len, MSG_DONTWAIT | MSG_TRUNC);
@@ -494,7 +629,7 @@ static void handle_rx(struct vhost_net *net)
continue;
}
if (unlikely(vhost_hlen) &&
- memcpy_toiovecend(vq->hdr, (unsigned char *)&hdr, 0,
+ memcpy_toiovecend(nvq->hdr, (unsigned char *)&hdr, 0,
vhost_hlen)) {
vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
vq->iov->iov_base);
@@ -502,7 +637,7 @@ static void handle_rx(struct vhost_net *net)
}
/* TODO: Should check and handle checksum. */
if (likely(mergeable) &&
- memcpy_toiovecend(vq->hdr, (unsigned char *)&headcount,
+ memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
offsetof(typeof(hdr), num_buffers),
sizeof hdr.num_buffers)) {
vq_err(vq, "Failed num_buffers write");
@@ -559,17 +694,34 @@ static int vhost_net_open(struct inode *inode, struct file *f)
{
struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
struct vhost_dev *dev;
- int r;
+ struct vhost_virtqueue **vqs;
+ int r, i;
if (!n)
return -ENOMEM;
+ vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
+ if (!vqs) {
+ kfree(n);
+ return -ENOMEM;
+ }
dev = &n->dev;
- n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
- n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
- r = vhost_dev_init(dev, n->vqs, VHOST_NET_VQ_MAX);
+ vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
+ vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
+ n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
+ n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
+ for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
+ n->vqs[i].ubufs = NULL;
+ n->vqs[i].ubuf_info = NULL;
+ n->vqs[i].upend_idx = 0;
+ n->vqs[i].done_idx = 0;
+ n->vqs[i].vhost_hlen = 0;
+ n->vqs[i].sock_hlen = 0;
+ }
+ r = vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
if (r < 0) {
kfree(n);
+ kfree(vqs);
return r;
}
@@ -584,7 +736,9 @@ static int vhost_net_open(struct inode *inode, struct file *f)
static void vhost_net_disable_vq(struct vhost_net *n,
struct vhost_virtqueue *vq)
{
- struct vhost_poll *poll = n->poll + (vq - n->vqs);
+ struct vhost_net_virtqueue *nvq =
+ container_of(vq, struct vhost_net_virtqueue, vq);
+ struct vhost_poll *poll = n->poll + (nvq - n->vqs);
if (!vq->private_data)
return;
vhost_poll_stop(poll);
@@ -593,7 +747,9 @@ static void vhost_net_disable_vq(struct vhost_net *n,
static int vhost_net_enable_vq(struct vhost_net *n,
struct vhost_virtqueue *vq)
{
- struct vhost_poll *poll = n->poll + (vq - n->vqs);
+ struct vhost_net_virtqueue *nvq =
+ container_of(vq, struct vhost_net_virtqueue, vq);
+ struct vhost_poll *poll = n->poll + (nvq - n->vqs);
struct socket *sock;
sock = rcu_dereference_protected(vq->private_data,
@@ -621,30 +777,30 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
struct socket **rx_sock)
{
- *tx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_TX);
- *rx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_RX);
+ *tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
+ *rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
}
static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
vhost_poll_flush(n->poll + index);
- vhost_poll_flush(&n->dev.vqs[index].poll);
+ vhost_poll_flush(&n->vqs[index].vq.poll);
}
static void vhost_net_flush(struct vhost_net *n)
{
vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
- if (n->dev.vqs[VHOST_NET_VQ_TX].ubufs) {
- mutex_lock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
+ if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
+ mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
n->tx_flush = true;
- mutex_unlock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
+ mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
/* Wait for all lower device DMAs done. */
- vhost_ubuf_put_and_wait(n->dev.vqs[VHOST_NET_VQ_TX].ubufs);
- mutex_lock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
+ vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
+ mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
n->tx_flush = false;
- kref_init(&n->dev.vqs[VHOST_NET_VQ_TX].ubufs->kref);
- mutex_unlock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
+ kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref);
+ mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
}
}
@@ -658,6 +814,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
vhost_net_flush(n);
vhost_dev_stop(&n->dev);
vhost_dev_cleanup(&n->dev, false);
+ vhost_net_vq_reset(n);
if (tx_sock)
fput(tx_sock->file);
if (rx_sock)
@@ -665,6 +822,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
/* We do an extra flush before freeing memory,
* since jobs can re-queue themselves. */
vhost_net_flush(n);
+ kfree(n->dev.vqs);
kfree(n);
return 0;
}
@@ -738,7 +896,8 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
struct socket *sock, *oldsock;
struct vhost_virtqueue *vq;
- struct vhost_ubuf_ref *ubufs, *oldubufs = NULL;
+ struct vhost_net_virtqueue *nvq;
+ struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
int r;
mutex_lock(&n->dev.mutex);
@@ -750,7 +909,8 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
r = -ENOBUFS;
goto err;
}
- vq = n->vqs + index;
+ vq = &n->vqs[index].vq;
+ nvq = &n->vqs[index];
mutex_lock(&vq->mutex);
/* Verify that ring has been setup correctly. */
@@ -768,7 +928,8 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
oldsock = rcu_dereference_protected(vq->private_data,
lockdep_is_held(&vq->mutex));
if (sock != oldsock) {
- ubufs = vhost_ubuf_alloc(vq, sock && vhost_sock_zcopy(sock));
+ ubufs = vhost_net_ubuf_alloc(vq,
+ sock && vhost_sock_zcopy(sock));
if (IS_ERR(ubufs)) {
r = PTR_ERR(ubufs);
goto err_ubufs;
@@ -783,8 +944,8 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
if (r)
goto err_used;
- oldubufs = vq->ubufs;
- vq->ubufs = ubufs;
+ oldubufs = nvq->ubufs;
+ nvq->ubufs = ubufs;
n->tx_packets = 0;
n->tx_zcopy_err = 0;
@@ -794,7 +955,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
mutex_unlock(&vq->mutex);
if (oldubufs) {
- vhost_ubuf_put_and_wait(oldubufs);
+ vhost_net_ubuf_put_and_wait(oldubufs);
mutex_lock(&vq->mutex);
vhost_zerocopy_signal_used(n, vq);
mutex_unlock(&vq->mutex);
@@ -812,7 +973,7 @@ err_used:
rcu_assign_pointer(vq->private_data, oldsock);
vhost_net_enable_vq(n, vq);
if (ubufs)
- vhost_ubuf_put_and_wait(ubufs);
+ vhost_net_ubuf_put_and_wait(ubufs);
err_ubufs:
fput(sock->file);
err_vq:
@@ -827,14 +988,21 @@ static long vhost_net_reset_owner(struct vhost_net *n)
struct socket *tx_sock = NULL;
struct socket *rx_sock = NULL;
long err;
+ struct vhost_memory *memory;
mutex_lock(&n->dev.mutex);
err = vhost_dev_check_owner(&n->dev);
if (err)
goto done;
+ memory = vhost_dev_reset_owner_prepare();
+ if (!memory) {
+ err = -ENOMEM;
+ goto done;
+ }
vhost_net_stop(n, &tx_sock, &rx_sock);
vhost_net_flush(n);
- err = vhost_dev_reset_owner(&n->dev);
+ vhost_dev_reset_owner(&n->dev, memory);
+ vhost_net_vq_reset(n);
done:
mutex_unlock(&n->dev.mutex);
if (tx_sock)
@@ -870,16 +1038,33 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
n->dev.acked_features = features;
smp_wmb();
for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
- mutex_lock(&n->vqs[i].mutex);
+ mutex_lock(&n->vqs[i].vq.mutex);
n->vqs[i].vhost_hlen = vhost_hlen;
n->vqs[i].sock_hlen = sock_hlen;
- mutex_unlock(&n->vqs[i].mutex);
+ mutex_unlock(&n->vqs[i].vq.mutex);
}
vhost_net_flush(n);
mutex_unlock(&n->dev.mutex);
return 0;
}
+static long vhost_net_set_owner(struct vhost_net *n)
+{
+ int r;
+
+ mutex_lock(&n->dev.mutex);
+ r = vhost_net_set_ubuf_info(n);
+ if (r)
+ goto out;
+ r = vhost_dev_set_owner(&n->dev);
+ if (r)
+ vhost_net_clear_ubuf_info(n);
+ vhost_net_flush(n);
+out:
+ mutex_unlock(&n->dev.mutex);
+ return r;
+}
+
static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
unsigned long arg)
{
@@ -908,6 +1093,8 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
return vhost_net_set_features(n, features);
case VHOST_RESET_OWNER:
return vhost_net_reset_owner(n);
+ case VHOST_SET_OWNER:
+ return vhost_net_set_owner(n);
default:
mutex_lock(&n->dev.mutex);
r = vhost_dev_ioctl(&n->dev, ioctl, argp);
@@ -948,7 +1135,7 @@ static struct miscdevice vhost_net_misc = {
static int vhost_net_init(void)
{
if (experimental_zcopytx)
- vhost_enable_zcopy(VHOST_NET_VQ_TX);
+ vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);
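
[editor's note] The vhost_net_ubuf_ref helpers introduced above pair a kref with a wait queue: each outstanding zerocopy DMA holds a reference, and put_and_wait drops the initial reference and sleeps until the count reaches zero. A rough userspace analogue of that pattern follows; the names and types are illustrative only.

/* Userspace sketch of the put-and-wait idea behind
 * vhost_net_ubuf_put_and_wait(); not the kernel implementation. */
#include <pthread.h>
#include <stdlib.h>

struct ubuf_ref {
	int count;                     /* kref in the kernel version       */
	pthread_mutex_t lock;
	pthread_cond_t done;           /* wait queue in the kernel version */
};

static struct ubuf_ref *ubuf_ref_alloc(void)
{
	struct ubuf_ref *u = malloc(sizeof(*u));

	if (!u)
		return NULL;
	u->count = 1;                  /* kref_init() starts at 1 */
	pthread_mutex_init(&u->lock, NULL);
	pthread_cond_init(&u->done, NULL);
	return u;
}

static void ubuf_ref_put(struct ubuf_ref *u)
{
	pthread_mutex_lock(&u->lock);
	if (--u->count == 0)           /* last DMA completion wakes waiters */
		pthread_cond_broadcast(&u->done);
	pthread_mutex_unlock(&u->lock);
}

static void ubuf_ref_put_and_wait(struct ubuf_ref *u)
{
	pthread_mutex_lock(&u->lock);
	--u->count;                    /* drop the initial reference        */
	while (u->count > 0)           /* wait for outstanding zerocopy DMA */
		pthread_cond_wait(&u->done, &u->lock);
	pthread_mutex_unlock(&u->lock);
	free(u);
}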
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/scsi.c
index 1677238..7014202 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/scsi.c
@@ -45,14 +45,116 @@
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include <linux/vhost.h>
-#include <linux/virtio_net.h> /* TODO vhost.h currently depends on this */
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>
#include "vhost.c"
#include "vhost.h"
-#include "tcm_vhost.h"
+
+#define TCM_VHOST_VERSION "v0.1"
+#define TCM_VHOST_NAMELEN 256
+#define TCM_VHOST_MAX_CDB_SIZE 32
+
+struct vhost_scsi_inflight {
+ /* Wait for the flush operation to finish */
+ struct completion comp;
+ /* Refcount for the inflight reqs */
+ struct kref kref;
+};
+
+struct tcm_vhost_cmd {
+ /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
+ int tvc_vq_desc;
+ /* virtio-scsi initiator task attribute */
+ int tvc_task_attr;
+ /* virtio-scsi initiator data direction */
+ enum dma_data_direction tvc_data_direction;
+ /* Expected data transfer length from virtio-scsi header */
+ u32 tvc_exp_data_len;
+ /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
+ u64 tvc_tag;
+ /* The number of scatterlists associated with this cmd */
+ u32 tvc_sgl_count;
+ /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
+ u32 tvc_lun;
+ /* Pointer to the SGL formatted memory from virtio-scsi */
+ struct scatterlist *tvc_sgl;
+ /* Pointer to response */
+ struct virtio_scsi_cmd_resp __user *tvc_resp;
+ /* Pointer to vhost_scsi for our device */
+ struct vhost_scsi *tvc_vhost;
+ /* Pointer to vhost_virtqueue for the cmd */
+ struct vhost_virtqueue *tvc_vq;
+ /* Pointer to vhost nexus memory */
+ struct tcm_vhost_nexus *tvc_nexus;
+ /* The TCM I/O descriptor that is accessed via container_of() */
+ struct se_cmd tvc_se_cmd;
+ /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
+ struct work_struct work;
+ /* Copy of the incoming SCSI command descriptor block (CDB) */
+ unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
+ /* Sense buffer that will be mapped into outgoing status */
+ unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
+ /* Completed commands list, serviced from vhost worker thread */
+ struct llist_node tvc_completion_list;
+ /* Used to track inflight cmd */
+ struct vhost_scsi_inflight *inflight;
+};
+
+struct tcm_vhost_nexus {
+ /* Pointer to TCM session for I_T Nexus */
+ struct se_session *tvn_se_sess;
+};
+
+struct tcm_vhost_nacl {
+ /* Binary World Wide unique Port Name for Vhost Initiator port */
+ u64 iport_wwpn;
+ /* ASCII formatted WWPN for SAS Initiator port */

+ char iport_name[TCM_VHOST_NAMELEN];
+ /* Returned by tcm_vhost_make_nodeacl() */
+ struct se_node_acl se_node_acl;
+};
+
+struct vhost_scsi;
+struct tcm_vhost_tpg {
+ /* Vhost port target portal group tag for TCM */
+ u16 tport_tpgt;
+ /* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
+ int tv_tpg_port_count;
+ /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
+ int tv_tpg_vhost_count;
+ /* list for tcm_vhost_list */
+ struct list_head tv_tpg_list;
+ /* Used to protect access for tpg_nexus */
+ struct mutex tv_tpg_mutex;
+ /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
+ struct tcm_vhost_nexus *tpg_nexus;
+ /* Pointer back to tcm_vhost_tport */
+ struct tcm_vhost_tport *tport;
+ /* Returned by tcm_vhost_make_tpg() */
+ struct se_portal_group se_tpg;
+ /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
+ struct vhost_scsi *vhost_scsi;
+};
+
+struct tcm_vhost_tport {
+ /* SCSI protocol the tport is providing */
+ u8 tport_proto_id;
+ /* Binary World Wide unique Port Name for Vhost Target port */
+ u64 tport_wwpn;
+ /* ASCII formatted WWPN for Vhost Target port */
+ char tport_name[TCM_VHOST_NAMELEN];
+ /* Returned by tcm_vhost_make_tport() */
+ struct se_wwn tport_wwn;
+};
+
+struct tcm_vhost_evt {
+ /* event to be sent to guest */
+ struct virtio_scsi_event event;
+ /* event list, serviced from vhost worker thread */
+ struct llist_node list;
+};
enum {
VHOST_SCSI_VQ_CTL = 0,
@@ -60,27 +162,36 @@ enum {
VHOST_SCSI_VQ_IO = 2,
};
-/*
- * VIRTIO_RING_F_EVENT_IDX seems broken. Not sure the bug is in
- * kernel but disabling it helps.
- * TODO: debug and remove the workaround.
- */
enum {
- VHOST_SCSI_FEATURES = (VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX)) |
- (1ULL << VIRTIO_SCSI_F_HOTPLUG)
+ VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG)
};
#define VHOST_SCSI_MAX_TARGET 256
#define VHOST_SCSI_MAX_VQ 128
#define VHOST_SCSI_MAX_EVENT 128
+struct vhost_scsi_virtqueue {
+ struct vhost_virtqueue vq;
+ /*
+ * Reference counting for inflight reqs, used for the flush operation. At
+ * any given time, one counter tracks newly submitted commands, while we
+ * wait for the other one to reach 0.
+ */
+ struct vhost_scsi_inflight inflights[2];
+ /*
+ * Indicates which inflight counter is currently in use; protected by vq->mutex.
+ * Writers must also take dev mutex and flush under it.
+ */
+ int inflight_idx;
+};
+
struct vhost_scsi {
/* Protected by vhost_scsi->dev.mutex */
struct tcm_vhost_tpg **vs_tpg;
char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
struct vhost_dev dev;
- struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ];
+ struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
struct vhost_work vs_completion_work; /* cmd completion work item */
struct llist_head vs_completion_list; /* cmd completion queue */
@@ -107,6 +218,59 @@ static int iov_num_pages(struct iovec *iov)
((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
}
+void tcm_vhost_done_inflight(struct kref *kref)
+{
+ struct vhost_scsi_inflight *inflight;
+
+ inflight = container_of(kref, struct vhost_scsi_inflight, kref);
+ complete(&inflight->comp);
+}
+
+static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
+ struct vhost_scsi_inflight *old_inflight[])
+{
+ struct vhost_scsi_inflight *new_inflight;
+ struct vhost_virtqueue *vq;
+ int idx, i;
+
+ for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ vq = &vs->vqs[i].vq;
+
+ mutex_lock(&vq->mutex);
+
+ /* store old inflight */
+ idx = vs->vqs[i].inflight_idx;
+ if (old_inflight)
+ old_inflight[i] = &vs->vqs[i].inflights[idx];
+
+ /* set up new inflight */
+ vs->vqs[i].inflight_idx = idx ^ 1;
+ new_inflight = &vs->vqs[i].inflights[idx ^ 1];
+ kref_init(&new_inflight->kref);
+ init_completion(&new_inflight->comp);
+
+ mutex_unlock(&vq->mutex);
+ }
+}
+
+static struct vhost_scsi_inflight *
+tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
+{
+ struct vhost_scsi_inflight *inflight;
+ struct vhost_scsi_virtqueue *svq;
+
+ svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
+ inflight = &svq->inflights[svq->inflight_idx];
+ kref_get(&inflight->kref);
+
+ return inflight;
+}
+
+static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
+{
+ kref_put(&inflight->kref, tcm_vhost_done_inflight);
+}
+
static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
{
return 1;
@@ -366,7 +530,7 @@ static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
u32 event, u32 reason)
{
- struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+ struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
struct tcm_vhost_evt *evt;
if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
@@ -403,13 +567,15 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
kfree(tv_cmd->tvc_sgl);
}
+ tcm_vhost_put_inflight(tv_cmd->inflight);
+
kfree(tv_cmd);
}
static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
struct tcm_vhost_evt *evt)
{
- struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+ struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
struct virtio_scsi_event *event = &evt->event;
struct virtio_scsi_event __user *eventp;
unsigned out, in;
@@ -460,7 +626,7 @@ static void tcm_vhost_evt_work(struct vhost_work *work)
{
struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
vs_event_work);
- struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+ struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
struct tcm_vhost_evt *evt;
struct llist_node *llnode;
@@ -511,8 +677,10 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
v_rsp.sense_len);
ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
if (likely(ret == 0)) {
+ struct vhost_scsi_virtqueue *q;
vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0);
- vq = tv_cmd->tvc_vq - vs->vqs;
+ q = container_of(tv_cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
+ vq = q - vs->vqs;
__set_bit(vq, signal);
} else
pr_err("Faulted on virtio_scsi_cmd_resp\n");
@@ -523,10 +691,11 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
vq = -1;
while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
< VHOST_SCSI_MAX_VQ)
- vhost_signal(&vs->dev, &vs->vqs[vq]);
+ vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}
static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
+ struct vhost_virtqueue *vq,
struct tcm_vhost_tpg *tv_tpg,
struct virtio_scsi_cmd_req *v_req,
u32 exp_data_len,
@@ -551,6 +720,7 @@ static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
tv_cmd->tvc_exp_data_len = exp_data_len;
tv_cmd->tvc_data_direction = data_direction;
tv_cmd->tvc_nexus = tv_nexus;
+ tv_cmd->inflight = tcm_vhost_get_inflight(vq);
return tv_cmd;
}
@@ -806,7 +976,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
for (i = 0; i < data_num; i++)
exp_data_len += vq->iov[data_first + i].iov_len;
- tv_cmd = vhost_scsi_allocate_cmd(tv_tpg, &v_req,
+ tv_cmd = vhost_scsi_allocate_cmd(vq, tv_tpg, &v_req,
exp_data_len, data_direction);
if (IS_ERR(tv_cmd)) {
vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
@@ -938,17 +1108,35 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
{
- vhost_poll_flush(&vs->dev.vqs[index].poll);
+ vhost_poll_flush(&vs->vqs[index].vq.poll);
}
+/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
+ struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
int i;
+ /* Init new inflight and remember the old inflight */
+ tcm_vhost_init_inflight(vs, old_inflight);
+
+ /*
+ * The inflight->kref was initialized to 1. We decrement it here to
+ * indicate the start of the flush operation so that it will reach 0
+ * when all the reqs are finished.
+ */
+ for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
+ kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);
+
+ /* Flush both the vhost poll and vhost work */
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
vhost_scsi_flush_vq(vs, i);
vhost_work_flush(&vs->dev, &vs->vs_completion_work);
vhost_work_flush(&vs->dev, &vs->vs_event_work);
+
+ /* Wait for all reqs issued before the flush to be finished */
+ for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
+ wait_for_completion(&old_inflight[i]->comp);
}
/*
@@ -975,7 +1163,7 @@ static int vhost_scsi_set_endpoint(
/* Verify that ring has been setup correctly. */
for (index = 0; index < vs->dev.nvqs; ++index) {
/* Verify that ring has been setup correctly. */
- if (!vhost_vq_access_ok(&vs->vqs[index])) {
+ if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
ret = -EFAULT;
goto out;
}
@@ -1022,7 +1210,7 @@ static int vhost_scsi_set_endpoint(
memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
sizeof(vs->vs_vhost_wwpn));
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
- vq = &vs->vqs[i];
+ vq = &vs->vqs[i].vq;
/* Flushing the vhost_work acts as synchronize_rcu */
mutex_lock(&vq->mutex);
rcu_assign_pointer(vq->private_data, vs_tpg);
@@ -1063,7 +1251,7 @@ static int vhost_scsi_clear_endpoint(
mutex_lock(&vs->dev.mutex);
/* Verify that ring has been setup correctly. */
for (index = 0; index < vs->dev.nvqs; ++index) {
- if (!vhost_vq_access_ok(&vs->vqs[index])) {
+ if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
ret = -EFAULT;
goto err_dev;
}
@@ -1103,7 +1291,7 @@ static int vhost_scsi_clear_endpoint(
}
if (match) {
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
- vq = &vs->vqs[i];
+ vq = &vs->vqs[i].vq;
/* Flushing the vhost_work acts as synchronize_rcu */
mutex_lock(&vq->mutex);
rcu_assign_pointer(vq->private_data, NULL);
@@ -1151,24 +1339,39 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
static int vhost_scsi_open(struct inode *inode, struct file *f)
{
struct vhost_scsi *s;
+ struct vhost_virtqueue **vqs;
int r, i;
s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
return -ENOMEM;
+ vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
+ if (!vqs) {
+ kfree(s);
+ return -ENOMEM;
+ }
+
vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);
s->vs_events_nr = 0;
s->vs_events_missed = false;
- s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
- s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
- for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++)
- s->vqs[i].handle_kick = vhost_scsi_handle_kick;
- r = vhost_dev_init(&s->dev, s->vqs, VHOST_SCSI_MAX_VQ);
+ vqs[VHOST_SCSI_VQ_CTL] = &s->vqs[VHOST_SCSI_VQ_CTL].vq;
+ vqs[VHOST_SCSI_VQ_EVT] = &s->vqs[VHOST_SCSI_VQ_EVT].vq;
+ s->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
+ s->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
+ for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
+ vqs[i] = &s->vqs[i].vq;
+ s->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
+ }
+ r = vhost_dev_init(&s->dev, vqs, VHOST_SCSI_MAX_VQ);
+
+ tcm_vhost_init_inflight(s, NULL);
+
if (r < 0) {
+ kfree(vqs);
kfree(s);
return r;
}
@@ -1190,6 +1393,7 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
vhost_dev_cleanup(&s->dev, false);
/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
vhost_scsi_flush(s);
+ kfree(s->dev.vqs);
kfree(s);
return 0;
}
@@ -1205,7 +1409,7 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
u32 events_missed;
u64 features;
int r, abi_version = VHOST_SCSI_ABI_VERSION;
- struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+ struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
switch (ioctl) {
case VHOST_SCSI_SET_ENDPOINT:
@@ -1333,7 +1537,7 @@ static void tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
else
reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
- vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+ vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
mutex_lock(&vq->mutex);
tcm_vhost_send_evt(vs, tpg, lun,
VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
@@ -1926,7 +2130,8 @@ static void tcm_vhost_exit(void)
destroy_workqueue(tcm_vhost_workqueue);
};
-MODULE_DESCRIPTION("TCM_VHOST series fabric driver");
+MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
+MODULE_ALIAS("tcm_vhost");
MODULE_LICENSE("GPL");
module_init(tcm_vhost_init);
module_exit(tcm_vhost_exit);
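
[editor's note] vhost_scsi_flush() above relies on the two inflight counters added per virtqueue: the flush flips new commands onto a fresh counter, drops the old counter's initial reference, and waits on a completion that fires when that counter reaches zero. The sketch below is an illustrative userspace analogue of that flip-and-drain idea, not the kernel code; all names are hypothetical.

/* Userspace sketch of the two-slot inflight flip used by the flush path. */
#include <pthread.h>

struct inflight {
	int refs;                      /* kref in the kernel version       */
	int done;                      /* completion in the kernel version */
	pthread_mutex_t lock;
	pthread_cond_t comp;
};

struct scsi_vq {
	struct inflight inflights[2];  /* one tracks new cmds, one drains  */
	int idx;                       /* slot that new commands charge to */
	pthread_mutex_t mutex;
};

static void inflight_init(struct inflight *i)
{
	i->refs = 1;                   /* base ref, dropped at flush start */
	i->done = 0;
	pthread_mutex_init(&i->lock, NULL);
	pthread_cond_init(&i->comp, NULL);
}

static void inflight_get(struct inflight *i)
{
	pthread_mutex_lock(&i->lock);
	i->refs++;                     /* each in-flight command holds one */
	pthread_mutex_unlock(&i->lock);
}

static void inflight_put(struct inflight *i)
{
	pthread_mutex_lock(&i->lock);
	if (--i->refs == 0) {
		i->done = 1;           /* complete(&inflight->comp)        */
		pthread_cond_broadcast(&i->comp);
	}
	pthread_mutex_unlock(&i->lock);
}

/* Flush: flip to a fresh slot, drop the old slot's base ref, then wait. */
static void vq_flush(struct scsi_vq *vq)
{
	struct inflight *old;

	pthread_mutex_lock(&vq->mutex);
	old = &vq->inflights[vq->idx];
	vq->idx ^= 1;
	inflight_init(&vq->inflights[vq->idx]);
	pthread_mutex_unlock(&vq->mutex);

	inflight_put(old);             /* start of flush drops the base ref */

	pthread_mutex_lock(&old->lock);
	while (!old->done)             /* wait_for_completion(&old->comp)   */
		pthread_cond_wait(&old->comp, &old->lock);
	pthread_mutex_unlock(&old->lock);
}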
diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
deleted file mode 100644
index 514b9fd..0000000
--- a/drivers/vhost/tcm_vhost.h
+++ /dev/null
@@ -1,128 +0,0 @@
-#define TCM_VHOST_VERSION "v0.1"
-#define TCM_VHOST_NAMELEN 256
-#define TCM_VHOST_MAX_CDB_SIZE 32
-
-struct tcm_vhost_cmd {
- /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
- int tvc_vq_desc;
- /* virtio-scsi initiator task attribute */
- int tvc_task_attr;
- /* virtio-scsi initiator data direction */
- enum dma_data_direction tvc_data_direction;
- /* Expected data transfer length from virtio-scsi header */
- u32 tvc_exp_data_len;
- /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
- u64 tvc_tag;
- /* The number of scatterlists associated with this cmd */
- u32 tvc_sgl_count;
- /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
- u32 tvc_lun;
- /* Pointer to the SGL formatted memory from virtio-scsi */
- struct scatterlist *tvc_sgl;
- /* Pointer to response */
- struct virtio_scsi_cmd_resp __user *tvc_resp;
- /* Pointer to vhost_scsi for our device */
- struct vhost_scsi *tvc_vhost;
- /* Pointer to vhost_virtqueue for the cmd */
- struct vhost_virtqueue *tvc_vq;
- /* Pointer to vhost nexus memory */
- struct tcm_vhost_nexus *tvc_nexus;
- /* The TCM I/O descriptor that is accessed via container_of() */
- struct se_cmd tvc_se_cmd;
- /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
- struct work_struct work;
- /* Copy of the incoming SCSI command descriptor block (CDB) */
- unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
- /* Sense buffer that will be mapped into outgoing status */
- unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
- /* Completed commands list, serviced from vhost worker thread */
- struct llist_node tvc_completion_list;
-};
-
-struct tcm_vhost_nexus {
- /* Pointer to TCM session for I_T Nexus */
- struct se_session *tvn_se_sess;
-};
-
-struct tcm_vhost_nacl {
- /* Binary World Wide unique Port Name for Vhost Initiator port */
- u64 iport_wwpn;
- /* ASCII formatted WWPN for Sas Initiator port */
- char iport_name[TCM_VHOST_NAMELEN];
- /* Returned by tcm_vhost_make_nodeacl() */
- struct se_node_acl se_node_acl;
-};
-
-struct vhost_scsi;
-struct tcm_vhost_tpg {
- /* Vhost port target portal group tag for TCM */
- u16 tport_tpgt;
- /* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */
- int tv_tpg_port_count;
- /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
- int tv_tpg_vhost_count;
- /* list for tcm_vhost_list */
- struct list_head tv_tpg_list;
- /* Used to protect access for tpg_nexus */
- struct mutex tv_tpg_mutex;
- /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
- struct tcm_vhost_nexus *tpg_nexus;
- /* Pointer back to tcm_vhost_tport */
- struct tcm_vhost_tport *tport;
- /* Returned by tcm_vhost_make_tpg() */
- struct se_portal_group se_tpg;
- /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
- struct vhost_scsi *vhost_scsi;
-};
-
-struct tcm_vhost_tport {
- /* SCSI protocol the tport is providing */
- u8 tport_proto_id;
- /* Binary World Wide unique Port Name for Vhost Target port */
- u64 tport_wwpn;
- /* ASCII formatted WWPN for Vhost Target port */
- char tport_name[TCM_VHOST_NAMELEN];
- /* Returned by tcm_vhost_make_tport() */
- struct se_wwn tport_wwn;
-};
-
-struct tcm_vhost_evt {
- /* event to be sent to guest */
- struct virtio_scsi_event event;
- /* event list, serviced from vhost worker thread */
- struct llist_node list;
-};
-
-/*
- * As per request from MST, keep TCM_VHOST related ioctl defines out of
- * linux/vhost.h (user-space) for now..
- */
-
-#include <linux/vhost.h>
-
-/*
- * Used by QEMU userspace to ensure a consistent vhost-scsi ABI.
- *
- * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate +
- * RFC-v2 vhost-scsi userspace. Add GET_ABI_VERSION ioctl usage
- * ABI Rev 1: January 2013. Ignore vhost_tpgt filed in struct vhost_scsi_target.
- * All the targets under vhost_wwpn can be seen and used by guset.
- */
-
-#define VHOST_SCSI_ABI_VERSION 1
-
-struct vhost_scsi_target {
- int abi_version;
- char vhost_wwpn[TRANSPORT_IQN_LEN];
- unsigned short vhost_tpgt;
- unsigned short reserved;
-};
-
-/* VHOST_SCSI specific defines */
-#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target)
-#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
-/* Changing this breaks userspace. */
-#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, int)
-/* Set and get the events missed flag */
-#define VHOST_SCSI_SET_EVENTS_MISSED _IOW(VHOST_VIRTIO, 0x43, __u32)
-#define VHOST_SCSI_GET_EVENTS_MISSED _IOW(VHOST_VIRTIO, 0x44, __u32)
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 91d6f06..1ee45bc 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -219,13 +219,20 @@ static long vhost_test_reset_owner(struct vhost_test *n)
{
void *priv = NULL;
long err;
+ struct vhost_memory *memory;
+
mutex_lock(&n->dev.mutex);
err = vhost_dev_check_owner(&n->dev);
if (err)
goto done;
+ memory = vhost_dev_reset_owner_prepare();
+ if (!memory) {
+ err = -ENOMEM;
+ goto done;
+ }
vhost_test_stop(n, &priv);
vhost_test_flush(n);
- err = vhost_dev_reset_owner(&n->dev);
+ vhost_dev_reset_owner(&n->dev, memory);
done:
mutex_unlock(&n->dev.mutex);
return err;
@@ -275,7 +282,9 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
return vhost_test_reset_owner(n);
default:
mutex_lock(&n->dev.mutex);
- r = vhost_dev_ioctl(&n->dev, ioctl, arg);
+ r = vhost_dev_ioctl(&n->dev, ioctl, argp);
+ if (r == -ENOIOCTLCMD)
+ r = vhost_vring_ioctl(&n->dev, ioctl, argp);
vhost_test_flush(n);
mutex_unlock(&n->dev.mutex);
return r;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 4eecdb8..beee7f5 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -13,7 +13,7 @@
#include <linux/eventfd.h>
#include <linux/vhost.h>
-#include <linux/virtio_net.h>
+#include <linux/socket.h> /* memcpy_fromiovec */
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
@@ -33,8 +33,6 @@ enum {
VHOST_MEMORY_F_LOG = 0x1,
};
-static unsigned vhost_zcopy_mask __read_mostly;
-
#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])
@@ -181,8 +179,6 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->used_flags = 0;
vq->log_used = false;
vq->log_addr = -1ull;
- vq->vhost_hlen = 0;
- vq->sock_hlen = 0;
vq->private_data = NULL;
vq->log_base = NULL;
vq->error_ctx = NULL;
@@ -191,9 +187,6 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->call_ctx = NULL;
vq->call = NULL;
vq->log_ctx = NULL;
- vq->upend_idx = 0;
- vq->done_idx = 0;
- vq->ubufs = NULL;
}
static int vhost_worker(void *data)
@@ -253,43 +246,29 @@ static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
vq->log = NULL;
kfree(vq->heads);
vq->heads = NULL;
- kfree(vq->ubuf_info);
- vq->ubuf_info = NULL;
-}
-
-void vhost_enable_zcopy(int vq)
-{
- vhost_zcopy_mask |= 0x1 << vq;
}
/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
int i;
- bool zcopy;
for (i = 0; i < dev->nvqs; ++i) {
- dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect *
+ dev->vqs[i]->indirect = kmalloc(sizeof *dev->vqs[i]->indirect *
UIO_MAXIOV, GFP_KERNEL);
- dev->vqs[i].log = kmalloc(sizeof *dev->vqs[i].log * UIO_MAXIOV,
+ dev->vqs[i]->log = kmalloc(sizeof *dev->vqs[i]->log * UIO_MAXIOV,
GFP_KERNEL);
- dev->vqs[i].heads = kmalloc(sizeof *dev->vqs[i].heads *
+ dev->vqs[i]->heads = kmalloc(sizeof *dev->vqs[i]->heads *
UIO_MAXIOV, GFP_KERNEL);
- zcopy = vhost_zcopy_mask & (0x1 << i);
- if (zcopy)
- dev->vqs[i].ubuf_info =
- kmalloc(sizeof *dev->vqs[i].ubuf_info *
- UIO_MAXIOV, GFP_KERNEL);
- if (!dev->vqs[i].indirect || !dev->vqs[i].log ||
- !dev->vqs[i].heads ||
- (zcopy && !dev->vqs[i].ubuf_info))
+ if (!dev->vqs[i]->indirect || !dev->vqs[i]->log ||
+ !dev->vqs[i]->heads)
goto err_nomem;
}
return 0;
err_nomem:
for (; i >= 0; --i)
- vhost_vq_free_iovecs(&dev->vqs[i]);
+ vhost_vq_free_iovecs(dev->vqs[i]);
return -ENOMEM;
}
@@ -298,11 +277,11 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
int i;
for (i = 0; i < dev->nvqs; ++i)
- vhost_vq_free_iovecs(&dev->vqs[i]);
+ vhost_vq_free_iovecs(dev->vqs[i]);
}
long vhost_dev_init(struct vhost_dev *dev,
- struct vhost_virtqueue *vqs, int nvqs)
+ struct vhost_virtqueue **vqs, int nvqs)
{
int i;
@@ -318,16 +297,15 @@ long vhost_dev_init(struct vhost_dev *dev,
dev->worker = NULL;
for (i = 0; i < dev->nvqs; ++i) {
- dev->vqs[i].log = NULL;
- dev->vqs[i].indirect = NULL;
- dev->vqs[i].heads = NULL;
- dev->vqs[i].ubuf_info = NULL;
- dev->vqs[i].dev = dev;
- mutex_init(&dev->vqs[i].mutex);
- vhost_vq_reset(dev, dev->vqs + i);
- if (dev->vqs[i].handle_kick)
- vhost_poll_init(&dev->vqs[i].poll,
- dev->vqs[i].handle_kick, POLLIN, dev);
+ dev->vqs[i]->log = NULL;
+ dev->vqs[i]->indirect = NULL;
+ dev->vqs[i]->heads = NULL;
+ dev->vqs[i]->dev = dev;
+ mutex_init(&dev->vqs[i]->mutex);
+ vhost_vq_reset(dev, dev->vqs[i]);
+ if (dev->vqs[i]->handle_kick)
+ vhost_poll_init(&dev->vqs[i]->poll,
+ dev->vqs[i]->handle_kick, POLLIN, dev);
}
return 0;
@@ -366,7 +344,7 @@ static int vhost_attach_cgroups(struct vhost_dev *dev)
}
/* Caller should have device mutex */
-static long vhost_dev_set_owner(struct vhost_dev *dev)
+long vhost_dev_set_owner(struct vhost_dev *dev)
{
struct task_struct *worker;
int err;
@@ -408,21 +386,19 @@ err_mm:
return err;
}
-/* Caller should have device mutex */
-long vhost_dev_reset_owner(struct vhost_dev *dev)
+struct vhost_memory *vhost_dev_reset_owner_prepare(void)
{
- struct vhost_memory *memory;
-
- /* Restore memory to default empty mapping. */
- memory = kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
- if (!memory)
- return -ENOMEM;
+ return kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
+}
+/* Caller should have device mutex */
+void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory)
+{
vhost_dev_cleanup(dev, true);
+ /* Restore memory to default empty mapping. */
memory->nregions = 0;
RCU_INIT_POINTER(dev->memory, memory);
- return 0;
}
void vhost_dev_stop(struct vhost_dev *dev)
@@ -430,9 +406,9 @@ void vhost_dev_stop(struct vhost_dev *dev)
int i;
for (i = 0; i < dev->nvqs; ++i) {
- if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
- vhost_poll_stop(&dev->vqs[i].poll);
- vhost_poll_flush(&dev->vqs[i].poll);
+ if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
+ vhost_poll_stop(&dev->vqs[i]->poll);
+ vhost_poll_flush(&dev->vqs[i]->poll);
}
}
}
@@ -443,17 +419,17 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
int i;
for (i = 0; i < dev->nvqs; ++i) {
- if (dev->vqs[i].error_ctx)
- eventfd_ctx_put(dev->vqs[i].error_ctx);
- if (dev->vqs[i].error)
- fput(dev->vqs[i].error);
- if (dev->vqs[i].kick)
- fput(dev->vqs[i].kick);
- if (dev->vqs[i].call_ctx)
- eventfd_ctx_put(dev->vqs[i].call_ctx);
- if (dev->vqs[i].call)
- fput(dev->vqs[i].call);
- vhost_vq_reset(dev, dev->vqs + i);
+ if (dev->vqs[i]->error_ctx)
+ eventfd_ctx_put(dev->vqs[i]->error_ctx);
+ if (dev->vqs[i]->error)
+ fput(dev->vqs[i]->error);
+ if (dev->vqs[i]->kick)
+ fput(dev->vqs[i]->kick);
+ if (dev->vqs[i]->call_ctx)
+ eventfd_ctx_put(dev->vqs[i]->call_ctx);
+ if (dev->vqs[i]->call)
+ fput(dev->vqs[i]->call);
+ vhost_vq_reset(dev, dev->vqs[i]);
}
vhost_dev_free_iovecs(dev);
if (dev->log_ctx)
@@ -524,14 +500,14 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
for (i = 0; i < d->nvqs; ++i) {
int ok;
- mutex_lock(&d->vqs[i].mutex);
+ mutex_lock(&d->vqs[i]->mutex);
/* If ring is inactive, will check when it's enabled. */
- if (d->vqs[i].private_data)
- ok = vq_memory_access_ok(d->vqs[i].log_base, mem,
+ if (d->vqs[i]->private_data)
+ ok = vq_memory_access_ok(d->vqs[i]->log_base, mem,
log_all);
else
ok = 1;
- mutex_unlock(&d->vqs[i].mutex);
+ mutex_unlock(&d->vqs[i]->mutex);
if (!ok)
return 0;
}
@@ -641,7 +617,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
if (idx >= d->nvqs)
return -ENOBUFS;
- vq = d->vqs + idx;
+ vq = d->vqs[idx];
mutex_lock(&vq->mutex);
@@ -852,7 +828,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
for (i = 0; i < d->nvqs; ++i) {
struct vhost_virtqueue *vq;
void __user *base = (void __user *)(unsigned long)p;
- vq = d->vqs + i;
+ vq = d->vqs[i];
mutex_lock(&vq->mutex);
/* If ring is inactive, will check when it's enabled. */
if (vq->private_data && !vq_log_access_ok(d, vq, base))
@@ -879,9 +855,9 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
} else
filep = eventfp;
for (i = 0; i < d->nvqs; ++i) {
- mutex_lock(&d->vqs[i].mutex);
- d->vqs[i].log_ctx = d->log_ctx;
- mutex_unlock(&d->vqs[i].mutex);
+ mutex_lock(&d->vqs[i]->mutex);
+ d->vqs[i]->log_ctx = d->log_ctx;
+ mutex_unlock(&d->vqs[i]->mutex);
}
if (ctx)
eventfd_ctx_put(ctx);
@@ -1551,38 +1527,3 @@ void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
&vq->used->flags, r);
}
}
-
-static void vhost_zerocopy_done_signal(struct kref *kref)
-{
- struct vhost_ubuf_ref *ubufs = container_of(kref, struct vhost_ubuf_ref,
- kref);
- wake_up(&ubufs->wait);
-}
-
-struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *vq,
- bool zcopy)
-{
- struct vhost_ubuf_ref *ubufs;
- /* No zero copy backend? Nothing to count. */
- if (!zcopy)
- return NULL;
- ubufs = kmalloc(sizeof *ubufs, GFP_KERNEL);
- if (!ubufs)
- return ERR_PTR(-ENOMEM);
- kref_init(&ubufs->kref);
- init_waitqueue_head(&ubufs->wait);
- ubufs->vq = vq;
- return ubufs;
-}
-
-void vhost_ubuf_put(struct vhost_ubuf_ref *ubufs)
-{
- kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
-}
-
-void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs)
-{
- kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
- wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
- kfree(ubufs);
-}
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 17261e2..a7ad635 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -54,18 +54,6 @@ struct vhost_log {
struct vhost_virtqueue;
-struct vhost_ubuf_ref {
- struct kref kref;
- wait_queue_head_t wait;
- struct vhost_virtqueue *vq;
-};
-
-struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *, bool zcopy);
-void vhost_ubuf_put(struct vhost_ubuf_ref *);
-void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *);
-
-struct ubuf_info;
-
/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
struct vhost_dev *dev;
@@ -111,13 +99,7 @@ struct vhost_virtqueue {
u64 log_addr;
struct iovec iov[UIO_MAXIOV];
- /* hdr is used to store the virtio header.
- * Since each iovec has >= 1 byte length, we never need more than
- * header length entries to store the header. */
- struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
struct iovec *indirect;
- size_t vhost_hlen;
- size_t sock_hlen;
struct vring_used_elem *heads;
/* We use a kind of RCU to access private pointer.
* All readers access it from worker, which makes it possible to
@@ -130,16 +112,6 @@ struct vhost_virtqueue {
/* Log write descriptors */
void __user *log_base;
struct vhost_log *log;
- /* vhost zerocopy support fields below: */
- /* last used idx for outstanding DMA zerocopy buffers */
- int upend_idx;
- /* first used idx for DMA done zerocopy buffers */
- int done_idx;
- /* an array of userspace buffers info */
- struct ubuf_info *ubuf_info;
- /* Reference counting for outstanding ubufs.
- * Protected by vq mutex. Writers must also take device mutex. */
- struct vhost_ubuf_ref *ubufs;
};
struct vhost_dev {
@@ -150,7 +122,7 @@ struct vhost_dev {
struct mm_struct *mm;
struct mutex mutex;
unsigned acked_features;
- struct vhost_virtqueue *vqs;
+ struct vhost_virtqueue **vqs;
int nvqs;
struct file *log_file;
struct eventfd_ctx *log_ctx;
@@ -159,9 +131,11 @@ struct vhost_dev {
struct task_struct *worker;
};
-long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *vqs, int nvqs);
+long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
+long vhost_dev_set_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
-long vhost_dev_reset_owner(struct vhost_dev *);
+struct vhost_memory *vhost_dev_reset_owner_prepare(void);
+void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_memory *);
void vhost_dev_cleanup(struct vhost_dev *, bool locked);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
@@ -201,9 +175,6 @@ enum {
(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
(1ULL << VIRTIO_RING_F_EVENT_IDX) |
(1ULL << VHOST_F_LOG_ALL),
- VHOST_NET_FEATURES = VHOST_FEATURES |
- (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
- (1ULL << VIRTIO_NET_F_MRG_RXBUF),
};
static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
@@ -215,7 +186,4 @@ static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
acked_features = rcu_dereference_index_check(dev->acked_features, 1);
return acked_features & (1 << bit);
}
-
-void vhost_enable_zcopy(int vq);
-
#endif
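
[editor's note] The VHOST_RESET_OWNER path is now split into a prepare step that performs the allocation (and can fail) and a commit step that cannot, so the destructive reset never fails halfway. A small sketch of that prepare/commit split; the names here are illustrative, not the kernel API.

/* Illustrative split mirroring vhost_dev_reset_owner_prepare() plus
 * vhost_dev_reset_owner(): allocate first, then commit without failing. */
#include <stdlib.h>

struct memory_map {
	unsigned nregions;
};

struct dev {
	struct memory_map *memory;
};

/* Step 1: allocate replacement state; may fail, nothing changed yet. */
static struct memory_map *dev_reset_prepare(void)
{
	return malloc(sizeof(struct memory_map));
}

/* Step 2: tear down and install the preallocated empty mapping; no failure path. */
static void dev_reset_commit(struct dev *d, struct memory_map *memory)
{
	free(d->memory);               /* stands in for vhost_dev_cleanup() */
	memory->nregions = 0;
	d->memory = memory;
}

static long dev_reset_owner(struct dev *d)
{
	struct memory_map *memory = dev_reset_prepare();

	if (!memory)
		return -1;             /* -ENOMEM in the kernel version */
	dev_reset_commit(d, memory);
	return 0;
}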
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
new file mode 100644
index 0000000..bff0775
--- /dev/null
+++ b/drivers/vhost/vringh.c
@@ -0,0 +1,1007 @@
+/*
+ * Helpers for the host side of a virtio ring.
+ *
+ * Since these may be in userspace, we use (inline) accessors.
+ */
+#include <linux/vringh.h>
+#include <linux/virtio_ring.h>
+#include <linux/kernel.h>
+#include <linux/ratelimit.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
+{
+ static DEFINE_RATELIMIT_STATE(vringh_rs,
+ DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+ if (__ratelimit(&vringh_rs)) {
+ va_list ap;
+ va_start(ap, fmt);
+ printk(KERN_NOTICE "vringh:");
+ vprintk(fmt, ap);
+ va_end(ap);
+ }
+}
+
+/* Returns vring->num if empty, -ve on error. */
+static inline int __vringh_get_head(const struct vringh *vrh,
+ int (*getu16)(u16 *val, const u16 *p),
+ u16 *last_avail_idx)
+{
+ u16 avail_idx, i, head;
+ int err;
+
+ err = getu16(&avail_idx, &vrh->vring.avail->idx);
+ if (err) {
+ vringh_bad("Failed to access avail idx at %p",
+ &vrh->vring.avail->idx);
+ return err;
+ }
+
+ if (*last_avail_idx == avail_idx)
+ return vrh->vring.num;
+
+ /* Only get avail ring entries after they have been exposed by guest. */
+ virtio_rmb(vrh->weak_barriers);
+
+ i = *last_avail_idx & (vrh->vring.num - 1);
+
+ err = getu16(&head, &vrh->vring.avail->ring[i]);
+ if (err) {
+ vringh_bad("Failed to read head: idx %d address %p",
+ *last_avail_idx, &vrh->vring.avail->ring[i]);
+ return err;
+ }
+
+ if (head >= vrh->vring.num) {
+ vringh_bad("Guest says index %u > %u is available",
+ head, vrh->vring.num);
+ return -EINVAL;
+ }
+
+ (*last_avail_idx)++;
+ return head;
+}
+
+/* Copy some bytes to/from the iovec. Returns num copied. */
+static inline ssize_t vringh_iov_xfer(struct vringh_kiov *iov,
+ void *ptr, size_t len,
+ int (*xfer)(void *addr, void *ptr,
+ size_t len))
+{
+ int err, done = 0;
+
+ while (len && iov->i < iov->used) {
+ size_t partlen;
+
+ partlen = min(iov->iov[iov->i].iov_len, len);
+ err = xfer(iov->iov[iov->i].iov_base, ptr, partlen);
+ if (err)
+ return err;
+ done += partlen;
+ len -= partlen;
+ ptr += partlen;
+ iov->consumed += partlen;
+ iov->iov[iov->i].iov_len -= partlen;
+ iov->iov[iov->i].iov_base += partlen;
+
+ if (!iov->iov[iov->i].iov_len) {
+ /* Fix up old iov element then increment. */
+ iov->iov[iov->i].iov_len = iov->consumed;
+ iov->iov[iov->i].iov_base -= iov->consumed;
+
+ iov->consumed = 0;
+ iov->i++;
+ }
+ }
+ return done;
+}
+
+/* May reduce *len if range is shorter. */
+static inline bool range_check(struct vringh *vrh, u64 addr, size_t *len,
+ struct vringh_range *range,
+ bool (*getrange)(struct vringh *,
+ u64, struct vringh_range *))
+{
+ if (addr < range->start || addr > range->end_incl) {
+ if (!getrange(vrh, addr, range))
+ return false;
+ }
+ BUG_ON(addr < range->start || addr > range->end_incl);
+
+ /* To end of memory? */
+ if (unlikely(addr + *len == 0)) {
+ if (range->end_incl == -1ULL)
+ return true;
+ goto truncate;
+ }
+
+ /* Otherwise, don't wrap. */
+ if (addr + *len < addr) {
+ vringh_bad("Wrapping descriptor %zu@0x%llx",
+ *len, (unsigned long long)addr);
+ return false;
+ }
+
+ if (unlikely(addr + *len - 1 > range->end_incl))
+ goto truncate;
+ return true;
+
+truncate:
+ *len = range->end_incl + 1 - addr;
+ return true;
+}
+
+static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len,
+ struct vringh_range *range,
+ bool (*getrange)(struct vringh *,
+ u64, struct vringh_range *))
+{
+ return true;
+}
+
+/* No reason for this code to be inline. */
+static int move_to_indirect(int *up_next, u16 *i, void *addr,
+ const struct vring_desc *desc,
+ struct vring_desc **descs, int *desc_max)
+{
+ /* Indirect tables can't have indirect. */
+ if (*up_next != -1) {
+ vringh_bad("Multilevel indirect %u->%u", *up_next, *i);
+ return -EINVAL;
+ }
+
+ if (unlikely(desc->len % sizeof(struct vring_desc))) {
+ vringh_bad("Strange indirect len %u", desc->len);
+ return -EINVAL;
+ }
+
+ /* We will check this when we follow it! */
+ if (desc->flags & VRING_DESC_F_NEXT)
+ *up_next = desc->next;
+ else
+ *up_next = -2;
+ *descs = addr;
+ *desc_max = desc->len / sizeof(struct vring_desc);
+
+ /* Now, start at the first indirect. */
+ *i = 0;
+ return 0;
+}
+
+static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp)
+{
+ struct kvec *new;
+ unsigned int flag, new_num = (iov->max_num & ~VRINGH_IOV_ALLOCATED) * 2;
+
+ if (new_num < 8)
+ new_num = 8;
+
+ flag = (iov->max_num & VRINGH_IOV_ALLOCATED);
+ if (flag)
+ new = krealloc(iov->iov, new_num * sizeof(struct iovec), gfp);
+ else {
+ new = kmalloc(new_num * sizeof(struct iovec), gfp);
+ if (new) {
+ memcpy(new, iov->iov,
+ iov->max_num * sizeof(struct iovec));
+ flag = VRINGH_IOV_ALLOCATED;
+ }
+ }
+ if (!new)
+ return -ENOMEM;
+ iov->iov = new;
+ iov->max_num = (new_num | flag);
+ return 0;
+}
+
+static u16 __cold return_from_indirect(const struct vringh *vrh, int *up_next,
+ struct vring_desc **descs, int *desc_max)
+{
+ u16 i = *up_next;
+
+ *up_next = -1;
+ *descs = vrh->vring.desc;
+ *desc_max = vrh->vring.num;
+ return i;
+}
+
+static int slow_copy(struct vringh *vrh, void *dst, const void *src,
+ bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
+ struct vringh_range *range,
+ bool (*getrange)(struct vringh *vrh,
+ u64,
+ struct vringh_range *)),
+ bool (*getrange)(struct vringh *vrh,
+ u64 addr,
+ struct vringh_range *r),
+ struct vringh_range *range,
+ int (*copy)(void *dst, const void *src, size_t len))
+{
+ size_t part, len = sizeof(struct vring_desc);
+
+ do {
+ u64 addr;
+ int err;
+
+ part = len;
+ addr = (u64)(unsigned long)src - range->offset;
+
+ if (!rcheck(vrh, addr, &part, range, getrange))
+ return -EINVAL;
+
+ err = copy(dst, src, part);
+ if (err)
+ return err;
+
+ dst += part;
+ src += part;
+ len -= part;
+ } while (len);
+ return 0;
+}
+
+static inline int
+__vringh_iov(struct vringh *vrh, u16 i,
+ struct vringh_kiov *riov,
+ struct vringh_kiov *wiov,
+ bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
+ struct vringh_range *range,
+ bool (*getrange)(struct vringh *, u64,
+ struct vringh_range *)),
+ bool (*getrange)(struct vringh *, u64, struct vringh_range *),
+ gfp_t gfp,
+ int (*copy)(void *dst, const void *src, size_t len))
+{
+ int err, count = 0, up_next, desc_max;
+ struct vring_desc desc, *descs;
+ struct vringh_range range = { -1ULL, 0 }, slowrange;
+ bool slow = false;
+
+ /* We start traversing vring's descriptor table. */
+ descs = vrh->vring.desc;
+ desc_max = vrh->vring.num;
+ up_next = -1;
+
+ if (riov)
+ riov->i = riov->used = 0;
+ else if (wiov)
+ wiov->i = wiov->used = 0;
+ else
+ /* You must want something! */
+ BUG();
+
+ for (;;) {
+ void *addr;
+ struct vringh_kiov *iov;
+ size_t len;
+
+ if (unlikely(slow))
+ err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
+ &slowrange, copy);
+ else
+ err = copy(&desc, &descs[i], sizeof(desc));
+ if (unlikely(err))
+ goto fail;
+
+ if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
+ /* Make sure it's OK, and get offset. */
+ len = desc.len;
+ if (!rcheck(vrh, desc.addr, &len, &range, getrange)) {
+ err = -EINVAL;
+ goto fail;
+ }
+
+ if (unlikely(len != desc.len)) {
+ slow = true;
+ /* We need to save this range to use offset */
+ slowrange = range;
+ }
+
+ addr = (void *)(long)(desc.addr + range.offset);
+ err = move_to_indirect(&up_next, &i, addr, &desc,
+ &descs, &desc_max);
+ if (err)
+ goto fail;
+ continue;
+ }
+
+ if (count++ == vrh->vring.num) {
+ vringh_bad("Descriptor loop in %p", descs);
+ err = -ELOOP;
+ goto fail;
+ }
+
+ if (desc.flags & VRING_DESC_F_WRITE)
+ iov = wiov;
+ else {
+ iov = riov;
+ if (unlikely(wiov && wiov->i)) {
+ vringh_bad("Readable desc %p after writable",
+ &descs[i]);
+ err = -EINVAL;
+ goto fail;
+ }
+ }
+
+ if (!iov) {
+ vringh_bad("Unexpected %s desc",
+ !wiov ? "writable" : "readable");
+ err = -EPROTO;
+ goto fail;
+ }
+
+ again:
+ /* Make sure it's OK, and get offset. */
+ len = desc.len;
+ if (!rcheck(vrh, desc.addr, &len, &range, getrange)) {
+ err = -EINVAL;
+ goto fail;
+ }
+ addr = (void *)(unsigned long)(desc.addr + range.offset);
+
+ if (unlikely(iov->used == (iov->max_num & ~VRINGH_IOV_ALLOCATED))) {
+ err = resize_iovec(iov, gfp);
+ if (err)
+ goto fail;
+ }
+
+ iov->iov[iov->used].iov_base = addr;
+ iov->iov[iov->used].iov_len = len;
+ iov->used++;
+
+ if (unlikely(len != desc.len)) {
+ desc.len -= len;
+ desc.addr += len;
+ goto again;
+ }
+
+ if (desc.flags & VRING_DESC_F_NEXT) {
+ i = desc.next;
+ } else {
+ /* Just in case we need to finish traversing above. */
+ if (unlikely(up_next > 0)) {
+ i = return_from_indirect(vrh, &up_next,
+ &descs, &desc_max);
+ slow = false;
+ } else
+ break;
+ }
+
+ if (i >= desc_max) {
+ vringh_bad("Chained index %u > %u", i, desc_max);
+ err = -EINVAL;
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ return err;
+}
+
+static inline int __vringh_complete(struct vringh *vrh,
+ const struct vring_used_elem *used,
+ unsigned int num_used,
+ int (*putu16)(u16 *p, u16 val),
+ int (*putused)(struct vring_used_elem *dst,
+ const struct vring_used_elem
+ *src, unsigned num))
+{
+ struct vring_used *used_ring;
+ int err;
+ u16 used_idx, off;
+
+ used_ring = vrh->vring.used;
+ used_idx = vrh->last_used_idx + vrh->completed;
+
+ off = used_idx % vrh->vring.num;
+
+ /* Compiler knows num_used == 1 sometimes, hence extra check */
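+ /* e.g. with num = 8, off = 6 and num_used = 3: two entries land in
+ * ring[6..7] and the third wraps around to ring[0]. */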
+ if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
+ u16 part = vrh->vring.num - off;
+ err = putused(&used_ring->ring[off], used, part);
+ if (!err)
+ err = putused(&used_ring->ring[0], used + part,
+ num_used - part);
+ } else
+ err = putused(&used_ring->ring[off], used, num_used);
+
+ if (err) {
+ vringh_bad("Failed to write %u used entries %u at %p",
+ num_used, off, &used_ring->ring[off]);
+ return err;
+ }
+
+ /* Make sure buffer is written before we update index. */
+ virtio_wmb(vrh->weak_barriers);
+
+ err = putu16(&vrh->vring.used->idx, used_idx + num_used);
+ if (err) {
+ vringh_bad("Failed to update used index at %p",
+ &vrh->vring.used->idx);
+ return err;
+ }
+
+ vrh->completed += num_used;
+ return 0;
+}
+
+
+static inline int __vringh_need_notify(struct vringh *vrh,
+ int (*getu16)(u16 *val, const u16 *p))
+{
+ bool notify;
+ u16 used_event;
+ int err;
+
+ /* Flush out used index update. This is paired with the
+ * barrier that the Guest executes when enabling
+ * interrupts. */
+ virtio_mb(vrh->weak_barriers);
+
+ /* Old-style, without event indices. */
+ if (!vrh->event_indices) {
+ u16 flags;
+ err = getu16(&flags, &vrh->vring.avail->flags);
+ if (err) {
+ vringh_bad("Failed to get flags at %p",
+ &vrh->vring.avail->flags);
+ return err;
+ }
+ return (!(flags & VRING_AVAIL_F_NO_INTERRUPT));
+ }
+
+ /* Modern: we know when other side wants to know. */
+ err = getu16(&used_event, &vring_used_event(&vrh->vring));
+ if (err) {
+ vringh_bad("Failed to get used event idx at %p",
+ &vring_used_event(&vrh->vring));
+ return err;
+ }
+
+ /* Just in case we added so many that we wrap. */
+ if (unlikely(vrh->completed > 0xffff))
+ notify = true;
+ else
+ notify = vring_need_event(used_event,
+ vrh->last_used_idx + vrh->completed,
+ vrh->last_used_idx);
+
+ vrh->last_used_idx += vrh->completed;
+ vrh->completed = 0;
+ return notify;
+}
+
+static inline bool __vringh_notify_enable(struct vringh *vrh,
+ int (*getu16)(u16 *val, const u16 *p),
+ int (*putu16)(u16 *p, u16 val))
+{
+ u16 avail;
+
+ if (!vrh->event_indices) {
+ /* Old-school; update flags. */
+ if (putu16(&vrh->vring.used->flags, 0) != 0) {
+ vringh_bad("Clearing used flags %p",
+ &vrh->vring.used->flags);
+ return true;
+ }
+ } else {
+ if (putu16(&vring_avail_event(&vrh->vring),
+ vrh->last_avail_idx) != 0) {
+ vringh_bad("Updating avail event index %p",
+ &vring_avail_event(&vrh->vring));
+ return true;
+ }
+ }
+
+ /* They could have slipped one in as we were doing that: make
+ * sure it's written, then check again. */
+ virtio_mb(vrh->weak_barriers);
+
+ if (getu16(&avail, &vrh->vring.avail->idx) != 0) {
+ vringh_bad("Failed to check avail idx at %p",
+ &vrh->vring.avail->idx);
+ return true;
+ }
+
+ /* This is unlikely, so we just leave notifications enabled
+ * (if we're using event_indices, we'll only get one
+ * notification anyway). */
+ return avail == vrh->last_avail_idx;
+}
+
+static inline void __vringh_notify_disable(struct vringh *vrh,
+ int (*putu16)(u16 *p, u16 val))
+{
+ if (!vrh->event_indices) {
+ /* Old-school; update flags. */
+ if (putu16(&vrh->vring.used->flags, VRING_USED_F_NO_NOTIFY)) {
+ vringh_bad("Setting used flags %p",
+ &vrh->vring.used->flags);
+ }
+ }
+}
+
+/* Userspace access helpers: in this case, addresses are really userspace. */
+static inline int getu16_user(u16 *val, const u16 *p)
+{
+ return get_user(*val, (__force u16 __user *)p);
+}
+
+static inline int putu16_user(u16 *p, u16 val)
+{
+ return put_user(val, (__force u16 __user *)p);
+}
+
+static inline int copydesc_user(void *dst, const void *src, size_t len)
+{
+ return copy_from_user(dst, (__force void __user *)src, len) ?
+ -EFAULT : 0;
+}
+
+static inline int putused_user(struct vring_used_elem *dst,
+ const struct vring_used_elem *src,
+ unsigned int num)
+{
+ return copy_to_user((__force void __user *)dst, src,
+ sizeof(*dst) * num) ? -EFAULT : 0;
+}
+
+static inline int xfer_from_user(void *src, void *dst, size_t len)
+{
+ return copy_from_user(dst, (__force void __user *)src, len) ?
+ -EFAULT : 0;
+}
+
+static inline int xfer_to_user(void *dst, void *src, size_t len)
+{
+ return copy_to_user((__force void __user *)dst, src, len) ?
+ -EFAULT : 0;
+}
+
+/**
+ * vringh_init_user - initialize a vringh for a userspace vring.
+ * @vrh: the vringh to initialize.
+ * @features: the feature bits for this ring.
+ * @num: the number of elements.
+ * @weak_barriers: true if we only need memory barriers, not I/O.
+ * @desc: the userspace descriptor pointer.
+ * @avail: the userspace avail pointer.
+ * @used: the userspace used pointer.
+ *
+ * Returns an error if num is invalid: you should check pointers
+ * yourself!
+ */
+int vringh_init_user(struct vringh *vrh, u32 features,
+ unsigned int num, bool weak_barriers,
+ struct vring_desc __user *desc,
+ struct vring_avail __user *avail,
+ struct vring_used __user *used)
+{
+ /* Sane power of 2 please! */
+ if (!num || num > 0xffff || (num & (num - 1))) {
+ vringh_bad("Bad ring size %u", num);
+ return -EINVAL;
+ }
+
+ vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
+ vrh->weak_barriers = weak_barriers;
+ vrh->completed = 0;
+ vrh->last_avail_idx = 0;
+ vrh->last_used_idx = 0;
+ vrh->vring.num = num;
+ /* vring expects kernel addresses, but only used via accessors. */
+ vrh->vring.desc = (__force struct vring_desc *)desc;
+ vrh->vring.avail = (__force struct vring_avail *)avail;
+ vrh->vring.used = (__force struct vring_used *)used;
+ return 0;
+}
+EXPORT_SYMBOL(vringh_init_user);
+
+/**
+ * vringh_getdesc_user - get next available descriptor from userspace ring.
+ * @vrh: the userspace vring.
+ * @riov: where to put the readable descriptors (or NULL)
+ * @wiov: where to put the writable descriptors (or NULL)
+ * @getrange: function to call to check ranges.
+ * @head: head index we received, for passing to vringh_complete_user().
+ *
+ * Returns 0 if there was no descriptor, 1 if there was, or -errno.
+ *
+ * Note that on error return, you can tell the difference between an
+ * invalid ring and a single invalid descriptor: in the former case,
+ * *head will be vrh->vring.num. You may be able to ignore an invalid
+ * descriptor, but there's not much you can do with an invalid ring.
+ *
+ * Note that you may need to clean up riov and wiov, even on error!
+ */
+int vringh_getdesc_user(struct vringh *vrh,
+ struct vringh_iov *riov,
+ struct vringh_iov *wiov,
+ bool (*getrange)(struct vringh *vrh,
+ u64 addr, struct vringh_range *r),
+ u16 *head)
+{
+ int err;
+
+ *head = vrh->vring.num;
+ err = __vringh_get_head(vrh, getu16_user, &vrh->last_avail_idx);
+ if (err < 0)
+ return err;
+
+ /* Empty... */
+ if (err == vrh->vring.num)
+ return 0;
+
+ /* We need the layouts to be identical for this to work */
+ BUILD_BUG_ON(sizeof(struct vringh_kiov) != sizeof(struct vringh_iov));
+ BUILD_BUG_ON(offsetof(struct vringh_kiov, iov) !=
+ offsetof(struct vringh_iov, iov));
+ BUILD_BUG_ON(offsetof(struct vringh_kiov, i) !=
+ offsetof(struct vringh_iov, i));
+ BUILD_BUG_ON(offsetof(struct vringh_kiov, used) !=
+ offsetof(struct vringh_iov, used));
+ BUILD_BUG_ON(offsetof(struct vringh_kiov, max_num) !=
+ offsetof(struct vringh_iov, max_num));
+ BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
+ BUILD_BUG_ON(offsetof(struct iovec, iov_base) !=
+ offsetof(struct kvec, iov_base));
+ BUILD_BUG_ON(offsetof(struct iovec, iov_len) !=
+ offsetof(struct kvec, iov_len));
+ BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_base)
+ != sizeof(((struct kvec *)NULL)->iov_base));
+ BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_len)
+ != sizeof(((struct kvec *)NULL)->iov_len));
+
+ *head = err;
+ err = __vringh_iov(vrh, *head, (struct vringh_kiov *)riov,
+ (struct vringh_kiov *)wiov,
+ range_check, getrange, GFP_KERNEL, copydesc_user);
+ if (err)
+ return err;
+
+ return 1;
+}
+EXPORT_SYMBOL(vringh_getdesc_user);
+
+/**
+ * vringh_iov_pull_user - copy bytes from vringh_iov.
+ * @riov: the riov as passed to vringh_getdesc_user() (updated as we consume)
+ * @dst: the buffer to copy into.
+ * @len: the maximum length to copy.
+ *
+ * Returns the bytes copied <= len or a negative errno.
+ */
+ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len)
+{
+ return vringh_iov_xfer((struct vringh_kiov *)riov,
+ dst, len, xfer_from_user);
+}
+EXPORT_SYMBOL(vringh_iov_pull_user);
+
+/**
+ * vringh_iov_push_user - copy bytes into vringh_iov.
+ * @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume)
+ * @src: the buffer to copy from.
+ * @len: the maximum length to copy.
+ *
+ * Returns the bytes copied <= len or a negative errno.
+ */
+ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
+ const void *src, size_t len)
+{
+ return vringh_iov_xfer((struct vringh_kiov *)wiov,
+ (void *)src, len, xfer_to_user);
+}
+EXPORT_SYMBOL(vringh_iov_push_user);
+
+/**
+ * vringh_abandon_user - we've decided not to handle the descriptor(s).
+ * @vrh: the vring.
+ * @num: the number of descriptors to put back (i.e. the number of
+ * vringh_getdesc_user() calls to undo).
+ *
+ * The next vringh_getdesc_user() will return the old descriptor(s) again.
+ */
+void vringh_abandon_user(struct vringh *vrh, unsigned int num)
+{
+ /* We only update vring_avail_event(vr) when we want to be notified,
+ * so we haven't changed that yet. */
+ vrh->last_avail_idx -= num;
+}
+EXPORT_SYMBOL(vringh_abandon_user);
+
+/**
+ * vringh_complete_user - we've finished with a descriptor, publish it.
+ * @vrh: the vring.
+ * @head: the head as filled in by vringh_getdesc_user().
+ * @len: the length of data we have written.
+ *
+ * You should check vringh_need_notify_user() after one or more calls
+ * to this function.
+ */
+int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
+{
+ struct vring_used_elem used;
+
+ used.id = head;
+ used.len = len;
+ return __vringh_complete(vrh, &used, 1, putu16_user, putused_user);
+}
+EXPORT_SYMBOL(vringh_complete_user);
+
+/**
+ * vringh_complete_multi_user - we've finished with many descriptors.
+ * @vrh: the vring.
+ * @used: the head, length pairs.
+ * @num_used: the number of used elements.
+ *
+ * You should check vringh_need_notify_user() after one or more calls
+ * to this function.
+ */
+int vringh_complete_multi_user(struct vringh *vrh,
+ const struct vring_used_elem used[],
+ unsigned num_used)
+{
+ return __vringh_complete(vrh, used, num_used,
+ putu16_user, putused_user);
+}
+EXPORT_SYMBOL(vringh_complete_multi_user);
+
+/**
+ * vringh_notify_enable_user - we want to know if something changes.
+ * @vrh: the vring.
+ *
+ * This always enables notifications, but returns false if there are
+ * now more buffers available in the vring.
+ */
+bool vringh_notify_enable_user(struct vringh *vrh)
+{
+ return __vringh_notify_enable(vrh, getu16_user, putu16_user);
+}
+EXPORT_SYMBOL(vringh_notify_enable_user);
+
+/**
+ * vringh_notify_disable_user - don't tell us if something changes.
+ * @vrh: the vring.
+ *
+ * This is our normal running state: we disable and then only enable when
+ * we're going to sleep.
+ */
+void vringh_notify_disable_user(struct vringh *vrh)
+{
+ __vringh_notify_disable(vrh, putu16_user);
+}
+EXPORT_SYMBOL(vringh_notify_disable_user);
+
+/**
+ * vringh_need_notify_user - must we tell the other side about used buffers?
+ * @vrh: the vring we've called vringh_complete_user() on.
+ *
+ * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
+ */
+int vringh_need_notify_user(struct vringh *vrh)
+{
+ return __vringh_need_notify(vrh, getu16_user);
+}
+EXPORT_SYMBOL(vringh_need_notify_user);
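+
+/*
+ * A minimal usage sketch (not part of the patch itself): one way a host-side
+ * server could drive the userspace API above, assuming vrh has already been
+ * set up with vringh_init_user(). my_getrange(), notify_guest() and the
+ * stack-sized iov arrays are illustrative assumptions, not part of this file.
+ *
+ *   struct iovec r_stack[8], w_stack[8];
+ *   struct vringh_iov riov = { .iov = r_stack, .max_num = 8 };
+ *   struct vringh_iov wiov = { .iov = w_stack, .max_num = 8 };
+ *   u16 head;
+ *   int err = vringh_getdesc_user(&vrh, &riov, &wiov, my_getrange, &head);
+ *
+ *   if (err == 1) {
+ *           char buf[256], reply[] = "done";
+ *           ssize_t got = vringh_iov_pull_user(&riov, buf, sizeof(buf));
+ *           ssize_t put = vringh_iov_push_user(&wiov, reply, sizeof(reply));
+ *
+ *           if (got >= 0 && put >= 0) {
+ *                   vringh_complete_user(&vrh, head, put);
+ *                   if (vringh_need_notify_user(&vrh) > 0)
+ *                           notify_guest();
+ *           }
+ *   }
+ *
+ * If __vringh_iov() had to grow an iov (max_num gains VRINGH_IOV_ALLOCATED),
+ * the reallocated array must eventually be freed by the caller.
+ */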
+
+/* Kernelspace access helpers. */
+static inline int getu16_kern(u16 *val, const u16 *p)
+{
+ *val = ACCESS_ONCE(*p);
+ return 0;
+}
+
+static inline int putu16_kern(u16 *p, u16 val)
+{
+ ACCESS_ONCE(*p) = val;
+ return 0;
+}
+
+static inline int copydesc_kern(void *dst, const void *src, size_t len)
+{
+ memcpy(dst, src, len);
+ return 0;
+}
+
+static inline int putused_kern(struct vring_used_elem *dst,
+ const struct vring_used_elem *src,
+ unsigned int num)
+{
+ memcpy(dst, src, num * sizeof(*dst));
+ return 0;
+}
+
+static inline int xfer_kern(void *src, void *dst, size_t len)
+{
+ memcpy(dst, src, len);
+ return 0;
+}
+
+/**
+ * vringh_init_kern - initialize a vringh for a kernelspace vring.
+ * @vrh: the vringh to initialize.
+ * @features: the feature bits for this ring.
+ * @num: the number of elements.
+ * @weak_barriers: true if we only need memory barriers, not I/O.
+ * @desc: the kernelspace descriptor pointer.
+ * @avail: the kernelspace avail pointer.
+ * @used: the kernelspace used pointer.
+ *
+ * Returns an error if num is invalid.
+ */
+int vringh_init_kern(struct vringh *vrh, u32 features,
+ unsigned int num, bool weak_barriers,
+ struct vring_desc *desc,
+ struct vring_avail *avail,
+ struct vring_used *used)
+{
+ /* Sane power of 2 please! */
+ if (!num || num > 0xffff || (num & (num - 1))) {
+ vringh_bad("Bad ring size %u", num);
+ return -EINVAL;
+ }
+
+ vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
+ vrh->weak_barriers = weak_barriers;
+ vrh->completed = 0;
+ vrh->last_avail_idx = 0;
+ vrh->last_used_idx = 0;
+ vrh->vring.num = num;
+ vrh->vring.desc = desc;
+ vrh->vring.avail = avail;
+ vrh->vring.used = used;
+ return 0;
+}
+EXPORT_SYMBOL(vringh_init_kern);
+
+/**
+ * vringh_getdesc_kern - get next available descriptor from kernelspace ring.
+ * @vrh: the kernelspace vring.
+ * @riov: where to put the readable descriptors (or NULL)
+ * @wiov: where to put the writable descriptors (or NULL)
+ * @head: head index we received, for passing to vringh_complete_kern().
+ * @gfp: flags for allocating larger riov/wiov.
+ *
+ * Returns 0 if there was no descriptor, 1 if there was, or -errno.
+ *
+ * Note that on error return, you can tell the difference between an
+ * invalid ring and a single invalid descriptor: in the former case,
+ * *head will be vrh->vring.num. You may be able to ignore an invalid
+ * descriptor, but there's not much you can do with an invalid ring.
+ *
+ * Note that you may need to clean up riov and wiov, even on error!
+ */
+int vringh_getdesc_kern(struct vringh *vrh,
+ struct vringh_kiov *riov,
+ struct vringh_kiov *wiov,
+ u16 *head,
+ gfp_t gfp)
+{
+ int err;
+
+ err = __vringh_get_head(vrh, getu16_kern, &vrh->last_avail_idx);
+ if (err < 0)
+ return err;
+
+ /* Empty... */
+ if (err == vrh->vring.num)
+ return 0;
+
+ *head = err;
+ err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
+ gfp, copydesc_kern);
+ if (err)
+ return err;
+
+ return 1;
+}
+EXPORT_SYMBOL(vringh_getdesc_kern);
+
+/**
+ * vringh_iov_pull_kern - copy bytes from vringh_kiov.
+ * @riov: the riov as passed to vringh_getdesc_kern() (updated as we consume)
+ * @dst: the buffer to copy into.
+ * @len: the maximum length to copy.
+ *
+ * Returns the bytes copied <= len or a negative errno.
+ */
+ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
+{
+ return vringh_iov_xfer(riov, dst, len, xfer_kern);
+}
+EXPORT_SYMBOL(vringh_iov_pull_kern);
+
+/**
+ * vringh_iov_push_kern - copy bytes into vringh_kiov.
+ * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume)
+ * @src: the buffer to copy from.
+ * @len: the maximum length to copy.
+ *
+ * Returns the bytes copied <= len or a negative errno.
+ */
+ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
+ const void *src, size_t len)
+{
+ return vringh_iov_xfer(wiov, (void *)src, len, xfer_kern);
+}
+EXPORT_SYMBOL(vringh_iov_push_kern);
+
+/**
+ * vringh_abandon_kern - we've decided not to handle the descriptor(s).
+ * @vrh: the vring.
+ * @num: the number of descriptors to put back (i.e. the number of
+ * vringh_getdesc_kern() calls to undo).
+ *
+ * The next vringh_getdesc_kern() will return the old descriptor(s) again.
+ */
+void vringh_abandon_kern(struct vringh *vrh, unsigned int num)
+{
+ /* We only update vring_avail_event(vr) when we want to be notified,
+ * so we haven't changed that yet. */
+ vrh->last_avail_idx -= num;
+}
+EXPORT_SYMBOL(vringh_abandon_kern);
+
+/**
+ * vringh_complete_kern - we've finished with a descriptor, publish it.
+ * @vrh: the vring.
+ * @head: the head as filled in by vringh_getdesc_kern().
+ * @len: the length of data we have written.
+ *
+ * You should check vringh_need_notify_kern() after one or more calls
+ * to this function.
+ */
+int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
+{
+ struct vring_used_elem used;
+
+ used.id = head;
+ used.len = len;
+
+ return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern);
+}
+EXPORT_SYMBOL(vringh_complete_kern);
+
+/**
+ * vringh_notify_enable_kern - we want to know if something changes.
+ * @vrh: the vring.
+ *
+ * This always enables notifications, but returns false if there are
+ * now more buffers available in the vring.
+ */
+bool vringh_notify_enable_kern(struct vringh *vrh)
+{
+ return __vringh_notify_enable(vrh, getu16_kern, putu16_kern);
+}
+EXPORT_SYMBOL(vringh_notify_enable_kern);
+
+/**
+ * vringh_notify_disable_kern - don't tell us if something changes.
+ * @vrh: the vring.
+ *
+ * This is our normal running state: we disable and then only enable when
+ * we're going to sleep.
+ */
+void vringh_notify_disable_kern(struct vringh *vrh)
+{
+ __vringh_notify_disable(vrh, putu16_kern);
+}
+EXPORT_SYMBOL(vringh_notify_disable_kern);
+
+/**
+ * vringh_need_notify_kern - must we tell the other side about used buffers?
+ * @vrh: the vring we've called vringh_complete_kern() on.
+ *
+ * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
+ */
+int vringh_need_notify_kern(struct vringh *vrh)
+{
+ return __vringh_need_notify(vrh, getu16_kern);
+}
+EXPORT_SYMBOL(vringh_need_notify_kern);
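+
+/*
+ * A matching sketch for the kernelspace API (illustrative only): here the
+ * caller passes gfp flags for iov growth and the addresses are ordinary
+ * kernel pointers. kvec_buf[], process_request() and kick_other_side() are
+ * assumed names.
+ *
+ *   struct kvec kvec_buf[4];
+ *   struct vringh_kiov riov = { .iov = kvec_buf, .max_num = 4 };
+ *   u16 head;
+ *   int err = vringh_getdesc_kern(&vrh, &riov, NULL, &head, GFP_KERNEL);
+ *
+ *   if (err == 1) {
+ *           process_request(&riov);   /* e.g. via vringh_iov_pull_kern() */
+ *           vringh_complete_kern(&vrh, head, 0);
+ *           if (vringh_need_notify_kern(&vrh) > 0)
+ *                   kick_other_side();
+ *   }
+ */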
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 76be6170..c04ccdf 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -21,6 +21,8 @@ source "drivers/gpu/vga/Kconfig"
source "drivers/gpu/drm/Kconfig"
+source "drivers/gpu/host1x/Kconfig"
+
config VGASTATE
tristate
default n
@@ -2426,6 +2428,8 @@ config FB_MXS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
+ select FB_MODE_HELPERS
+ select OF_VIDEOMODE
help
Framebuffer support for the MXS SoC.
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 98348ec..540909d 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -34,6 +34,77 @@
#define ATMEL_LCDC_DMA_BURST_LEN 8 /* words */
#define ATMEL_LCDC_FIFO_SIZE 512 /* words */
+struct atmel_lcdfb_config {
+ bool have_alt_pixclock;
+ bool have_hozval;
+ bool have_intensity_bit;
+};
+
+static struct atmel_lcdfb_config at91sam9261_config = {
+ .have_hozval = true,
+ .have_intensity_bit = true,
+};
+
+static struct atmel_lcdfb_config at91sam9263_config = {
+ .have_intensity_bit = true,
+};
+
+static struct atmel_lcdfb_config at91sam9g10_config = {
+ .have_hozval = true,
+};
+
+static struct atmel_lcdfb_config at91sam9g45_config = {
+ .have_alt_pixclock = true,
+};
+
+static struct atmel_lcdfb_config at91sam9g45es_config = {
+};
+
+static struct atmel_lcdfb_config at91sam9rl_config = {
+ .have_intensity_bit = true,
+};
+
+static struct atmel_lcdfb_config at32ap_config = {
+ .have_hozval = true,
+};
+
+static const struct platform_device_id atmel_lcdfb_devtypes[] = {
+ {
+ .name = "at91sam9261-lcdfb",
+ .driver_data = (unsigned long)&at91sam9261_config,
+ }, {
+ .name = "at91sam9263-lcdfb",
+ .driver_data = (unsigned long)&at91sam9263_config,
+ }, {
+ .name = "at91sam9g10-lcdfb",
+ .driver_data = (unsigned long)&at91sam9g10_config,
+ }, {
+ .name = "at91sam9g45-lcdfb",
+ .driver_data = (unsigned long)&at91sam9g45_config,
+ }, {
+ .name = "at91sam9g45es-lcdfb",
+ .driver_data = (unsigned long)&at91sam9g45es_config,
+ }, {
+ .name = "at91sam9rl-lcdfb",
+ .driver_data = (unsigned long)&at91sam9rl_config,
+ }, {
+ .name = "at32ap-lcdfb",
+ .driver_data = (unsigned long)&at32ap_config,
+ }, {
+ /* terminator */
+ }
+};
+
+static struct atmel_lcdfb_config *
+atmel_lcdfb_get_config(struct platform_device *pdev)
+{
+ unsigned long data;
+
+ data = platform_get_device_id(pdev)->driver_data;
+
+ return (struct atmel_lcdfb_config *)data;
+}
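+
+/*
+ * Illustrative note: a board that registers a platform device named, say,
+ * "at91sam9g45-lcdfb" is matched against atmel_lcdfb_devtypes above, and
+ * atmel_lcdfb_get_config() then hands the probe routine the corresponding
+ * at91sam9g45_config.
+ */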
+
#if defined(CONFIG_ARCH_AT91)
#define ATMEL_LCDFB_FBINFO_DEFAULT (FBINFO_DEFAULT \
| FBINFO_PARTIAL_PAN_OK \
@@ -193,14 +264,16 @@ static struct fb_fix_screeninfo atmel_lcdfb_fix __initdata = {
.accel = FB_ACCEL_NONE,
};
-static unsigned long compute_hozval(unsigned long xres, unsigned long lcdcon2)
+static unsigned long compute_hozval(struct atmel_lcdfb_info *sinfo,
+ unsigned long xres)
{
+ unsigned long lcdcon2;
unsigned long value;
- if (!(cpu_is_at91sam9261() || cpu_is_at91sam9g10()
- || cpu_is_at32ap7000()))
+ if (!sinfo->config->have_hozval)
return xres;
+ lcdcon2 = lcdc_readl(sinfo, ATMEL_LCDC_LCDCON2);
value = xres;
if ((lcdcon2 & ATMEL_LCDC_DISTYPE) != ATMEL_LCDC_DISTYPE_TFT) {
/* STN display */
@@ -423,7 +496,7 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
break;
case 16:
/* Older SOCs use IBGR:555 rather than BGR:565. */
- if (sinfo->have_intensity_bit)
+ if (sinfo->config->have_intensity_bit)
var->green.length = 5;
else
var->green.length = 6;
@@ -531,7 +604,7 @@ static int atmel_lcdfb_set_par(struct fb_info *info)
/* Now, the LCDC core... */
/* Set pixel clock */
- if (cpu_is_at91sam9g45() && !cpu_is_at91sam9g45es())
+ if (sinfo->config->have_alt_pixclock)
pix_factor = 1;
clk_value_khz = clk_get_rate(sinfo->lcdc_clk) / 1000;
@@ -591,8 +664,7 @@ static int atmel_lcdfb_set_par(struct fb_info *info)
lcdc_writel(sinfo, ATMEL_LCDC_TIM2, value);
/* Horizontal value (aka line size) */
- hozval_linesz = compute_hozval(info->var.xres,
- lcdc_readl(sinfo, ATMEL_LCDC_LCDCON2));
+ hozval_linesz = compute_hozval(sinfo, info->var.xres);
/* Display size */
value = (hozval_linesz - 1) << ATMEL_LCDC_HOZVAL_OFFSET;
@@ -684,7 +756,7 @@ static int atmel_lcdfb_setcolreg(unsigned int regno, unsigned int red,
case FB_VISUAL_PSEUDOCOLOR:
if (regno < 256) {
- if (sinfo->have_intensity_bit) {
+ if (sinfo->config->have_intensity_bit) {
/* old style I+BGR:555 */
val = ((red >> 11) & 0x001f);
val |= ((green >> 6) & 0x03e0);
@@ -821,15 +893,13 @@ static int __init atmel_lcdfb_init_fbinfo(struct atmel_lcdfb_info *sinfo)
static void atmel_lcdfb_start_clock(struct atmel_lcdfb_info *sinfo)
{
- if (sinfo->bus_clk)
- clk_enable(sinfo->bus_clk);
+ clk_enable(sinfo->bus_clk);
clk_enable(sinfo->lcdc_clk);
}
static void atmel_lcdfb_stop_clock(struct atmel_lcdfb_info *sinfo)
{
- if (sinfo->bus_clk)
- clk_disable(sinfo->bus_clk);
+ clk_disable(sinfo->bus_clk);
clk_disable(sinfo->lcdc_clk);
}
@@ -874,10 +944,9 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
}
sinfo->info = info;
sinfo->pdev = pdev;
- if (cpu_is_at91sam9261() || cpu_is_at91sam9263() ||
- cpu_is_at91sam9rl()) {
- sinfo->have_intensity_bit = true;
- }
+ sinfo->config = atmel_lcdfb_get_config(pdev);
+ if (!sinfo->config)
+ goto free_info;
strcpy(info->fix.id, sinfo->pdev->name);
info->flags = ATMEL_LCDFB_FBINFO_DEFAULT;
@@ -888,13 +957,10 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
info->fix = atmel_lcdfb_fix;
/* Enable LCDC Clocks */
- if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()
- || cpu_is_at32ap7000()) {
- sinfo->bus_clk = clk_get(dev, "hck1");
- if (IS_ERR(sinfo->bus_clk)) {
- ret = PTR_ERR(sinfo->bus_clk);
- goto free_info;
- }
+ sinfo->bus_clk = clk_get(dev, "hclk");
+ if (IS_ERR(sinfo->bus_clk)) {
+ ret = PTR_ERR(sinfo->bus_clk);
+ goto free_info;
}
sinfo->lcdc_clk = clk_get(dev, "lcdc_clk");
if (IS_ERR(sinfo->lcdc_clk)) {
@@ -1055,8 +1121,7 @@ stop_clk:
atmel_lcdfb_stop_clock(sinfo);
clk_put(sinfo->lcdc_clk);
put_bus_clk:
- if (sinfo->bus_clk)
- clk_put(sinfo->bus_clk);
+ clk_put(sinfo->bus_clk);
free_info:
framebuffer_release(info);
out:
@@ -1081,8 +1146,7 @@ static int __exit atmel_lcdfb_remove(struct platform_device *pdev)
unregister_framebuffer(info);
atmel_lcdfb_stop_clock(sinfo);
clk_put(sinfo->lcdc_clk);
- if (sinfo->bus_clk)
- clk_put(sinfo->bus_clk);
+ clk_put(sinfo->bus_clk);
fb_dealloc_cmap(&info->cmap);
free_irq(sinfo->irq_base, info);
iounmap(sinfo->mmio);
@@ -1151,7 +1215,7 @@ static struct platform_driver atmel_lcdfb_driver = {
.remove = __exit_p(atmel_lcdfb_remove),
.suspend = atmel_lcdfb_suspend,
.resume = atmel_lcdfb_resume,
-
+ .id_table = atmel_lcdfb_devtypes,
.driver = {
.name = "atmel_lcdfb",
.owner = THIS_MODULE,
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index fa00304..1fea627 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -274,7 +274,7 @@ static int pwm_backlight_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int pwm_backlight_suspend(struct device *dev)
{
struct backlight_device *bl = dev_get_drvdata(dev);
@@ -296,19 +296,16 @@ static int pwm_backlight_resume(struct device *dev)
backlight_update_status(bl);
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(pwm_backlight_pm_ops, pwm_backlight_suspend,
pwm_backlight_resume);
-#endif
-
static struct platform_driver pwm_backlight_driver = {
.driver = {
.name = "pwm-backlight",
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
.pm = &pwm_backlight_pm_ops,
-#endif
.of_match_table = of_match_ptr(pwm_backlight_of_match),
},
.probe = pwm_backlight_probe,
diff --git a/drivers/video/bfin_adv7393fb.c b/drivers/video/bfin_adv7393fb.c
index 8d411a3..a54f7f7 100644
--- a/drivers/video/bfin_adv7393fb.c
+++ b/drivers/video/bfin_adv7393fb.c
@@ -333,29 +333,23 @@ static int proc_output(char *buf)
return p - buf;
}
-static int
-adv7393_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static ssize_t
+adv7393_read_proc(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
{
- int len;
-
- len = proc_output(page);
- if (len <= off + count)
- *eof = 1;
- *start = page + off;
- len -= off;
- if (len > count)
- len = count;
- if (len < 0)
- len = 0;
- return len;
+ static const char message[] = "Usage:\n"
+ "echo 0x[REG][Value] > adv7393\n"
+ "example: echo 0x1234 >adv7393\n"
+ "writes 0x34 into Register 0x12\n";
+ return simple_read_from_buffer(buf, size, ppos, message,
+ sizeof(message));
}
-static int
+static ssize_t
adv7393_write_proc(struct file *file, const char __user * buffer,
- size_t count, void *data)
+ size_t count, loff_t *ppos)
{
- struct adv7393fb_device *fbdev = data;
+ struct adv7393fb_device *fbdev = PDE_DATA(file_inode(file));
unsigned int val;
int ret;
@@ -368,6 +362,12 @@ adv7393_write_proc(struct file *file, const char __user * buffer,
return count;
}
+static const struct file_operations fops = {
+ .read = adv7393_read_proc,
+ .write = adv7393_write_proc,
+ .llseek = default_llseek,
+};
+
static int bfin_adv7393_fb_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -506,17 +506,12 @@ static int bfin_adv7393_fb_probe(struct i2c_client *client,
fbdev->info.node, fbdev->info.fix.id);
dev_info(&client->dev, "fb memory address : 0x%p\n", fbdev->fb_mem);
- entry = create_proc_entry("driver/adv7393", 0, NULL);
+ entry = proc_create_data("driver/adv7393", 0, NULL, &fops, fbdev);
if (!entry) {
dev_err(&client->dev, "unable to create /proc entry\n");
ret = -EFAULT;
goto free_fb;
}
-
- entry->read_proc = adv7393_read_proc;
- entry->write_proc = adv7393_write_proc;
- entry->data = fbdev;
-
return 0;
free_fb:
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c
index c3dbbe6..97db3ba 100644
--- a/drivers/video/cirrusfb.c
+++ b/drivers/video/cirrusfb.c
@@ -53,12 +53,6 @@
#ifdef CONFIG_AMIGA
#include <asm/amigahw.h>
#endif
-#ifdef CONFIG_PPC_PREP
-#include <asm/machdep.h>
-#define isPReP machine_is(prep)
-#else
-#define isPReP 0
-#endif
#include <video/vga.h>
#include <video/cirrus.h>
@@ -557,30 +551,18 @@ static int cirrusfb_check_var(struct fb_var_screeninfo *var,
break;
case 16:
- if (isPReP) {
- var->red.offset = 2;
- var->green.offset = -3;
- var->blue.offset = 8;
- } else {
- var->red.offset = 11;
- var->green.offset = 5;
- var->blue.offset = 0;
- }
+ var->red.offset = 11;
+ var->green.offset = 5;
+ var->blue.offset = 0;
var->red.length = 5;
var->green.length = 6;
var->blue.length = 5;
break;
case 24:
- if (isPReP) {
- var->red.offset = 0;
- var->green.offset = 8;
- var->blue.offset = 16;
- } else {
- var->red.offset = 16;
- var->green.offset = 8;
- var->blue.offset = 0;
- }
+ var->red.offset = 16;
+ var->green.offset = 8;
+ var->blue.offset = 0;
var->red.length = 8;
var->green.length = 8;
var->blue.length = 8;
@@ -1874,17 +1856,6 @@ static void cirrusfb_imageblit(struct fb_info *info,
}
}
-#ifdef CONFIG_PPC_PREP
-#define PREP_VIDEO_BASE ((volatile unsigned long) 0xC0000000)
-#define PREP_IO_BASE ((volatile unsigned char *) 0x80000000)
-static void get_prep_addrs(unsigned long *display, unsigned long *registers)
-{
- *display = PREP_VIDEO_BASE;
- *registers = (unsigned long) PREP_IO_BASE;
-}
-
-#endif /* CONFIG_PPC_PREP */
-
#ifdef CONFIG_PCI
static int release_io_ports;
@@ -2139,21 +2110,12 @@ static int cirrusfb_pci_register(struct pci_dev *pdev,
dev_dbg(info->device, " base address 1 is 0x%Lx\n",
(unsigned long long)pdev->resource[1].start);
- if (isPReP) {
- pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, 0x00000000);
-#ifdef CONFIG_PPC_PREP
- get_prep_addrs(&board_addr, &info->fix.mmio_start);
-#endif
- /* PReP dies if we ioremap the IO registers, but it works w/out... */
- cinfo->regbase = (char __iomem *) info->fix.mmio_start;
- } else {
- dev_dbg(info->device,
- "Attempt to get PCI info for Cirrus Graphics Card\n");
- get_pci_addrs(pdev, &board_addr, &info->fix.mmio_start);
- /* FIXME: this forces VGA. alternatives? */
- cinfo->regbase = NULL;
- cinfo->laguna_mmio = ioremap(info->fix.mmio_start, 0x1000);
- }
+ dev_dbg(info->device,
+ "Attempt to get PCI info for Cirrus Graphics Card\n");
+ get_pci_addrs(pdev, &board_addr, &info->fix.mmio_start);
+ /* FIXME: this forces VGA. alternatives? */
+ cinfo->regbase = NULL;
+ cinfo->laguna_mmio = ioremap(info->fix.mmio_start, 0x1000);
dev_dbg(info->device, "Board address: 0x%lx, register address: 0x%lx\n",
board_addr, info->fix.mmio_start);
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 3cd6759..a92783e 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1228,6 +1228,8 @@ static void fbcon_deinit(struct vc_data *vc)
finished:
fbcon_free_font(p, free_font);
+ if (free_font)
+ vc->vc_font.data = NULL;
if (!con_is_bound(&fb_con))
fbcon_exit();
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index dcb669e..098bfc6 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1639,6 +1639,11 @@ static int do_register_framebuffer(struct fb_info *fb_info)
if (!fb_info->modelist.prev || !fb_info->modelist.next)
INIT_LIST_HEAD(&fb_info->modelist);
+ if (fb_info->skip_vt_switch)
+ pm_vt_switch_required(fb_info->dev, false);
+ else
+ pm_vt_switch_required(fb_info->dev, true);
+
fb_var_to_videomode(&mode, &fb_info->var);
fb_add_videomode(&mode, &fb_info->modelist);
registered_fb[i] = fb_info;
@@ -1673,6 +1678,8 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
if (ret)
return -EINVAL;
+ pm_vt_switch_unregister(fb_info->dev);
+
unlink_framebuffer(fb_info);
if (fb_info->pixmap.addr &&
(fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index ab23c9b..4017833 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -1,9 +1,24 @@
/*
* Copyright (C) 2012 Avionic Design GmbH
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
*/
#include <linux/bitops.h>
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index 45169cb..1b2c26d 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -42,13 +42,15 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <video/of_display_timing.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/pinctrl/consumer.h>
-#include <linux/mxsfb.h>
+#include <linux/fb.h>
+#include <linux/regulator/consumer.h>
+#include <video/videomode.h>
#define REG_SET 4
#define REG_CLR 8
@@ -107,7 +109,7 @@
#define VDCTRL0_ENABLE_PRESENT (1 << 28)
#define VDCTRL0_VSYNC_ACT_HIGH (1 << 27)
#define VDCTRL0_HSYNC_ACT_HIGH (1 << 26)
-#define VDCTRL0_DOTCLK_ACT_FAILING (1 << 25)
+#define VDCTRL0_DOTCLK_ACT_FALLING (1 << 25)
#define VDCTRL0_ENABLE_ACT_HIGH (1 << 24)
#define VDCTRL0_VSYNC_PERIOD_UNIT (1 << 21)
#define VDCTRL0_VSYNC_PULSE_WIDTH_UNIT (1 << 20)
@@ -142,6 +144,14 @@
#define BLUE 2
#define TRANSP 3
+#define STMLCDIF_8BIT 1 /* 8-bit wide pixel data bus to the display */
+#define STMLCDIF_16BIT 0 /* 16-bit wide pixel data bus to the display */
+#define STMLCDIF_18BIT 2 /* 18-bit wide pixel data bus to the display */
+#define STMLCDIF_24BIT 3 /* 24-bit wide pixel data bus to the display */
+
+#define MXSFB_SYNC_DATA_ENABLE_HIGH_ACT (1 << 6)
+#define MXSFB_SYNC_DOTCLK_FALLING_ACT (1 << 7) /* negative edge sampling */
+
enum mxsfb_devtype {
MXSFB_V3,
MXSFB_V4,
@@ -168,8 +178,8 @@ struct mxsfb_info {
unsigned ld_intf_width;
unsigned dotclk_delay;
const struct mxsfb_devdata *devdata;
- int mapped;
u32 sync;
+ struct regulator *reg_lcd;
};
#define mxsfb_is_v3(host) (host->devdata->ipversion == 3)
@@ -329,9 +339,19 @@ static void mxsfb_enable_controller(struct fb_info *fb_info)
{
struct mxsfb_info *host = to_imxfb_host(fb_info);
u32 reg;
+ int ret;
dev_dbg(&host->pdev->dev, "%s\n", __func__);
+ if (host->reg_lcd) {
+ ret = regulator_enable(host->reg_lcd);
+ if (ret) {
+ dev_err(&host->pdev->dev,
+ "lcd regulator enable failed: %d\n", ret);
+ return;
+ }
+ }
+
clk_prepare_enable(host->clk);
clk_set_rate(host->clk, PICOS2KHZ(fb_info->var.pixclock) * 1000U);
@@ -353,6 +373,7 @@ static void mxsfb_disable_controller(struct fb_info *fb_info)
struct mxsfb_info *host = to_imxfb_host(fb_info);
unsigned loop;
u32 reg;
+ int ret;
dev_dbg(&host->pdev->dev, "%s\n", __func__);
@@ -376,6 +397,13 @@ static void mxsfb_disable_controller(struct fb_info *fb_info)
clk_disable_unprepare(host->clk);
host->enabled = 0;
+
+ if (host->reg_lcd) {
+ ret = regulator_disable(host->reg_lcd);
+ if (ret)
+ dev_err(&host->pdev->dev,
+ "lcd regulator disable failed: %d\n", ret);
+ }
}
static int mxsfb_set_par(struct fb_info *fb_info)
@@ -459,8 +487,8 @@ static int mxsfb_set_par(struct fb_info *fb_info)
vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
if (host->sync & MXSFB_SYNC_DATA_ENABLE_HIGH_ACT)
vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
- if (host->sync & MXSFB_SYNC_DOTCLK_FAILING_ACT)
- vdctrl0 |= VDCTRL0_DOTCLK_ACT_FAILING;
+ if (host->sync & MXSFB_SYNC_DOTCLK_FALLING_ACT)
+ vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING;
writel(vdctrl0, host->base + LCDC_VDCTRL0);
@@ -679,14 +707,105 @@ static int mxsfb_restore_mode(struct mxsfb_info *host)
return 0;
}
+static int mxsfb_init_fbinfo_dt(struct mxsfb_info *host)
+{
+ struct fb_info *fb_info = &host->fb_info;
+ struct fb_var_screeninfo *var = &fb_info->var;
+ struct device *dev = &host->pdev->dev;
+ struct device_node *np = host->pdev->dev.of_node;
+ struct device_node *display_np;
+ struct device_node *timings_np;
+ struct display_timings *timings;
+ u32 width;
+ int i;
+ int ret = 0;
+
+ display_np = of_parse_phandle(np, "display", 0);
+ if (!display_np) {
+ dev_err(dev, "failed to find display phandle\n");
+ return -ENOENT;
+ }
+
+ ret = of_property_read_u32(display_np, "bus-width", &width);
+ if (ret < 0) {
+ dev_err(dev, "failed to get property bus-width\n");
+ goto put_display_node;
+ }
+
+ switch (width) {
+ case 8:
+ host->ld_intf_width = STMLCDIF_8BIT;
+ break;
+ case 16:
+ host->ld_intf_width = STMLCDIF_16BIT;
+ break;
+ case 18:
+ host->ld_intf_width = STMLCDIF_18BIT;
+ break;
+ case 24:
+ host->ld_intf_width = STMLCDIF_24BIT;
+ break;
+ default:
+ dev_err(dev, "invalid bus-width value\n");
+ ret = -EINVAL;
+ goto put_display_node;
+ }
+
+ ret = of_property_read_u32(display_np, "bits-per-pixel",
+ &var->bits_per_pixel);
+ if (ret < 0) {
+ dev_err(dev, "failed to get property bits-per-pixel\n");
+ goto put_display_node;
+ }
+
+ timings = of_get_display_timings(display_np);
+ if (!timings) {
+ dev_err(dev, "failed to get display timings\n");
+ ret = -ENOENT;
+ goto put_display_node;
+ }
+
+ timings_np = of_find_node_by_name(display_np,
+ "display-timings");
+ if (!timings_np) {
+ dev_err(dev, "failed to find display-timings node\n");
+ ret = -ENOENT;
+ goto put_display_node;
+ }
+
+ for (i = 0; i < of_get_child_count(timings_np); i++) {
+ struct videomode vm;
+ struct fb_videomode fb_vm;
+
+ ret = videomode_from_timing(timings, &vm, i);
+ if (ret < 0)
+ goto put_timings_node;
+ ret = fb_videomode_from_videomode(&vm, &fb_vm);
+ if (ret < 0)
+ goto put_timings_node;
+
+ if (vm.data_flags & DISPLAY_FLAGS_DE_HIGH)
+ host->sync |= MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
+ if (vm.data_flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
+ host->sync |= MXSFB_SYNC_DOTCLK_FALLING_ACT;
+ fb_add_videomode(&fb_vm, &fb_info->modelist);
+ }
+
+put_timings_node:
+ of_node_put(timings_np);
+put_display_node:
+ of_node_put(display_np);
+ return ret;
+}
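+
+/*
+ * Sketch of the device-tree layout this parser assumes (property names taken
+ * from the code above): the lcdif node carries a "display" phandle to a node
+ * with "bus-width" (8, 16, 18 or 24), "bits-per-pixel" and a
+ * "display-timings" child containing one or more timing subnodes.
+ */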
+
static int mxsfb_init_fbinfo(struct mxsfb_info *host)
{
struct fb_info *fb_info = &host->fb_info;
struct fb_var_screeninfo *var = &fb_info->var;
- struct mxsfb_platform_data *pdata = host->pdev->dev.platform_data;
dma_addr_t fb_phys;
void *fb_virt;
- unsigned fb_size = pdata->fb_size;
+ unsigned fb_size;
+ int ret;
fb_info->fbops = &mxsfb_ops;
fb_info->flags = FBINFO_FLAG_DEFAULT | FBINFO_READS_FAST;
@@ -696,40 +815,22 @@ static int mxsfb_init_fbinfo(struct mxsfb_info *host)
fb_info->fix.visual = FB_VISUAL_TRUECOLOR,
fb_info->fix.accel = FB_ACCEL_NONE;
- var->bits_per_pixel = pdata->default_bpp ? pdata->default_bpp : 16;
+ ret = mxsfb_init_fbinfo_dt(host);
+ if (ret)
+ return ret;
+
var->nonstd = 0;
var->activate = FB_ACTIVATE_NOW;
var->accel_flags = 0;
var->vmode = FB_VMODE_NONINTERLACED;
- host->dotclk_delay = pdata->dotclk_delay;
- host->ld_intf_width = pdata->ld_intf_width;
-
/* Memory allocation for framebuffer */
- if (pdata->fb_phys) {
- if (!fb_size)
- return -EINVAL;
-
- fb_phys = pdata->fb_phys;
-
- if (!request_mem_region(fb_phys, fb_size, host->pdev->name))
- return -ENOMEM;
+ fb_size = SZ_2M;
+ fb_virt = alloc_pages_exact(fb_size, GFP_DMA);
+ if (!fb_virt)
+ return -ENOMEM;
- fb_virt = ioremap(fb_phys, fb_size);
- if (!fb_virt) {
- release_mem_region(fb_phys, fb_size);
- return -ENOMEM;
- }
- host->mapped = 1;
- } else {
- if (!fb_size)
- fb_size = SZ_2M; /* default */
- fb_virt = alloc_pages_exact(fb_size, GFP_DMA);
- if (!fb_virt)
- return -ENOMEM;
-
- fb_phys = virt_to_phys(fb_virt);
- }
+ fb_phys = virt_to_phys(fb_virt);
fb_info->fix.smem_start = fb_phys;
fb_info->screen_base = fb_virt;
@@ -745,13 +846,7 @@ static void mxsfb_free_videomem(struct mxsfb_info *host)
{
struct fb_info *fb_info = &host->fb_info;
- if (host->mapped) {
- iounmap(fb_info->screen_base);
- release_mem_region(fb_info->fix.smem_start,
- fb_info->screen_size);
- } else {
- free_pages_exact(fb_info->screen_base, fb_info->fix.smem_len);
- }
+ free_pages_exact(fb_info->screen_base, fb_info->fix.smem_len);
}
static struct platform_device_id mxsfb_devtype[] = {
@@ -778,47 +873,35 @@ static int mxsfb_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(mxsfb_dt_ids, &pdev->dev);
- struct mxsfb_platform_data *pdata = pdev->dev.platform_data;
struct resource *res;
struct mxsfb_info *host;
struct fb_info *fb_info;
struct fb_modelist *modelist;
struct pinctrl *pinctrl;
- int panel_enable;
- enum of_gpio_flags flags;
- int i, ret;
+ int ret;
if (of_id)
pdev->id_entry = of_id->data;
- if (!pdata) {
- dev_err(&pdev->dev, "No platformdata. Giving up\n");
- return -ENODEV;
- }
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "Cannot get memory IO resource\n");
return -ENODEV;
}
- if (!request_mem_region(res->start, resource_size(res), pdev->name))
- return -EBUSY;
-
fb_info = framebuffer_alloc(sizeof(struct mxsfb_info), &pdev->dev);
if (!fb_info) {
dev_err(&pdev->dev, "Failed to allocate fbdev\n");
- ret = -ENOMEM;
- goto error_alloc_info;
+ return -ENOMEM;
}
host = to_imxfb_host(fb_info);
- host->base = ioremap(res->start, resource_size(res));
- if (!host->base) {
+ host->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->base)) {
dev_err(&pdev->dev, "ioremap failed\n");
- ret = -ENOMEM;
- goto error_ioremap;
+ ret = PTR_ERR(host->base);
+ goto fb_release;
}
host->pdev = pdev;
@@ -829,47 +912,31 @@ static int mxsfb_probe(struct platform_device *pdev)
pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
if (IS_ERR(pinctrl)) {
ret = PTR_ERR(pinctrl);
- goto error_getpin;
+ goto fb_release;
}
- host->clk = clk_get(&host->pdev->dev, NULL);
+ host->clk = devm_clk_get(&host->pdev->dev, NULL);
if (IS_ERR(host->clk)) {
ret = PTR_ERR(host->clk);
- goto error_getclock;
+ goto fb_release;
}
- panel_enable = of_get_named_gpio_flags(pdev->dev.of_node,
- "panel-enable-gpios", 0, &flags);
- if (gpio_is_valid(panel_enable)) {
- unsigned long f = GPIOF_OUT_INIT_HIGH;
- if (flags == OF_GPIO_ACTIVE_LOW)
- f = GPIOF_OUT_INIT_LOW;
- ret = devm_gpio_request_one(&pdev->dev, panel_enable,
- f, "panel-enable");
- if (ret) {
- dev_err(&pdev->dev,
- "failed to request gpio %d: %d\n",
- panel_enable, ret);
- goto error_panel_enable;
- }
- }
+ host->reg_lcd = devm_regulator_get(&pdev->dev, "lcd");
+ if (IS_ERR(host->reg_lcd))
+ host->reg_lcd = NULL;
- fb_info->pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL);
+ fb_info->pseudo_palette = devm_kzalloc(&pdev->dev, sizeof(u32) * 16,
+ GFP_KERNEL);
if (!fb_info->pseudo_palette) {
ret = -ENOMEM;
- goto error_pseudo_pallette;
+ goto fb_release;
}
INIT_LIST_HEAD(&fb_info->modelist);
- host->sync = pdata->sync;
-
ret = mxsfb_init_fbinfo(host);
if (ret != 0)
- goto error_init_fb;
-
- for (i = 0; i < pdata->mode_count; i++)
- fb_add_videomode(&pdata->mode_list[i], &fb_info->modelist);
+ goto fb_release;
modelist = list_first_entry(&fb_info->modelist,
struct fb_modelist, list);
@@ -883,7 +950,7 @@ static int mxsfb_probe(struct platform_device *pdev)
ret = register_framebuffer(fb_info);
if (ret != 0) {
dev_err(&pdev->dev,"Failed to register framebuffer\n");
- goto error_register;
+ goto fb_destroy;
}
if (!host->enabled) {
@@ -896,22 +963,12 @@ static int mxsfb_probe(struct platform_device *pdev)
return 0;
-error_register:
+fb_destroy:
if (host->enabled)
clk_disable_unprepare(host->clk);
fb_destroy_modelist(&fb_info->modelist);
-error_init_fb:
- kfree(fb_info->pseudo_palette);
-error_pseudo_pallette:
-error_panel_enable:
- clk_put(host->clk);
-error_getclock:
-error_getpin:
- iounmap(host->base);
-error_ioremap:
+fb_release:
framebuffer_release(fb_info);
-error_alloc_info:
- release_mem_region(res->start, resource_size(res));
return ret;
}
@@ -920,19 +977,14 @@ static int mxsfb_remove(struct platform_device *pdev)
{
struct fb_info *fb_info = platform_get_drvdata(pdev);
struct mxsfb_info *host = to_imxfb_host(fb_info);
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (host->enabled)
mxsfb_disable_controller(fb_info);
unregister_framebuffer(fb_info);
- kfree(fb_info->pseudo_palette);
mxsfb_free_videomem(host);
- iounmap(host->base);
- clk_put(host->clk);
framebuffer_release(fb_info);
- release_mem_region(res->start, resource_size(res));
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/video/omap2/Makefile b/drivers/video/omap2/Makefile
index 5ea7cb9..296e5c5 100644
--- a/drivers/video/omap2/Makefile
+++ b/drivers/video/omap2/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_OMAP2_VRFB) += vrfb.o
obj-$(CONFIG_OMAP2_DSS) += dss/
-obj-$(CONFIG_FB_OMAP2) += omapfb/
obj-y += displays/
+obj-$(CONFIG_FB_OMAP2) += omapfb/
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
index 72699f8..d7f69c0 100644
--- a/drivers/video/omap2/displays/panel-acx565akm.c
+++ b/drivers/video/omap2/displays/panel-acx565akm.c
@@ -29,8 +29,10 @@
#include <linux/sched.h>
#include <linux/backlight.h>
#include <linux/fb.h>
+#include <linux/gpio.h>
#include <video/omapdss.h>
+#include <video/omap-panel-data.h>
#define MIPID_CMD_READ_DISP_ID 0x04
#define MIPID_CMD_READ_RED 0x06
@@ -336,8 +338,6 @@ static int acx565akm_bl_update_status(struct backlight_device *dev)
r = 0;
if (md->has_bc)
acx565akm_set_brightness(md, level);
- else if (md->dssdev->set_backlight)
- r = md->dssdev->set_backlight(md->dssdev, level);
else
r = -ENODEV;
@@ -352,7 +352,7 @@ static int acx565akm_bl_get_intensity(struct backlight_device *dev)
dev_dbg(&dev->dev, "%s\n", __func__);
- if (!md->has_bc && md->dssdev->set_backlight == NULL)
+ if (!md->has_bc)
return -ENODEV;
if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
@@ -496,21 +496,38 @@ static struct omap_video_timings acx_panel_timings = {
.sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
};
+static struct panel_acx565akm_data *get_panel_data(struct omap_dss_device *dssdev)
+{
+ return (struct panel_acx565akm_data *) dssdev->data;
+}
+
static int acx_panel_probe(struct omap_dss_device *dssdev)
{
int r;
struct acx565akm_device *md = &acx_dev;
+ struct panel_acx565akm_data *panel_data = get_panel_data(dssdev);
struct backlight_device *bldev;
int max_brightness, brightness;
struct backlight_properties props;
dev_dbg(&dssdev->dev, "%s\n", __func__);
+ if (!panel_data)
+ return -EINVAL;
+
/* FIXME AC bias ? */
dssdev->panel.timings = acx_panel_timings;
- if (dssdev->platform_enable)
- dssdev->platform_enable(dssdev);
+ if (gpio_is_valid(panel_data->reset_gpio)) {
+ r = devm_gpio_request_one(&dssdev->dev, panel_data->reset_gpio,
+ GPIOF_OUT_INIT_LOW, "lcd reset");
+ if (r)
+ return r;
+ }
+
+ if (gpio_is_valid(panel_data->reset_gpio))
+ gpio_set_value(panel_data->reset_gpio, 1);
+
/*
* After reset we have to wait 5 msec before the first
* command can be sent.
@@ -522,8 +539,9 @@ static int acx_panel_probe(struct omap_dss_device *dssdev)
r = panel_detect(md);
if (r) {
dev_err(&dssdev->dev, "%s panel detect error\n", __func__);
- if (!md->enabled && dssdev->platform_disable)
- dssdev->platform_disable(dssdev);
+ if (!md->enabled && gpio_is_valid(panel_data->reset_gpio))
+ gpio_set_value(panel_data->reset_gpio, 0);
+
return r;
}
@@ -532,8 +550,8 @@ static int acx_panel_probe(struct omap_dss_device *dssdev)
mutex_unlock(&acx_dev.mutex);
if (!md->enabled) {
- if (dssdev->platform_disable)
- dssdev->platform_disable(dssdev);
+ if (gpio_is_valid(panel_data->reset_gpio))
+ gpio_set_value(panel_data->reset_gpio, 0);
}
/*------- Backlight control --------*/
@@ -557,15 +575,10 @@ static int acx_panel_probe(struct omap_dss_device *dssdev)
md->cabc_mode = get_hw_cabc_mode(md);
}
- if (md->has_bc)
- max_brightness = 255;
- else
- max_brightness = dssdev->max_backlight_level;
+ max_brightness = 255;
if (md->has_bc)
brightness = acx565akm_get_actual_brightness(md);
- else if (dssdev->get_backlight)
- brightness = dssdev->get_backlight(dssdev);
else
brightness = 0;
@@ -591,6 +604,7 @@ static void acx_panel_remove(struct omap_dss_device *dssdev)
static int acx_panel_power_on(struct omap_dss_device *dssdev)
{
struct acx565akm_device *md = &acx_dev;
+ struct panel_acx565akm_data *panel_data = get_panel_data(dssdev);
int r;
dev_dbg(&dssdev->dev, "%s\n", __func__);
@@ -612,11 +626,8 @@ static int acx_panel_power_on(struct omap_dss_device *dssdev)
/*FIXME tweak me */
msleep(50);
- if (dssdev->platform_enable) {
- r = dssdev->platform_enable(dssdev);
- if (r)
- goto fail;
- }
+ if (gpio_is_valid(panel_data->reset_gpio))
+ gpio_set_value(panel_data->reset_gpio, 1);
if (md->enabled) {
dev_dbg(&md->spi->dev, "panel already enabled\n");
@@ -645,8 +656,7 @@ static int acx_panel_power_on(struct omap_dss_device *dssdev)
mutex_unlock(&md->mutex);
return acx565akm_bl_update_status(md->bl_dev);
-fail:
- omapdss_sdi_display_disable(dssdev);
+
fail_unlock:
mutex_unlock(&md->mutex);
return r;
@@ -655,6 +665,7 @@ fail_unlock:
static void acx_panel_power_off(struct omap_dss_device *dssdev)
{
struct acx565akm_device *md = &acx_dev;
+ struct panel_acx565akm_data *panel_data = get_panel_data(dssdev);
dev_dbg(&dssdev->dev, "%s\n", __func__);
@@ -678,8 +689,8 @@ static void acx_panel_power_off(struct omap_dss_device *dssdev)
*/
msleep(50);
- if (dssdev->platform_disable)
- dssdev->platform_disable(dssdev);
+ if (gpio_is_valid(panel_data->reset_gpio))
+ gpio_set_value(panel_data->reset_gpio, 0);
/* FIXME need to tweak this delay */
msleep(100);
diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c
index c904f42..97363f7 100644
--- a/drivers/video/omap2/displays/panel-generic-dpi.c
+++ b/drivers/video/omap2/displays/panel-generic-dpi.c
@@ -33,9 +33,10 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/gpio.h>
#include <video/omapdss.h>
-#include <video/omap-panel-generic-dpi.h>
+#include <video/omap-panel-data.h>
struct panel_config {
struct omap_video_timings timings;
@@ -533,7 +534,7 @@ static inline struct panel_generic_dpi_data
static int generic_dpi_panel_power_on(struct omap_dss_device *dssdev)
{
- int r;
+ int r, i;
struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
struct panel_config *panel_config = drv_data->panel_config;
@@ -552,15 +553,13 @@ static int generic_dpi_panel_power_on(struct omap_dss_device *dssdev)
if (panel_config->power_on_delay)
msleep(panel_config->power_on_delay);
- if (panel_data->platform_enable) {
- r = panel_data->platform_enable(dssdev);
- if (r)
- goto err1;
+ for (i = 0; i < panel_data->num_gpios; ++i) {
+ gpio_set_value_cansleep(panel_data->gpios[i],
+ panel_data->gpio_invert[i] ? 0 : 1);
}
return 0;
-err1:
- omapdss_dpi_display_disable(dssdev);
+
err0:
return r;
}
@@ -570,12 +569,15 @@ static void generic_dpi_panel_power_off(struct omap_dss_device *dssdev)
struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
struct panel_config *panel_config = drv_data->panel_config;
+ int i;
if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
return;
- if (panel_data->platform_disable)
- panel_data->platform_disable(dssdev);
+ for (i = panel_data->num_gpios - 1; i >= 0; --i) {
+ gpio_set_value_cansleep(panel_data->gpios[i],
+ panel_data->gpio_invert[i] ? 1 : 0);
+ }
/* wait couple of vsyncs after disabling the LCD */
if (panel_config->power_off_delay)
@@ -589,7 +591,7 @@ static int generic_dpi_panel_probe(struct omap_dss_device *dssdev)
struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
struct panel_config *panel_config = NULL;
struct panel_drv_data *drv_data = NULL;
- int i;
+ int i, r;
dev_dbg(&dssdev->dev, "probe\n");
@@ -606,9 +608,18 @@ static int generic_dpi_panel_probe(struct omap_dss_device *dssdev)
if (!panel_config)
return -EINVAL;
+ for (i = 0; i < panel_data->num_gpios; ++i) {
+ r = devm_gpio_request_one(&dssdev->dev, panel_data->gpios[i],
+ panel_data->gpio_invert[i] ?
+ GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
+ "panel gpio");
+ if (r)
+ return r;
+ }
+
dssdev->panel.timings = panel_config->timings;
- drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
+ drv_data = devm_kzalloc(&dssdev->dev, sizeof(*drv_data), GFP_KERNEL);
if (!drv_data)
return -ENOMEM;
@@ -624,12 +635,8 @@ static int generic_dpi_panel_probe(struct omap_dss_device *dssdev)
static void __exit generic_dpi_panel_remove(struct omap_dss_device *dssdev)
{
- struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
-
dev_dbg(&dssdev->dev, "remove\n");
- kfree(drv_data);
-
dev_set_drvdata(&dssdev->dev, NULL);
}
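
The generic DPI conversion above is the template most of this series follows: the board-specific platform_enable()/platform_disable() hooks go away, the panel GPIOs move into the platform data, they are claimed once at probe with devm_gpio_request_one(), and the power paths simply toggle them. A minimal stand-alone sketch of that pattern (the foo_* names and the fixed-size arrays are illustrative only, not part of the patch):

#include <linux/device.h>
#include <linux/gpio.h>

struct foo_panel_data {
	int num_gpios;
	int gpios[4];
	bool gpio_invert[4];
};

/* Claim every panel GPIO in its "panel off" state; devm_* means no
 * explicit gpio_free() is needed in remove() or on error paths. */
static int foo_panel_request_gpios(struct device *dev,
				   struct foo_panel_data *pd)
{
	int i, r;

	for (i = 0; i < pd->num_gpios; ++i) {
		r = devm_gpio_request_one(dev, pd->gpios[i],
				pd->gpio_invert[i] ?
				GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
				"panel gpio");
		if (r)
			return r;
	}

	return 0;
}

/* Enable in order, disable in reverse order, honouring per-GPIO polarity. */
static void foo_panel_set_power(struct foo_panel_data *pd, bool on)
{
	int i;

	if (on) {
		for (i = 0; i < pd->num_gpios; ++i)
			gpio_set_value_cansleep(pd->gpios[i],
					pd->gpio_invert[i] ? 0 : 1);
	} else {
		for (i = pd->num_gpios - 1; i >= 0; --i)
			gpio_set_value_cansleep(pd->gpios[i],
					pd->gpio_invert[i] ? 1 : 0);
	}
}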
diff --git a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
index 6e5abe8..4ea6548 100644
--- a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
@@ -20,8 +20,10 @@
#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/mutex.h>
+#include <linux/gpio.h>
#include <video/omapdss.h>
+#include <video/omap-panel-data.h>
struct lb035q02_data {
struct mutex lock;
@@ -48,9 +50,16 @@ static struct omap_video_timings lb035q02_timings = {
.sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
};
+static inline struct panel_generic_dpi_data
+*get_panel_data(const struct omap_dss_device *dssdev)
+{
+ return (struct panel_generic_dpi_data *) dssdev->data;
+}
+
static int lb035q02_panel_power_on(struct omap_dss_device *dssdev)
{
- int r;
+ struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
+ int r, i;
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
return 0;
@@ -62,54 +71,65 @@ static int lb035q02_panel_power_on(struct omap_dss_device *dssdev)
if (r)
goto err0;
- if (dssdev->platform_enable) {
- r = dssdev->platform_enable(dssdev);
- if (r)
- goto err1;
+ for (i = 0; i < panel_data->num_gpios; ++i) {
+ gpio_set_value_cansleep(panel_data->gpios[i],
+ panel_data->gpio_invert[i] ? 0 : 1);
}
return 0;
-err1:
- omapdss_dpi_display_disable(dssdev);
+
err0:
return r;
}
static void lb035q02_panel_power_off(struct omap_dss_device *dssdev)
{
+ struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
+ int i;
+
if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
return;
- if (dssdev->platform_disable)
- dssdev->platform_disable(dssdev);
+ for (i = panel_data->num_gpios - 1; i >= 0; --i) {
+ gpio_set_value_cansleep(panel_data->gpios[i],
+ panel_data->gpio_invert[i] ? 1 : 0);
+ }
omapdss_dpi_display_disable(dssdev);
}
static int lb035q02_panel_probe(struct omap_dss_device *dssdev)
{
+ struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
struct lb035q02_data *ld;
- int r;
+ int r, i;
+
+ if (!panel_data)
+ return -EINVAL;
dssdev->panel.timings = lb035q02_timings;
- ld = kzalloc(sizeof(*ld), GFP_KERNEL);
- if (!ld) {
- r = -ENOMEM;
- goto err;
+ ld = devm_kzalloc(&dssdev->dev, sizeof(*ld), GFP_KERNEL);
+ if (!ld)
+ return -ENOMEM;
+
+ for (i = 0; i < panel_data->num_gpios; ++i) {
+ r = devm_gpio_request_one(&dssdev->dev, panel_data->gpios[i],
+ panel_data->gpio_invert[i] ?
+ GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
+ "panel gpio");
+ if (r)
+ return r;
}
+
mutex_init(&ld->lock);
dev_set_drvdata(&dssdev->dev, ld);
+
return 0;
-err:
- return r;
}
static void lb035q02_panel_remove(struct omap_dss_device *dssdev)
{
- struct lb035q02_data *ld = dev_get_drvdata(&dssdev->dev);
-
- kfree(ld);
}
static int lb035q02_panel_enable(struct omap_dss_device *dssdev)
diff --git a/drivers/video/omap2/displays/panel-n8x0.c b/drivers/video/omap2/displays/panel-n8x0.c
index dd12947..f94ead6 100644
--- a/drivers/video/omap2/displays/panel-n8x0.c
+++ b/drivers/video/omap2/displays/panel-n8x0.c
@@ -5,11 +5,10 @@
#include <linux/slab.h>
#include <linux/gpio.h>
#include <linux/spi/spi.h>
-#include <linux/backlight.h>
#include <linux/fb.h>
#include <video/omapdss.h>
-#include <video/omap-panel-n8x0.h>
+#include <video/omap-panel-data.h>
#define BLIZZARD_REV_CODE 0x00
#define BLIZZARD_CONFIG 0x02
@@ -69,7 +68,6 @@ static struct panel_drv_data {
struct omap_dss_device *dssdev;
struct spi_device *spidev;
- struct backlight_device *bldev;
int blizzard_ver;
} s_drv_data;
@@ -297,12 +295,6 @@ static int n8x0_panel_power_on(struct omap_dss_device *dssdev)
gpio_direction_output(bdata->ctrl_pwrdown, 1);
- if (bdata->platform_enable) {
- r = bdata->platform_enable(dssdev);
- if (r)
- goto err_plat_en;
- }
-
omapdss_rfbi_set_size(dssdev, dssdev->panel.timings.x_res,
dssdev->panel.timings.y_res);
omapdss_rfbi_set_pixel_size(dssdev, dssdev->ctrl.pixel_size);
@@ -375,9 +367,6 @@ err_inv_panel:
err_inv_chip:
omapdss_rfbi_display_disable(dssdev);
err_rfbi_en:
- if (bdata->platform_disable)
- bdata->platform_disable(dssdev);
-err_plat_en:
gpio_direction_output(bdata->ctrl_pwrdown, 0);
return r;
}
@@ -394,9 +383,6 @@ static void n8x0_panel_power_off(struct omap_dss_device *dssdev)
send_display_off(spi);
send_sleep_in(spi);
- if (bdata->platform_disable)
- bdata->platform_disable(dssdev);
-
/*
* HACK: we should turn off the panel here, but there is some problem
* with the initialization sequence, and we fail to init the panel if we
@@ -424,54 +410,10 @@ static const struct rfbi_timings n8x0_panel_timings = {
.cs_pulse_width = 0,
};
-static int n8x0_bl_update_status(struct backlight_device *dev)
-{
- struct omap_dss_device *dssdev = dev_get_drvdata(&dev->dev);
- struct panel_n8x0_data *bdata = get_board_data(dssdev);
- struct panel_drv_data *ddata = get_drv_data(dssdev);
- int r;
- int level;
-
- mutex_lock(&ddata->lock);
-
- if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
- dev->props.power == FB_BLANK_UNBLANK)
- level = dev->props.brightness;
- else
- level = 0;
-
- dev_dbg(&dssdev->dev, "update brightness to %d\n", level);
-
- if (!bdata->set_backlight)
- r = -EINVAL;
- else
- r = bdata->set_backlight(dssdev, level);
-
- mutex_unlock(&ddata->lock);
-
- return r;
-}
-
-static int n8x0_bl_get_intensity(struct backlight_device *dev)
-{
- if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
- dev->props.power == FB_BLANK_UNBLANK)
- return dev->props.brightness;
-
- return 0;
-}
-
-static const struct backlight_ops n8x0_bl_ops = {
- .get_brightness = n8x0_bl_get_intensity,
- .update_status = n8x0_bl_update_status,
-};
-
static int n8x0_panel_probe(struct omap_dss_device *dssdev)
{
struct panel_n8x0_data *bdata = get_board_data(dssdev);
struct panel_drv_data *ddata;
- struct backlight_device *bldev;
- struct backlight_properties props;
int r;
dev_dbg(&dssdev->dev, "probe\n");
@@ -491,40 +433,27 @@ static int n8x0_panel_probe(struct omap_dss_device *dssdev)
dssdev->ctrl.rfbi_timings = n8x0_panel_timings;
dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
- memset(&props, 0, sizeof(props));
- props.max_brightness = 127;
- props.type = BACKLIGHT_PLATFORM;
- bldev = backlight_device_register(dev_name(&dssdev->dev), &dssdev->dev,
- dssdev, &n8x0_bl_ops, &props);
- if (IS_ERR(bldev)) {
- r = PTR_ERR(bldev);
- dev_err(&dssdev->dev, "register backlight failed\n");
- return r;
+ if (gpio_is_valid(bdata->panel_reset)) {
+ r = devm_gpio_request_one(&dssdev->dev, bdata->panel_reset,
+ GPIOF_OUT_INIT_LOW, "PANEL RESET");
+ if (r)
+ return r;
}
- ddata->bldev = bldev;
-
- bldev->props.fb_blank = FB_BLANK_UNBLANK;
- bldev->props.power = FB_BLANK_UNBLANK;
- bldev->props.brightness = 127;
-
- n8x0_bl_update_status(bldev);
+ if (gpio_is_valid(bdata->ctrl_pwrdown)) {
+ r = devm_gpio_request_one(&dssdev->dev, bdata->ctrl_pwrdown,
+ GPIOF_OUT_INIT_LOW, "PANEL PWRDOWN");
+ if (r)
+ return r;
+ }
return 0;
}
static void n8x0_panel_remove(struct omap_dss_device *dssdev)
{
- struct panel_drv_data *ddata = get_drv_data(dssdev);
- struct backlight_device *bldev;
-
dev_dbg(&dssdev->dev, "remove\n");
- bldev = ddata->bldev;
- bldev->props.power = FB_BLANK_POWERDOWN;
- n8x0_bl_update_status(bldev);
- backlight_device_unregister(bldev);
-
dev_set_drvdata(&dssdev->dev, NULL);
}
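
Note that both GPIOs above are treated as optional: a board that does not wire panel_reset or ctrl_pwrdown can leave the field at an invalid (typically negative) number, and the gpio_is_valid() guard turns the request into a no-op. A small sketch of that guard in isolation (hypothetical helper name, not part of the patch):

#include <linux/device.h>
#include <linux/gpio.h>

/* Request an output GPIO only if the board actually provides one;
 * an invalid number means "not wired" and is silently accepted. */
static int foo_request_optional_gpio(struct device *dev, int gpio,
				     unsigned long flags, const char *label)
{
	if (!gpio_is_valid(gpio))
		return 0;

	return devm_gpio_request_one(dev, gpio, flags, label);
}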
diff --git a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
index c4e9c2b..20c3cd9 100644
--- a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
+++ b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
@@ -19,10 +19,11 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
-#include <linux/backlight.h>
#include <linux/fb.h>
+#include <linux/gpio.h>
#include <video/omapdss.h>
+#include <video/omap-panel-data.h>
#define LCD_XRES 800
#define LCD_YRES 480
@@ -32,10 +33,6 @@
*/
#define LCD_PIXEL_CLOCK 23800
-struct nec_8048_data {
- struct backlight_device *bl;
-};
-
static const struct {
unsigned char addr;
unsigned char dat;
@@ -84,93 +81,47 @@ static struct omap_video_timings nec_8048_panel_timings = {
.sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
};
-static int nec_8048_bl_update_status(struct backlight_device *bl)
-{
- struct omap_dss_device *dssdev = dev_get_drvdata(&bl->dev);
- int level;
-
- if (!dssdev->set_backlight)
- return -EINVAL;
-
- if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
- bl->props.power == FB_BLANK_UNBLANK)
- level = bl->props.brightness;
- else
- level = 0;
-
- return dssdev->set_backlight(dssdev, level);
-}
-
-static int nec_8048_bl_get_brightness(struct backlight_device *bl)
+static inline struct panel_nec_nl8048_data
+*get_panel_data(const struct omap_dss_device *dssdev)
{
- if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
- bl->props.power == FB_BLANK_UNBLANK)
- return bl->props.brightness;
-
- return 0;
+ return (struct panel_nec_nl8048_data *) dssdev->data;
}
-static const struct backlight_ops nec_8048_bl_ops = {
- .get_brightness = nec_8048_bl_get_brightness,
- .update_status = nec_8048_bl_update_status,
-};
-
static int nec_8048_panel_probe(struct omap_dss_device *dssdev)
{
- struct backlight_device *bl;
- struct nec_8048_data *necd;
- struct backlight_properties props;
+ struct panel_nec_nl8048_data *pd = get_panel_data(dssdev);
int r;
- dssdev->panel.timings = nec_8048_panel_timings;
-
- necd = kzalloc(sizeof(*necd), GFP_KERNEL);
- if (!necd)
- return -ENOMEM;
-
- dev_set_drvdata(&dssdev->dev, necd);
+ if (!pd)
+ return -EINVAL;
- memset(&props, 0, sizeof(struct backlight_properties));
- props.max_brightness = 255;
+ dssdev->panel.timings = nec_8048_panel_timings;
- bl = backlight_device_register("nec-8048", &dssdev->dev, dssdev,
- &nec_8048_bl_ops, &props);
- if (IS_ERR(bl)) {
- r = PTR_ERR(bl);
- kfree(necd);
- return r;
+ if (gpio_is_valid(pd->qvga_gpio)) {
+ r = devm_gpio_request_one(&dssdev->dev, pd->qvga_gpio,
+ GPIOF_OUT_INIT_HIGH, "lcd QVGA");
+ if (r)
+ return r;
}
- necd->bl = bl;
-
- bl->props.fb_blank = FB_BLANK_UNBLANK;
- bl->props.power = FB_BLANK_UNBLANK;
- bl->props.max_brightness = dssdev->max_backlight_level;
- bl->props.brightness = dssdev->max_backlight_level;
- r = nec_8048_bl_update_status(bl);
- if (r < 0)
- dev_err(&dssdev->dev, "failed to set lcd brightness\n");
+ if (gpio_is_valid(pd->res_gpio)) {
+ r = devm_gpio_request_one(&dssdev->dev, pd->res_gpio,
+ GPIOF_OUT_INIT_LOW, "lcd RES");
+ if (r)
+ return r;
+ }
return 0;
}
static void nec_8048_panel_remove(struct omap_dss_device *dssdev)
{
- struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev);
- struct backlight_device *bl = necd->bl;
-
- bl->props.power = FB_BLANK_POWERDOWN;
- nec_8048_bl_update_status(bl);
- backlight_device_unregister(bl);
-
- kfree(necd);
}
static int nec_8048_panel_power_on(struct omap_dss_device *dssdev)
{
+ struct panel_nec_nl8048_data *pd = get_panel_data(dssdev);
int r;
- struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev);
- struct backlight_device *bl = necd->bl;
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
return 0;
@@ -182,36 +133,24 @@ static int nec_8048_panel_power_on(struct omap_dss_device *dssdev)
if (r)
goto err0;
- if (dssdev->platform_enable) {
- r = dssdev->platform_enable(dssdev);
- if (r)
- goto err1;
- }
-
- r = nec_8048_bl_update_status(bl);
- if (r < 0)
- dev_err(&dssdev->dev, "failed to set lcd brightness\n");
+ if (gpio_is_valid(pd->res_gpio))
+ gpio_set_value_cansleep(pd->res_gpio, 1);
return 0;
-err1:
- omapdss_dpi_display_disable(dssdev);
+
err0:
return r;
}
static void nec_8048_panel_power_off(struct omap_dss_device *dssdev)
{
- struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev);
- struct backlight_device *bl = necd->bl;
+ struct panel_nec_nl8048_data *pd = get_panel_data(dssdev);
if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
return;
- bl->props.brightness = 0;
- nec_8048_bl_update_status(bl);
-
- if (dssdev->platform_disable)
- dssdev->platform_disable(dssdev);
+ if (gpio_is_valid(pd->res_gpio))
+ gpio_set_value_cansleep(pd->res_gpio, 0);
omapdss_dpi_display_disable(dssdev);
}
@@ -303,16 +242,22 @@ static int nec_8048_spi_remove(struct spi_device *spi)
return 0;
}
-static int nec_8048_spi_suspend(struct spi_device *spi, pm_message_t mesg)
+#ifdef CONFIG_PM_SLEEP
+
+static int nec_8048_spi_suspend(struct device *dev)
{
+ struct spi_device *spi = to_spi_device(dev);
+
nec_8048_spi_send(spi, 2, 0x01);
mdelay(40);
return 0;
}
-static int nec_8048_spi_resume(struct spi_device *spi)
+static int nec_8048_spi_resume(struct device *dev)
{
+ struct spi_device *spi = to_spi_device(dev);
+
/* reinitialize the panel */
spi_setup(spi);
nec_8048_spi_send(spi, 2, 0x00);
@@ -321,14 +266,20 @@ static int nec_8048_spi_resume(struct spi_device *spi)
return 0;
}
+static SIMPLE_DEV_PM_OPS(nec_8048_spi_pm_ops, nec_8048_spi_suspend,
+ nec_8048_spi_resume);
+#define NEC_8048_SPI_PM_OPS (&nec_8048_spi_pm_ops)
+#else
+#define NEC_8048_SPI_PM_OPS NULL
+#endif
+
static struct spi_driver nec_8048_spi_driver = {
.probe = nec_8048_spi_probe,
.remove = nec_8048_spi_remove,
- .suspend = nec_8048_spi_suspend,
- .resume = nec_8048_spi_resume,
.driver = {
.name = "nec_8048_spi",
.owner = THIS_MODULE,
+ .pm = NEC_8048_SPI_PM_OPS,
},
};
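
The suspend/resume change above is independent of the GPIO work: the legacy spi_driver .suspend/.resume callbacks, which take a struct spi_device, are replaced by dev_pm_ops that are compiled out entirely when CONFIG_PM_SLEEP is off. A minimal sketch of the same conversion for a hypothetical bar_spi driver (names illustrative only):

#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int bar_spi_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	/* panel-specific sleep-in commands would be sent over SPI here */
	(void)spi;
	return 0;
}

static int bar_spi_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	/* reprogram the panel over SPI here */
	(void)spi;
	return 0;
}

static SIMPLE_DEV_PM_OPS(bar_spi_pm_ops, bar_spi_suspend, bar_spi_resume);
#define BAR_SPI_PM_OPS (&bar_spi_pm_ops)
#else
#define BAR_SPI_PM_OPS NULL
#endif

static struct spi_driver bar_spi_driver = {
	.driver = {
		.name	= "bar_spi",
		.owner	= THIS_MODULE,
		.pm	= BAR_SPI_PM_OPS,
	},
};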
diff --git a/drivers/video/omap2/displays/panel-picodlp.c b/drivers/video/omap2/displays/panel-picodlp.c
index 1b94018..62f2db0 100644
--- a/drivers/video/omap2/displays/panel-picodlp.c
+++ b/drivers/video/omap2/displays/panel-picodlp.c
@@ -31,7 +31,7 @@
#include <linux/gpio.h>
#include <video/omapdss.h>
-#include <video/omap-panel-picodlp.h>
+#include <video/omap-panel-data.h>
#include "panel-picodlp.h"
@@ -354,12 +354,6 @@ static int picodlp_panel_power_on(struct omap_dss_device *dssdev)
struct picodlp_data *picod = dev_get_drvdata(&dssdev->dev);
struct picodlp_panel_data *picodlp_pdata = get_panel_data(dssdev);
- if (dssdev->platform_enable) {
- r = dssdev->platform_enable(dssdev);
- if (r)
- return r;
- }
-
gpio_set_value(picodlp_pdata->pwrgood_gpio, 0);
msleep(1);
gpio_set_value(picodlp_pdata->pwrgood_gpio, 1);
@@ -398,9 +392,6 @@ static int picodlp_panel_power_on(struct omap_dss_device *dssdev)
err:
omapdss_dpi_display_disable(dssdev);
err1:
- if (dssdev->platform_disable)
- dssdev->platform_disable(dssdev);
-
return r;
}
@@ -412,9 +403,6 @@ static void picodlp_panel_power_off(struct omap_dss_device *dssdev)
gpio_set_value(picodlp_pdata->emu_done_gpio, 0);
gpio_set_value(picodlp_pdata->pwrgood_gpio, 0);
-
- if (dssdev->platform_disable)
- dssdev->platform_disable(dssdev);
}
static int picodlp_panel_probe(struct omap_dss_device *dssdev)
@@ -423,11 +411,14 @@ static int picodlp_panel_probe(struct omap_dss_device *dssdev)
struct picodlp_panel_data *picodlp_pdata = get_panel_data(dssdev);
struct i2c_adapter *adapter;
struct i2c_client *picodlp_i2c_client;
- int r = 0, picodlp_adapter_id;
+ int r, picodlp_adapter_id;
dssdev->panel.timings = pico_ls_timings;
- picod = kzalloc(sizeof(struct picodlp_data), GFP_KERNEL);
+ if (!picodlp_pdata)
+ return -EINVAL;
+
+ picod = devm_kzalloc(&dssdev->dev, sizeof(*picod), GFP_KERNEL);
if (!picod)
return -ENOMEM;
@@ -438,25 +429,37 @@ static int picodlp_panel_probe(struct omap_dss_device *dssdev)
adapter = i2c_get_adapter(picodlp_adapter_id);
if (!adapter) {
dev_err(&dssdev->dev, "can't get i2c adapter\n");
- r = -ENODEV;
- goto err;
+ return -ENODEV;
}
picodlp_i2c_client = i2c_new_device(adapter, &picodlp_i2c_board_info);
if (!picodlp_i2c_client) {
dev_err(&dssdev->dev, "can't add i2c device::"
" picodlp_i2c_client is NULL\n");
- r = -ENODEV;
- goto err;
+ return -ENODEV;
}
picod->picodlp_i2c_client = picodlp_i2c_client;
dev_set_drvdata(&dssdev->dev, picod);
- return r;
-err:
- kfree(picod);
- return r;
+
+ if (gpio_is_valid(picodlp_pdata->emu_done_gpio)) {
+ r = devm_gpio_request_one(&dssdev->dev,
+ picodlp_pdata->emu_done_gpio,
+ GPIOF_IN, "DLP EMU DONE");
+ if (r)
+ return r;
+ }
+
+ if (gpio_is_valid(picodlp_pdata->pwrgood_gpio)) {
+ r = devm_gpio_request_one(&dssdev->dev,
+ picodlp_pdata->pwrgood_gpio,
+ GPIOF_OUT_INIT_LOW, "DLP PWRGOOD");
+ if (r)
+ return r;
+ }
+
+ return 0;
}
static void picodlp_panel_remove(struct omap_dss_device *dssdev)
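
Beyond the GPIO handling, the probe above also switches from kzalloc() to devm_kzalloc(), which is what allows the old "err:" label and the kfree() in remove() to disappear: the allocation is tied to the device's lifetime and released automatically on unbind. A minimal sketch of that idiom (hypothetical names):

#include <linux/device.h>
#include <linux/slab.h>

struct foo_data {
	int dummy;
};

static int foo_probe(struct device *dev)
{
	struct foo_data *fd;

	/* freed automatically when the device is unbound */
	fd = devm_kzalloc(dev, sizeof(*fd), GFP_KERNEL);
	if (!fd)
		return -ENOMEM;

	dev_set_drvdata(dev, fd);

	/* later failures can simply 'return r;' -- no kfree()/goto needed */
	return 0;
}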
diff --git a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
index cada8c6..74cb0eb 100644
--- a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
@@ -20,16 +20,13 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/backlight.h>
#include <linux/fb.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/gpio.h>
#include <video/omapdss.h>
-
-struct sharp_data {
- struct backlight_device *bl;
-};
+#include <video/omap-panel-data.h>
static struct omap_video_timings sharp_ls_timings = {
.x_res = 480,
@@ -52,91 +49,67 @@ static struct omap_video_timings sharp_ls_timings = {
.sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
};
-static int sharp_ls_bl_update_status(struct backlight_device *bl)
+static inline struct panel_sharp_ls037v7dw01_data
+*get_panel_data(const struct omap_dss_device *dssdev)
{
- struct omap_dss_device *dssdev = dev_get_drvdata(&bl->dev);
- int level;
-
- if (!dssdev->set_backlight)
- return -EINVAL;
-
- if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
- bl->props.power == FB_BLANK_UNBLANK)
- level = bl->props.brightness;
- else
- level = 0;
-
- return dssdev->set_backlight(dssdev, level);
+ return (struct panel_sharp_ls037v7dw01_data *) dssdev->data;
}
-static int sharp_ls_bl_get_brightness(struct backlight_device *bl)
-{
- if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
- bl->props.power == FB_BLANK_UNBLANK)
- return bl->props.brightness;
-
- return 0;
-}
-
-static const struct backlight_ops sharp_ls_bl_ops = {
- .get_brightness = sharp_ls_bl_get_brightness,
- .update_status = sharp_ls_bl_update_status,
-};
-
-
-
static int sharp_ls_panel_probe(struct omap_dss_device *dssdev)
{
- struct backlight_properties props;
- struct backlight_device *bl;
- struct sharp_data *sd;
+ struct panel_sharp_ls037v7dw01_data *pd = get_panel_data(dssdev);
int r;
+ if (!pd)
+ return -EINVAL;
+
dssdev->panel.timings = sharp_ls_timings;
- sd = kzalloc(sizeof(*sd), GFP_KERNEL);
- if (!sd)
- return -ENOMEM;
+ if (gpio_is_valid(pd->mo_gpio)) {
+ r = devm_gpio_request_one(&dssdev->dev, pd->mo_gpio,
+ GPIOF_OUT_INIT_LOW, "lcd MO");
+ if (r)
+ return r;
+ }
- dev_set_drvdata(&dssdev->dev, sd);
+ if (gpio_is_valid(pd->lr_gpio)) {
+ r = devm_gpio_request_one(&dssdev->dev, pd->lr_gpio,
+ GPIOF_OUT_INIT_HIGH, "lcd LR");
+ if (r)
+ return r;
+ }
- memset(&props, 0, sizeof(struct backlight_properties));
- props.max_brightness = dssdev->max_backlight_level;
- props.type = BACKLIGHT_RAW;
+ if (gpio_is_valid(pd->ud_gpio)) {
+ r = devm_gpio_request_one(&dssdev->dev, pd->ud_gpio,
+ GPIOF_OUT_INIT_HIGH, "lcd UD");
+ if (r)
+ return r;
+ }
- bl = backlight_device_register("sharp-ls", &dssdev->dev, dssdev,
- &sharp_ls_bl_ops, &props);
- if (IS_ERR(bl)) {
- r = PTR_ERR(bl);
- kfree(sd);
- return r;
+ if (gpio_is_valid(pd->resb_gpio)) {
+ r = devm_gpio_request_one(&dssdev->dev, pd->resb_gpio,
+ GPIOF_OUT_INIT_LOW, "lcd RESB");
+ if (r)
+ return r;
}
- sd->bl = bl;
- bl->props.fb_blank = FB_BLANK_UNBLANK;
- bl->props.power = FB_BLANK_UNBLANK;
- bl->props.brightness = dssdev->max_backlight_level;
- r = sharp_ls_bl_update_status(bl);
- if (r < 0)
- dev_err(&dssdev->dev, "failed to set lcd brightness\n");
+ if (gpio_is_valid(pd->ini_gpio)) {
+ r = devm_gpio_request_one(&dssdev->dev, pd->ini_gpio,
+ GPIOF_OUT_INIT_LOW, "lcd INI");
+ if (r)
+ return r;
+ }
return 0;
}
static void __exit sharp_ls_panel_remove(struct omap_dss_device *dssdev)
{
- struct sharp_data *sd = dev_get_drvdata(&dssdev->dev);
- struct backlight_device *bl = sd->bl;
-
- bl->props.power = FB_BLANK_POWERDOWN;
- sharp_ls_bl_update_status(bl);
- backlight_device_unregister(bl);
-
- kfree(sd);
}
static int sharp_ls_power_on(struct omap_dss_device *dssdev)
{
+ struct panel_sharp_ls037v7dw01_data *pd = get_panel_data(dssdev);
int r = 0;
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
@@ -152,26 +125,29 @@ static int sharp_ls_power_on(struct omap_dss_device *dssdev)
/* wait couple of vsyncs until enabling the LCD */
msleep(50);
- if (dssdev->platform_enable) {
- r = dssdev->platform_enable(dssdev);
- if (r)
- goto err1;
- }
+ if (gpio_is_valid(pd->resb_gpio))
+ gpio_set_value_cansleep(pd->resb_gpio, 1);
+
+ if (gpio_is_valid(pd->ini_gpio))
+ gpio_set_value_cansleep(pd->ini_gpio, 1);
return 0;
-err1:
- omapdss_dpi_display_disable(dssdev);
err0:
return r;
}
static void sharp_ls_power_off(struct omap_dss_device *dssdev)
{
+ struct panel_sharp_ls037v7dw01_data *pd = get_panel_data(dssdev);
+
if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
return;
- if (dssdev->platform_disable)
- dssdev->platform_disable(dssdev);
+ if (gpio_is_valid(pd->ini_gpio))
+ gpio_set_value_cansleep(pd->ini_gpio, 0);
+
+ if (gpio_is_valid(pd->resb_gpio))
+ gpio_set_value_cansleep(pd->resb_gpio, 0);
/* wait at least 5 vsyncs after disabling the LCD */
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index a32407a..c4f78bd 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -33,7 +33,7 @@
#include <linux/mutex.h>
#include <video/omapdss.h>
-#include <video/omap-panel-nokia-dsi.h>
+#include <video/omap-panel-data.h>
#include <video/mipi_display.h>
/* DSI Virtual channel. Hardcoded for now. */
@@ -54,61 +54,6 @@ static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable);
static int taal_panel_reset(struct omap_dss_device *dssdev);
-/**
- * struct panel_config - panel configuration
- * @name: panel name
- * @type: panel type
- * @timings: panel resolution
- * @sleep: various panel specific delays, passed to msleep() if non-zero
- * @reset_sequence: reset sequence timings, passed to udelay() if non-zero
- * @regulators: array of panel regulators
- * @num_regulators: number of regulators in the array
- */
-struct panel_config {
- const char *name;
- int type;
-
- struct omap_video_timings timings;
-
- struct {
- unsigned int sleep_in;
- unsigned int sleep_out;
- unsigned int hw_reset;
- unsigned int enable_te;
- } sleep;
-
- struct {
- unsigned int high;
- unsigned int low;
- } reset_sequence;
-
-};
-
-enum {
- PANEL_TAAL,
-};
-
-static struct panel_config panel_configs[] = {
- {
- .name = "taal",
- .type = PANEL_TAAL,
- .timings = {
- .x_res = 864,
- .y_res = 480,
- },
- .sleep = {
- .sleep_in = 5,
- .sleep_out = 5,
- .hw_reset = 5,
- .enable_te = 100, /* possible panel bug */
- },
- .reset_sequence = {
- .high = 10,
- .low = 10,
- },
- },
-};
-
struct taal_data {
struct mutex lock;
@@ -121,9 +66,6 @@ struct taal_data {
struct omap_dss_device *dssdev;
- /* panel specific HW info */
- struct panel_config *panel_config;
-
/* panel HW configuration from DT or platform data */
int reset_gpio;
int ext_te_gpio;
@@ -134,8 +76,6 @@ struct taal_data {
/* runtime variables */
bool enabled;
- u8 rotate;
- bool mirror;
bool te_enabled;
@@ -221,8 +161,7 @@ static int taal_sleep_in(struct taal_data *td)
hw_guard_start(td, 120);
- if (td->panel_config->sleep.sleep_in)
- msleep(td->panel_config->sleep.sleep_in);
+ msleep(5);
return 0;
}
@@ -239,8 +178,7 @@ static int taal_sleep_out(struct taal_data *td)
hw_guard_start(td, 120);
- if (td->panel_config->sleep.sleep_out)
- msleep(td->panel_config->sleep.sleep_out);
+ msleep(5);
return 0;
}
@@ -262,49 +200,6 @@ static int taal_get_id(struct taal_data *td, u8 *id1, u8 *id2, u8 *id3)
return 0;
}
-static int taal_set_addr_mode(struct taal_data *td, u8 rotate, bool mirror)
-{
- int r;
- u8 mode;
- int b5, b6, b7;
-
- r = taal_dcs_read_1(td, MIPI_DCS_GET_ADDRESS_MODE, &mode);
- if (r)
- return r;
-
- switch (rotate) {
- default:
- case 0:
- b7 = 0;
- b6 = 0;
- b5 = 0;
- break;
- case 1:
- b7 = 0;
- b6 = 1;
- b5 = 1;
- break;
- case 2:
- b7 = 1;
- b6 = 1;
- b5 = 0;
- break;
- case 3:
- b7 = 1;
- b6 = 0;
- b5 = 1;
- break;
- }
-
- if (mirror)
- b6 = !b6;
-
- mode &= ~((1<<7) | (1<<6) | (1<<5));
- mode |= (b7 << 7) | (b6 << 6) | (b5 << 5);
-
- return taal_dcs_write_1(td, MIPI_DCS_SET_ADDRESS_MODE, mode);
-}
-
static int taal_set_update_window(struct taal_data *td,
u16 x, u16 y, u16 w, u16 h)
{
@@ -515,15 +410,8 @@ static const struct backlight_ops taal_bl_ops = {
static void taal_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres)
{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
-
- if (td->rotate == 0 || td->rotate == 2) {
- *xres = dssdev->panel.timings.x_res;
- *yres = dssdev->panel.timings.y_res;
- } else {
- *yres = dssdev->panel.timings.x_res;
- *xres = dssdev->panel.timings.y_res;
- }
+ *xres = dssdev->panel.timings.x_res;
+ *yres = dssdev->panel.timings.y_res;
}
static ssize_t taal_num_errors_show(struct device *dev,
@@ -845,17 +733,14 @@ static void taal_hw_reset(struct omap_dss_device *dssdev)
return;
gpio_set_value(td->reset_gpio, 1);
- if (td->panel_config->reset_sequence.high)
- udelay(td->panel_config->reset_sequence.high);
+ udelay(10);
/* reset the panel */
gpio_set_value(td->reset_gpio, 0);
/* assert reset */
- if (td->panel_config->reset_sequence.low)
- udelay(td->panel_config->reset_sequence.low);
+ udelay(10);
gpio_set_value(td->reset_gpio, 1);
/* wait after releasing reset */
- if (td->panel_config->sleep.hw_reset)
- msleep(td->panel_config->sleep.hw_reset);
+ msleep(5);
}
static void taal_probe_pdata(struct taal_data *td,
@@ -881,8 +766,7 @@ static int taal_probe(struct omap_dss_device *dssdev)
struct backlight_properties props;
struct taal_data *td;
struct backlight_device *bldev = NULL;
- int r, i;
- const char *panel_name;
+ int r;
dev_dbg(&dssdev->dev, "probe\n");
@@ -897,26 +781,13 @@ static int taal_probe(struct omap_dss_device *dssdev)
const struct nokia_dsi_panel_data *pdata = dssdev->data;
taal_probe_pdata(td, pdata);
-
- panel_name = pdata->name;
} else {
return -ENODEV;
}
- if (panel_name == NULL)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(panel_configs); i++) {
- if (strcmp(panel_name, panel_configs[i].name) == 0) {
- td->panel_config = &panel_configs[i];
- break;
- }
- }
-
- if (!td->panel_config)
- return -EINVAL;
-
- dssdev->panel.timings = td->panel_config->timings;
+ dssdev->panel.timings.x_res = 864;
+ dssdev->panel.timings.y_res = 480;
+ dssdev->panel.timings.pixel_clock = DIV_ROUND_UP(864 * 480 * 60, 1000);
dssdev->panel.dsi_pix_fmt = OMAP_DSS_DSI_FMT_RGB888;
dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
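
For reference, the hardcoded pixel clock above is just the visible-area pixel rate of an 864x480 panel at a nominal 60 Hz, expressed in kHz (the timings field is in kHz; note the "t->pixel_clock * 1000" conversions elsewhere in this series). The macro name below is illustrative only:

#include <linux/kernel.h>

/* 864 * 480 * 60 = 24883200 pixels/s;
 * DIV_ROUND_UP(24883200, 1000) = 24884, i.e. 24884 kHz. */
#define TAAL_PCLK_KHZ	DIV_ROUND_UP(864 * 480 * 60, 1000)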
@@ -1049,6 +920,15 @@ static int taal_power_on(struct omap_dss_device *dssdev)
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
u8 id1, id2, id3;
int r;
+ struct omap_dss_dsi_config dsi_config = {
+ .mode = OMAP_DSS_DSI_CMD_MODE,
+ .pixel_format = OMAP_DSS_DSI_FMT_RGB888,
+ .timings = &dssdev->panel.timings,
+ .hs_clk_min = 150000000,
+ .hs_clk_max = 300000000,
+ .lp_clk_min = 7000000,
+ .lp_clk_max = 10000000,
+ };
r = omapdss_dsi_configure_pins(dssdev, &td->pin_config);
if (r) {
@@ -1056,14 +936,9 @@ static int taal_power_on(struct omap_dss_device *dssdev)
goto err0;
};
- omapdss_dsi_set_size(dssdev, dssdev->panel.timings.x_res,
- dssdev->panel.timings.y_res);
- omapdss_dsi_set_pixel_format(dssdev, OMAP_DSS_DSI_FMT_RGB888);
- omapdss_dsi_set_operation_mode(dssdev, OMAP_DSS_DSI_CMD_MODE);
-
- r = omapdss_dsi_set_clocks(dssdev, 216000000, 10000000);
+ r = omapdss_dsi_set_config(dssdev, &dsi_config);
if (r) {
- dev_err(&dssdev->dev, "failed to set HS and LP clocks\n");
+ dev_err(&dssdev->dev, "failed to configure DSI\n");
goto err0;
}
@@ -1086,8 +961,7 @@ static int taal_power_on(struct omap_dss_device *dssdev)
goto err;
/* on early Taal revisions CABC is broken */
- if (td->panel_config->type == PANEL_TAAL &&
- (id2 == 0x00 || id2 == 0xff || id2 == 0x81))
+ if (id2 == 0x00 || id2 == 0xff || id2 == 0x81)
td->cabc_broken = true;
r = taal_dcs_write_1(td, DCS_BRIGHTNESS, 0xff);
@@ -1104,10 +978,6 @@ static int taal_power_on(struct omap_dss_device *dssdev)
if (r)
goto err;
- r = taal_set_addr_mode(td, td->rotate, td->mirror);
- if (r)
- goto err;
-
if (!td->cabc_broken) {
r = taal_dcs_write_1(td, DCS_WRITE_CABC, td->cabc_mode);
if (r)
@@ -1129,8 +999,8 @@ static int taal_power_on(struct omap_dss_device *dssdev)
td->enabled = 1;
if (!td->intro_printed) {
- dev_info(&dssdev->dev, "%s panel revision %02x.%02x.%02x\n",
- td->panel_config->name, id1, id2, id3);
+ dev_info(&dssdev->dev, "panel revision %02x.%02x.%02x\n",
+ id1, id2, id3);
if (td->cabc_broken)
dev_info(&dssdev->dev,
"old Taal version, CABC disabled\n");
@@ -1311,8 +1181,8 @@ static int taal_update(struct omap_dss_device *dssdev,
/* XXX no need to send this every frame, but dsi break if not done */
r = taal_set_update_window(td, 0, 0,
- td->panel_config->timings.x_res,
- td->panel_config->timings.y_res);
+ dssdev->panel.timings.x_res,
+ dssdev->panel.timings.y_res);
if (r)
goto err;
@@ -1365,8 +1235,8 @@ static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable)
if (!gpio_is_valid(td->ext_te_gpio))
omapdss_dsi_enable_te(dssdev, enable);
- if (td->panel_config->sleep.enable_te)
- msleep(td->panel_config->sleep.enable_te);
+ /* possible panel bug */
+ msleep(100);
return r;
}
@@ -1419,112 +1289,6 @@ static int taal_get_te(struct omap_dss_device *dssdev)
return r;
}
-static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
-{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- u16 dw, dh;
- int r;
-
- dev_dbg(&dssdev->dev, "rotate %d\n", rotate);
-
- mutex_lock(&td->lock);
-
- if (td->rotate == rotate)
- goto end;
-
- dsi_bus_lock(dssdev);
-
- if (td->enabled) {
- r = taal_wake_up(dssdev);
- if (r)
- goto err;
-
- r = taal_set_addr_mode(td, rotate, td->mirror);
- if (r)
- goto err;
- }
-
- if (rotate == 0 || rotate == 2) {
- dw = dssdev->panel.timings.x_res;
- dh = dssdev->panel.timings.y_res;
- } else {
- dw = dssdev->panel.timings.y_res;
- dh = dssdev->panel.timings.x_res;
- }
-
- omapdss_dsi_set_size(dssdev, dw, dh);
-
- td->rotate = rotate;
-
- dsi_bus_unlock(dssdev);
-end:
- mutex_unlock(&td->lock);
- return 0;
-err:
- dsi_bus_unlock(dssdev);
- mutex_unlock(&td->lock);
- return r;
-}
-
-static u8 taal_get_rotate(struct omap_dss_device *dssdev)
-{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- int r;
-
- mutex_lock(&td->lock);
- r = td->rotate;
- mutex_unlock(&td->lock);
-
- return r;
-}
-
-static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
-{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- int r;
-
- dev_dbg(&dssdev->dev, "mirror %d\n", enable);
-
- mutex_lock(&td->lock);
-
- if (td->mirror == enable)
- goto end;
-
- dsi_bus_lock(dssdev);
- if (td->enabled) {
- r = taal_wake_up(dssdev);
- if (r)
- goto err;
-
- r = taal_set_addr_mode(td, td->rotate, enable);
- if (r)
- goto err;
- }
-
- td->mirror = enable;
-
- dsi_bus_unlock(dssdev);
-end:
- mutex_unlock(&td->lock);
- return 0;
-err:
- dsi_bus_unlock(dssdev);
- mutex_unlock(&td->lock);
- return r;
-}
-
-static bool taal_get_mirror(struct omap_dss_device *dssdev)
-{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- int r;
-
- mutex_lock(&td->lock);
- r = td->mirror;
- mutex_unlock(&td->lock);
-
- return r;
-}
-
static int taal_run_test(struct omap_dss_device *dssdev, int test_num)
{
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
@@ -1758,10 +1522,6 @@ static struct omap_dss_driver taal_driver = {
.enable_te = taal_enable_te,
.get_te = taal_get_te,
- .set_rotate = taal_rotate,
- .get_rotate = taal_get_rotate,
- .set_mirror = taal_mirror,
- .get_mirror = taal_get_mirror,
.run_test = taal_run_test,
.memory_read = taal_memory_read,
diff --git a/drivers/video/omap2/displays/panel-tfp410.c b/drivers/video/omap2/displays/panel-tfp410.c
index 8281baa..46039c4 100644
--- a/drivers/video/omap2/displays/panel-tfp410.c
+++ b/drivers/video/omap2/displays/panel-tfp410.c
@@ -24,7 +24,7 @@
#include <linux/gpio.h>
#include <drm/drm_edid.h>
-#include <video/omap-panel-tfp410.h>
+#include <video/omap-panel-data.h>
static const struct omap_video_timings tfp410_default_timings = {
.x_res = 640,
@@ -135,7 +135,7 @@ static int tfp410_probe(struct omap_dss_device *dssdev)
if (!adapter) {
dev_err(&dssdev->dev, "Failed to get I2C adapter, bus %d\n",
i2c_bus_num);
- return -EINVAL;
+ return -EPROBE_DEFER;
}
ddata->i2c_adapter = adapter;
diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
index 048c983..abf2bc4 100644
--- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <video/omapdss.h>
+#include <video/omap-panel-data.h>
#define TPO_R02_MODE(x) ((x) & 7)
#define TPO_R02_MODE_800x480 7
@@ -278,9 +279,14 @@ static const struct omap_video_timings tpo_td043_timings = {
.sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
};
+static inline struct panel_tpo_td043_data
+*get_panel_data(const struct omap_dss_device *dssdev)
+{
+ return (struct panel_tpo_td043_data *) dssdev->data;
+}
+
static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043)
{
- int nreset_gpio = tpo_td043->nreset_gpio;
int r;
if (tpo_td043->powered_on)
@@ -293,8 +299,8 @@ static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043)
/* wait for panel to stabilize */
msleep(160);
- if (gpio_is_valid(nreset_gpio))
- gpio_set_value(nreset_gpio, 1);
+ if (gpio_is_valid(tpo_td043->nreset_gpio))
+ gpio_set_value(tpo_td043->nreset_gpio, 1);
tpo_td043_write(tpo_td043->spi, 2,
TPO_R02_MODE(tpo_td043->mode) | TPO_R02_NCLK_RISING);
@@ -311,16 +317,14 @@ static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043)
static void tpo_td043_power_off(struct tpo_td043_device *tpo_td043)
{
- int nreset_gpio = tpo_td043->nreset_gpio;
-
if (!tpo_td043->powered_on)
return;
tpo_td043_write(tpo_td043->spi, 3,
TPO_R03_VAL_STANDBY | TPO_R03_EN_PWM);
- if (gpio_is_valid(nreset_gpio))
- gpio_set_value(nreset_gpio, 0);
+ if (gpio_is_valid(tpo_td043->nreset_gpio))
+ gpio_set_value(tpo_td043->nreset_gpio, 0);
/* wait for at least 2 vsyncs before cutting off power */
msleep(50);
@@ -347,12 +351,6 @@ static int tpo_td043_enable_dss(struct omap_dss_device *dssdev)
if (r)
goto err0;
- if (dssdev->platform_enable) {
- r = dssdev->platform_enable(dssdev);
- if (r)
- goto err1;
- }
-
/*
* If we are resuming from system suspend, SPI clocks might not be
* enabled yet, so we'll program the LCD from SPI PM resume callback.
@@ -379,9 +377,6 @@ static void tpo_td043_disable_dss(struct omap_dss_device *dssdev)
if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
return;
- if (dssdev->platform_disable)
- dssdev->platform_disable(dssdev);
-
omapdss_dpi_display_disable(dssdev);
if (!tpo_td043->spi_suspended)
@@ -407,7 +402,7 @@ static void tpo_td043_disable(struct omap_dss_device *dssdev)
static int tpo_td043_probe(struct omap_dss_device *dssdev)
{
struct tpo_td043_device *tpo_td043 = g_tpo_td043;
- int nreset_gpio = dssdev->reset_gpio;
+ struct panel_tpo_td043_data *pdata = get_panel_data(dssdev);
int ret = 0;
dev_dbg(&dssdev->dev, "probe\n");
@@ -417,6 +412,11 @@ static int tpo_td043_probe(struct omap_dss_device *dssdev)
return -ENODEV;
}
+ if (!pdata)
+ return -EINVAL;
+
+ tpo_td043->nreset_gpio = pdata->nreset_gpio;
+
dssdev->panel.timings = tpo_td043_timings;
dssdev->ctrl.pixel_size = 24;
@@ -430,9 +430,10 @@ static int tpo_td043_probe(struct omap_dss_device *dssdev)
goto fail_regulator;
}
- if (gpio_is_valid(nreset_gpio)) {
- ret = gpio_request_one(nreset_gpio, GPIOF_OUT_INIT_LOW,
- "lcd reset");
+ if (gpio_is_valid(tpo_td043->nreset_gpio)) {
+ ret = devm_gpio_request_one(&dssdev->dev,
+ tpo_td043->nreset_gpio, GPIOF_OUT_INIT_LOW,
+ "lcd reset");
if (ret < 0) {
dev_err(&dssdev->dev, "couldn't request reset GPIO\n");
goto fail_gpio_req;
@@ -457,14 +458,11 @@ fail_regulator:
static void tpo_td043_remove(struct omap_dss_device *dssdev)
{
struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev);
- int nreset_gpio = dssdev->reset_gpio;
dev_dbg(&dssdev->dev, "remove\n");
sysfs_remove_group(&dssdev->dev.kobj, &tpo_td043_attr_group);
regulator_put(tpo_td043->vcc_reg);
- if (gpio_is_valid(nreset_gpio))
- gpio_free(nreset_gpio);
}
static void tpo_td043_set_timings(struct omap_dss_device *dssdev,
@@ -527,7 +525,6 @@ static int tpo_td043_spi_probe(struct spi_device *spi)
return -ENOMEM;
tpo_td043->spi = spi;
- tpo_td043->nreset_gpio = dssdev->reset_gpio;
dev_set_drvdata(&spi->dev, tpo_td043);
g_tpo_td043 = tpo_td043;
diff --git a/drivers/video/omap2/dss/apply.c b/drivers/video/omap2/dss/apply.c
index d446bdf..a4b356a 100644
--- a/drivers/video/omap2/dss/apply.c
+++ b/drivers/video/omap2/dss/apply.c
@@ -435,20 +435,27 @@ static inline struct omap_dss_device *dss_mgr_get_device(struct omap_overlay_man
static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr)
{
unsigned long timeout = msecs_to_jiffies(500);
- struct omap_dss_device *dssdev = mgr->get_device(mgr);
u32 irq;
int r;
+ if (mgr->output == NULL)
+ return -ENODEV;
+
r = dispc_runtime_get();
if (r)
return r;
- if (dssdev->type == OMAP_DISPLAY_TYPE_VENC)
+ switch (mgr->output->id) {
+ case OMAP_DSS_OUTPUT_VENC:
irq = DISPC_IRQ_EVSYNC_ODD;
- else if (dssdev->type == OMAP_DISPLAY_TYPE_HDMI)
+ break;
+ case OMAP_DSS_OUTPUT_HDMI:
irq = DISPC_IRQ_EVSYNC_EVEN;
- else
+ break;
+ default:
irq = dispc_mgr_get_vsync_irq(mgr->id);
+ break;
+ }
r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index f8779d4..60cc6fe 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -181,10 +181,7 @@ int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir,
write, &dss_debug_fops);
- if (IS_ERR(d))
- return PTR_ERR(d);
-
- return 0;
+ return PTR_RET(d);
}
#else /* CONFIG_OMAP2_DSS_DEBUGFS */
static inline int dss_initialize_debugfs(void)
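
PTR_RET() is a helper from <linux/err.h> (later renamed PTR_ERR_OR_ZERO()) that collapses the removed three-line tail; semantically it is equivalent to the open-coded form sketched below (hypothetical name):

#include <linux/err.h>

/* What 'return PTR_RET(d);' amounts to. */
static inline int foo_ptr_ret(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	return 0;
}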
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 05ff2b9..b33b016 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -69,6 +69,8 @@ struct dispc_features {
u8 mgr_height_start;
u16 mgr_width_max;
u16 mgr_height_max;
+ unsigned long max_lcd_pclk;
+ unsigned long max_tv_pclk;
int (*calc_scaling) (unsigned long pclk, unsigned long lclk,
const struct omap_video_timings *mgr_timings,
u16 width, u16 height, u16 out_width, u16 out_height,
@@ -85,6 +87,9 @@ struct dispc_features {
/* no DISPC_IRQ_FRAMEDONETV on this SoC */
bool no_framedone_tv:1;
+
+ /* revert to the OMAP4 mechanism of DISPC Smart Standby operation */
+ bool mstandby_workaround:1;
};
#define DISPC_MAX_NR_FIFOS 5
@@ -97,6 +102,8 @@ static struct {
int irq;
+ unsigned long core_clk_rate;
+
u32 fifo_size[DISPC_MAX_NR_FIFOS];
/* maps which plane is using a fifo. fifo-id -> plane-id */
int fifo_assignment[DISPC_MAX_NR_FIFOS];
@@ -1584,6 +1591,7 @@ static void dispc_ovl_set_scaling(enum omap_plane plane,
}
static void dispc_ovl_set_rotation_attrs(enum omap_plane plane, u8 rotation,
+ enum omap_dss_rotation_type rotation_type,
bool mirroring, enum omap_color_mode color_mode)
{
bool row_repeat = false;
@@ -1634,6 +1642,15 @@ static void dispc_ovl_set_rotation_attrs(enum omap_plane plane, u8 rotation,
if (dss_has_feature(FEAT_ROWREPEATENABLE))
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane),
row_repeat ? 1 : 0, 18, 18);
+
+ if (color_mode == OMAP_DSS_COLOR_NV12) {
+ bool doublestride = (rotation_type == OMAP_DSS_ROT_TILER) &&
+ (rotation == OMAP_DSS_ROT_0 ||
+ rotation == OMAP_DSS_ROT_180);
+ /* DOUBLESTRIDE */
+ REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), doublestride, 22, 22);
+ }
+
}
static int color_mode_to_bpp(enum omap_color_mode color_mode)
@@ -2512,7 +2529,8 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
dispc_ovl_set_vid_color_conv(plane, cconv);
}
- dispc_ovl_set_rotation_attrs(plane, rotation, mirror, color_mode);
+ dispc_ovl_set_rotation_attrs(plane, rotation, rotation_type, mirror,
+ color_mode);
dispc_ovl_set_zorder(plane, caps, zorder);
dispc_ovl_set_pre_mult_alpha(plane, caps, pre_mult_alpha);
@@ -2823,6 +2841,15 @@ static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
return true;
}
+static bool _dispc_mgr_pclk_ok(enum omap_channel channel,
+ unsigned long pclk)
+{
+ if (dss_mgr_is_lcd(channel))
+ return pclk <= dispc.feat->max_lcd_pclk ? true : false;
+ else
+ return pclk <= dispc.feat->max_tv_pclk ? true : false;
+}
+
bool dispc_mgr_timings_ok(enum omap_channel channel,
const struct omap_video_timings *timings)
{
@@ -2830,11 +2857,13 @@ bool dispc_mgr_timings_ok(enum omap_channel channel,
timings_ok = _dispc_mgr_size_ok(timings->x_res, timings->y_res);
- if (dss_mgr_is_lcd(channel))
- timings_ok = timings_ok && _dispc_lcd_timings_ok(timings->hsw,
- timings->hfp, timings->hbp,
- timings->vsw, timings->vfp,
- timings->vbp);
+ timings_ok &= _dispc_mgr_pclk_ok(channel, timings->pixel_clock * 1000);
+
+ if (dss_mgr_is_lcd(channel)) {
+ timings_ok &= _dispc_lcd_timings_ok(timings->hsw, timings->hfp,
+ timings->hbp, timings->vsw, timings->vfp,
+ timings->vbp);
+ }
return timings_ok;
}
@@ -2951,6 +2980,10 @@ static void dispc_mgr_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
dispc_write_reg(DISPC_DIVISORo(channel),
FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
+
+ if (dss_has_feature(FEAT_CORE_CLK_DIV) == false &&
+ channel == OMAP_DSS_CHANNEL_LCD)
+ dispc.core_clk_rate = dispc_fclk_rate() / lck_div;
}
static void dispc_mgr_get_lcd_divisor(enum omap_channel channel, int *lck_div,
@@ -3056,15 +3089,7 @@ unsigned long dispc_mgr_pclk_rate(enum omap_channel channel)
unsigned long dispc_core_clk_rate(void)
{
- int lcd;
- unsigned long fclk = dispc_fclk_rate();
-
- if (dss_has_feature(FEAT_CORE_CLK_DIV))
- lcd = REG_GET(DISPC_DIVISOR, 23, 16);
- else
- lcd = REG_GET(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD), 23, 16);
-
- return fclk / lcd;
+ return dispc.core_clk_rate;
}
static unsigned long dispc_plane_pclk_rate(enum omap_plane plane)
@@ -3313,67 +3338,79 @@ static void dispc_dump_regs(struct seq_file *s)
#undef DUMPREG
}
-/* with fck as input clock rate, find dispc dividers that produce req_pck */
-void dispc_find_clk_divs(unsigned long req_pck, unsigned long fck,
+/* calculate clock rates using dividers in cinfo */
+int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
struct dispc_clock_info *cinfo)
{
- u16 pcd_min, pcd_max;
- unsigned long best_pck;
- u16 best_ld, cur_ld;
- u16 best_pd, cur_pd;
+ if (cinfo->lck_div > 255 || cinfo->lck_div == 0)
+ return -EINVAL;
+ if (cinfo->pck_div < 1 || cinfo->pck_div > 255)
+ return -EINVAL;
- pcd_min = dss_feat_get_param_min(FEAT_PARAM_DSS_PCD);
- pcd_max = dss_feat_get_param_max(FEAT_PARAM_DSS_PCD);
+ cinfo->lck = dispc_fclk_rate / cinfo->lck_div;
+ cinfo->pck = cinfo->lck / cinfo->pck_div;
- best_pck = 0;
- best_ld = 0;
- best_pd = 0;
+ return 0;
+}
- for (cur_ld = 1; cur_ld <= 255; ++cur_ld) {
- unsigned long lck = fck / cur_ld;
+bool dispc_div_calc(unsigned long dispc,
+ unsigned long pck_min, unsigned long pck_max,
+ dispc_div_calc_func func, void *data)
+{
+ int lckd, lckd_start, lckd_stop;
+ int pckd, pckd_start, pckd_stop;
+ unsigned long pck, lck;
+ unsigned long lck_max;
+ unsigned long pckd_hw_min, pckd_hw_max;
+ unsigned min_fck_per_pck;
+ unsigned long fck;
- for (cur_pd = pcd_min; cur_pd <= pcd_max; ++cur_pd) {
- unsigned long pck = lck / cur_pd;
- long old_delta = abs(best_pck - req_pck);
- long new_delta = abs(pck - req_pck);
+#ifdef CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK
+ min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
+#else
+ min_fck_per_pck = 0;
+#endif
- if (best_pck == 0 || new_delta < old_delta) {
- best_pck = pck;
- best_ld = cur_ld;
- best_pd = cur_pd;
+ pckd_hw_min = dss_feat_get_param_min(FEAT_PARAM_DSS_PCD);
+ pckd_hw_max = dss_feat_get_param_max(FEAT_PARAM_DSS_PCD);
- if (pck == req_pck)
- goto found;
- }
+ lck_max = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
- if (pck < req_pck)
- break;
- }
+ pck_min = pck_min ? pck_min : 1;
+ pck_max = pck_max ? pck_max : ULONG_MAX;
- if (lck / pcd_min < req_pck)
- break;
- }
+ lckd_start = max(DIV_ROUND_UP(dispc, lck_max), 1ul);
+ lckd_stop = min(dispc / pck_min, 255ul);
-found:
- cinfo->lck_div = best_ld;
- cinfo->pck_div = best_pd;
- cinfo->lck = fck / cinfo->lck_div;
- cinfo->pck = cinfo->lck / cinfo->pck_div;
-}
+ for (lckd = lckd_start; lckd <= lckd_stop; ++lckd) {
+ lck = dispc / lckd;
-/* calculate clock rates using dividers in cinfo */
-int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
- struct dispc_clock_info *cinfo)
-{
- if (cinfo->lck_div > 255 || cinfo->lck_div == 0)
- return -EINVAL;
- if (cinfo->pck_div < 1 || cinfo->pck_div > 255)
- return -EINVAL;
+ pckd_start = max(DIV_ROUND_UP(lck, pck_max), pckd_hw_min);
+ pckd_stop = min(lck / pck_min, pckd_hw_max);
- cinfo->lck = dispc_fclk_rate / cinfo->lck_div;
- cinfo->pck = cinfo->lck / cinfo->pck_div;
+ for (pckd = pckd_start; pckd <= pckd_stop; ++pckd) {
+ pck = lck / pckd;
- return 0;
+ /*
+ * For OMAP2/3 the DISPC fclk is the same as LCD's logic
+ * clock, which means we're configuring DISPC fclk here
+ * also. Thus we need to use the calculated lck. For
+ * OMAP4+ the DISPC fclk is a separate clock.
+ */
+ if (dss_has_feature(FEAT_CORE_CLK_DIV))
+ fck = dispc_core_clk_rate();
+ else
+ fck = lck;
+
+ if (fck < pck * min_fck_per_pck)
+ continue;
+
+ if (func(lckd, pckd, lck, pck, data))
+ return true;
+ }
+ }
+
+ return false;
}
void dispc_mgr_set_clock_div(enum omap_channel channel,
@@ -3451,6 +3488,8 @@ static void _omap_dispc_initial_config(void)
l = FLD_MOD(l, 1, 0, 0);
l = FLD_MOD(l, 1, 23, 16);
dispc_write_reg(DISPC_DIVISOR, l);
+
+ dispc.core_clk_rate = dispc_fclk_rate();
}
/* FUNCGATED */
@@ -3466,6 +3505,9 @@ static void _omap_dispc_initial_config(void)
dispc_configure_burst_sizes();
dispc_ovl_enable_zorder_planes();
+
+ if (dispc.feat->mstandby_workaround)
+ REG_FLD_MOD(DISPC_MSTANDBY_CTRL, 1, 0, 0);
}
static const struct dispc_features omap24xx_dispc_feats __initconst = {
@@ -3479,6 +3521,7 @@ static const struct dispc_features omap24xx_dispc_feats __initconst = {
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
+ .max_lcd_pclk = 66500000,
.calc_scaling = dispc_ovl_calc_scaling_24xx,
.calc_core_clk = calc_core_clk_24xx,
.num_fifos = 3,
@@ -3496,6 +3539,8 @@ static const struct dispc_features omap34xx_rev1_0_dispc_feats __initconst = {
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
+ .max_lcd_pclk = 173000000,
+ .max_tv_pclk = 59000000,
.calc_scaling = dispc_ovl_calc_scaling_34xx,
.calc_core_clk = calc_core_clk_34xx,
.num_fifos = 3,
@@ -3513,6 +3558,8 @@ static const struct dispc_features omap34xx_rev3_0_dispc_feats __initconst = {
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
+ .max_lcd_pclk = 173000000,
+ .max_tv_pclk = 59000000,
.calc_scaling = dispc_ovl_calc_scaling_34xx,
.calc_core_clk = calc_core_clk_34xx,
.num_fifos = 3,
@@ -3530,6 +3577,8 @@ static const struct dispc_features omap44xx_dispc_feats __initconst = {
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
+ .max_lcd_pclk = 170000000,
+ .max_tv_pclk = 185625000,
.calc_scaling = dispc_ovl_calc_scaling_44xx,
.calc_core_clk = calc_core_clk_44xx,
.num_fifos = 5,
@@ -3547,10 +3596,13 @@ static const struct dispc_features omap54xx_dispc_feats __initconst = {
.mgr_height_start = 27,
.mgr_width_max = 4096,
.mgr_height_max = 4096,
+ .max_lcd_pclk = 170000000,
+ .max_tv_pclk = 186000000,
.calc_scaling = dispc_ovl_calc_scaling_44xx,
.calc_core_clk = calc_core_clk_44xx,
.num_fifos = 5,
.gfx_fifo_workaround = true,
+ .mstandby_workaround = true,
};
static int __init dispc_init_features(struct platform_device *pdev)
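
The new dispc_div_calc() replaces dispc_find_clk_divs()'s single "closest match" answer with an exhaustive, callback-driven search: every lck/pck divider pair whose resulting pixel clock falls inside the caller's window is offered to a callback, and the walk stops at the first candidate the callback accepts. That is what lets dpi.c chain the DSI PLL, HSDIV and DISPC divider decisions together through nested callbacks. A simplified sketch of the enumeration pattern (generic C, hypothetical names, omitting the FEAT_* parameter limits and the min_fck_per_pck constraint):

#include <stdbool.h>

typedef bool (*div_calc_func)(int lckd, int pckd,
			      unsigned long lck, unsigned long pck,
			      void *data);

/* Try every divider pair whose pixel clock lands in [pck_min, pck_max]
 * and let the caller-supplied callback decide whether it is acceptable. */
static bool div_calc(unsigned long fck,
		     unsigned long pck_min, unsigned long pck_max,
		     div_calc_func func, void *data)
{
	int lckd, pckd;

	for (lckd = 1; lckd <= 255; ++lckd) {
		unsigned long lck = fck / lckd;

		for (pckd = 1; pckd <= 255; ++pckd) {
			unsigned long pck = lck / pckd;

			if (pck < pck_min || pck > pck_max)
				continue;

			if (func(lckd, pckd, lck, pck, data))
				return true;	/* caller accepted this pair */
		}
	}

	return false;	/* nothing in range satisfied the caller */
}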
diff --git a/drivers/video/omap2/dss/dispc.h b/drivers/video/omap2/dss/dispc.h
index 222363c..de4863d 100644
--- a/drivers/video/omap2/dss/dispc.h
+++ b/drivers/video/omap2/dss/dispc.h
@@ -39,6 +39,7 @@
#define DISPC_GLOBAL_BUFFER 0x0800
#define DISPC_CONTROL3 0x0848
#define DISPC_CONFIG3 0x084C
+#define DISPC_MSTANDBY_CTRL 0x0858
/* DISPC overlay registers */
#define DISPC_OVL_BA0(n) (DISPC_OVL_BASE(n) + \
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 4af136a..757b57f 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -63,15 +63,29 @@ static struct platform_device *dpi_get_dsidev(enum omap_channel channel)
case OMAPDSS_VER_OMAP3630:
case OMAPDSS_VER_AM35xx:
return NULL;
- default:
- break;
- }
- switch (channel) {
- case OMAP_DSS_CHANNEL_LCD:
- return dsi_get_dsidev_from_id(0);
- case OMAP_DSS_CHANNEL_LCD2:
- return dsi_get_dsidev_from_id(1);
+ case OMAPDSS_VER_OMAP4430_ES1:
+ case OMAPDSS_VER_OMAP4430_ES2:
+ case OMAPDSS_VER_OMAP4:
+ switch (channel) {
+ case OMAP_DSS_CHANNEL_LCD:
+ return dsi_get_dsidev_from_id(0);
+ case OMAP_DSS_CHANNEL_LCD2:
+ return dsi_get_dsidev_from_id(1);
+ default:
+ return NULL;
+ }
+
+ case OMAPDSS_VER_OMAP5:
+ switch (channel) {
+ case OMAP_DSS_CHANNEL_LCD:
+ return dsi_get_dsidev_from_id(0);
+ case OMAP_DSS_CHANNEL_LCD3:
+ return dsi_get_dsidev_from_id(1);
+ default:
+ return NULL;
+ }
+
default:
return NULL;
}
@@ -91,75 +105,211 @@ static enum omap_dss_clk_source dpi_get_alt_clk_src(enum omap_channel channel)
}
}
-static int dpi_set_dsi_clk(struct omap_dss_device *dssdev,
+struct dpi_clk_calc_ctx {
+ struct platform_device *dsidev;
+
+ /* inputs */
+
+ unsigned long pck_min, pck_max;
+
+ /* outputs */
+
+ struct dsi_clock_info dsi_cinfo;
+ struct dss_clock_info dss_cinfo;
+ struct dispc_clock_info dispc_cinfo;
+};
+
+static bool dpi_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
+ unsigned long pck, void *data)
+{
+ struct dpi_clk_calc_ctx *ctx = data;
+
+ /*
+ * Odd dividers give us an uneven duty cycle, causing problems when
+ * level-shifted. So skip all odd dividers when the pixel clock is on the
+ * higher side.
+ */
+ if (ctx->pck_min >= 1000000) {
+ if (lckd > 1 && lckd % 2 != 0)
+ return false;
+
+ if (pckd > 1 && pckd % 2 != 0)
+ return false;
+ }
+
+ ctx->dispc_cinfo.lck_div = lckd;
+ ctx->dispc_cinfo.pck_div = pckd;
+ ctx->dispc_cinfo.lck = lck;
+ ctx->dispc_cinfo.pck = pck;
+
+ return true;
+}
+
+
+static bool dpi_calc_hsdiv_cb(int regm_dispc, unsigned long dispc,
+ void *data)
+{
+ struct dpi_clk_calc_ctx *ctx = data;
+
+ /*
+ * Odd dividers give us an uneven duty cycle, causing problems when
+ * level-shifted. So skip all odd dividers when the pixel clock is on the
+ * higher side.
+ */
+ if (regm_dispc > 1 && regm_dispc % 2 != 0 && ctx->pck_min >= 1000000)
+ return false;
+
+ ctx->dsi_cinfo.regm_dispc = regm_dispc;
+ ctx->dsi_cinfo.dsi_pll_hsdiv_dispc_clk = dispc;
+
+ return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max,
+ dpi_calc_dispc_cb, ctx);
+}
+
+
+static bool dpi_calc_pll_cb(int regn, int regm, unsigned long fint,
+ unsigned long pll,
+ void *data)
+{
+ struct dpi_clk_calc_ctx *ctx = data;
+
+ ctx->dsi_cinfo.regn = regn;
+ ctx->dsi_cinfo.regm = regm;
+ ctx->dsi_cinfo.fint = fint;
+ ctx->dsi_cinfo.clkin4ddr = pll;
+
+ return dsi_hsdiv_calc(ctx->dsidev, pll, ctx->pck_min,
+ dpi_calc_hsdiv_cb, ctx);
+}
+
+static bool dpi_calc_dss_cb(int fckd, unsigned long fck, void *data)
+{
+ struct dpi_clk_calc_ctx *ctx = data;
+
+ ctx->dss_cinfo.fck = fck;
+ ctx->dss_cinfo.fck_div = fckd;
+
+ return dispc_div_calc(fck, ctx->pck_min, ctx->pck_max,
+ dpi_calc_dispc_cb, ctx);
+}
+
+static bool dpi_dsi_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
+{
+ unsigned long clkin;
+ unsigned long pll_min, pll_max;
+
+ clkin = dsi_get_pll_clkin(dpi.dsidev);
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->dsidev = dpi.dsidev;
+ ctx->pck_min = pck - 1000;
+ ctx->pck_max = pck + 1000;
+ ctx->dsi_cinfo.clkin = clkin;
+
+ pll_min = 0;
+ pll_max = 0;
+
+ return dsi_pll_calc(dpi.dsidev, clkin,
+ pll_min, pll_max,
+ dpi_calc_pll_cb, ctx);
+}
+
+static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
+{
+ int i;
+
+ /*
+ * DSS fck gives us very few possibilities, so finding a good pixel
+ * clock may not be possible. We try multiple times to find the clock,
+ * each time widening the pixel clock range we look for, up to
+ * +/- ~15MHz.
+ */
+
+ for (i = 0; i < 25; ++i) {
+ bool ok;
+
+ memset(ctx, 0, sizeof(*ctx));
+ if (pck > 1000 * i * i * i)
+ ctx->pck_min = max(pck - 1000 * i * i * i, 0lu);
+ else
+ ctx->pck_min = 0;
+ ctx->pck_max = pck + 1000 * i * i * i;
+
+ ok = dss_div_calc(ctx->pck_min, dpi_calc_dss_cb, ctx);
+ if (ok)
+ return ok;
+ }
+
+ return false;
+}
+
+
+
+static int dpi_set_dsi_clk(enum omap_channel channel,
unsigned long pck_req, unsigned long *fck, int *lck_div,
int *pck_div)
{
- struct omap_overlay_manager *mgr = dssdev->output->manager;
- struct dsi_clock_info dsi_cinfo;
- struct dispc_clock_info dispc_cinfo;
+ struct dpi_clk_calc_ctx ctx;
int r;
+ bool ok;
- r = dsi_pll_calc_clock_div_pck(dpi.dsidev, pck_req, &dsi_cinfo,
- &dispc_cinfo);
- if (r)
- return r;
+ ok = dpi_dsi_clk_calc(pck_req, &ctx);
+ if (!ok)
+ return -EINVAL;
- r = dsi_pll_set_clock_div(dpi.dsidev, &dsi_cinfo);
+ r = dsi_pll_set_clock_div(dpi.dsidev, &ctx.dsi_cinfo);
if (r)
return r;
- dss_select_lcd_clk_source(mgr->id,
- dpi_get_alt_clk_src(mgr->id));
+ dss_select_lcd_clk_source(channel,
+ dpi_get_alt_clk_src(channel));
- dpi.mgr_config.clock_info = dispc_cinfo;
+ dpi.mgr_config.clock_info = ctx.dispc_cinfo;
- *fck = dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
- *lck_div = dispc_cinfo.lck_div;
- *pck_div = dispc_cinfo.pck_div;
+ *fck = ctx.dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
+ *lck_div = ctx.dispc_cinfo.lck_div;
+ *pck_div = ctx.dispc_cinfo.pck_div;
return 0;
}
-static int dpi_set_dispc_clk(struct omap_dss_device *dssdev,
- unsigned long pck_req, unsigned long *fck, int *lck_div,
- int *pck_div)
+static int dpi_set_dispc_clk(unsigned long pck_req, unsigned long *fck,
+ int *lck_div, int *pck_div)
{
- struct dss_clock_info dss_cinfo;
- struct dispc_clock_info dispc_cinfo;
+ struct dpi_clk_calc_ctx ctx;
int r;
+ bool ok;
- r = dss_calc_clock_div(pck_req, &dss_cinfo, &dispc_cinfo);
- if (r)
- return r;
+ ok = dpi_dss_clk_calc(pck_req, &ctx);
+ if (!ok)
+ return -EINVAL;
- r = dss_set_clock_div(&dss_cinfo);
+ r = dss_set_clock_div(&ctx.dss_cinfo);
if (r)
return r;
- dpi.mgr_config.clock_info = dispc_cinfo;
+ dpi.mgr_config.clock_info = ctx.dispc_cinfo;
- *fck = dss_cinfo.fck;
- *lck_div = dispc_cinfo.lck_div;
- *pck_div = dispc_cinfo.pck_div;
+ *fck = ctx.dss_cinfo.fck;
+ *lck_div = ctx.dispc_cinfo.lck_div;
+ *pck_div = ctx.dispc_cinfo.pck_div;
return 0;
}
-static int dpi_set_mode(struct omap_dss_device *dssdev)
+static int dpi_set_mode(struct omap_overlay_manager *mgr)
{
struct omap_video_timings *t = &dpi.timings;
- struct omap_overlay_manager *mgr = dssdev->output->manager;
int lck_div = 0, pck_div = 0;
unsigned long fck = 0;
unsigned long pck;
int r = 0;
if (dpi.dsidev)
- r = dpi_set_dsi_clk(dssdev, t->pixel_clock * 1000, &fck,
+ r = dpi_set_dsi_clk(mgr->id, t->pixel_clock * 1000, &fck,
&lck_div, &pck_div);
else
- r = dpi_set_dispc_clk(dssdev, t->pixel_clock * 1000, &fck,
+ r = dpi_set_dispc_clk(t->pixel_clock * 1000, &fck,
&lck_div, &pck_div);
if (r)
return r;
@@ -179,10 +329,8 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
return 0;
}
-static void dpi_config_lcd_manager(struct omap_dss_device *dssdev)
+static void dpi_config_lcd_manager(struct omap_overlay_manager *mgr)
{
- struct omap_overlay_manager *mgr = dssdev->output->manager;
-
dpi.mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
dpi.mgr_config.stallmode = false;
@@ -197,7 +345,7 @@ static void dpi_config_lcd_manager(struct omap_dss_device *dssdev)
int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
{
- struct omap_dss_output *out = dssdev->output;
+ struct omap_dss_output *out = &dpi.output;
int r;
mutex_lock(&dpi.lock);
@@ -230,7 +378,7 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err_get_dispc;
- r = dss_dpi_select_source(dssdev->channel);
+ r = dss_dpi_select_source(out->manager->id);
if (r)
goto err_src_sel;
@@ -244,11 +392,11 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
goto err_dsi_pll_init;
}
- r = dpi_set_mode(dssdev);
+ r = dpi_set_mode(out->manager);
if (r)
goto err_set_mode;
- dpi_config_lcd_manager(dssdev);
+ dpi_config_lcd_manager(out->manager);
mdelay(2);
@@ -285,7 +433,7 @@ EXPORT_SYMBOL(omapdss_dpi_display_enable);
void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
{
- struct omap_overlay_manager *mgr = dssdev->output->manager;
+ struct omap_overlay_manager *mgr = dpi.output.manager;
mutex_lock(&dpi.lock);
@@ -324,12 +472,12 @@ EXPORT_SYMBOL(omapdss_dpi_set_timings);
int dpi_check_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
- int r;
- struct omap_overlay_manager *mgr = dssdev->output->manager;
+ struct omap_overlay_manager *mgr = dpi.output.manager;
int lck_div, pck_div;
unsigned long fck;
unsigned long pck;
- struct dispc_clock_info dispc_cinfo;
+ struct dpi_clk_calc_ctx ctx;
+ bool ok;
if (mgr && !dispc_mgr_timings_ok(mgr->id, timings))
return -EINVAL;
@@ -338,28 +486,21 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
return -EINVAL;
if (dpi.dsidev) {
- struct dsi_clock_info dsi_cinfo;
- r = dsi_pll_calc_clock_div_pck(dpi.dsidev,
- timings->pixel_clock * 1000,
- &dsi_cinfo, &dispc_cinfo);
+ ok = dpi_dsi_clk_calc(timings->pixel_clock * 1000, &ctx);
+ if (!ok)
+ return -EINVAL;
- if (r)
- return r;
-
- fck = dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
+ fck = ctx.dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
} else {
- struct dss_clock_info dss_cinfo;
- r = dss_calc_clock_div(timings->pixel_clock * 1000,
- &dss_cinfo, &dispc_cinfo);
+ ok = dpi_dss_clk_calc(timings->pixel_clock * 1000, &ctx);
+ if (!ok)
+ return -EINVAL;
- if (r)
- return r;
-
- fck = dss_cinfo.fck;
+ fck = ctx.dss_cinfo.fck;
}
- lck_div = dispc_cinfo.lck_div;
- pck_div = dispc_cinfo.pck_div;
+ lck_div = ctx.dispc_cinfo.lck_div;
+ pck_div = ctx.dispc_cinfo.pck_div;
pck = fck / lck_div / pck_div / 1000;
@@ -379,7 +520,7 @@ void omapdss_dpi_set_data_lines(struct omap_dss_device *dssdev, int data_lines)
}
EXPORT_SYMBOL(omapdss_dpi_set_data_lines);
-static int __init dpi_verify_dsi_pll(struct platform_device *dsidev)
+static int dpi_verify_dsi_pll(struct platform_device *dsidev)
{
int r;
@@ -401,7 +542,37 @@ static int __init dpi_verify_dsi_pll(struct platform_device *dsidev)
return 0;
}
-static int __init dpi_init_display(struct omap_dss_device *dssdev)
+/*
+ * Return a hardcoded channel for the DPI output. This should work for
+ * current use cases, but this can be later expanded to either resolve
+ * the channel in some more dynamic manner, or get the channel as a user
+ * parameter.
+ */
+static enum omap_channel dpi_get_channel(void)
+{
+ switch (omapdss_get_version()) {
+ case OMAPDSS_VER_OMAP24xx:
+ case OMAPDSS_VER_OMAP34xx_ES1:
+ case OMAPDSS_VER_OMAP34xx_ES3:
+ case OMAPDSS_VER_OMAP3630:
+ case OMAPDSS_VER_AM35xx:
+ return OMAP_DSS_CHANNEL_LCD;
+
+ case OMAPDSS_VER_OMAP4430_ES1:
+ case OMAPDSS_VER_OMAP4430_ES2:
+ case OMAPDSS_VER_OMAP4:
+ return OMAP_DSS_CHANNEL_LCD2;
+
+ case OMAPDSS_VER_OMAP5:
+ return OMAP_DSS_CHANNEL_LCD3;
+
+ default:
+ DSSWARN("unsupported DSS version\n");
+ return OMAP_DSS_CHANNEL_LCD;
+ }
+}
+
+static int dpi_init_display(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev;
@@ -421,12 +592,7 @@ static int __init dpi_init_display(struct omap_dss_device *dssdev)
dpi.vdds_dsi_reg = vdds_dsi;
}
- /*
- * XXX We shouldn't need dssdev->channel for this. The dsi pll clock
- * source for DPI is SoC integration detail, not something that should
- * be configured in the dssdev
- */
- dsidev = dpi_get_dsidev(dssdev->channel);
+ dsidev = dpi_get_dsidev(dpi.output.dispc_channel);
if (dsidev && dpi_verify_dsi_pll(dsidev)) {
dsidev = NULL;
@@ -441,7 +607,7 @@ static int __init dpi_init_display(struct omap_dss_device *dssdev)
return 0;
}
-static struct omap_dss_device * __init dpi_find_dssdev(struct platform_device *pdev)
+static struct omap_dss_device *dpi_find_dssdev(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
const char *def_disp_name = omapdss_get_default_display_name();
@@ -469,7 +635,7 @@ static struct omap_dss_device * __init dpi_find_dssdev(struct platform_device *p
return def_dssdev;
}
-static void __init dpi_probe_pdata(struct platform_device *dpidev)
+static int dpi_probe_pdata(struct platform_device *dpidev)
{
struct omap_dss_device *plat_dssdev;
struct omap_dss_device *dssdev;
@@ -478,11 +644,11 @@ static void __init dpi_probe_pdata(struct platform_device *dpidev)
plat_dssdev = dpi_find_dssdev(dpidev);
if (!plat_dssdev)
- return;
+ return 0;
dssdev = dss_alloc_and_init_device(&dpidev->dev);
if (!dssdev)
- return;
+ return -ENOMEM;
dss_copy_device_pdata(dssdev, plat_dssdev);
@@ -490,7 +656,7 @@ static void __init dpi_probe_pdata(struct platform_device *dpidev)
if (r) {
DSSERR("device %s init failed: %d\n", dssdev->name, r);
dss_put_device(dssdev);
- return;
+ return r;
}
r = omapdss_output_set_device(&dpi.output, dssdev);
@@ -498,7 +664,7 @@ static void __init dpi_probe_pdata(struct platform_device *dpidev)
DSSERR("failed to connect output to new device: %s\n",
dssdev->name);
dss_put_device(dssdev);
- return;
+ return r;
}
r = dss_add_device(dssdev);
@@ -506,17 +672,21 @@ static void __init dpi_probe_pdata(struct platform_device *dpidev)
DSSERR("device %s register failed: %d\n", dssdev->name, r);
omapdss_output_unset_device(&dpi.output);
dss_put_device(dssdev);
- return;
+ return r;
}
+
+ return 0;
}
-static void __init dpi_init_output(struct platform_device *pdev)
+static void dpi_init_output(struct platform_device *pdev)
{
struct omap_dss_output *out = &dpi.output;
out->pdev = pdev;
out->id = OMAP_DSS_OUTPUT_DPI;
out->type = OMAP_DISPLAY_TYPE_DPI;
+ out->name = "dpi.0";
+ out->dispc_channel = dpi_get_channel();
dss_register_output(out);
}
@@ -528,13 +698,19 @@ static void __exit dpi_uninit_output(struct platform_device *pdev)
dss_unregister_output(out);
}
-static int __init omap_dpi_probe(struct platform_device *pdev)
+static int omap_dpi_probe(struct platform_device *pdev)
{
+ int r;
+
mutex_init(&dpi.lock);
dpi_init_output(pdev);
- dpi_probe_pdata(pdev);
+ r = dpi_probe_pdata(pdev);
+ if (r) {
+ dpi_uninit_output(pdev);
+ return r;
+ }
return 0;
}
@@ -549,6 +725,7 @@ static int __exit omap_dpi_remove(struct platform_device *pdev)
}
static struct platform_driver omap_dpi_driver = {
+ .probe = omap_dpi_probe,
.remove = __exit_p(omap_dpi_remove),
.driver = {
.name = "omapdss_dpi",
@@ -558,7 +735,7 @@ static struct platform_driver omap_dpi_driver = {
int __init dpi_init_platform_driver(void)
{
- return platform_driver_probe(&omap_dpi_driver, omap_dpi_probe);
+ return platform_driver_register(&omap_dpi_driver);
}
void __exit dpi_uninit_platform_driver(void)
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 28d41d1..a73dedc 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -200,6 +200,11 @@ struct dsi_reg { u16 idx; };
typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
+static int dsi_display_init_dispc(struct platform_device *dsidev,
+ struct omap_overlay_manager *mgr);
+static void dsi_display_uninit_dispc(struct platform_device *dsidev,
+ struct omap_overlay_manager *mgr);
+
#define DSI_MAX_NR_ISRS 2
#define DSI_MAX_NR_LANES 5
@@ -250,6 +255,24 @@ struct dsi_isr_tables {
struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
};
+struct dsi_clk_calc_ctx {
+ struct platform_device *dsidev;
+
+ /* inputs */
+
+ const struct omap_dss_dsi_config *config;
+
+ unsigned long req_pck_min, req_pck_nom, req_pck_max;
+
+ /* outputs */
+
+ struct dsi_clock_info dsi_cinfo;
+ struct dispc_clock_info dispc_cinfo;
+
+ struct omap_video_timings dispc_vm;
+ struct omap_dss_dsi_videomode_timings dsi_vm;
+};
+
struct dsi_data {
struct platform_device *pdev;
void __iomem *base;
@@ -261,6 +284,9 @@ struct dsi_data {
struct clk *dss_clk;
struct clk *sys_clk;
+ struct dispc_clock_info user_dispc_cinfo;
+ struct dsi_clock_info user_dsi_cinfo;
+
struct dsi_clock_info current_cinfo;
bool vdds_dsi_enabled;
@@ -324,6 +350,7 @@ struct dsi_data {
unsigned long lpdiv_max;
unsigned num_lanes_supported;
+ unsigned line_buffer_size;
struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
unsigned num_lanes_used;
@@ -1192,15 +1219,33 @@ static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
return r;
}
-static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
+static int dsi_lp_clock_calc(struct dsi_clock_info *cinfo,
+ unsigned long lp_clk_min, unsigned long lp_clk_max)
+{
+ unsigned long dsi_fclk = cinfo->dsi_pll_hsdiv_dsi_clk;
+ unsigned lp_clk_div;
+ unsigned long lp_clk;
+
+ lp_clk_div = DIV_ROUND_UP(dsi_fclk, lp_clk_max * 2);
+ lp_clk = dsi_fclk / 2 / lp_clk_div;
+
+ if (lp_clk < lp_clk_min || lp_clk > lp_clk_max)
+ return -EINVAL;
+
+ cinfo->lp_clk_div = lp_clk_div;
+ cinfo->lp_clk = lp_clk;
+
+ return 0;
+}
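A quick worked example of the search above, with figures that are assumptions rather than values from this patch: for dsi_pll_hsdiv_dsi_clk = 148.5 MHz and a panel that allows an LP clock between 7 and 10 MHz,

	lp_clk_div = DIV_ROUND_UP(148500000, 10000000 * 2) = 8
	lp_clk     = 148500000 / 2 / 8 = 9281250 Hz

so the computed ~9.28 MHz LP clock falls inside [lp_clk_min, lp_clk_max] and the divisor is accepted.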
+
+static int dsi_set_lp_clk_divisor(struct platform_device *dsidev)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long dsi_fclk;
unsigned lp_clk_div;
unsigned long lp_clk;
- lp_clk_div = dssdev->clocks.dsi.lp_clk_div;
+ lp_clk_div = dsi->user_dsi_cinfo.lp_clk_div;
if (lp_clk_div == 0 || lp_clk_div > dsi->lpdiv_max)
return -EINVAL;
@@ -1272,6 +1317,75 @@ static int dsi_pll_power(struct platform_device *dsidev,
return 0;
}
+unsigned long dsi_get_pll_clkin(struct platform_device *dsidev)
+{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ return clk_get_rate(dsi->sys_clk);
+}
+
+bool dsi_hsdiv_calc(struct platform_device *dsidev, unsigned long pll,
+ unsigned long out_min, dsi_hsdiv_calc_func func, void *data)
+{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ int regm, regm_start, regm_stop;
+ unsigned long out_max;
+ unsigned long out;
+
+ out_min = out_min ? out_min : 1;
+ out_max = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
+
+ regm_start = max(DIV_ROUND_UP(pll, out_max), 1ul);
+ regm_stop = min(pll / out_min, dsi->regm_dispc_max);
+
+ for (regm = regm_start; regm <= regm_stop; ++regm) {
+ out = pll / regm;
+
+ if (func(regm, out, data))
+ return true;
+ }
+
+ return false;
+}
+
+bool dsi_pll_calc(struct platform_device *dsidev, unsigned long clkin,
+ unsigned long pll_min, unsigned long pll_max,
+ dsi_pll_calc_func func, void *data)
+{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ int regn, regn_start, regn_stop;
+ int regm, regm_start, regm_stop;
+ unsigned long fint, pll;
+ const unsigned long pll_hw_max = 1800000000;
+ unsigned long fint_hw_min, fint_hw_max;
+
+ fint_hw_min = dsi->fint_min;
+ fint_hw_max = dsi->fint_max;
+
+ regn_start = max(DIV_ROUND_UP(clkin, fint_hw_max), 1ul);
+ regn_stop = min(clkin / fint_hw_min, dsi->regn_max);
+
+ pll_max = pll_max ? pll_max : ULONG_MAX;
+
+ for (regn = regn_start; regn <= regn_stop; ++regn) {
+ fint = clkin / regn;
+
+ regm_start = max(DIV_ROUND_UP(DIV_ROUND_UP(pll_min, fint), 2),
+ 1ul);
+ regm_stop = min3(pll_max / fint / 2,
+ pll_hw_max / fint / 2,
+ dsi->regm_max);
+
+ for (regm = regm_start; regm <= regm_stop; ++regm) {
+ pll = 2 * regm * fint;
+
+ if (func(regn, regm, fint, pll, data))
+ return true;
+ }
+ }
+
+ return false;
+}
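dsi_pll_calc() and dsi_hsdiv_calc() replace the old exhaustive best-match searches with generic iterators: every legal regn/regm (or regm_dispc) candidate is offered to a caller-supplied callback, and iteration stops as soon as the callback returns true. A minimal sketch of such a callback — the names here are illustrative, not part of the patch — that simply latches the first PLL rate at or above a target:

	struct pll_pick {
		unsigned long target;		/* minimum acceptable clkin4ddr, in Hz */
		struct dsi_clock_info cinfo;	/* filled in by the callback */
	};

	static bool pll_pick_first_cb(int regn, int regm, unsigned long fint,
			unsigned long pll, void *data)
	{
		struct pll_pick *pick = data;

		if (pll < pick->target)
			return false;		/* keep iterating */

		pick->cinfo.regn = regn;
		pick->cinfo.regm = regm;
		pick->cinfo.fint = fint;
		pick->cinfo.clkin4ddr = pll;

		return true;			/* good enough, stop the search */
	}

A caller would then invoke dsi_pll_calc(dsidev, dsi_get_pll_clkin(dsidev), pick.target, 0, pll_pick_first_cb, &pick), passing 0 as pll_max, which the helper treats as "no upper limit".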
+
/* calculate clock rates using dividers in cinfo */
static int dsi_calc_clock_rates(struct platform_device *dsidev,
struct dsi_clock_info *cinfo)
@@ -1316,192 +1430,7 @@ static int dsi_calc_clock_rates(struct platform_device *dsidev,
return 0;
}
-int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev,
- unsigned long req_pck, struct dsi_clock_info *dsi_cinfo,
- struct dispc_clock_info *dispc_cinfo)
-{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- struct dsi_clock_info cur, best;
- struct dispc_clock_info best_dispc;
- int min_fck_per_pck;
- int match = 0;
- unsigned long dss_sys_clk, max_dss_fck;
-
- dss_sys_clk = clk_get_rate(dsi->sys_clk);
-
- max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
-
- if (req_pck == dsi->cache_req_pck &&
- dsi->cache_cinfo.clkin == dss_sys_clk) {
- DSSDBG("DSI clock info found from cache\n");
- *dsi_cinfo = dsi->cache_cinfo;
- dispc_find_clk_divs(req_pck, dsi_cinfo->dsi_pll_hsdiv_dispc_clk,
- dispc_cinfo);
- return 0;
- }
-
- min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
-
- if (min_fck_per_pck &&
- req_pck * min_fck_per_pck > max_dss_fck) {
- DSSERR("Requested pixel clock not possible with the current "
- "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
- "the constraint off.\n");
- min_fck_per_pck = 0;
- }
-
- DSSDBG("dsi_pll_calc\n");
-
-retry:
- memset(&best, 0, sizeof(best));
- memset(&best_dispc, 0, sizeof(best_dispc));
-
- memset(&cur, 0, sizeof(cur));
- cur.clkin = dss_sys_clk;
-
- /* 0.75MHz < Fint = clkin / regn < 2.1MHz */
- /* To reduce PLL lock time, keep Fint high (around 2 MHz) */
- for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
- cur.fint = cur.clkin / cur.regn;
-
- if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
- continue;
-
- /* DSIPHY(MHz) = (2 * regm / regn) * clkin */
- for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
- unsigned long a, b;
-
- a = 2 * cur.regm * (cur.clkin/1000);
- b = cur.regn;
- cur.clkin4ddr = a / b * 1000;
-
- if (cur.clkin4ddr > 1800 * 1000 * 1000)
- break;
-
- /* dsi_pll_hsdiv_dispc_clk(MHz) =
- * DSIPHY(MHz) / regm_dispc < 173MHz/186Mhz */
- for (cur.regm_dispc = 1; cur.regm_dispc <
- dsi->regm_dispc_max; ++cur.regm_dispc) {
- struct dispc_clock_info cur_dispc;
- cur.dsi_pll_hsdiv_dispc_clk =
- cur.clkin4ddr / cur.regm_dispc;
-
- if (cur.regm_dispc > 1 &&
- cur.regm_dispc % 2 != 0 &&
- req_pck >= 1000000)
- continue;
-
- /* this will narrow down the search a bit,
- * but still give pixclocks below what was
- * requested */
- if (cur.dsi_pll_hsdiv_dispc_clk < req_pck)
- break;
-
- if (cur.dsi_pll_hsdiv_dispc_clk > max_dss_fck)
- continue;
-
- if (min_fck_per_pck &&
- cur.dsi_pll_hsdiv_dispc_clk <
- req_pck * min_fck_per_pck)
- continue;
-
- match = 1;
-
- dispc_find_clk_divs(req_pck,
- cur.dsi_pll_hsdiv_dispc_clk,
- &cur_dispc);
-
- if (abs(cur_dispc.pck - req_pck) <
- abs(best_dispc.pck - req_pck)) {
- best = cur;
- best_dispc = cur_dispc;
-
- if (cur_dispc.pck == req_pck)
- goto found;
- }
- }
- }
- }
-found:
- if (!match) {
- if (min_fck_per_pck) {
- DSSERR("Could not find suitable clock settings.\n"
- "Turning FCK/PCK constraint off and"
- "trying again.\n");
- min_fck_per_pck = 0;
- goto retry;
- }
-
- DSSERR("Could not find suitable clock settings.\n");
-
- return -EINVAL;
- }
-
- /* dsi_pll_hsdiv_dsi_clk (regm_dsi) is not used */
- best.regm_dsi = 0;
- best.dsi_pll_hsdiv_dsi_clk = 0;
-
- if (dsi_cinfo)
- *dsi_cinfo = best;
- if (dispc_cinfo)
- *dispc_cinfo = best_dispc;
-
- dsi->cache_req_pck = req_pck;
- dsi->cache_clk_freq = 0;
- dsi->cache_cinfo = best;
-
- return 0;
-}
-
-static int dsi_pll_calc_ddrfreq(struct platform_device *dsidev,
- unsigned long req_clkin4ddr, struct dsi_clock_info *cinfo)
-{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- struct dsi_clock_info cur, best;
-
- DSSDBG("dsi_pll_calc_ddrfreq\n");
-
- memset(&best, 0, sizeof(best));
- memset(&cur, 0, sizeof(cur));
-
- cur.clkin = clk_get_rate(dsi->sys_clk);
-
- for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
- cur.fint = cur.clkin / cur.regn;
-
- if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
- continue;
-
- /* DSIPHY(MHz) = (2 * regm / regn) * clkin */
- for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
- unsigned long a, b;
-
- a = 2 * cur.regm * (cur.clkin/1000);
- b = cur.regn;
- cur.clkin4ddr = a / b * 1000;
-
- if (cur.clkin4ddr > 1800 * 1000 * 1000)
- break;
-
- if (abs(cur.clkin4ddr - req_clkin4ddr) <
- abs(best.clkin4ddr - req_clkin4ddr)) {
- best = cur;
- DSSDBG("best %ld\n", best.clkin4ddr);
- }
-
- if (cur.clkin4ddr == req_clkin4ddr)
- goto found;
- }
- }
-found:
- if (cinfo)
- *cinfo = best;
-
- return 0;
-}
-
-static void dsi_pll_calc_dsi_fck(struct platform_device *dsidev,
- struct dsi_clock_info *cinfo)
+static void dsi_pll_calc_dsi_fck(struct dsi_clock_info *cinfo)
{
unsigned long max_dsi_fck;
@@ -1511,90 +1440,6 @@ static void dsi_pll_calc_dsi_fck(struct platform_device *dsidev,
cinfo->dsi_pll_hsdiv_dsi_clk = cinfo->clkin4ddr / cinfo->regm_dsi;
}
-static int dsi_pll_calc_dispc_fck(struct platform_device *dsidev,
- unsigned long req_pck, struct dsi_clock_info *cinfo,
- struct dispc_clock_info *dispc_cinfo)
-{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- unsigned regm_dispc, best_regm_dispc;
- unsigned long dispc_clk, best_dispc_clk;
- int min_fck_per_pck;
- unsigned long max_dss_fck;
- struct dispc_clock_info best_dispc;
- bool match;
-
- max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
-
- min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
-
- if (min_fck_per_pck &&
- req_pck * min_fck_per_pck > max_dss_fck) {
- DSSERR("Requested pixel clock not possible with the current "
- "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
- "the constraint off.\n");
- min_fck_per_pck = 0;
- }
-
-retry:
- best_regm_dispc = 0;
- best_dispc_clk = 0;
- memset(&best_dispc, 0, sizeof(best_dispc));
- match = false;
-
- for (regm_dispc = 1; regm_dispc < dsi->regm_dispc_max; ++regm_dispc) {
- struct dispc_clock_info cur_dispc;
-
- dispc_clk = cinfo->clkin4ddr / regm_dispc;
-
- /* this will narrow down the search a bit,
- * but still give pixclocks below what was
- * requested */
- if (dispc_clk < req_pck)
- break;
-
- if (dispc_clk > max_dss_fck)
- continue;
-
- if (min_fck_per_pck && dispc_clk < req_pck * min_fck_per_pck)
- continue;
-
- match = true;
-
- dispc_find_clk_divs(req_pck, dispc_clk, &cur_dispc);
-
- if (abs(cur_dispc.pck - req_pck) <
- abs(best_dispc.pck - req_pck)) {
- best_regm_dispc = regm_dispc;
- best_dispc_clk = dispc_clk;
- best_dispc = cur_dispc;
-
- if (cur_dispc.pck == req_pck)
- goto found;
- }
- }
-
- if (!match) {
- if (min_fck_per_pck) {
- DSSERR("Could not find suitable clock settings.\n"
- "Turning FCK/PCK constraint off and"
- "trying again.\n");
- min_fck_per_pck = 0;
- goto retry;
- }
-
- DSSERR("Could not find suitable clock settings.\n");
-
- return -EINVAL;
- }
-found:
- cinfo->regm_dispc = best_regm_dispc;
- cinfo->dsi_pll_hsdiv_dispc_clk = best_dispc_clk;
-
- *dispc_cinfo = best_dispc;
-
- return 0;
-}
-
int dsi_pll_set_clock_div(struct platform_device *dsidev,
struct dsi_clock_info *cinfo)
{
@@ -2783,6 +2628,7 @@ static int dsi_vc_enable(struct platform_device *dsidev, int channel,
static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 r;
DSSDBG("Initial config of virtual channel %d", channel);
@@ -2807,6 +2653,8 @@ static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
dsi_write_reg(dsidev, DSI_VC_CTRL(channel), r);
+
+ dsi->vc[channel].source = DSI_VC_SOURCE_L4;
}
static int dsi_vc_config_source(struct platform_device *dsidev, int channel,
@@ -3777,13 +3625,12 @@ static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev)
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
- unsigned line_buf_size = dsi_get_line_buf_size(dsidev);
struct omap_video_timings *timings = &dsi->timings;
/*
* Don't use line buffers if width is greater than the video
* port's line buffer size
*/
- if (line_buf_size <= timings->x_res * bpp / 8)
+ if (dsi->line_buffer_size <= timings->x_res * bpp / 8)
num_line_buffers = 0;
else
num_line_buffers = 2;
@@ -3799,18 +3646,22 @@ static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev)
static void dsi_config_vp_sync_events(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- bool vsync_end = dsi->vm_timings.vp_vsync_end;
- bool hsync_end = dsi->vm_timings.vp_hsync_end;
+ bool sync_end;
u32 r;
+ if (dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE)
+ sync_end = true;
+ else
+ sync_end = false;
+
r = dsi_read_reg(dsidev, DSI_CTRL);
r = FLD_MOD(r, 1, 9, 9); /* VP_DE_POL */
r = FLD_MOD(r, 1, 10, 10); /* VP_HSYNC_POL */
r = FLD_MOD(r, 1, 11, 11); /* VP_VSYNC_POL */
r = FLD_MOD(r, 1, 15, 15); /* VP_VSYNC_START */
- r = FLD_MOD(r, vsync_end, 16, 16); /* VP_VSYNC_END */
+ r = FLD_MOD(r, sync_end, 16, 16); /* VP_VSYNC_END */
r = FLD_MOD(r, 1, 17, 17); /* VP_HSYNC_START */
- r = FLD_MOD(r, hsync_end, 18, 18); /* VP_HSYNC_END */
+ r = FLD_MOD(r, sync_end, 18, 18); /* VP_HSYNC_END */
dsi_write_reg(dsidev, DSI_CTRL, r);
}
@@ -3897,9 +3748,8 @@ static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs,
return max(lp_inter, 0);
}
-static void dsi_config_cmd_mode_interleaving(struct omap_dss_device *dssdev)
+static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int blanking_mode;
int hfp_blanking_mode, hbp_blanking_mode, hsa_blanking_mode;
@@ -3910,7 +3760,7 @@ static void dsi_config_cmd_mode_interleaving(struct omap_dss_device *dssdev)
struct omap_video_timings *timings = &dsi->timings;
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
int ndl = dsi->num_lanes_used - 1;
- int dsi_fclk_hsdiv = dssdev->clocks.dsi.regm_dsi + 1;
+ int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.regm_dsi + 1;
int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
int hfp_interleave_hs = 0, hfp_interleave_lp = 0;
int hbp_interleave_hs = 0, hbp_interleave_lp = 0;
@@ -4015,9 +3865,8 @@ static void dsi_config_cmd_mode_interleaving(struct omap_dss_device *dssdev)
dsi_write_reg(dsidev, DSI_VM_TIMING6, r);
}
-static int dsi_proto_config(struct omap_dss_device *dssdev)
+static int dsi_proto_config(struct platform_device *dsidev)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 r;
int buswidth = 0;
@@ -4075,7 +3924,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
dsi_config_vp_sync_events(dsidev);
dsi_config_blanking_modes(dsidev);
- dsi_config_cmd_mode_interleaving(dssdev);
+ dsi_config_cmd_mode_interleaving(dsidev);
}
dsi_vc_initial_config(dsidev, 0);
@@ -4159,11 +4008,12 @@ static void dsi_proto_timings(struct platform_device *dsidev)
int vfp = dsi->vm_timings.vfp;
int vbp = dsi->vm_timings.vbp;
int window_sync = dsi->vm_timings.window_sync;
- bool hsync_end = dsi->vm_timings.vp_hsync_end;
+ bool hsync_end;
struct omap_video_timings *timings = &dsi->timings;
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
int tl, t_he, width_bytes;
+ hsync_end = dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE;
t_he = hsync_end ?
((hsa == 0 && ndl == 3) ? 1 : DIV_ROUND_UP(4, ndl)) : 0;
@@ -4266,82 +4116,26 @@ int omapdss_dsi_configure_pins(struct omap_dss_device *dssdev,
}
EXPORT_SYMBOL(omapdss_dsi_configure_pins);
-int omapdss_dsi_set_clocks(struct omap_dss_device *dssdev,
- unsigned long ddr_clk, unsigned long lp_clk)
-{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- struct dsi_clock_info cinfo;
- struct dispc_clock_info dispc_cinfo;
- unsigned lp_clk_div;
- unsigned long dsi_fclk;
- int bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt);
- unsigned long pck;
- int r;
-
- DSSDBG("Setting DSI clocks: ddr_clk %lu, lp_clk %lu", ddr_clk, lp_clk);
-
- mutex_lock(&dsi->lock);
-
- /* Calculate PLL output clock */
- r = dsi_pll_calc_ddrfreq(dsidev, ddr_clk * 4, &cinfo);
- if (r)
- goto err;
-
- /* Calculate PLL's DSI clock */
- dsi_pll_calc_dsi_fck(dsidev, &cinfo);
-
- /* Calculate PLL's DISPC clock and pck & lck divs */
- pck = cinfo.clkin4ddr / 16 * (dsi->num_lanes_used - 1) * 8 / bpp;
- DSSDBG("finding dispc dividers for pck %lu\n", pck);
- r = dsi_pll_calc_dispc_fck(dsidev, pck, &cinfo, &dispc_cinfo);
- if (r)
- goto err;
-
- /* Calculate LP clock */
- dsi_fclk = cinfo.dsi_pll_hsdiv_dsi_clk;
- lp_clk_div = DIV_ROUND_UP(dsi_fclk, lp_clk * 2);
-
- dssdev->clocks.dsi.regn = cinfo.regn;
- dssdev->clocks.dsi.regm = cinfo.regm;
- dssdev->clocks.dsi.regm_dispc = cinfo.regm_dispc;
- dssdev->clocks.dsi.regm_dsi = cinfo.regm_dsi;
-
- dssdev->clocks.dsi.lp_clk_div = lp_clk_div;
-
- dssdev->clocks.dispc.channel.lck_div = dispc_cinfo.lck_div;
- dssdev->clocks.dispc.channel.pck_div = dispc_cinfo.pck_div;
-
- dssdev->clocks.dispc.dispc_fclk_src = OMAP_DSS_CLK_SRC_FCK;
-
- dssdev->clocks.dispc.channel.lcd_clk_src =
- dsi->module_id == 0 ?
- OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
- OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC;
-
- dssdev->clocks.dsi.dsi_fclk_src =
- dsi->module_id == 0 ?
- OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI :
- OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI;
-
- mutex_unlock(&dsi->lock);
- return 0;
-err:
- mutex_unlock(&dsi->lock);
- return r;
-}
-EXPORT_SYMBOL(omapdss_dsi_set_clocks);
-
int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- struct omap_overlay_manager *mgr = dssdev->output->manager;
+ struct omap_overlay_manager *mgr = dsi->output.manager;
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
+ struct omap_dss_output *out = &dsi->output;
u8 data_type;
u16 word_count;
int r;
+ if (out == NULL || out->manager == NULL) {
+ DSSERR("failed to enable display: no output/manager\n");
+ return -ENODEV;
+ }
+
+ r = dsi_display_init_dispc(dsidev, mgr);
+ if (r)
+ goto err_init_dispc;
+
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
switch (dsi->pix_fmt) {
case OMAP_DSS_DSI_FMT_RGB888:
@@ -4357,8 +4151,8 @@ int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
break;
default:
- BUG();
- return -EINVAL;
+ r = -EINVAL;
+ goto err_pix_fmt;
};
dsi_if_enable(dsidev, false);
@@ -4377,16 +4171,20 @@ int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
}
r = dss_mgr_enable(mgr);
- if (r) {
- if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
- dsi_if_enable(dsidev, false);
- dsi_vc_enable(dsidev, channel, false);
- }
-
- return r;
- }
+ if (r)
+ goto err_mgr_enable;
return 0;
+
+err_mgr_enable:
+ if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
+ dsi_if_enable(dsidev, false);
+ dsi_vc_enable(dsidev, channel, false);
+ }
+err_pix_fmt:
+ dsi_display_uninit_dispc(dsidev, mgr);
+err_init_dispc:
+ return r;
}
EXPORT_SYMBOL(dsi_enable_video_output);
@@ -4394,7 +4192,7 @@ void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- struct omap_overlay_manager *mgr = dssdev->output->manager;
+ struct omap_overlay_manager *mgr = dsi->output.manager;
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
dsi_if_enable(dsidev, false);
@@ -4408,14 +4206,15 @@ void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
}
dss_mgr_disable(mgr);
+
+ dsi_display_uninit_dispc(dsidev, mgr);
}
EXPORT_SYMBOL(dsi_disable_video_output);
-static void dsi_update_screen_dispc(struct omap_dss_device *dssdev)
+static void dsi_update_screen_dispc(struct platform_device *dsidev)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- struct omap_overlay_manager *mgr = dssdev->output->manager;
+ struct omap_overlay_manager *mgr = dsi->output.manager;
unsigned bytespp;
unsigned bytespl;
unsigned bytespf;
@@ -4425,7 +4224,7 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev)
u32 l;
int r;
const unsigned channel = dsi->update_channel;
- const unsigned line_buf_size = dsi_get_line_buf_size(dsidev);
+ const unsigned line_buf_size = dsi->line_buffer_size;
u16 w = dsi->timings.x_res;
u16 h = dsi->timings.y_res;
@@ -4571,7 +4370,7 @@ int omap_dsi_update(struct omap_dss_device *dssdev, int channel,
dsi->update_bytes = dw * dh *
dsi_get_pixel_size(dsi->pix_fmt) / 8;
#endif
- dsi_update_screen_dispc(dssdev);
+ dsi_update_screen_dispc(dsidev);
return 0;
}
@@ -4579,18 +4378,17 @@ EXPORT_SYMBOL(omap_dsi_update);
/* Display funcs */
-static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
+static int dsi_configure_dispc_clocks(struct platform_device *dsidev)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct dispc_clock_info dispc_cinfo;
int r;
- unsigned long long fck;
+ unsigned long fck;
fck = dsi_get_pll_hsdiv_dispc_rate(dsidev);
- dispc_cinfo.lck_div = dssdev->clocks.dispc.channel.lck_div;
- dispc_cinfo.pck_div = dssdev->clocks.dispc.channel.pck_div;
+ dispc_cinfo.lck_div = dsi->user_dispc_cinfo.lck_div;
+ dispc_cinfo.pck_div = dsi->user_dispc_cinfo.pck_div;
r = dispc_calc_clock_rates(fck, &dispc_cinfo);
if (r) {
@@ -4603,21 +4401,17 @@ static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
return 0;
}
-static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
+static int dsi_display_init_dispc(struct platform_device *dsidev,
+ struct omap_overlay_manager *mgr)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- struct omap_overlay_manager *mgr = dssdev->output->manager;
int r;
- if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
- dsi->timings.hsw = 1;
- dsi->timings.hfp = 1;
- dsi->timings.hbp = 1;
- dsi->timings.vsw = 1;
- dsi->timings.vfp = 0;
- dsi->timings.vbp = 0;
+ dss_select_lcd_clk_source(mgr->id, dsi->module_id == 0 ?
+ OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
+ OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC);
+ if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
r = dss_mgr_register_framedone_handler(mgr,
dsi_framedone_irq_callback, dsidev);
if (r) {
@@ -4645,7 +4439,7 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
dss_mgr_set_timings(mgr, &dsi->timings);
- r = dsi_configure_dispc_clocks(dssdev);
+ r = dsi_configure_dispc_clocks(dsidev);
if (r)
goto err1;
@@ -4662,30 +4456,30 @@ err1:
dss_mgr_unregister_framedone_handler(mgr,
dsi_framedone_irq_callback, dsidev);
err:
+ dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
return r;
}
-static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev)
+static void dsi_display_uninit_dispc(struct platform_device *dsidev,
+ struct omap_overlay_manager *mgr)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- struct omap_overlay_manager *mgr = dssdev->output->manager;
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
dss_mgr_unregister_framedone_handler(mgr,
dsi_framedone_irq_callback, dsidev);
+
+ dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
}
-static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
+static int dsi_configure_dsi_clocks(struct platform_device *dsidev)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct dsi_clock_info cinfo;
int r;
- cinfo.regn = dssdev->clocks.dsi.regn;
- cinfo.regm = dssdev->clocks.dsi.regm;
- cinfo.regm_dispc = dssdev->clocks.dsi.regm_dispc;
- cinfo.regm_dsi = dssdev->clocks.dsi.regm_dsi;
+ cinfo = dsi->user_dsi_cinfo;
+
r = dsi_calc_clock_rates(dsidev, &cinfo);
if (r) {
DSSERR("Failed to calc dsi clocks\n");
@@ -4701,24 +4495,22 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
return 0;
}
-static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
+static int dsi_display_init_dsi(struct platform_device *dsidev)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- struct omap_overlay_manager *mgr = dssdev->output->manager;
int r;
r = dsi_pll_init(dsidev, true, true);
if (r)
goto err0;
- r = dsi_configure_dsi_clocks(dssdev);
+ r = dsi_configure_dsi_clocks(dsidev);
if (r)
goto err1;
- dss_select_dsi_clk_source(dsi->module_id, dssdev->clocks.dsi.dsi_fclk_src);
- dss_select_lcd_clk_source(mgr->id,
- dssdev->clocks.dispc.channel.lcd_clk_src);
+ dss_select_dsi_clk_source(dsi->module_id, dsi->module_id == 0 ?
+ OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI :
+ OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI);
DSSDBG("PLL OK\n");
@@ -4729,12 +4521,12 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
_dsi_print_reset_status(dsidev);
dsi_proto_timings(dsidev);
- dsi_set_lp_clk_divisor(dssdev);
+ dsi_set_lp_clk_divisor(dsidev);
if (1)
_dsi_print_reset_status(dsidev);
- r = dsi_proto_config(dssdev);
+ r = dsi_proto_config(dsidev);
if (r)
goto err3;
@@ -4751,20 +4543,16 @@ err3:
dsi_cio_uninit(dsidev);
err2:
dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
- dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
-
err1:
dsi_pll_uninit(dsidev, true);
err0:
return r;
}
-static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
+static void dsi_display_uninit_dsi(struct platform_device *dsidev,
bool disconnect_lanes, bool enter_ulps)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- struct omap_overlay_manager *mgr = dssdev->output->manager;
if (enter_ulps && !dsi->ulps_enabled)
dsi_enter_ulps(dsidev);
@@ -4777,7 +4565,6 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
dsi_vc_enable(dsidev, 3, 0);
dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
- dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
dsi_cio_uninit(dsidev);
dsi_pll_uninit(dsidev, disconnect_lanes);
}
@@ -4786,7 +4573,6 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- struct omap_dss_output *out = dssdev->output;
int r = 0;
DSSDBG("dsi_display_enable\n");
@@ -4795,12 +4581,6 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
mutex_lock(&dsi->lock);
- if (out == NULL || out->manager == NULL) {
- DSSERR("failed to enable display: no output/manager\n");
- r = -ENODEV;
- goto err_start_dev;
- }
-
r = omap_dss_start_device(dssdev);
if (r) {
DSSERR("failed to start device\n");
@@ -4815,11 +4595,7 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
_dsi_initialize_irq(dsidev);
- r = dsi_display_init_dispc(dssdev);
- if (r)
- goto err_init_dispc;
-
- r = dsi_display_init_dsi(dssdev);
+ r = dsi_display_init_dsi(dsidev);
if (r)
goto err_init_dsi;
@@ -4828,8 +4604,6 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
return 0;
err_init_dsi:
- dsi_display_uninit_dispc(dssdev);
-err_init_dispc:
dsi_enable_pll_clock(dsidev, 0);
dsi_runtime_put(dsidev);
err_get_dsi:
@@ -4858,9 +4632,7 @@ void omapdss_dsi_display_disable(struct omap_dss_device *dssdev,
dsi_sync_vc(dsidev, 2);
dsi_sync_vc(dsidev, 3);
- dsi_display_uninit_dispc(dssdev);
-
- dsi_display_uninit_dsi(dssdev, disconnect_lanes, enter_ulps);
+ dsi_display_uninit_dsi(dsidev, disconnect_lanes, enter_ulps);
dsi_runtime_put(dsidev);
dsi_enable_pll_clock(dsidev, 0);
@@ -4881,77 +4653,579 @@ int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
}
EXPORT_SYMBOL(omapdss_dsi_enable_te);
-void omapdss_dsi_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+#ifdef PRINT_VERBOSE_VM_TIMINGS
+static void print_dsi_vm(const char *str,
+ const struct omap_dss_dsi_videomode_timings *t)
+{
+ unsigned long byteclk = t->hsclk / 4;
+ int bl, wc, pps, tot;
+
+ wc = DIV_ROUND_UP(t->hact * t->bitspp, 8);
+ pps = DIV_ROUND_UP(wc + 6, t->ndl); /* pixel packet size */
+ bl = t->hss + t->hsa + t->hse + t->hbp + t->hfp;
+ tot = bl + pps;
+
+#define TO_DSI_T(x) ((u32)div64_u64((u64)x * 1000000000llu, byteclk))
+
+ pr_debug("%s bck %lu, %u/%u/%u/%u/%u/%u = %u+%u = %u, "
+ "%u/%u/%u/%u/%u/%u = %u + %u = %u\n",
+ str,
+ byteclk,
+ t->hss, t->hsa, t->hse, t->hbp, pps, t->hfp,
+ bl, pps, tot,
+ TO_DSI_T(t->hss),
+ TO_DSI_T(t->hsa),
+ TO_DSI_T(t->hse),
+ TO_DSI_T(t->hbp),
+ TO_DSI_T(pps),
+ TO_DSI_T(t->hfp),
+
+ TO_DSI_T(bl),
+ TO_DSI_T(pps),
+
+ TO_DSI_T(tot));
+#undef TO_DSI_T
+}
+
+static void print_dispc_vm(const char *str, const struct omap_video_timings *t)
+{
+ unsigned long pck = t->pixel_clock * 1000;
+ int hact, bl, tot;
+
+ hact = t->x_res;
+ bl = t->hsw + t->hbp + t->hfp;
+ tot = hact + bl;
+
+#define TO_DISPC_T(x) ((u32)div64_u64((u64)x * 1000000000llu, pck))
+
+ pr_debug("%s pck %lu, %u/%u/%u/%u = %u+%u = %u, "
+ "%u/%u/%u/%u = %u + %u = %u\n",
+ str,
+ pck,
+ t->hsw, t->hbp, hact, t->hfp,
+ bl, hact, tot,
+ TO_DISPC_T(t->hsw),
+ TO_DISPC_T(t->hbp),
+ TO_DISPC_T(hact),
+ TO_DISPC_T(t->hfp),
+ TO_DISPC_T(bl),
+ TO_DISPC_T(hact),
+ TO_DISPC_T(tot));
+#undef TO_DISPC_T
+}
+
+/* note: this is not quite accurate */
+static void print_dsi_dispc_vm(const char *str,
+ const struct omap_dss_dsi_videomode_timings *t)
+{
+ struct omap_video_timings vm = { 0 };
+ unsigned long byteclk = t->hsclk / 4;
+ unsigned long pck;
+ u64 dsi_tput;
+ int dsi_hact, dsi_htot;
+
+ dsi_tput = (u64)byteclk * t->ndl * 8;
+ pck = (u32)div64_u64(dsi_tput, t->bitspp);
+ dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(t->hact * t->bitspp, 8) + 6, t->ndl);
+ dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfp;
+
+ vm.pixel_clock = pck / 1000;
+ vm.hsw = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk);
+ vm.hbp = div64_u64((u64)t->hbp * pck, byteclk);
+ vm.hfp = div64_u64((u64)t->hfp * pck, byteclk);
+ vm.x_res = t->hact;
+
+ print_dispc_vm(str, &vm);
+}
+#endif /* PRINT_VERBOSE_VM_TIMINGS */
+
+static bool dsi_cm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
+ unsigned long pck, void *data)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_clk_calc_ctx *ctx = data;
+ struct omap_video_timings *t = &ctx->dispc_vm;
- mutex_lock(&dsi->lock);
+ ctx->dispc_cinfo.lck_div = lckd;
+ ctx->dispc_cinfo.pck_div = pckd;
+ ctx->dispc_cinfo.lck = lck;
+ ctx->dispc_cinfo.pck = pck;
- dsi->timings = *timings;
+ *t = *ctx->config->timings;
+ t->pixel_clock = pck / 1000;
+ t->x_res = ctx->config->timings->x_res;
+ t->y_res = ctx->config->timings->y_res;
+ t->hsw = t->hfp = t->hbp = t->vsw = 1;
+ t->vfp = t->vbp = 0;
- mutex_unlock(&dsi->lock);
+ return true;
}
-EXPORT_SYMBOL(omapdss_dsi_set_timings);
-void omapdss_dsi_set_size(struct omap_dss_device *dssdev, u16 w, u16 h)
+static bool dsi_cm_calc_hsdiv_cb(int regm_dispc, unsigned long dispc,
+ void *data)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_clk_calc_ctx *ctx = data;
- mutex_lock(&dsi->lock);
+ ctx->dsi_cinfo.regm_dispc = regm_dispc;
+ ctx->dsi_cinfo.dsi_pll_hsdiv_dispc_clk = dispc;
- dsi->timings.x_res = w;
- dsi->timings.y_res = h;
+ return dispc_div_calc(dispc, ctx->req_pck_min, ctx->req_pck_max,
+ dsi_cm_calc_dispc_cb, ctx);
+}
- mutex_unlock(&dsi->lock);
+static bool dsi_cm_calc_pll_cb(int regn, int regm, unsigned long fint,
+ unsigned long pll, void *data)
+{
+ struct dsi_clk_calc_ctx *ctx = data;
+
+ ctx->dsi_cinfo.regn = regn;
+ ctx->dsi_cinfo.regm = regm;
+ ctx->dsi_cinfo.fint = fint;
+ ctx->dsi_cinfo.clkin4ddr = pll;
+
+ return dsi_hsdiv_calc(ctx->dsidev, pll, ctx->req_pck_min,
+ dsi_cm_calc_hsdiv_cb, ctx);
}
-EXPORT_SYMBOL(omapdss_dsi_set_size);
-void omapdss_dsi_set_pixel_format(struct omap_dss_device *dssdev,
- enum omap_dss_dsi_pixel_format fmt)
+static bool dsi_cm_calc(struct dsi_data *dsi,
+ const struct omap_dss_dsi_config *cfg,
+ struct dsi_clk_calc_ctx *ctx)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ unsigned long clkin;
+ int bitspp, ndl;
+ unsigned long pll_min, pll_max;
+ unsigned long pck, txbyteclk;
- mutex_lock(&dsi->lock);
+ clkin = clk_get_rate(dsi->sys_clk);
+ bitspp = dsi_get_pixel_size(cfg->pixel_format);
+ ndl = dsi->num_lanes_used - 1;
- dsi->pix_fmt = fmt;
+ /*
+ * Here we should calculate minimum txbyteclk to be able to send the
+ * frame in time, and also to handle TE. That's not very simple, though,
+ * especially as we go to LP between each pixel packet due to HW
+ * "feature". So let's just estimate very roughly and multiply by 1.5.
+ */
+ pck = cfg->timings->pixel_clock * 1000;
+ pck = pck * 3 / 2;
+ txbyteclk = pck * bitspp / 8 / ndl;
- mutex_unlock(&dsi->lock);
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->dsidev = dsi->pdev;
+ ctx->config = cfg;
+ ctx->req_pck_min = pck;
+ ctx->req_pck_nom = pck;
+ ctx->req_pck_max = pck * 3 / 2;
+ ctx->dsi_cinfo.clkin = clkin;
+
+ pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4);
+ pll_max = cfg->hs_clk_max * 4;
+
+ return dsi_pll_calc(dsi->pdev, clkin,
+ pll_min, pll_max,
+ dsi_cm_calc_pll_cb, ctx);
}
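To make the rough command-mode estimate above concrete (all figures assumed for illustration): for a 25 MHz panel pixel clock, 24 bpp and 3 data lanes in use (ndl = 3), the code requests pck = 25 MHz * 3/2 = 37.5 MHz and txbyteclk = 37.5 MHz * 24 / 8 / 3 = 37.5 MHz, so the PLL search is bounded below by max(hs_clk_min * 4, 37.5 MHz * 16 = 600 MHz).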
-EXPORT_SYMBOL(omapdss_dsi_set_pixel_format);
-void omapdss_dsi_set_operation_mode(struct omap_dss_device *dssdev,
- enum omap_dss_dsi_mode mode)
+static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(ctx->dsidev);
+ const struct omap_dss_dsi_config *cfg = ctx->config;
+ int bitspp = dsi_get_pixel_size(cfg->pixel_format);
+ int ndl = dsi->num_lanes_used - 1;
+ unsigned long hsclk = ctx->dsi_cinfo.clkin4ddr / 4;
+ unsigned long byteclk = hsclk / 4;
- mutex_lock(&dsi->lock);
+ unsigned long dispc_pck, req_pck_min, req_pck_nom, req_pck_max;
+ int xres;
+ int panel_htot, panel_hbl; /* pixels */
+ int dispc_htot, dispc_hbl; /* pixels */
+ int dsi_htot, dsi_hact, dsi_hbl, hss, hse; /* byteclks */
+ int hfp, hsa, hbp;
+ const struct omap_video_timings *req_vm;
+ struct omap_video_timings *dispc_vm;
+ struct omap_dss_dsi_videomode_timings *dsi_vm;
+ u64 dsi_tput, dispc_tput;
- dsi->mode = mode;
+ dsi_tput = (u64)byteclk * ndl * 8;
- mutex_unlock(&dsi->lock);
+ req_vm = cfg->timings;
+ req_pck_min = ctx->req_pck_min;
+ req_pck_max = ctx->req_pck_max;
+ req_pck_nom = ctx->req_pck_nom;
+
+ dispc_pck = ctx->dispc_cinfo.pck;
+ dispc_tput = (u64)dispc_pck * bitspp;
+
+ xres = req_vm->x_res;
+
+ panel_hbl = req_vm->hfp + req_vm->hbp + req_vm->hsw;
+ panel_htot = xres + panel_hbl;
+
+ dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(xres * bitspp, 8) + 6, ndl);
+
+ /*
+ * When there are no line buffers, DISPC and DSI must have the
+ * same tput. Otherwise DISPC tput needs to be higher than DSI's.
+ */
+ if (dsi->line_buffer_size < xres * bitspp / 8) {
+ if (dispc_tput != dsi_tput)
+ return false;
+ } else {
+ if (dispc_tput < dsi_tput)
+ return false;
+ }
+
+ /* DSI tput must be over the min requirement */
+ if (dsi_tput < (u64)bitspp * req_pck_min)
+ return false;
+
+ /* When non-burst mode, DSI tput must be below max requirement. */
+ if (cfg->trans_mode != OMAP_DSS_DSI_BURST_MODE) {
+ if (dsi_tput > (u64)bitspp * req_pck_max)
+ return false;
+ }
+
+ hss = DIV_ROUND_UP(4, ndl);
+
+ if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
+ if (ndl == 3 && req_vm->hsw == 0)
+ hse = 1;
+ else
+ hse = DIV_ROUND_UP(4, ndl);
+ } else {
+ hse = 0;
+ }
+
+ /* DSI htot to match the panel's nominal pck */
+ dsi_htot = div64_u64((u64)panel_htot * byteclk, req_pck_nom);
+
+ /* fail if there would be no time for blanking */
+ if (dsi_htot < hss + hse + dsi_hact)
+ return false;
+
+ /* total DSI blanking needed to achieve panel's TL */
+ dsi_hbl = dsi_htot - dsi_hact;
+
+ /* DISPC htot to match the DSI TL */
+ dispc_htot = div64_u64((u64)dsi_htot * dispc_pck, byteclk);
+
+ /* verify that the DSI and DISPC TLs are the same */
+ if ((u64)dsi_htot * dispc_pck != (u64)dispc_htot * byteclk)
+ return false;
+
+ dispc_hbl = dispc_htot - xres;
+
+ /* setup DSI videomode */
+
+ dsi_vm = &ctx->dsi_vm;
+ memset(dsi_vm, 0, sizeof(*dsi_vm));
+
+ dsi_vm->hsclk = hsclk;
+
+ dsi_vm->ndl = ndl;
+ dsi_vm->bitspp = bitspp;
+
+ if (cfg->trans_mode != OMAP_DSS_DSI_PULSE_MODE) {
+ hsa = 0;
+ } else if (ndl == 3 && req_vm->hsw == 0) {
+ hsa = 0;
+ } else {
+ hsa = div64_u64((u64)req_vm->hsw * byteclk, req_pck_nom);
+ hsa = max(hsa - hse, 1);
+ }
+
+ hbp = div64_u64((u64)req_vm->hbp * byteclk, req_pck_nom);
+ hbp = max(hbp, 1);
+
+ hfp = dsi_hbl - (hss + hsa + hse + hbp);
+ if (hfp < 1) {
+ int t;
+ /* we need to take cycles from hbp */
+
+ t = 1 - hfp;
+ hbp = max(hbp - t, 1);
+ hfp = dsi_hbl - (hss + hsa + hse + hbp);
+
+ if (hfp < 1 && hsa > 0) {
+ /* we need to take cycles from hsa */
+ t = 1 - hfp;
+ hsa = max(hsa - t, 1);
+ hfp = dsi_hbl - (hss + hsa + hse + hbp);
+ }
+ }
+
+ if (hfp < 1)
+ return false;
+
+ dsi_vm->hss = hss;
+ dsi_vm->hsa = hsa;
+ dsi_vm->hse = hse;
+ dsi_vm->hbp = hbp;
+ dsi_vm->hact = xres;
+ dsi_vm->hfp = hfp;
+
+ dsi_vm->vsa = req_vm->vsw;
+ dsi_vm->vbp = req_vm->vbp;
+ dsi_vm->vact = req_vm->y_res;
+ dsi_vm->vfp = req_vm->vfp;
+
+ dsi_vm->trans_mode = cfg->trans_mode;
+
+ dsi_vm->blanking_mode = 0;
+ dsi_vm->hsa_blanking_mode = 1;
+ dsi_vm->hfp_blanking_mode = 1;
+ dsi_vm->hbp_blanking_mode = 1;
+
+ dsi_vm->ddr_clk_always_on = cfg->ddr_clk_always_on;
+ dsi_vm->window_sync = 4;
+
+ /* setup DISPC videomode */
+
+ dispc_vm = &ctx->dispc_vm;
+ *dispc_vm = *req_vm;
+ dispc_vm->pixel_clock = dispc_pck / 1000;
+
+ if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
+ hsa = div64_u64((u64)req_vm->hsw * dispc_pck,
+ req_pck_nom);
+ hsa = max(hsa, 1);
+ } else {
+ hsa = 1;
+ }
+
+ hbp = div64_u64((u64)req_vm->hbp * dispc_pck, req_pck_nom);
+ hbp = max(hbp, 1);
+
+ hfp = dispc_hbl - hsa - hbp;
+ if (hfp < 1) {
+ int t;
+ /* we need to take cycles from hbp */
+
+ t = 1 - hfp;
+ hbp = max(hbp - t, 1);
+ hfp = dispc_hbl - hsa - hbp;
+
+ if (hfp < 1) {
+ /* we need to take cycles from hsa */
+ t = 1 - hfp;
+ hsa = max(hsa - t, 1);
+ hfp = dispc_hbl - hsa - hbp;
+ }
+ }
+
+ if (hfp < 1)
+ return false;
+
+ dispc_vm->hfp = hfp;
+ dispc_vm->hsw = hsa;
+ dispc_vm->hbp = hbp;
+
+ return true;
}
-EXPORT_SYMBOL(omapdss_dsi_set_operation_mode);
-void omapdss_dsi_set_videomode_timings(struct omap_dss_device *dssdev,
- struct omap_dss_dsi_videomode_timings *timings)
+
+static bool dsi_vm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
+ unsigned long pck, void *data)
+{
+ struct dsi_clk_calc_ctx *ctx = data;
+
+ ctx->dispc_cinfo.lck_div = lckd;
+ ctx->dispc_cinfo.pck_div = pckd;
+ ctx->dispc_cinfo.lck = lck;
+ ctx->dispc_cinfo.pck = pck;
+
+ if (dsi_vm_calc_blanking(ctx) == false)
+ return false;
+
+#ifdef PRINT_VERBOSE_VM_TIMINGS
+ print_dispc_vm("dispc", &ctx->dispc_vm);
+ print_dsi_vm("dsi ", &ctx->dsi_vm);
+ print_dispc_vm("req ", ctx->config->timings);
+ print_dsi_dispc_vm("act ", &ctx->dsi_vm);
+#endif
+
+ return true;
+}
+
+static bool dsi_vm_calc_hsdiv_cb(int regm_dispc, unsigned long dispc,
+ void *data)
+{
+ struct dsi_clk_calc_ctx *ctx = data;
+ unsigned long pck_max;
+
+ ctx->dsi_cinfo.regm_dispc = regm_dispc;
+ ctx->dsi_cinfo.dsi_pll_hsdiv_dispc_clk = dispc;
+
+ /*
+ * In burst mode we can let the dispc pck be arbitrarily high, but it
+ * limits our scaling abilities. So for now, don't aim too high.
+ */
+
+ if (ctx->config->trans_mode == OMAP_DSS_DSI_BURST_MODE)
+ pck_max = ctx->req_pck_max + 10000000;
+ else
+ pck_max = ctx->req_pck_max;
+
+ return dispc_div_calc(dispc, ctx->req_pck_min, pck_max,
+ dsi_vm_calc_dispc_cb, ctx);
+}
+
+static bool dsi_vm_calc_pll_cb(int regn, int regm, unsigned long fint,
+ unsigned long pll, void *data)
+{
+ struct dsi_clk_calc_ctx *ctx = data;
+
+ ctx->dsi_cinfo.regn = regn;
+ ctx->dsi_cinfo.regm = regm;
+ ctx->dsi_cinfo.fint = fint;
+ ctx->dsi_cinfo.clkin4ddr = pll;
+
+ return dsi_hsdiv_calc(ctx->dsidev, pll, ctx->req_pck_min,
+ dsi_vm_calc_hsdiv_cb, ctx);
+}
+
+static bool dsi_vm_calc(struct dsi_data *dsi,
+ const struct omap_dss_dsi_config *cfg,
+ struct dsi_clk_calc_ctx *ctx)
+{
+ const struct omap_video_timings *t = cfg->timings;
+ unsigned long clkin;
+ unsigned long pll_min;
+ unsigned long pll_max;
+ int ndl = dsi->num_lanes_used - 1;
+ int bitspp = dsi_get_pixel_size(cfg->pixel_format);
+ unsigned long byteclk_min;
+
+ clkin = clk_get_rate(dsi->sys_clk);
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->dsidev = dsi->pdev;
+ ctx->config = cfg;
+
+ ctx->dsi_cinfo.clkin = clkin;
+
+ /* these limits should come from the panel driver */
+ ctx->req_pck_min = t->pixel_clock * 1000 - 1000;
+ ctx->req_pck_nom = t->pixel_clock * 1000;
+ ctx->req_pck_max = t->pixel_clock * 1000 + 1000;
+
+ byteclk_min = div64_u64((u64)ctx->req_pck_min * bitspp, ndl * 8);
+ pll_min = max(cfg->hs_clk_min * 4, byteclk_min * 4 * 4);
+
+ if (cfg->trans_mode == OMAP_DSS_DSI_BURST_MODE) {
+ pll_max = cfg->hs_clk_max * 4;
+ } else {
+ unsigned long byteclk_max;
+ byteclk_max = div64_u64((u64)ctx->req_pck_max * bitspp,
+ ndl * 8);
+
+ pll_max = byteclk_max * 4 * 4;
+ }
+
+ return dsi_pll_calc(dsi->pdev, clkin,
+ pll_min, pll_max,
+ dsi_vm_calc_pll_cb, ctx);
+}
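For the video-mode path, a similarly hedged example of the lower PLL bound: with a 74.25 MHz nominal pixel clock, 24 bpp and 3 data lanes (all assumed values), byteclk_min ≈ 74.25 MHz * 24 / (3 * 8) = 74.25 MHz, so pll_min = max(hs_clk_min * 4, 74.25 MHz * 16) ≈ 1188 MHz — roughly a 297 MHz DDR clock — before the callbacks start trimming the blanking to hit the panel's exact line time.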
+
+int omapdss_dsi_set_config(struct omap_dss_device *dssdev,
+ const struct omap_dss_dsi_config *config)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_clk_calc_ctx ctx;
+ bool ok;
+ int r;
mutex_lock(&dsi->lock);
- dsi->vm_timings = *timings;
+ dsi->pix_fmt = config->pixel_format;
+ dsi->mode = config->mode;
+
+ if (config->mode == OMAP_DSS_DSI_VIDEO_MODE)
+ ok = dsi_vm_calc(dsi, config, &ctx);
+ else
+ ok = dsi_cm_calc(dsi, config, &ctx);
+
+ if (!ok) {
+ DSSERR("failed to find suitable DSI clock settings\n");
+ r = -EINVAL;
+ goto err;
+ }
+
+ dsi_pll_calc_dsi_fck(&ctx.dsi_cinfo);
+
+ r = dsi_lp_clock_calc(&ctx.dsi_cinfo, config->lp_clk_min,
+ config->lp_clk_max);
+ if (r) {
+ DSSERR("failed to find suitable DSI LP clock settings\n");
+ goto err;
+ }
+
+ dsi->user_dsi_cinfo = ctx.dsi_cinfo;
+ dsi->user_dispc_cinfo = ctx.dispc_cinfo;
+
+ dsi->timings = ctx.dispc_vm;
+ dsi->vm_timings = ctx.dsi_vm;
mutex_unlock(&dsi->lock);
+
+ return 0;
+err:
+ mutex_unlock(&dsi->lock);
+
+ return r;
}
-EXPORT_SYMBOL(omapdss_dsi_set_videomode_timings);
+EXPORT_SYMBOL(omapdss_dsi_set_config);
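With this change a panel driver no longer programs regn/regm/divider values directly; it describes the mode, pixel format, timings and clock limits in an omap_dss_dsi_config and lets the DSI driver derive every divider. A minimal sketch of such a caller — the function name and the numeric limits are assumptions for illustration, not values taken from this patch:

	static int example_panel_apply_config(struct omap_dss_device *dssdev,
			struct omap_video_timings *timings)
	{
		struct omap_dss_dsi_config cfg = {
			.mode = OMAP_DSS_DSI_VIDEO_MODE,
			.pixel_format = OMAP_DSS_DSI_FMT_RGB888,
			.timings = timings,
			.trans_mode = OMAP_DSS_DSI_BURST_MODE,
			/* per-panel link limits, in Hz (placeholder values) */
			.hs_clk_min = 150000000,
			.hs_clk_max = 300000000,
			.lp_clk_min = 7000000,
			.lp_clk_max = 10000000,
		};

		/* picks PLL, HSDIV, DISPC and LP dividers, or fails with -EINVAL */
		return omapdss_dsi_set_config(dssdev, &cfg);
	}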
+
+/*
+ * Return a hardcoded channel for the DSI output. This should work for
+ * current use cases, but this can be later expanded to either resolve
+ * the channel in some more dynamic manner, or get the channel as a user
+ * parameter.
+ */
+static enum omap_channel dsi_get_channel(int module_id)
+{
+ switch (omapdss_get_version()) {
+ case OMAPDSS_VER_OMAP24xx:
+ DSSWARN("DSI not supported\n");
+ return OMAP_DSS_CHANNEL_LCD;
+
+ case OMAPDSS_VER_OMAP34xx_ES1:
+ case OMAPDSS_VER_OMAP34xx_ES3:
+ case OMAPDSS_VER_OMAP3630:
+ case OMAPDSS_VER_AM35xx:
+ return OMAP_DSS_CHANNEL_LCD;
+
+ case OMAPDSS_VER_OMAP4430_ES1:
+ case OMAPDSS_VER_OMAP4430_ES2:
+ case OMAPDSS_VER_OMAP4:
+ switch (module_id) {
+ case 0:
+ return OMAP_DSS_CHANNEL_LCD;
+ case 1:
+ return OMAP_DSS_CHANNEL_LCD2;
+ default:
+ DSSWARN("unsupported module id\n");
+ return OMAP_DSS_CHANNEL_LCD;
+ }
+
+ case OMAPDSS_VER_OMAP5:
+ switch (module_id) {
+ case 0:
+ return OMAP_DSS_CHANNEL_LCD;
+ case 1:
+ return OMAP_DSS_CHANNEL_LCD3;
+ default:
+ DSSWARN("unsupported module id\n");
+ return OMAP_DSS_CHANNEL_LCD;
+ }
-static int __init dsi_init_display(struct omap_dss_device *dssdev)
+ default:
+ DSSWARN("unsupported DSS version\n");
+ return OMAP_DSS_CHANNEL_LCD;
+ }
+}
+
+static int dsi_init_display(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev =
dsi_get_dsidev_from_id(dssdev->phy.dsi.module);
@@ -5073,7 +5347,7 @@ static int dsi_get_clocks(struct platform_device *dsidev)
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct clk *clk;
- clk = clk_get(&dsidev->dev, "fck");
+ clk = devm_clk_get(&dsidev->dev, "fck");
if (IS_ERR(clk)) {
DSSERR("can't get fck\n");
return PTR_ERR(clk);
@@ -5081,11 +5355,9 @@ static int dsi_get_clocks(struct platform_device *dsidev)
dsi->dss_clk = clk;
- clk = clk_get(&dsidev->dev, "sys_clk");
+ clk = devm_clk_get(&dsidev->dev, "sys_clk");
if (IS_ERR(clk)) {
DSSERR("can't get sys_clk\n");
- clk_put(dsi->dss_clk);
- dsi->dss_clk = NULL;
return PTR_ERR(clk);
}
@@ -5094,17 +5366,7 @@ static int dsi_get_clocks(struct platform_device *dsidev)
return 0;
}
-static void dsi_put_clocks(struct platform_device *dsidev)
-{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
- if (dsi->dss_clk)
- clk_put(dsi->dss_clk);
- if (dsi->sys_clk)
- clk_put(dsi->sys_clk);
-}
-
-static struct omap_dss_device * __init dsi_find_dssdev(struct platform_device *pdev)
+static struct omap_dss_device *dsi_find_dssdev(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
@@ -5136,7 +5398,7 @@ static struct omap_dss_device * __init dsi_find_dssdev(struct platform_device *p
return def_dssdev;
}
-static void __init dsi_probe_pdata(struct platform_device *dsidev)
+static int dsi_probe_pdata(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct omap_dss_device *plat_dssdev;
@@ -5146,11 +5408,11 @@ static void __init dsi_probe_pdata(struct platform_device *dsidev)
plat_dssdev = dsi_find_dssdev(dsidev);
if (!plat_dssdev)
- return;
+ return 0;
dssdev = dss_alloc_and_init_device(&dsidev->dev);
if (!dssdev)
- return;
+ return -ENOMEM;
dss_copy_device_pdata(dssdev, plat_dssdev);
@@ -5158,7 +5420,7 @@ static void __init dsi_probe_pdata(struct platform_device *dsidev)
if (r) {
DSSERR("device %s init failed: %d\n", dssdev->name, r);
dss_put_device(dssdev);
- return;
+ return r;
}
r = omapdss_output_set_device(&dsi->output, dssdev);
@@ -5166,7 +5428,7 @@ static void __init dsi_probe_pdata(struct platform_device *dsidev)
DSSERR("failed to connect output to new device: %s\n",
dssdev->name);
dss_put_device(dssdev);
- return;
+ return r;
}
r = dss_add_device(dssdev);
@@ -5174,11 +5436,13 @@ static void __init dsi_probe_pdata(struct platform_device *dsidev)
DSSERR("device %s register failed: %d\n", dssdev->name, r);
omapdss_output_unset_device(&dsi->output);
dss_put_device(dssdev);
- return;
+ return r;
}
+
+ return 0;
}
-static void __init dsi_init_output(struct platform_device *dsidev)
+static void dsi_init_output(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct omap_dss_output *out = &dsi->output;
@@ -5188,11 +5452,13 @@ static void __init dsi_init_output(struct platform_device *dsidev)
OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
out->type = OMAP_DISPLAY_TYPE_DSI;
+ out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1";
+ out->dispc_channel = dsi_get_channel(dsi->module_id);
dss_register_output(out);
}
-static void __exit dsi_uninit_output(struct platform_device *dsidev)
+static void dsi_uninit_output(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct omap_dss_output *out = &dsi->output;
@@ -5201,7 +5467,7 @@ static void __exit dsi_uninit_output(struct platform_device *dsidev)
}
/* DSI1 HW IP initialisation */
-static int __init omap_dsihw_probe(struct platform_device *dsidev)
+static int omap_dsihw_probe(struct platform_device *dsidev)
{
u32 rev;
int r, i;
@@ -5293,9 +5559,17 @@ static int __init omap_dsihw_probe(struct platform_device *dsidev)
else
dsi->num_lanes_supported = 3;
+ dsi->line_buffer_size = dsi_get_line_buf_size(dsidev);
+
dsi_init_output(dsidev);
- dsi_probe_pdata(dsidev);
+ r = dsi_probe_pdata(dsidev);
+ if (r) {
+ dsi_runtime_put(dsidev);
+ dsi_uninit_output(dsidev);
+ pm_runtime_disable(&dsidev->dev);
+ return r;
+ }
dsi_runtime_put(dsidev);
@@ -5314,7 +5588,6 @@ static int __init omap_dsihw_probe(struct platform_device *dsidev)
err_runtime_get:
pm_runtime_disable(&dsidev->dev);
- dsi_put_clocks(dsidev);
return r;
}
@@ -5330,8 +5603,6 @@ static int __exit omap_dsihw_remove(struct platform_device *dsidev)
pm_runtime_disable(&dsidev->dev);
- dsi_put_clocks(dsidev);
-
if (dsi->vdds_dsi_reg != NULL) {
if (dsi->vdds_dsi_enabled) {
regulator_disable(dsi->vdds_dsi_reg);
@@ -5369,6 +5640,7 @@ static const struct dev_pm_ops dsi_pm_ops = {
};
static struct platform_driver omap_dsihw_driver = {
+ .probe = omap_dsihw_probe,
.remove = __exit_p(omap_dsihw_remove),
.driver = {
.name = "omapdss_dsi",
@@ -5379,7 +5651,7 @@ static struct platform_driver omap_dsihw_driver = {
int __init dsi_init_platform_driver(void)
{
- return platform_driver_probe(&omap_dsihw_driver, omap_dsihw_probe);
+ return platform_driver_register(&omap_dsihw_driver);
}
void __exit dsi_uninit_platform_driver(void)
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 054c2a2..94f66f9 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -473,6 +473,47 @@ int dss_calc_clock_rates(struct dss_clock_info *cinfo)
return 0;
}
+bool dss_div_calc(unsigned long fck_min, dss_div_calc_func func, void *data)
+{
+ int fckd, fckd_start, fckd_stop;
+ unsigned long fck;
+ unsigned long fck_hw_max;
+ unsigned long fckd_hw_max;
+ unsigned long prate;
+ unsigned m;
+
+ if (dss.dpll4_m4_ck == NULL) {
+ /*
+ * TODO: dss1_fclk can be changed on OMAP2, but the available
+ * dividers are not continuous. We just use the pre-set rate for
+ * now.
+ */
+ fck = clk_get_rate(dss.dss_clk);
+ fckd = 1;
+ return func(fckd, fck, data);
+ }
+
+ fck_hw_max = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
+ fckd_hw_max = dss.feat->fck_div_max;
+
+ m = dss.feat->dss_fck_multiplier;
+ prate = dss_get_dpll4_rate();
+
+ fck_min = fck_min ? fck_min : 1;
+
+ fckd_start = min(prate * m / fck_min, fckd_hw_max);
+ fckd_stop = max(DIV_ROUND_UP(prate * m, fck_hw_max), 1ul);
+
+ for (fckd = fckd_start; fckd >= fckd_stop; --fckd) {
+ fck = prate / fckd * m;
+
+ if (func(fckd, fck, data))
+ return true;
+ }
+
+ return false;
+}
+
int dss_set_clock_div(struct dss_clock_info *cinfo)
{
if (dss.dpll4_m4_ck) {
@@ -482,7 +523,8 @@ int dss_set_clock_div(struct dss_clock_info *cinfo)
prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
DSSDBG("dpll4_m4 = %ld\n", prate);
- r = clk_set_rate(dss.dpll4_m4_ck, prate / cinfo->fck_div);
+ r = clk_set_rate(dss.dpll4_m4_ck,
+ DIV_ROUND_UP(prate, cinfo->fck_div));
if (r)
return r;
} else {
@@ -492,7 +534,9 @@ int dss_set_clock_div(struct dss_clock_info *cinfo)
dss.dss_clk_rate = clk_get_rate(dss.dss_clk);
- WARN_ONCE(dss.dss_clk_rate != cinfo->fck, "clk rate mismatch");
+ WARN_ONCE(dss.dss_clk_rate != cinfo->fck,
+ "clk rate mismatch: %lu != %lu", dss.dss_clk_rate,
+ cinfo->fck);
DSSDBG("fck = %ld (%d)\n", cinfo->fck, cinfo->fck_div);
@@ -542,121 +586,6 @@ static int dss_setup_default_clock(void)
return 0;
}
-int dss_calc_clock_div(unsigned long req_pck, struct dss_clock_info *dss_cinfo,
- struct dispc_clock_info *dispc_cinfo)
-{
- unsigned long prate;
- struct dss_clock_info best_dss;
- struct dispc_clock_info best_dispc;
-
- unsigned long fck, max_dss_fck;
-
- u16 fck_div;
-
- int match = 0;
- int min_fck_per_pck;
-
- prate = dss_get_dpll4_rate();
-
- max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
-
- fck = clk_get_rate(dss.dss_clk);
- if (req_pck == dss.cache_req_pck && prate == dss.cache_prate &&
- dss.cache_dss_cinfo.fck == fck) {
- DSSDBG("dispc clock info found from cache.\n");
- *dss_cinfo = dss.cache_dss_cinfo;
- *dispc_cinfo = dss.cache_dispc_cinfo;
- return 0;
- }
-
- min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
-
- if (min_fck_per_pck &&
- req_pck * min_fck_per_pck > max_dss_fck) {
- DSSERR("Requested pixel clock not possible with the current "
- "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
- "the constraint off.\n");
- min_fck_per_pck = 0;
- }
-
-retry:
- memset(&best_dss, 0, sizeof(best_dss));
- memset(&best_dispc, 0, sizeof(best_dispc));
-
- if (dss.dpll4_m4_ck == NULL) {
- struct dispc_clock_info cur_dispc;
- /* XXX can we change the clock on omap2? */
- fck = clk_get_rate(dss.dss_clk);
- fck_div = 1;
-
- dispc_find_clk_divs(req_pck, fck, &cur_dispc);
- match = 1;
-
- best_dss.fck = fck;
- best_dss.fck_div = fck_div;
-
- best_dispc = cur_dispc;
-
- goto found;
- } else {
- for (fck_div = dss.feat->fck_div_max; fck_div > 0; --fck_div) {
- struct dispc_clock_info cur_dispc;
-
- fck = prate / fck_div * dss.feat->dss_fck_multiplier;
-
- if (fck > max_dss_fck)
- continue;
-
- if (min_fck_per_pck &&
- fck < req_pck * min_fck_per_pck)
- continue;
-
- match = 1;
-
- dispc_find_clk_divs(req_pck, fck, &cur_dispc);
-
- if (abs(cur_dispc.pck - req_pck) <
- abs(best_dispc.pck - req_pck)) {
-
- best_dss.fck = fck;
- best_dss.fck_div = fck_div;
-
- best_dispc = cur_dispc;
-
- if (cur_dispc.pck == req_pck)
- goto found;
- }
- }
- }
-
-found:
- if (!match) {
- if (min_fck_per_pck) {
- DSSERR("Could not find suitable clock settings.\n"
- "Turning FCK/PCK constraint off and"
- "trying again.\n");
- min_fck_per_pck = 0;
- goto retry;
- }
-
- DSSERR("Could not find suitable clock settings.\n");
-
- return -EINVAL;
- }
-
- if (dss_cinfo)
- *dss_cinfo = best_dss;
- if (dispc_cinfo)
- *dispc_cinfo = best_dispc;
-
- dss.cache_req_pck = req_pck;
- dss.cache_prate = prate;
- dss.cache_dss_cinfo = best_dss;
- dss.cache_dispc_cinfo = best_dispc;
-
- return 0;
-}
-
void dss_set_venc_output(enum omap_dss_venc_type type)
{
int l = 0;
@@ -767,13 +696,11 @@ int dss_dpi_select_source(enum omap_channel channel)
static int dss_get_clocks(void)
{
struct clk *clk;
- int r;
- clk = clk_get(&dss.pdev->dev, "fck");
+ clk = devm_clk_get(&dss.pdev->dev, "fck");
if (IS_ERR(clk)) {
DSSERR("can't get clock fck\n");
- r = PTR_ERR(clk);
- goto err;
+ return PTR_ERR(clk);
}
dss.dss_clk = clk;
@@ -782,8 +709,7 @@ static int dss_get_clocks(void)
clk = clk_get(NULL, dss.feat->clk_name);
if (IS_ERR(clk)) {
DSSERR("Failed to get %s\n", dss.feat->clk_name);
- r = PTR_ERR(clk);
- goto err;
+ return PTR_ERR(clk);
}
} else {
clk = NULL;
@@ -792,21 +718,12 @@ static int dss_get_clocks(void)
dss.dpll4_m4_ck = clk;
return 0;
-
-err:
- if (dss.dss_clk)
- clk_put(dss.dss_clk);
- if (dss.dpll4_m4_ck)
- clk_put(dss.dpll4_m4_ck);
-
- return r;
}
static void dss_put_clocks(void)
{
if (dss.dpll4_m4_ck)
clk_put(dss.dpll4_m4_ck);
- clk_put(dss.dss_clk);
}
static int dss_runtime_get(void)
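The clk_get() to devm_clk_get() conversions above let the error paths simply return: the clock reference is released automatically when the device is unbound, so neither the probe error paths nor dss_put_clocks() need an explicit clk_put() for it. An illustrative helper, not taken from this patch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_fck(struct device *dev, struct clk **fck)
{
	struct clk *clk;

	clk = devm_clk_get(dev, "fck");
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* no clk_put() needed, here or in remove() */

	*fck = clk;
	return 0;
}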
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 610c8e5..8475893 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -268,14 +268,21 @@ void dss_set_dac_pwrdn_bgz(bool enable);
unsigned long dss_get_dpll4_rate(void);
int dss_calc_clock_rates(struct dss_clock_info *cinfo);
int dss_set_clock_div(struct dss_clock_info *cinfo);
-int dss_calc_clock_div(unsigned long req_pck, struct dss_clock_info *dss_cinfo,
- struct dispc_clock_info *dispc_cinfo);
+
+typedef bool (*dss_div_calc_func)(int fckd, unsigned long fck, void *data);
+bool dss_div_calc(unsigned long fck_min, dss_div_calc_func func, void *data);
/* SDI */
int sdi_init_platform_driver(void) __init;
void sdi_uninit_platform_driver(void) __exit;
/* DSI */
+
+typedef bool (*dsi_pll_calc_func)(int regn, int regm, unsigned long fint,
+ unsigned long pll, void *data);
+typedef bool (*dsi_hsdiv_calc_func)(int regm_dispc, unsigned long dispc,
+ void *data);
+
#ifdef CONFIG_OMAP2_DSS_DSI
struct dentry;
@@ -292,12 +299,17 @@ void dsi_dump_clocks(struct seq_file *s);
void dsi_irq_handler(void);
u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt);
+unsigned long dsi_get_pll_clkin(struct platform_device *dsidev);
+
+bool dsi_hsdiv_calc(struct platform_device *dsidev, unsigned long pll,
+ unsigned long out_min, dsi_hsdiv_calc_func func, void *data);
+bool dsi_pll_calc(struct platform_device *dsidev, unsigned long clkin,
+ unsigned long pll_min, unsigned long pll_max,
+ dsi_pll_calc_func func, void *data);
+
unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev);
int dsi_pll_set_clock_div(struct platform_device *dsidev,
struct dsi_clock_info *cinfo);
-int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev,
- unsigned long req_pck, struct dsi_clock_info *cinfo,
- struct dispc_clock_info *dispc_cinfo);
int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
bool enable_hsdiv);
void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes);
@@ -328,14 +340,6 @@ static inline int dsi_pll_set_clock_div(struct platform_device *dsidev,
WARN("%s: DSI not compiled in\n", __func__);
return -ENODEV;
}
-static inline int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev,
- unsigned long req_pck,
- struct dsi_clock_info *dsi_cinfo,
- struct dispc_clock_info *dispc_cinfo)
-{
- WARN("%s: DSI not compiled in\n", __func__);
- return -ENODEV;
-}
static inline int dsi_pll_init(struct platform_device *dsidev,
bool enable_hsclk, bool enable_hsdiv)
{
@@ -356,6 +360,27 @@ static inline struct platform_device *dsi_get_dsidev_from_id(int module)
{
return NULL;
}
+
+static inline unsigned long dsi_get_pll_clkin(struct platform_device *dsidev)
+{
+ return 0;
+}
+
+static inline bool dsi_hsdiv_calc(struct platform_device *dsidev,
+ unsigned long pll, unsigned long out_min,
+ dsi_hsdiv_calc_func func, void *data)
+{
+ return false;
+}
+
+static inline bool dsi_pll_calc(struct platform_device *dsidev,
+ unsigned long clkin,
+ unsigned long pll_min, unsigned long pll_max,
+ dsi_pll_calc_func func, void *data)
+{
+ return false;
+}
+
#endif
/* DPI */
@@ -376,11 +401,15 @@ void dispc_enable_fifomerge(bool enable);
void dispc_enable_gamma_table(bool enable);
void dispc_set_loadmode(enum omap_dss_load_mode mode);
+typedef bool (*dispc_div_calc_func)(int lckd, int pckd, unsigned long lck,
+ unsigned long pck, void *data);
+bool dispc_div_calc(unsigned long dispc,
+ unsigned long pck_min, unsigned long pck_max,
+ dispc_div_calc_func func, void *data);
+
bool dispc_mgr_timings_ok(enum omap_channel channel,
const struct omap_video_timings *timings);
unsigned long dispc_fclk_rate(void);
-void dispc_find_clk_divs(unsigned long req_pck, unsigned long fck,
- struct dispc_clock_info *cinfo);
int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
struct dispc_clock_info *cinfo);
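The new dsi_pll_calc_func/dsi_hsdiv_calc_func/dispc_div_calc_func typedefs describe a nested search: each iterator calls its callback for every candidate setting until a callback returns true. A sketch of how a display driver might chain them, using a hypothetical context struct that is not part of this patch:

struct example_ctx {
	struct platform_device *dsidev;
	unsigned long pck_min, pck_max;
	/* the chosen dividers would be recorded here */
};

static bool example_dispc_cb(int lckd, int pckd, unsigned long lck,
			     unsigned long pck, void *data)
{
	/* record lckd/pckd/lck/pck in the context and accept this result */
	return true;
}

static bool example_hsdiv_cb(int regm_dispc, unsigned long dispc, void *data)
{
	struct example_ctx *ctx = data;

	return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max,
			      example_dispc_cb, ctx);
}

static bool example_pll_cb(int regn, int regm, unsigned long fint,
			   unsigned long pll, void *data)
{
	struct example_ctx *ctx = data;

	return dsi_hsdiv_calc(ctx->dsidev, pll, ctx->pck_min,
			      example_hsdiv_cb, ctx);
}

static bool example_find_clocks(struct example_ctx *ctx,
				unsigned long pll_min, unsigned long pll_max)
{
	unsigned long clkin = dsi_get_pll_clkin(ctx->dsidev);

	return dsi_pll_calc(ctx->dsidev, clkin, pll_min, pll_max,
			    example_pll_cb, ctx);
}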
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index 7f791ae..77dbe0c 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -414,7 +414,7 @@ static const char * const omap5_dss_clk_source_names[] = {
};
static const struct dss_param_range omap2_dss_param_range[] = {
- [FEAT_PARAM_DSS_FCK] = { 0, 173000000 },
+ [FEAT_PARAM_DSS_FCK] = { 0, 133000000 },
[FEAT_PARAM_DSS_PCD] = { 2, 255 },
[FEAT_PARAM_DSIPLL_REGN] = { 0, 0 },
[FEAT_PARAM_DSIPLL_REGM] = { 0, 0 },
@@ -459,15 +459,15 @@ static const struct dss_param_range omap4_dss_param_range[] = {
};
static const struct dss_param_range omap5_dss_param_range[] = {
- [FEAT_PARAM_DSS_FCK] = { 0, 200000000 },
+ [FEAT_PARAM_DSS_FCK] = { 0, 209250000 },
[FEAT_PARAM_DSS_PCD] = { 1, 255 },
[FEAT_PARAM_DSIPLL_REGN] = { 0, (1 << 8) - 1 },
[FEAT_PARAM_DSIPLL_REGM] = { 0, (1 << 12) - 1 },
[FEAT_PARAM_DSIPLL_REGM_DISPC] = { 0, (1 << 5) - 1 },
[FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, (1 << 5) - 1 },
- [FEAT_PARAM_DSIPLL_FINT] = { 500000, 2500000 },
+ [FEAT_PARAM_DSIPLL_FINT] = { 150000, 52000000 },
[FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 },
- [FEAT_PARAM_DSI_FCK] = { 0, 170000000 },
+ [FEAT_PARAM_DSI_FCK] = { 0, 209250000 },
[FEAT_PARAM_DOWNSCALE] = { 1, 4 },
[FEAT_PARAM_LINEWIDTH] = { 1, 2048 },
};
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index 72923645..17f4d55 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -328,7 +328,7 @@ static void hdmi_runtime_put(void)
WARN_ON(r < 0 && r != -ENOSYS);
}
-static int __init hdmi_init_display(struct omap_dss_device *dssdev)
+static int hdmi_init_display(struct omap_dss_device *dssdev)
{
int r;
@@ -472,17 +472,12 @@ static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
* Input clock is predivided by N + 1
* output of which is reference clk
*/
- if (dssdev->clocks.hdmi.regn == 0)
- pi->regn = HDMI_DEFAULT_REGN;
- else
- pi->regn = dssdev->clocks.hdmi.regn;
+
+ pi->regn = HDMI_DEFAULT_REGN;
refclk = clkin / pi->regn;
- if (dssdev->clocks.hdmi.regm2 == 0)
- pi->regm2 = HDMI_DEFAULT_REGM2;
- else
- pi->regm2 = dssdev->clocks.hdmi.regm2;
+ pi->regm2 = HDMI_DEFAULT_REGM2;
/*
* multiplier is pixel_clk/ref_clk
@@ -804,7 +799,7 @@ static int hdmi_get_clocks(struct platform_device *pdev)
{
struct clk *clk;
- clk = clk_get(&pdev->dev, "sys_clk");
+ clk = devm_clk_get(&pdev->dev, "sys_clk");
if (IS_ERR(clk)) {
DSSERR("can't get sys_clk\n");
return PTR_ERR(clk);
@@ -815,12 +810,6 @@ static int hdmi_get_clocks(struct platform_device *pdev)
return 0;
}
-static void hdmi_put_clocks(void)
-{
- if (hdmi.sys_clk)
- clk_put(hdmi.sys_clk);
-}
-
#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts)
{
@@ -965,7 +954,7 @@ int hdmi_audio_config(struct omap_dss_audio *audio)
#endif
-static struct omap_dss_device * __init hdmi_find_dssdev(struct platform_device *pdev)
+static struct omap_dss_device *hdmi_find_dssdev(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
const char *def_disp_name = omapdss_get_default_display_name();
@@ -993,7 +982,7 @@ static struct omap_dss_device * __init hdmi_find_dssdev(struct platform_device *
return def_dssdev;
}
-static void __init hdmi_probe_pdata(struct platform_device *pdev)
+static int hdmi_probe_pdata(struct platform_device *pdev)
{
struct omap_dss_device *plat_dssdev;
struct omap_dss_device *dssdev;
@@ -1003,11 +992,11 @@ static void __init hdmi_probe_pdata(struct platform_device *pdev)
plat_dssdev = hdmi_find_dssdev(pdev);
if (!plat_dssdev)
- return;
+ return 0;
dssdev = dss_alloc_and_init_device(&pdev->dev);
if (!dssdev)
- return;
+ return -ENOMEM;
dss_copy_device_pdata(dssdev, plat_dssdev);
@@ -1017,13 +1006,11 @@ static void __init hdmi_probe_pdata(struct platform_device *pdev)
hdmi.ls_oe_gpio = priv->ls_oe_gpio;
hdmi.hpd_gpio = priv->hpd_gpio;
- dssdev->channel = OMAP_DSS_CHANNEL_DIGIT;
-
r = hdmi_init_display(dssdev);
if (r) {
DSSERR("device %s init failed: %d\n", dssdev->name, r);
dss_put_device(dssdev);
- return;
+ return r;
}
r = omapdss_output_set_device(&hdmi.output, dssdev);
@@ -1031,7 +1018,7 @@ static void __init hdmi_probe_pdata(struct platform_device *pdev)
DSSERR("failed to connect output to new device: %s\n",
dssdev->name);
dss_put_device(dssdev);
- return;
+ return r;
}
r = dss_add_device(dssdev);
@@ -1040,17 +1027,21 @@ static void __init hdmi_probe_pdata(struct platform_device *pdev)
omapdss_output_unset_device(&hdmi.output);
hdmi_uninit_display(dssdev);
dss_put_device(dssdev);
- return;
+ return r;
}
+
+ return 0;
}
-static void __init hdmi_init_output(struct platform_device *pdev)
+static void hdmi_init_output(struct platform_device *pdev)
{
struct omap_dss_output *out = &hdmi.output;
out->pdev = pdev;
out->id = OMAP_DSS_OUTPUT_HDMI;
out->type = OMAP_DISPLAY_TYPE_HDMI;
+ out->name = "hdmi.0";
+ out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
dss_register_output(out);
}
@@ -1063,7 +1054,7 @@ static void __exit hdmi_uninit_output(struct platform_device *pdev)
}
/* HDMI HW IP initialisation */
-static int __init omapdss_hdmihw_probe(struct platform_device *pdev)
+static int omapdss_hdmihw_probe(struct platform_device *pdev)
{
struct resource *res;
int r;
@@ -1097,23 +1088,25 @@ static int __init omapdss_hdmihw_probe(struct platform_device *pdev)
hdmi.ip_data.pll_offset = HDMI_PLLCTRL;
hdmi.ip_data.phy_offset = HDMI_PHY;
+ hdmi_init_output(pdev);
+
r = hdmi_panel_init();
if (r) {
DSSERR("can't init panel\n");
- goto err_panel_init;
+ return r;
}
dss_debugfs_create_file("hdmi", hdmi_dump_regs);
- hdmi_init_output(pdev);
-
- hdmi_probe_pdata(pdev);
+ r = hdmi_probe_pdata(pdev);
+ if (r) {
+ hdmi_panel_exit();
+ hdmi_uninit_output(pdev);
+ pm_runtime_disable(&pdev->dev);
+ return r;
+ }
return 0;
-
-err_panel_init:
- hdmi_put_clocks();
- return r;
}
static int __exit hdmi_remove_child(struct device *dev, void *data)
@@ -1135,8 +1128,6 @@ static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
- hdmi_put_clocks();
-
return 0;
}
@@ -1168,6 +1159,7 @@ static const struct dev_pm_ops hdmi_pm_ops = {
};
static struct platform_driver omapdss_hdmihw_driver = {
+ .probe = omapdss_hdmihw_probe,
.remove = __exit_p(omapdss_hdmihw_remove),
.driver = {
.name = "omapdss_hdmi",
@@ -1178,7 +1170,7 @@ static struct platform_driver omapdss_hdmihw_driver = {
int __init hdmi_init_platform_driver(void)
{
- return platform_driver_probe(&omapdss_hdmihw_driver, omapdss_hdmihw_probe);
+ return platform_driver_register(&omapdss_hdmihw_driver);
}
void __exit hdmi_uninit_platform_driver(void)
diff --git a/drivers/video/omap2/dss/output.c b/drivers/video/omap2/dss/output.c
index 79dea1a..5214df6 100644
--- a/drivers/video/omap2/dss/output.c
+++ b/drivers/video/omap2/dss/output.c
@@ -113,6 +113,7 @@ struct omap_dss_output *omap_dss_get_output(enum omap_dss_output_id id)
return NULL;
}
+EXPORT_SYMBOL(omap_dss_get_output);
static const struct dss_mgr_ops *dss_mgr_ops;
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index e903dd3..1a17dd1 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -943,13 +943,13 @@ void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev)
}
EXPORT_SYMBOL(omapdss_rfbi_display_disable);
-static int __init rfbi_init_display(struct omap_dss_device *dssdev)
+static int rfbi_init_display(struct omap_dss_device *dssdev)
{
rfbi.dssdev[dssdev->phy.rfbi.channel] = dssdev;
return 0;
}
-static struct omap_dss_device * __init rfbi_find_dssdev(struct platform_device *pdev)
+static struct omap_dss_device *rfbi_find_dssdev(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
const char *def_disp_name = omapdss_get_default_display_name();
@@ -977,7 +977,7 @@ static struct omap_dss_device * __init rfbi_find_dssdev(struct platform_device *
return def_dssdev;
}
-static void __init rfbi_probe_pdata(struct platform_device *rfbidev)
+static int rfbi_probe_pdata(struct platform_device *rfbidev)
{
struct omap_dss_device *plat_dssdev;
struct omap_dss_device *dssdev;
@@ -986,11 +986,11 @@ static void __init rfbi_probe_pdata(struct platform_device *rfbidev)
plat_dssdev = rfbi_find_dssdev(rfbidev);
if (!plat_dssdev)
- return;
+ return 0;
dssdev = dss_alloc_and_init_device(&rfbidev->dev);
if (!dssdev)
- return;
+ return -ENOMEM;
dss_copy_device_pdata(dssdev, plat_dssdev);
@@ -998,7 +998,7 @@ static void __init rfbi_probe_pdata(struct platform_device *rfbidev)
if (r) {
DSSERR("device %s init failed: %d\n", dssdev->name, r);
dss_put_device(dssdev);
- return;
+ return r;
}
r = omapdss_output_set_device(&rfbi.output, dssdev);
@@ -1006,7 +1006,7 @@ static void __init rfbi_probe_pdata(struct platform_device *rfbidev)
DSSERR("failed to connect output to new device: %s\n",
dssdev->name);
dss_put_device(dssdev);
- return;
+ return r;
}
r = dss_add_device(dssdev);
@@ -1014,17 +1014,21 @@ static void __init rfbi_probe_pdata(struct platform_device *rfbidev)
DSSERR("device %s register failed: %d\n", dssdev->name, r);
omapdss_output_unset_device(&rfbi.output);
dss_put_device(dssdev);
- return;
+ return r;
}
+
+ return 0;
}
-static void __init rfbi_init_output(struct platform_device *pdev)
+static void rfbi_init_output(struct platform_device *pdev)
{
struct omap_dss_output *out = &rfbi.output;
out->pdev = pdev;
out->id = OMAP_DSS_OUTPUT_DBI;
out->type = OMAP_DISPLAY_TYPE_DBI;
+ out->name = "rfbi.0";
+ out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
dss_register_output(out);
}
@@ -1037,7 +1041,7 @@ static void __exit rfbi_uninit_output(struct platform_device *pdev)
}
/* RFBI HW IP initialisation */
-static int __init omap_rfbihw_probe(struct platform_device *pdev)
+static int omap_rfbihw_probe(struct platform_device *pdev)
{
u32 rev;
struct resource *rfbi_mem;
@@ -1089,7 +1093,12 @@ static int __init omap_rfbihw_probe(struct platform_device *pdev)
rfbi_init_output(pdev);
- rfbi_probe_pdata(pdev);
+ r = rfbi_probe_pdata(pdev);
+ if (r) {
+ rfbi_uninit_output(pdev);
+ pm_runtime_disable(&pdev->dev);
+ return r;
+ }
return 0;
@@ -1133,6 +1142,7 @@ static const struct dev_pm_ops rfbi_pm_ops = {
};
static struct platform_driver omap_rfbihw_driver = {
+ .probe = omap_rfbihw_probe,
.remove = __exit_p(omap_rfbihw_remove),
.driver = {
.name = "omapdss_rfbi",
@@ -1143,7 +1153,7 @@ static struct platform_driver omap_rfbihw_driver = {
int __init rfbi_init_platform_driver(void)
{
- return platform_driver_probe(&omap_rfbihw_driver, omap_rfbihw_probe);
+ return platform_driver_register(&omap_rfbihw_driver);
}
void __exit rfbi_uninit_platform_driver(void)
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 62b5374..0bcd302 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -41,6 +41,72 @@ static struct {
struct omap_dss_output output;
} sdi;
+struct sdi_clk_calc_ctx {
+ unsigned long pck_min, pck_max;
+
+ struct dss_clock_info dss_cinfo;
+ struct dispc_clock_info dispc_cinfo;
+};
+
+static bool dpi_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
+ unsigned long pck, void *data)
+{
+ struct sdi_clk_calc_ctx *ctx = data;
+
+ ctx->dispc_cinfo.lck_div = lckd;
+ ctx->dispc_cinfo.pck_div = pckd;
+ ctx->dispc_cinfo.lck = lck;
+ ctx->dispc_cinfo.pck = pck;
+
+ return true;
+}
+
+static bool dpi_calc_dss_cb(int fckd, unsigned long fck, void *data)
+{
+ struct sdi_clk_calc_ctx *ctx = data;
+
+ ctx->dss_cinfo.fck = fck;
+ ctx->dss_cinfo.fck_div = fckd;
+
+ return dispc_div_calc(fck, ctx->pck_min, ctx->pck_max,
+ dpi_calc_dispc_cb, ctx);
+}
+
+static int sdi_calc_clock_div(unsigned long pclk,
+ struct dss_clock_info *dss_cinfo,
+ struct dispc_clock_info *dispc_cinfo)
+{
+ int i;
+ struct sdi_clk_calc_ctx ctx;
+
+ /*
+ * DSS fclk gives us very few possibilities, so finding a good pixel
+ * clock may not be possible. We try multiple times to find the clock,
+ * each time widening the pixel clock range we look for, up to
+ * +/- 1MHz.
+ */
+
+ for (i = 0; i < 10; ++i) {
+ bool ok;
+
+ memset(&ctx, 0, sizeof(ctx));
+ if (pclk > 1000 * i * i * i)
+ ctx.pck_min = max(pclk - 1000 * i * i * i, 0lu);
+ else
+ ctx.pck_min = 0;
+ ctx.pck_max = pclk + 1000 * i * i * i;
+
+ ok = dss_div_calc(ctx.pck_min, dpi_calc_dss_cb, &ctx);
+ if (ok) {
+ *dss_cinfo = ctx.dss_cinfo;
+ *dispc_cinfo = ctx.dispc_cinfo;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
static void sdi_config_lcd_manager(struct omap_dss_device *dssdev)
{
struct omap_overlay_manager *mgr = dssdev->output->manager;
@@ -88,7 +154,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
t->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
t->sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- r = dss_calc_clock_div(t->pixel_clock * 1000, &dss_cinfo, &dispc_cinfo);
+ r = sdi_calc_clock_div(t->pixel_clock * 1000, &dss_cinfo, &dispc_cinfo);
if (r)
goto err_calc_clock_div;
@@ -182,7 +248,7 @@ void omapdss_sdi_set_datapairs(struct omap_dss_device *dssdev, int datapairs)
}
EXPORT_SYMBOL(omapdss_sdi_set_datapairs);
-static int __init sdi_init_display(struct omap_dss_device *dssdev)
+static int sdi_init_display(struct omap_dss_device *dssdev)
{
DSSDBG("SDI init\n");
@@ -202,7 +268,7 @@ static int __init sdi_init_display(struct omap_dss_device *dssdev)
return 0;
}
-static struct omap_dss_device * __init sdi_find_dssdev(struct platform_device *pdev)
+static struct omap_dss_device *sdi_find_dssdev(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
const char *def_disp_name = omapdss_get_default_display_name();
@@ -230,7 +296,7 @@ static struct omap_dss_device * __init sdi_find_dssdev(struct platform_device *p
return def_dssdev;
}
-static void __init sdi_probe_pdata(struct platform_device *sdidev)
+static int sdi_probe_pdata(struct platform_device *sdidev)
{
struct omap_dss_device *plat_dssdev;
struct omap_dss_device *dssdev;
@@ -239,11 +305,11 @@ static void __init sdi_probe_pdata(struct platform_device *sdidev)
plat_dssdev = sdi_find_dssdev(sdidev);
if (!plat_dssdev)
- return;
+ return 0;
dssdev = dss_alloc_and_init_device(&sdidev->dev);
if (!dssdev)
- return;
+ return -ENOMEM;
dss_copy_device_pdata(dssdev, plat_dssdev);
@@ -251,7 +317,7 @@ static void __init sdi_probe_pdata(struct platform_device *sdidev)
if (r) {
DSSERR("device %s init failed: %d\n", dssdev->name, r);
dss_put_device(dssdev);
- return;
+ return r;
}
r = omapdss_output_set_device(&sdi.output, dssdev);
@@ -259,7 +325,7 @@ static void __init sdi_probe_pdata(struct platform_device *sdidev)
DSSERR("failed to connect output to new device: %s\n",
dssdev->name);
dss_put_device(dssdev);
- return;
+ return r;
}
r = dss_add_device(dssdev);
@@ -267,17 +333,21 @@ static void __init sdi_probe_pdata(struct platform_device *sdidev)
DSSERR("device %s register failed: %d\n", dssdev->name, r);
omapdss_output_unset_device(&sdi.output);
dss_put_device(dssdev);
- return;
+ return r;
}
+
+ return 0;
}
-static void __init sdi_init_output(struct platform_device *pdev)
+static void sdi_init_output(struct platform_device *pdev)
{
struct omap_dss_output *out = &sdi.output;
out->pdev = pdev;
out->id = OMAP_DSS_OUTPUT_SDI;
out->type = OMAP_DISPLAY_TYPE_SDI;
+ out->name = "sdi.0";
+ out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
dss_register_output(out);
}
@@ -289,11 +359,17 @@ static void __exit sdi_uninit_output(struct platform_device *pdev)
dss_unregister_output(out);
}
-static int __init omap_sdi_probe(struct platform_device *pdev)
+static int omap_sdi_probe(struct platform_device *pdev)
{
+ int r;
+
sdi_init_output(pdev);
- sdi_probe_pdata(pdev);
+ r = sdi_probe_pdata(pdev);
+ if (r) {
+ sdi_uninit_output(pdev);
+ return r;
+ }
return 0;
}
@@ -308,6 +384,7 @@ static int __exit omap_sdi_remove(struct platform_device *pdev)
}
static struct platform_driver omap_sdi_driver = {
+ .probe = omap_sdi_probe,
.remove = __exit_p(omap_sdi_remove),
.driver = {
.name = "omapdss_sdi",
@@ -317,7 +394,7 @@ static struct platform_driver omap_sdi_driver = {
int __init sdi_init_platform_driver(void)
{
- return platform_driver_probe(&omap_sdi_driver, omap_sdi_probe);
+ return platform_driver_register(&omap_sdi_driver);
}
void __exit sdi_uninit_platform_driver(void)
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 006caf3..74fdb3e 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -519,10 +519,6 @@ int omapdss_venc_display_enable(struct omap_dss_device *dssdev)
goto err0;
}
- if (dssdev->platform_enable)
- dssdev->platform_enable(dssdev);
-
-
r = venc_power_on(dssdev);
if (r)
goto err1;
@@ -533,8 +529,6 @@ int omapdss_venc_display_enable(struct omap_dss_device *dssdev)
return 0;
err1:
- if (dssdev->platform_disable)
- dssdev->platform_disable(dssdev);
omap_dss_stop_device(dssdev);
err0:
mutex_unlock(&venc.venc_lock);
@@ -551,9 +545,6 @@ void omapdss_venc_display_disable(struct omap_dss_device *dssdev)
omap_dss_stop_device(dssdev);
- if (dssdev->platform_disable)
- dssdev->platform_disable(dssdev);
-
mutex_unlock(&venc.venc_lock);
}
@@ -642,7 +633,7 @@ void omapdss_venc_invert_vid_out_polarity(struct omap_dss_device *dssdev,
mutex_unlock(&venc.venc_lock);
}
-static int __init venc_init_display(struct omap_dss_device *dssdev)
+static int venc_init_display(struct omap_dss_device *dssdev)
{
DSSDBG("init_display\n");
@@ -721,7 +712,7 @@ static int venc_get_clocks(struct platform_device *pdev)
struct clk *clk;
if (dss_has_feature(FEAT_VENC_REQUIRES_TV_DAC_CLK)) {
- clk = clk_get(&pdev->dev, "tv_dac_clk");
+ clk = devm_clk_get(&pdev->dev, "tv_dac_clk");
if (IS_ERR(clk)) {
DSSERR("can't get tv_dac_clk\n");
return PTR_ERR(clk);
@@ -735,13 +726,7 @@ static int venc_get_clocks(struct platform_device *pdev)
return 0;
}
-static void venc_put_clocks(void)
-{
- if (venc.tv_dac_clk)
- clk_put(venc.tv_dac_clk);
-}
-
-static struct omap_dss_device * __init venc_find_dssdev(struct platform_device *pdev)
+static struct omap_dss_device *venc_find_dssdev(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
const char *def_disp_name = omapdss_get_default_display_name();
@@ -769,7 +754,7 @@ static struct omap_dss_device * __init venc_find_dssdev(struct platform_device *
return def_dssdev;
}
-static void __init venc_probe_pdata(struct platform_device *vencdev)
+static int venc_probe_pdata(struct platform_device *vencdev)
{
struct omap_dss_device *plat_dssdev;
struct omap_dss_device *dssdev;
@@ -778,21 +763,19 @@ static void __init venc_probe_pdata(struct platform_device *vencdev)
plat_dssdev = venc_find_dssdev(vencdev);
if (!plat_dssdev)
- return;
+ return 0;
dssdev = dss_alloc_and_init_device(&vencdev->dev);
if (!dssdev)
- return;
+ return -ENOMEM;
dss_copy_device_pdata(dssdev, plat_dssdev);
- dssdev->channel = OMAP_DSS_CHANNEL_DIGIT;
-
r = venc_init_display(dssdev);
if (r) {
DSSERR("device %s init failed: %d\n", dssdev->name, r);
dss_put_device(dssdev);
- return;
+ return r;
}
r = omapdss_output_set_device(&venc.output, dssdev);
@@ -800,7 +783,7 @@ static void __init venc_probe_pdata(struct platform_device *vencdev)
DSSERR("failed to connect output to new device: %s\n",
dssdev->name);
dss_put_device(dssdev);
- return;
+ return r;
}
r = dss_add_device(dssdev);
@@ -808,17 +791,21 @@ static void __init venc_probe_pdata(struct platform_device *vencdev)
DSSERR("device %s register failed: %d\n", dssdev->name, r);
omapdss_output_unset_device(&venc.output);
dss_put_device(dssdev);
- return;
+ return r;
}
+
+ return 0;
}
-static void __init venc_init_output(struct platform_device *pdev)
+static void venc_init_output(struct platform_device *pdev)
{
struct omap_dss_output *out = &venc.output;
out->pdev = pdev;
out->id = OMAP_DSS_OUTPUT_VENC;
out->type = OMAP_DISPLAY_TYPE_VENC;
+ out->name = "venc.0";
+ out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
dss_register_output(out);
}
@@ -831,7 +818,7 @@ static void __exit venc_uninit_output(struct platform_device *pdev)
}
/* VENC HW IP initialisation */
-static int __init omap_venchw_probe(struct platform_device *pdev)
+static int omap_venchw_probe(struct platform_device *pdev)
{
u8 rev_id;
struct resource *venc_mem;
@@ -879,14 +866,19 @@ static int __init omap_venchw_probe(struct platform_device *pdev)
venc_init_output(pdev);
- venc_probe_pdata(pdev);
+ r = venc_probe_pdata(pdev);
+ if (r) {
+ venc_panel_exit();
+ venc_uninit_output(pdev);
+ pm_runtime_disable(&pdev->dev);
+ return r;
+ }
return 0;
err_panel_init:
err_runtime_get:
pm_runtime_disable(&pdev->dev);
- venc_put_clocks();
return r;
}
@@ -904,7 +896,6 @@ static int __exit omap_venchw_remove(struct platform_device *pdev)
venc_uninit_output(pdev);
pm_runtime_disable(&pdev->dev);
- venc_put_clocks();
return 0;
}
@@ -939,6 +930,7 @@ static const struct dev_pm_ops venc_pm_ops = {
};
static struct platform_driver omap_venchw_driver = {
+ .probe = omap_venchw_probe,
.remove = __exit_p(omap_venchw_remove),
.driver = {
.name = "omapdss_venc",
@@ -949,7 +941,7 @@ static struct platform_driver omap_venchw_driver = {
int __init venc_init_platform_driver(void)
{
- return platform_driver_probe(&omap_venchw_driver, omap_venchw_probe);
+ return platform_driver_register(&omap_venchw_driver);
}
void __exit venc_uninit_platform_driver(void)
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 717f13a..c84bb8a 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -2372,7 +2372,7 @@ static int omapfb_init_connections(struct omapfb2_device *fbdev,
struct omap_dss_device *dssdev = fbdev->displays[i].dssdev;
struct omap_dss_output *out = dssdev->output;
- mgr = omap_dss_get_overlay_manager(dssdev->channel);
+ mgr = omap_dss_get_overlay_manager(out->dispc_channel);
if (!mgr || !out)
continue;
@@ -2406,7 +2406,7 @@ static int omapfb_init_connections(struct omapfb2_device *fbdev,
return 0;
}
-static int __init omapfb_probe(struct platform_device *pdev)
+static int omapfb_probe(struct platform_device *pdev)
{
struct omapfb2_device *fbdev = NULL;
int r = 0;
@@ -2468,7 +2468,7 @@ static int __init omapfb_probe(struct platform_device *pdev)
if (fbdev->num_displays == 0) {
dev_err(&pdev->dev, "no displays\n");
- r = -EINVAL;
+ r = -EPROBE_DEFER;
goto cleanup;
}
@@ -2579,6 +2579,7 @@ static int __exit omapfb_remove(struct platform_device *pdev)
}
static struct platform_driver omapfb_driver = {
+ .probe = omapfb_probe,
.remove = __exit_p(omapfb_remove),
.driver = {
.name = "omapfb",
@@ -2586,36 +2587,13 @@ static struct platform_driver omapfb_driver = {
},
};
-static int __init omapfb_init(void)
-{
- DBG("omapfb_init\n");
-
- if (platform_driver_probe(&omapfb_driver, omapfb_probe)) {
- printk(KERN_ERR "failed to register omapfb driver\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void __exit omapfb_exit(void)
-{
- DBG("omapfb_exit\n");
- platform_driver_unregister(&omapfb_driver);
-}
-
module_param_named(mode, def_mode, charp, 0);
module_param_named(vram, def_vram, charp, 0);
module_param_named(rotate, def_rotate, int, 0);
module_param_named(vrfb, def_vrfb, bool, 0);
module_param_named(mirror, def_mirror, bool, 0);
-/* late_initcall to let panel/ctrl drivers loaded first.
- * I guess better option would be a more dynamic approach,
- * so that omapfb reacts to new panels when they are loaded */
-late_initcall(omapfb_init);
-/*module_init(omapfb_init);*/
-module_exit(omapfb_exit);
+module_platform_driver(omapfb_driver);
MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>");
MODULE_DESCRIPTION("OMAP2/3 Framebuffer");
diff --git a/drivers/video/pxa3xx-gcu.c b/drivers/video/pxa3xx-gcu.c
index 6c984ea..97563c5 100644
--- a/drivers/video/pxa3xx-gcu.c
+++ b/drivers/video/pxa3xx-gcu.c
@@ -101,7 +101,6 @@ struct pxa3xx_gcu_priv {
dma_addr_t shared_phys;
struct resource *resource_mem;
struct miscdevice misc_dev;
- struct file_operations misc_fops;
wait_queue_head_t wait_idle;
wait_queue_head_t wait_free;
spinlock_t spinlock;
@@ -369,15 +368,20 @@ pxa3xx_gcu_wait_free(struct pxa3xx_gcu_priv *priv)
/* Misc device layer */
+static inline struct pxa3xx_gcu_priv *file_dev(struct file *file)
+{
+ struct miscdevice *dev = file->private_data;
+ return container_of(dev, struct pxa3xx_gcu_priv, misc_dev);
+}
+
static ssize_t
-pxa3xx_gcu_misc_write(struct file *filp, const char *buff,
+pxa3xx_gcu_misc_write(struct file *file, const char *buff,
size_t count, loff_t *offp)
{
int ret;
unsigned long flags;
struct pxa3xx_gcu_batch *buffer;
- struct pxa3xx_gcu_priv *priv =
- container_of(filp->f_op, struct pxa3xx_gcu_priv, misc_fops);
+ struct pxa3xx_gcu_priv *priv = file_dev(file);
int words = count / 4;
@@ -450,11 +454,10 @@ pxa3xx_gcu_misc_write(struct file *filp, const char *buff,
static long
-pxa3xx_gcu_misc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+pxa3xx_gcu_misc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
unsigned long flags;
- struct pxa3xx_gcu_priv *priv =
- container_of(filp->f_op, struct pxa3xx_gcu_priv, misc_fops);
+ struct pxa3xx_gcu_priv *priv = file_dev(file);
switch (cmd) {
case PXA3XX_GCU_IOCTL_RESET:
@@ -471,11 +474,10 @@ pxa3xx_gcu_misc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
static int
-pxa3xx_gcu_misc_mmap(struct file *filp, struct vm_area_struct *vma)
+pxa3xx_gcu_misc_mmap(struct file *file, struct vm_area_struct *vma)
{
unsigned int size = vma->vm_end - vma->vm_start;
- struct pxa3xx_gcu_priv *priv =
- container_of(filp->f_op, struct pxa3xx_gcu_priv, misc_fops);
+ struct pxa3xx_gcu_priv *priv = file_dev(file);
switch (vma->vm_pgoff) {
case 0:
@@ -574,6 +576,13 @@ free_buffers(struct platform_device *dev,
priv->free = NULL;
}
+static const struct file_operations misc_fops = {
+ .owner = THIS_MODULE,
+ .write = pxa3xx_gcu_misc_write,
+ .unlocked_ioctl = pxa3xx_gcu_misc_ioctl,
+ .mmap = pxa3xx_gcu_misc_mmap
+};
+
static int pxa3xx_gcu_probe(struct platform_device *dev)
{
int i, ret, irq;
@@ -601,14 +610,9 @@ static int pxa3xx_gcu_probe(struct platform_device *dev)
* container_of(). This isn't really necessary as we have a fixed minor
* number anyway, but this is to avoid statics. */
- priv->misc_fops.owner = THIS_MODULE;
- priv->misc_fops.write = pxa3xx_gcu_misc_write;
- priv->misc_fops.unlocked_ioctl = pxa3xx_gcu_misc_ioctl;
- priv->misc_fops.mmap = pxa3xx_gcu_misc_mmap;
-
priv->misc_dev.minor = MISCDEV_MINOR,
priv->misc_dev.name = DRV_NAME,
- priv->misc_dev.fops = &priv->misc_fops,
+ priv->misc_dev.fops = &misc_fops,
/* register misc device */
ret = misc_register(&priv->misc_dev);
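The new file_dev() helper above depends on the misc core storing the struct miscdevice pointer in file->private_data when the device is opened; with the fops now shared and static, that is the only way back to the per-device state. A hypothetical driver using the same idiom:

#include <linux/fs.h>
#include <linux/miscdevice.h>

struct example_priv {
	struct miscdevice misc_dev;
	int state;
};

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	/* misc_open() stored &priv->misc_dev in file->private_data */
	struct miscdevice *mdev = file->private_data;
	struct example_priv *priv =
		container_of(mdev, struct example_priv, misc_dev);

	/* use priv->state ... */
	return 0;
}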
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 8dab163..bd3ae32 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -108,7 +108,7 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);
/* We should always be able to add one buffer to an empty queue. */
- if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
+ if (virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL) < 0)
BUG();
virtqueue_kick(vq);
@@ -256,7 +256,7 @@ static void stats_handle_request(struct virtio_balloon *vb)
if (!virtqueue_get_buf(vq, &len))
return;
sg_init_one(&sg, vb->stats, sizeof(vb->stats));
- if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
+ if (virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL) < 0)
BUG();
virtqueue_kick(vq);
}
@@ -341,7 +341,7 @@ static int init_vqs(struct virtio_balloon *vb)
* use it to signal us later.
*/
sg_init_one(&sg, vb->stats, sizeof vb->stats);
- if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb, GFP_KERNEL)
+ if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
< 0)
BUG();
virtqueue_kick(vb->stats_vq);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index ffd7e7d..5217baf 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -24,27 +24,6 @@
#include <linux/module.h>
#include <linux/hrtimer.h>
-/* virtio guest is communicating with a virtual "device" that actually runs on
- * a host processor. Memory barriers are used to control SMP effects. */
-#ifdef CONFIG_SMP
-/* Where possible, use SMP barriers which are more lightweight than mandatory
- * barriers, because mandatory barriers control MMIO effects on accesses
- * through relaxed memory I/O windows (which virtio-pci does not use). */
-#define virtio_mb(vq) \
- do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while(0)
-#define virtio_rmb(vq) \
- do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while(0)
-#define virtio_wmb(vq) \
- do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while(0)
-#else
-/* We must force memory ordering even if guest is UP since host could be
- * running on another CPU, but SMP barriers are defined to barrier() in that
- * configuration. So fall back to mandatory barriers instead. */
-#define virtio_mb(vq) mb()
-#define virtio_rmb(vq) rmb()
-#define virtio_wmb(vq) wmb()
-#endif
-
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...) \
@@ -119,16 +98,36 @@ struct vring_virtqueue
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
+static inline struct scatterlist *sg_next_chained(struct scatterlist *sg,
+ unsigned int *count)
+{
+ return sg_next(sg);
+}
+
+static inline struct scatterlist *sg_next_arr(struct scatterlist *sg,
+ unsigned int *count)
+{
+ if (--(*count) == 0)
+ return NULL;
+ return sg + 1;
+}
+
/* Set up an indirect table of descriptors and add it to the queue. */
-static int vring_add_indirect(struct vring_virtqueue *vq,
- struct scatterlist sg[],
- unsigned int out,
- unsigned int in,
- gfp_t gfp)
+static inline int vring_add_indirect(struct vring_virtqueue *vq,
+ struct scatterlist *sgs[],
+ struct scatterlist *(*next)
+ (struct scatterlist *, unsigned int *),
+ unsigned int total_sg,
+ unsigned int total_out,
+ unsigned int total_in,
+ unsigned int out_sgs,
+ unsigned int in_sgs,
+ gfp_t gfp)
{
struct vring_desc *desc;
unsigned head;
- int i;
+ struct scatterlist *sg;
+ int i, n;
/*
* We require lowmem mappings for the descriptors because
@@ -137,25 +136,31 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
*/
gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);
- desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
+ desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
if (!desc)
return -ENOMEM;
- /* Transfer entries from the sg list into the indirect page */
- for (i = 0; i < out; i++) {
- desc[i].flags = VRING_DESC_F_NEXT;
- desc[i].addr = sg_phys(sg);
- desc[i].len = sg->length;
- desc[i].next = i+1;
- sg++;
+ /* Transfer entries from the sg lists into the indirect page */
+ i = 0;
+ for (n = 0; n < out_sgs; n++) {
+ for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
+ desc[i].flags = VRING_DESC_F_NEXT;
+ desc[i].addr = sg_phys(sg);
+ desc[i].len = sg->length;
+ desc[i].next = i+1;
+ i++;
+ }
}
- for (; i < (out + in); i++) {
- desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
- desc[i].addr = sg_phys(sg);
- desc[i].len = sg->length;
- desc[i].next = i+1;
- sg++;
+ for (; n < (out_sgs + in_sgs); n++) {
+ for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
+ desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
+ desc[i].addr = sg_phys(sg);
+ desc[i].len = sg->length;
+ desc[i].next = i+1;
+ i++;
+ }
}
+ BUG_ON(i != total_sg);
/* Last one doesn't continue. */
desc[i-1].flags &= ~VRING_DESC_F_NEXT;
@@ -176,29 +181,20 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
return head;
}
-/**
- * virtqueue_add_buf - expose buffer to other end
- * @vq: the struct virtqueue we're talking about.
- * @sg: the description of the buffer(s).
- * @out_num: the number of sg readable by other side
- * @in_num: the number of sg which are writable (after readable ones)
- * @data: the token identifying the buffer.
- * @gfp: how to do memory allocations (if necessary).
- *
- * Caller must ensure we don't call this with other virtqueue operations
- * at the same time (except where noted).
- *
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
- */
-int virtqueue_add_buf(struct virtqueue *_vq,
- struct scatterlist sg[],
- unsigned int out,
- unsigned int in,
- void *data,
- gfp_t gfp)
+static inline int virtqueue_add(struct virtqueue *_vq,
+ struct scatterlist *sgs[],
+ struct scatterlist *(*next)
+ (struct scatterlist *, unsigned int *),
+ unsigned int total_out,
+ unsigned int total_in,
+ unsigned int out_sgs,
+ unsigned int in_sgs,
+ void *data,
+ gfp_t gfp)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- unsigned int i, avail, uninitialized_var(prev);
+ struct scatterlist *sg;
+ unsigned int i, n, avail, uninitialized_var(prev), total_sg;
int head;
START_USE(vq);
@@ -218,46 +214,54 @@ int virtqueue_add_buf(struct virtqueue *_vq,
}
#endif
+ total_sg = total_in + total_out;
+
/* If the host supports indirect descriptor tables, and we have multiple
* buffers, then go indirect. FIXME: tune this threshold */
- if (vq->indirect && (out + in) > 1 && vq->vq.num_free) {
- head = vring_add_indirect(vq, sg, out, in, gfp);
+ if (vq->indirect && total_sg > 1 && vq->vq.num_free) {
+ head = vring_add_indirect(vq, sgs, next, total_sg, total_out,
+ total_in,
+ out_sgs, in_sgs, gfp);
if (likely(head >= 0))
goto add_head;
}
- BUG_ON(out + in > vq->vring.num);
- BUG_ON(out + in == 0);
+ BUG_ON(total_sg > vq->vring.num);
+ BUG_ON(total_sg == 0);
- if (vq->vq.num_free < out + in) {
+ if (vq->vq.num_free < total_sg) {
pr_debug("Can't add buf len %i - avail = %i\n",
- out + in, vq->vq.num_free);
+ total_sg, vq->vq.num_free);
/* FIXME: for historical reasons, we force a notify here if
* there are outgoing parts to the buffer. Presumably the
* host should service the ring ASAP. */
- if (out)
+ if (out_sgs)
vq->notify(&vq->vq);
END_USE(vq);
return -ENOSPC;
}
/* We're about to use some buffers from the free list. */
- vq->vq.num_free -= out + in;
-
- head = vq->free_head;
- for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
- vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
- vq->vring.desc[i].addr = sg_phys(sg);
- vq->vring.desc[i].len = sg->length;
- prev = i;
- sg++;
+ vq->vq.num_free -= total_sg;
+
+ head = i = vq->free_head;
+ for (n = 0; n < out_sgs; n++) {
+ for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
+ vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
+ vq->vring.desc[i].addr = sg_phys(sg);
+ vq->vring.desc[i].len = sg->length;
+ prev = i;
+ i = vq->vring.desc[i].next;
+ }
}
- for (; in; i = vq->vring.desc[i].next, in--) {
- vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
- vq->vring.desc[i].addr = sg_phys(sg);
- vq->vring.desc[i].len = sg->length;
- prev = i;
- sg++;
+ for (; n < (out_sgs + in_sgs); n++) {
+ for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
+ vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
+ vq->vring.desc[i].addr = sg_phys(sg);
+ vq->vring.desc[i].len = sg->length;
+ prev = i;
+ i = vq->vring.desc[i].next;
+ }
}
/* Last one doesn't continue. */
vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;
@@ -276,7 +280,7 @@ add_head:
/* Descriptors and available array need to be set before we expose the
* new available array entries. */
- virtio_wmb(vq);
+ virtio_wmb(vq->weak_barriers);
vq->vring.avail->idx++;
vq->num_added++;
@@ -290,9 +294,122 @@ add_head:
return 0;
}
+
+/**
+ * virtqueue_add_buf - expose buffer to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: the description of the buffer(s).
+ * @out_num: the number of sg readable by other side
+ * @in_num: the number of sg which are writable (after readable ones)
+ * @data: the token identifying the buffer.
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
+ */
+int virtqueue_add_buf(struct virtqueue *_vq,
+ struct scatterlist sg[],
+ unsigned int out,
+ unsigned int in,
+ void *data,
+ gfp_t gfp)
+{
+ struct scatterlist *sgs[2];
+
+ sgs[0] = sg;
+ sgs[1] = sg + out;
+
+ return virtqueue_add(_vq, sgs, sg_next_arr,
+ out, in, out ? 1 : 0, in ? 1 : 0, data, gfp);
+}
EXPORT_SYMBOL_GPL(virtqueue_add_buf);
/**
+ * virtqueue_add_sgs - expose buffers to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sgs: array of terminated scatterlists.
+ * @out_num: the number of scatterlists readable by other side
+ * @in_num: the number of scatterlists which are writable (after readable ones)
+ * @data: the token identifying the buffer.
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
+ */
+int virtqueue_add_sgs(struct virtqueue *_vq,
+ struct scatterlist *sgs[],
+ unsigned int out_sgs,
+ unsigned int in_sgs,
+ void *data,
+ gfp_t gfp)
+{
+ unsigned int i, total_out, total_in;
+
+ /* Count them first. */
+ for (i = total_out = total_in = 0; i < out_sgs; i++) {
+ struct scatterlist *sg;
+ for (sg = sgs[i]; sg; sg = sg_next(sg))
+ total_out++;
+ }
+ for (; i < out_sgs + in_sgs; i++) {
+ struct scatterlist *sg;
+ for (sg = sgs[i]; sg; sg = sg_next(sg))
+ total_in++;
+ }
+ return virtqueue_add(_vq, sgs, sg_next_chained,
+ total_out, total_in, out_sgs, in_sgs, data, gfp);
+}
+EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
+
+/**
+ * virtqueue_add_outbuf - expose output buffers to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: scatterlist (need not be terminated!)
+ * @num: the number of entries in @sg readable by other side
+ * @data: the token identifying the buffer.
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
+ */
+int virtqueue_add_outbuf(struct virtqueue *vq,
+ struct scatterlist sg[], unsigned int num,
+ void *data,
+ gfp_t gfp)
+{
+ return virtqueue_add(vq, &sg, sg_next_arr, num, 0, 1, 0, data, gfp);
+}
+EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
+
+/**
+ * virtqueue_add_inbuf - expose input buffers to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: scatterlist (need not be terminated!)
+ * @num: the number of entries in @sg writable by other side
+ * @data: the token identifying the buffer.
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
+ */
+int virtqueue_add_inbuf(struct virtqueue *vq,
+ struct scatterlist sg[], unsigned int num,
+ void *data,
+ gfp_t gfp)
+{
+ return virtqueue_add(vq, &sg, sg_next_arr, 0, num, 0, 1, data, gfp);
+}
+EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
+
+/**
* virtqueue_kick_prepare - first half of split virtqueue_kick call.
* @vq: the struct virtqueue
*
@@ -312,7 +429,7 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq)
START_USE(vq);
/* We need to expose available array entries before checking avail
* event. */
- virtio_mb(vq);
+ virtio_mb(vq->weak_barriers);
old = vq->vring.avail->idx - vq->num_added;
new = vq->vring.avail->idx;
@@ -436,7 +553,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
}
/* Only get used array entries after they have been exposed by host. */
- virtio_rmb(vq);
+ virtio_rmb(vq->weak_barriers);
last_used = (vq->last_used_idx & (vq->vring.num - 1));
i = vq->vring.used->ring[last_used].id;
@@ -460,7 +577,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
* the read in the next get_buf call. */
if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
vring_used_event(&vq->vring) = vq->last_used_idx;
- virtio_mb(vq);
+ virtio_mb(vq->weak_barriers);
}
#ifdef DEBUG
@@ -513,7 +630,7 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
* entry. Always do both to keep code simple. */
vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
vring_used_event(&vq->vring) = vq->last_used_idx;
- virtio_mb(vq);
+ virtio_mb(vq->weak_barriers);
if (unlikely(more_used(vq))) {
END_USE(vq);
return false;
@@ -553,7 +670,7 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
/* TODO: tune this threshold */
bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
- virtio_mb(vq);
+ virtio_mb(vq->weak_barriers);
if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
END_USE(vq);
return false;
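For callers, the central addition here is virtqueue_add_sgs(), which takes an array of scatterlist heads split into readable and writable groups. A hypothetical user queuing a request with a device-readable header and data buffer followed by a device-writable status byte (names and struct layout are illustrative only, not from this patch):

#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>

struct example_req {
	__le32 hdr[4];		/* read by the device */
	u8 status;		/* written back by the device */
};

static int example_queue_req(struct virtqueue *vq, struct example_req *req,
			     struct scatterlist *data_sg)
{
	struct scatterlist hdr, status;
	struct scatterlist *sgs[3];

	sg_init_one(&hdr, req->hdr, sizeof(req->hdr));
	sg_init_one(&status, &req->status, sizeof(req->status));

	sgs[0] = &hdr;		/* out: readable by the other side */
	sgs[1] = data_sg;	/* out: must be a terminated scatterlist */
	sgs[2] = &status;	/* in: writable by the other side */

	/* two out sg lists followed by one in sg list */
	return virtqueue_add_sgs(vq, sgs, 2, 1, req, GFP_ATOMIC);
}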
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c
index 73b3383..1c15ee7 100644
--- a/drivers/zorro/proc.c
+++ b/drivers/zorro/proc.c
@@ -47,9 +47,7 @@ proc_bus_zorro_lseek(struct file *file, loff_t off, int whence)
static ssize_t
proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
- struct inode *ino = file_inode(file);
- struct proc_dir_entry *dp = PDE(ino);
- struct zorro_dev *z = dp->data;
+ struct zorro_dev *z = PDE_DATA(file_inode(file));
struct ConfigDev cd;
loff_t pos = *ppos;
@@ -141,7 +139,7 @@ static int __init zorro_proc_attach_device(unsigned int slot)
&zorro_autocon[slot]);
if (!entry)
return -ENOMEM;
- entry->size = sizeof(struct zorro_dev);
+ proc_set_size(entry, sizeof(struct zorro_dev));
return 0;
}
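PDE_DATA() returns the pointer that was handed over when the proc entry was created, so read methods no longer poke at struct proc_dir_entry internals. A minimal sketch, assuming (hypothetically) that the entry was created with proc_create_data() and a struct example_dev pointer:

#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/uaccess.h>

struct example_dev {
	u32 id;
};

static ssize_t example_proc_read(struct file *file, char __user *buf,
				 size_t count, loff_t *ppos)
{
	/* the data argument given to proc_create_data() */
	struct example_dev *dev = PDE_DATA(file_inode(file));

	return simple_read_from_buffer(buf, count, ppos,
				       &dev->id, sizeof(dev->id));
}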